From 248e84754dd02d4f9a391ba36fd4e916c3dca19f Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Thu, 1 Feb 2024 17:10:39 +0200
Subject: [PATCH] vendor: run `make vendor-update`

---
 go.mod | 59 +-
 go.sum | 146 +-
 .../go/compute/internal/version.go | 2 +-
 vendor/cloud.google.com/go/iam/CHANGES.md | 7 +
 .../go/iam/apiv1/iampb/iam_policy.pb.go | 2 +-
 .../go/iam/apiv1/iampb/options.pb.go | 2 +-
 .../go/iam/apiv1/iampb/policy.pb.go | 74 +-
 vendor/cloud.google.com/go/storage/CHANGES.md | 13 +
 .../go/storage/grpc_client.go | 5 +
 .../go/storage/http_client.go | 1 +
 .../go/storage/internal/apiv2/auxiliary.go | 2 +-
 .../go/storage/internal/apiv2/doc.go | 2 +-
 .../storage/internal/apiv2/storage_client.go | 2 +-
 .../go/storage/internal/version.go | 2 +-
 vendor/cloud.google.com/go/storage/invoke.go | 3 +
 vendor/cloud.google.com/go/storage/storage.go | 27 +
 .../internal/resource/resource_identifier.go | 224 ++
 .../arm/internal/resource/resource_type.go | 114 +
 .../sdk/azcore/arm/policy/policy.go | 98 +
 .../sdk/azcore/arm/runtime/pipeline.go | 65 +
 .../azcore/arm/runtime/policy_bearer_token.go | 145 +
 .../azcore/arm/runtime/policy_register_rp.go | 347 +++
 .../arm/runtime/policy_trace_namespace.go | 30 +
 .../sdk/azcore/arm/runtime/runtime.go | 24 +
 .../sdk/azidentity/CHANGELOG.md | 39 +-
 .../sdk/azidentity/MIGRATION.md | 6 +-
 .../azure-sdk-for-go/sdk/azidentity/README.md | 40 +-
 .../sdk/azidentity/TOKEN_CACHING.MD | 70 +
 .../sdk/azidentity/TROUBLESHOOTING.md | 46 +-
 .../sdk/azidentity/assets.json | 2 +-
 .../sdk/azidentity/authentication_record.go | 95 +
 .../sdk/azidentity/azidentity.go | 51 +-
 .../sdk/azidentity/azure_cli_credential.go | 70 +-
 .../azure_developer_cli_credential.go | 169 ++
 .../azure-sdk-for-go/sdk/azidentity/ci.yml | 1 +
 .../azidentity/client_assertion_credential.go | 26 +-
 .../client_certificate_credential.go | 25 +-
 .../azidentity/client_secret_credential.go | 24 +-
 .../sdk/azidentity/confidential_client.go | 32 +-
 .../azidentity/default_azure_credential.go | 16 +-
 .../azidentity/developer_credential_util.go | 38 +
 .../sdk/azidentity/device_code_credential.go | 54 +-
 .../sdk/azidentity/environment_credential.go | 4 +-
 .../azure-sdk-for-go/sdk/azidentity/errors.go | 14 +-
 .../azure-sdk-for-go/sdk/azidentity/go.work | 6 +
 .../sdk/azidentity/go.work.sum | 39 +
 .../interactive_browser_credential.go | 50 +-
 .../sdk/azidentity/internal/exported.go | 18 +
 .../sdk/azidentity/internal/internal.go | 31 +
 .../sdk/azidentity/managed_identity_client.go | 54 +-
 .../azidentity/managed_identity_credential.go | 20 +-
 .../sdk/azidentity/on_behalf_of_credential.go | 17 +-
 .../sdk/azidentity/public_client.go | 133 +-
 .../username_password_credential.go | 40 +-
 .../sdk/azidentity/version.go | 5 +-
 .../sdk/azidentity/workload_identity.go | 11 +-
 .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 9 +
 .../config/go_module_metadata.go | 2 +-
 .../aws-sdk-go-v2/credentials/CHANGELOG.md | 4 +
 .../credentials/go_module_metadata.go | 2 +-
 .../feature/s3/manager/CHANGELOG.md | 12 +
 .../feature/s3/manager/go_module_metadata.go | 2 +-
 .../aws-sdk-go-v2/internal/ini/CHANGELOG.md | 4 +
 .../internal/ini/go_module_metadata.go | 2 +-
 .../aws/aws-sdk-go-v2/internal/ini/strings.go | 4 -
 .../aws/aws-sdk-go-v2/service/s3/CHANGELOG.md | 4 +
 .../aws-sdk-go-v2/service/s3/api_client.go | 36 +-
 .../service/s3/go_module_metadata.go | 2 +-
 .../internal/customizations/signer_wrapper.go | 13 +-
 .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 4 +
 .../service/sso/go_module_metadata.go | 2 +-
 .../sso/internal/endpoints/endpoints.go | 8 +
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 63 +
 .../github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 .../cheggaaa/pb/v3/termutil/term_win.go | 3 +-
 vendor/github.com/google/uuid/CHANGELOG.md | 13 +
 vendor/github.com/google/uuid/hash.go | 6 +
 vendor/github.com/google/uuid/version7.go | 39 +-
 .../hashicorp/go-version/CHANGELOG.md | 45 +
 .../github.com/hashicorp/go-version/LICENSE | 354 +++
 .../github.com/hashicorp/go-version/README.md | 66 +
 .../hashicorp/go-version/constraint.go | 296 ++
 .../hashicorp/go-version/version.go | 407 +++
 .../go-version/version_collection.go | 17 +
 .../github.com/klauspost/compress/README.md | 14 +-
 .../klauspost/compress/flate/deflate.go | 2 +-
 .../compress/internal/race/norace.go | 13 +
 .../klauspost/compress/internal/race/race.go | 26 +
 .../klauspost/compress/s2/decode.go | 6 +
 .../klauspost/compress/s2/encode_amd64.go | 14 +
 .../klauspost/compress/s2/reader.go | 16 +-
 vendor/github.com/klauspost/compress/s2/s2.go | 4 +
 .../klauspost/compress/s2/writer.go | 14 +-
 vendor/github.com/klauspost/compress/s2sx.mod | 2 +-
 .../klauspost/compress/zstd/decodeheader.go | 56 +-
 .../compress/zstd/encoder_options.go | 6 +-
 .../klauspost/compress/zstd/frameenc.go | 2 +-
 .../compress/zstd/fse_decoder_generic.go | 11 +-
 .../klauspost/compress/zstd/seqdec_amd64.s | 136 +-
 .../prometheus/prometheus/config/config.go | 103 +-
 .../prometheus/discovery/manager.go | 2 +-
 .../prometheus/model/exemplar/exemplar.go | 15 +
 .../model/histogram/float_histogram.go | 513 ++--
 .../prometheus/model/histogram/generic.go | 177 +-
 .../prometheus/model/histogram/histogram.go | 76 +-
 .../prometheus/model/histogram/test_utils.go | 52 +
 .../prometheus/model/labels/labels.go | 221 +-
 .../prometheus/model/labels/labels_common.go | 235 ++
 .../model/labels/labels_stringlabels.go | 232 +-
 .../prometheus/model/textparse/interface.go | 7 +
 .../model/textparse/openmetricsparse.go | 7 +
 .../prometheus/model/textparse/promparse.go | 7 +
 .../model/textparse/protobufparse.go | 61 +-
 .../prompb/io/prometheus/client/metrics.pb.go | 177 +-
 .../prompb/io/prometheus/client/metrics.proto | 11 +-
 .../prometheus/prometheus/scrape/manager.go | 96 +-
 .../prometheus/prometheus/scrape/metrics.go | 307 ++
 .../prometheus/prometheus/scrape/scrape.go | 420 +--
 .../prometheus/prometheus/scrape/target.go | 36 +-
 .../prometheus/storage/interface.go | 18 +-
 .../prometheus/prometheus/storage/merge.go | 46 +-
 .../storage/remote/azuread/azuread.go | 8 +-
 .../prometheus/storage/remote/codec.go | 8 +-
 .../prometheus/normalize_label.go | 13 +-
 .../prometheus/normalize_name.go | 47 +-
 .../otlptranslator/prometheus/unit_to_ucum.go | 90 +
 .../prometheusremotewrite/helper.go | 137 +-
 .../prometheusremotewrite/histograms.go | 82 +-
 .../prometheusremotewrite/metrics_to_prw.go | 4 +-
 .../number_data_points.go | 4 +-
 .../prometheus/storage/remote/read_handler.go | 7 +-
 .../prometheus/storage/remote/storage.go | 5 +-
 .../prometheus/storage/remote/write.go | 10 +
 .../storage/remote/write_handler.go | 16 +-
 .../prometheus/prometheus/tsdb/block.go | 18 +-
 .../prometheus/tsdb/chunkenc/chunk.go | 38 +-
 .../tsdb/chunkenc/float_histogram.go | 8 +-
 .../prometheus/tsdb/chunkenc/histogram.go | 8 +-
 .../tsdb/chunkenc/histogram_meta.go | 2 +-
 .../prometheus/tsdb/chunkenc/varbit.go | 7 +-
 .../prometheus/tsdb/chunkenc/xor.go | 2 +-
 .../prometheus/tsdb/chunks/chunks.go | 71 +-
 .../prometheus/tsdb/chunks/head_chunks.go | 56 +-
 .../prometheus/prometheus/tsdb/compact.go | 11 +-
 .../prometheus/prometheus/tsdb/db.go | 168 +-
 .../prometheus/tsdb/encoding/encoding.go | 5 +-
 .../prometheus/tsdb/errors/errors.go | 5 +-
 .../prometheus/prometheus/tsdb/exemplar.go | 19 +-
 .../prometheus/tsdb/fileutil/mmap.go | 9 +-
 .../tsdb/fileutil/preallocate_linux.go | 9 +-
 .../prometheus/prometheus/tsdb/head.go | 266 +-
 .../prometheus/prometheus/tsdb/head_append.go | 127 +-
 .../prometheus/prometheus/tsdb/head_read.go | 107 +-
 .../prometheus/prometheus/tsdb/head_wal.go | 11 +-
 .../prometheus/prometheus/tsdb/index/index.go | 134 +-
 .../prometheus/tsdb/index/postings.go | 4 +-
 .../prometheus/tsdb/index/postingsstats.go | 11 +-
 .../prometheus/prometheus/tsdb/ooo_head.go | 26 +-
 .../prometheus/tsdb/ooo_head_read.go | 102 +-
 .../prometheus/tsdb/ooo_isolation.go | 79 +
 .../prometheus/prometheus/tsdb/querier.go | 259 +-
 .../prometheus/tsdb/record/record.go | 41 +-
 .../prometheus/prometheus/tsdb/repair.go | 3 +-
 .../prometheus/tsdb/tombstones/tombstones.go | 14 +-
 .../prometheus/tsdb/tsdbutil/dir_locker.go | 4 +-
 .../prometheus/tsdb/tsdbutil/histogram.go | 10 +
 .../prometheus/prometheus/tsdb/wal.go | 15 +-
 .../prometheus/tsdb/wlog/checkpoint.go | 46 +-
 .../prometheus/tsdb/wlog/live_reader.go | 12 +-
 .../prometheus/prometheus/tsdb/wlog/reader.go | 17 +-
 .../prometheus/tsdb/wlog/watcher.go | 56 +-
 .../prometheus/prometheus/tsdb/wlog/wlog.go | 54 +-
 .../util/annotations/annotations.go | 27 +-
 .../prometheus/util/testutil/context.go | 8 +-
 .../prometheus/util/testutil/directory.go | 2 +-
 vendor/github.com/rivo/uniseg/README.md | 30 +-
 .../github.com/rivo/uniseg/eastasianwidth.go | 78 +-
 .../rivo/uniseg/emojipresentation.go | 18 +-
 .../github.com/rivo/uniseg/gen_breaktest.go | 10 +-
 .../github.com/rivo/uniseg/gen_properties.go | 13 +-
 vendor/github.com/rivo/uniseg/grapheme.go | 4 +-
 .../rivo/uniseg/graphemeproperties.go | 58 +-
 .../github.com/rivo/uniseg/graphemerules.go | 158 +-
 vendor/github.com/rivo/uniseg/line.go | 10 +-
 .../github.com/rivo/uniseg/lineproperties.go | 109 +-
 vendor/github.com/rivo/uniseg/linerules.go | 522 ++--
 vendor/github.com/rivo/uniseg/properties.go | 48 +-
 .../rivo/uniseg/sentenceproperties.go | 54 +-
 .../github.com/rivo/uniseg/sentencerules.go | 265 +-
 vendor/github.com/rivo/uniseg/step.go | 4 +-
 vendor/github.com/rivo/uniseg/width.go | 9 +-
 .../github.com/rivo/uniseg/wordproperties.go | 71 +-
 vendor/github.com/rivo/uniseg/wordrules.go | 160 +-
 .../collector/featuregate/LICENSE | 202 ++
 .../collector/featuregate/Makefile | 1 +
 .../collector/featuregate/README.md | 77 +
 .../collector/featuregate/flag.go | 65 +
 .../collector/featuregate/gate.go | 58 +
 .../collector/featuregate/registry.go | 207 ++
 .../collector/featuregate/stage.go | 44 +
 .../grpc/otelgrpc/stats_handler.go | 81 +-
 .../grpc/otelgrpc/version.go | 2 +-
 .../net/http/otelhttp/handler.go | 28 +-
 .../otelhttp/internal/semconvutil/httpconv.go | 178 +-
 .../otelhttp/internal/semconvutil/netconv.go | 12 +-
 .../net/http/otelhttp/version.go | 2 +-
 vendor/go.opentelemetry.io/otel/CHANGELOG.md | 43 +-
 .../go.opentelemetry.io/otel/CONTRIBUTING.md | 21 +
 vendor/go.opentelemetry.io/otel/README.md | 2 +-
 vendor/go.opentelemetry.io/otel/RELEASING.md | 6 +-
 .../go.opentelemetry.io/otel/attribute/set.go | 91 +-
 .../otel/baggage/baggage.go | 332 ++-
 vendor/go.opentelemetry.io/otel/doc.go | 2 +-
 .../otel/propagation/trace_context.go | 96 +-
 .../otel/semconv/v1.20.0/attribute_group.go | 1209 ++++++++
 .../otel/semconv/v1.20.0/doc.go | 20 +
 .../otel/semconv/v1.20.0/event.go | 199 ++
 .../otel/semconv/v1.20.0/exception.go | 20 +
 .../otel/semconv/v1.20.0/http.go | 21 +
 .../otel/semconv/v1.20.0/resource.go | 2071 +++++++++++++
 .../otel/semconv/v1.20.0/schema.go | 20 +
 .../otel/semconv/v1.20.0/trace.go | 2610 +++++++++++++++++
 .../otel/semconv/v1.21.0/doc.go | 4 +-
 .../otel/trace/tracestate.go | 201 +-
 vendor/go.opentelemetry.io/otel/version.go | 2 +-
 vendor/go.opentelemetry.io/otel/versions.yaml | 4 +-
 vendor/golang.org/x/exp/slices/slices.go | 50 +-
 vendor/google.golang.org/api/internal/cba.go | 72 +-
 .../google.golang.org/api/internal/creds.go | 62 +-
 .../api/internal/settings.go | 28 +-
 .../google.golang.org/api/internal/version.go | 2 +-
 .../option/internaloption/internaloption.go | 10 +-
 .../api/transport/grpc/dial.go | 11 +
 .../api/transport/http/dial.go | 11 +
 .../googleapis/api/annotations/client.pb.go | 139 +-
 .../grpc_binarylog_v1/binarylog.pb.go | 2 +-
 vendor/google.golang.org/grpc/clientconn.go | 24 +-
 .../alts/internal/handshaker/handshaker.go | 19 +-
 .../internal/proto/grpc_gcp/handshaker.pb.go | 176 +-
 .../grpc/internal/internal.go | 20 +-
 .../grpc/internal/resolver/unix/unix.go | 4 +
 ...ive_nonunix.go => tcp_keepalive_others.go} | 2 +-
 .../grpc/internal/tcp_keepalive_windows.go | 54 +
 .../grpc/internal/transport/http2_client.go | 10 +-
 .../grpc/internal/transport/http2_server.go | 7 +-
 .../grpc/metadata/metadata.go | 13 +-
 .../grpc/resolver/resolver.go | 10 +
 vendor/google.golang.org/grpc/rpc_util.go | 8 +-
 vendor/google.golang.org/grpc/server.go | 43 +-
 vendor/google.golang.org/grpc/stream.go | 4 +-
 vendor/google.golang.org/grpc/version.go | 2 +-
 vendor/google.golang.org/grpc/vet.sh | 8 +-
 vendor/modules.txt | 70 +-
 253 files changed, 16208 insertions(+), 3711 deletions(-)
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go
 create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go
 create mode 100644 vendor/github.com/hashicorp/go-version/CHANGELOG.md
 create mode 100644 vendor/github.com/hashicorp/go-version/LICENSE
 create mode 100644 vendor/github.com/hashicorp/go-version/README.md
 create mode 100644 vendor/github.com/hashicorp/go-version/constraint.go
 create mode 100644 vendor/github.com/hashicorp/go-version/version.go
 create mode 100644 vendor/github.com/hashicorp/go-version/version_collection.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/race/norace.go
 create mode 100644 vendor/github.com/klauspost/compress/internal/race/race.go
 create mode 100644 vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go
 create mode 100644 vendor/github.com/prometheus/prometheus/model/labels/labels_common.go
 create mode 100644 vendor/github.com/prometheus/prometheus/scrape/metrics.go
 create mode 100644 vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/unit_to_ucum.go
 create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/ooo_isolation.go
 create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/LICENSE
 create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/Makefile
 create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/README.md
 create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/flag.go
 create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/gate.go
 create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/registry.go
 create mode 100644 vendor/go.opentelemetry.io/collector/featuregate/stage.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
 create mode 100644 vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
 rename vendor/google.golang.org/grpc/internal/{tcp_keepalive_nonunix.go => tcp_keepalive_others.go} (96%)
 create mode 100644 vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go

diff --git a/go.mod b/go.mod
index 1b5b36a0b..36b5cf2db 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/VictoriaMetrics/VictoriaMetrics
 go 1.20
 
 require (
-	cloud.google.com/go/storage v1.36.0
+	cloud.google.com/go/storage v1.37.0
 	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1
 	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1
 	github.com/VictoriaMetrics/fastcache v1.12.2
@@ -14,18 +14,18 @@ require (
 	github.com/VictoriaMetrics/metrics v1.31.0
 	github.com/VictoriaMetrics/metricsql v0.70.0
 	github.com/aws/aws-sdk-go-v2 v1.24.1
-	github.com/aws/aws-sdk-go-v2/config v1.26.4
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.12
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.48.0
+	github.com/aws/aws-sdk-go-v2/config v1.26.6
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1
 	github.com/bmatcuk/doublestar/v4 v4.6.1
 	github.com/cespare/xxhash/v2 v2.2.0
-	github.com/cheggaaa/pb/v3 v3.1.4
+	github.com/cheggaaa/pb/v3 v3.1.5
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/snappy v0.0.4
 	github.com/googleapis/gax-go/v2 v2.12.0
 	github.com/influxdata/influxdb v1.11.4
-	github.com/klauspost/compress v1.17.4
-	github.com/prometheus/prometheus v0.48.1
+	github.com/klauspost/compress v1.17.5
+	github.com/prometheus/prometheus v0.49.1
 	github.com/urfave/cli/v2 v2.27.1
 	github.com/valyala/fastjson v1.6.4
 	github.com/valyala/fastrand v1.1.0
@@ -36,33 +36,33 @@ require (
 	golang.org/x/net v0.20.0
 	golang.org/x/oauth2 v0.16.0
 	golang.org/x/sys v0.16.0
-	google.golang.org/api v0.156.0
+	google.golang.org/api v0.161.0
 	gopkg.in/yaml.v2 v2.4.0
 )
 
 require (
 	cloud.google.com/go v0.112.0 // indirect
-	cloud.google.com/go/compute v1.23.3 // indirect
+	cloud.google.com/go/compute v1.23.4 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
-	cloud.google.com/go/iam v1.1.5 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 // indirect
+	cloud.google.com/go/iam v1.1.6 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 // indirect
-	github.com/aws/aws-sdk-go v1.49.22 // indirect
+	github.com/aws/aws-sdk-go v1.50.8 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.16.15 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.16.16 // indirect
 	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.10 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.18.6 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 // indirect
 	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 // indirect
 	github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 // indirect
 	github.com/aws/smithy-go v1.19.0 // indirect
@@ -80,9 +80,10 @@ require (
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
 	github.com/google/s2a-go v0.1.7 // indirect
-	github.com/google/uuid v1.5.0 // indirect
+	github.com/google/uuid v1.6.0 // indirect
 	github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
 	github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
+	github.com/hashicorp/go-version v1.6.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
@@ -102,33 +103,33 @@ require (
 	github.com/prometheus/common v0.46.0 // indirect
 	github.com/prometheus/common/sigv4 v0.1.0 // indirect
 	github.com/prometheus/procfs v0.12.0 // indirect
-	github.com/rivo/uniseg v0.4.4 // indirect
+	github.com/rivo/uniseg v0.4.6 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/stretchr/testify v1.8.4 // indirect
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
 	github.com/xrash/smetrics v0.0.0-20231213231151-1d8dd44e695e // indirect
 	go.opencensus.io v0.24.0 // indirect
+	go.opentelemetry.io/collector/featuregate v1.0.1 // indirect
 	go.opentelemetry.io/collector/pdata v1.0.1 // indirect
-	go.opentelemetry.io/collector/semconv v0.92.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect
-	go.opentelemetry.io/otel v1.21.0 // indirect
-	go.opentelemetry.io/otel/metric v1.21.0 // indirect
-	go.opentelemetry.io/otel/trace v1.21.0 // indirect
+	go.opentelemetry.io/collector/semconv v0.93.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect
+	go.opentelemetry.io/otel v1.22.0 // indirect
+	go.opentelemetry.io/otel/metric v1.22.0 // indirect
+	go.opentelemetry.io/otel/trace v1.22.0 // indirect
 	go.uber.org/atomic v1.11.0 // indirect
 	go.uber.org/goleak v1.3.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	golang.org/x/crypto v0.18.0 // indirect
-	golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
+	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a // indirect
 	golang.org/x/sync v0.6.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
-	golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto v0.0.0-20240108191215-35c7eff3a6b1 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 // indirect
-	google.golang.org/grpc v1.60.1 // indirect
+	google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe // indirect
+	google.golang.org/grpc v1.61.0 // indirect
 	google.golang.org/protobuf v1.32.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
diff --git a/go.sum b/go.sum
index c843c2a64..17c404271 100644
--- a/go.sum
+++ b/go.sum
@@ -21,14 +21,14 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk=
-cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI=
+cloud.google.com/go/compute v1.23.4 h1:EBT9Nw4q3zyE7G45Wvv3MzolIrCJEuHys5muLY0wvAw=
+cloud.google.com/go/compute v1.23.4/go.mod h1:/EJMj55asU6kAFnuZET8zqgwgJ9FvXWXOkkfQZa4ioI=
 cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
 cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI=
-cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8=
+cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc=
+cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -38,13 +38,13 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8=
-cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
+cloud.google.com/go/storage v1.37.0 h1:WI8CsaFO8Q9KjPVtsZ5Cmi0dXV25zMoX0FklT7c3Jm4=
+cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA=
 github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI=
 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v4 v4.2.1 h1:UPeCRD+XY7QlaGQte2EVI2iOcWvUYA2XY8w5T/8v0NQ=
@@ -57,6 +57,7 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaC
 github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/Code-Hex/go-generics-cache v1.3.1 h1:i8rLwyhoyhaerr7JpjtYjJZUcCbWOdiYO3fZXLiEC4g=
 github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
 github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
 github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
@@ -82,26 +83,26 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
 github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.49.22 h1:r01+cQJ3cORQI1PJxG8af0jzrZpUOL9L+/3kU2x1geU=
-github.com/aws/aws-sdk-go v1.49.22/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
+github.com/aws/aws-sdk-go v1.50.8 h1:gY0WoOW+/Wz6XmYSgDH9ge3wnAevYDSQWPxxJvqAkP4=
+github.com/aws/aws-sdk-go v1.50.8/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
 github.com/aws/aws-sdk-go-v2 v1.24.1 h1:xAojnj+ktS95YZlDf0zxWBkbFtymPeDP+rvUQIH3uAU=
 github.com/aws/aws-sdk-go-v2 v1.24.1/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo=
-github.com/aws/aws-sdk-go-v2/config v1.26.4 h1:Juj7LhtxNudNUlfX22K5AnLafO+v4eq9PA3VWSCIQs4=
-github.com/aws/aws-sdk-go-v2/config v1.26.4/go.mod h1:tioqQ7wvxMYnTDpoTTLHhV3Zh+z261i/f2oz+ds8eNI=
-github.com/aws/aws-sdk-go-v2/credentials v1.16.15 h1:P0/m1LU08MF2kRzx4P//+7lNjiJod1z4xI2WpWhdpTQ=
-github.com/aws/aws-sdk-go-v2/credentials v1.16.15/go.mod h1:pgtMCf7Dx4GWw5EpHOTc2Sy17LIP0A0N2C9nQ83pQ/0=
+github.com/aws/aws-sdk-go-v2/config v1.26.6 h1:Z/7w9bUqlRI0FFQpetVuFYEsjzE3h7fpU6HuGmfPL/o=
+github.com/aws/aws-sdk-go-v2/config v1.26.6/go.mod h1:uKU6cnDmYCvJ+pxO9S4cWDb2yWWIH5hra+32hVh1MI4=
+github.com/aws/aws-sdk-go-v2/credentials v1.16.16 h1:8q6Rliyv0aUFAVtzaldUEcS+T5gbadPbWdV1WcAddK8=
+github.com/aws/aws-sdk-go-v2/credentials v1.16.16/go.mod h1:UHVZrdUsv63hPXFo1H7c5fEneoVo9UXiz36QG1GEPi0=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11 h1:c5I5iH+DZcH3xOIMlz3/tCKJDaHFwYEmxvlh2fAcFo8=
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.11/go.mod h1:cRrYDYAMUohBJUtUnOhydaMHtiK/1NZ0Otc9lIb6O0Y=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.12 h1:0FMZy36RSYvcvVzEf1xbNdebLHZewW40QWP+P8jCMVk=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.12/go.mod h1:+chyahvarkb3HibkNei9IQEM9P5cWD5w2kgXCa3Hh0I=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15 h1:2MUXyGW6dVaQz6aqycpbdLIH1NMcUI6kW6vQ0RabGYg=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15/go.mod h1:aHbhbR6WEQgHAiRj41EQ2W47yOYwNtIkWTXmcAtYqj8=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 h1:vF+Zgd9s+H4vOXd5BMaPWykta2a6Ih0AKLq/X6NYKn4=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10/go.mod h1:6BkRjejp/GR4411UGqkX8+wFMbFbqsUIimfK4XjOKR4=
 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 h1:nYPe006ktcqUji8S2mqXf9c/7NdiKriOwMvWQHgYztw=
 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10/go.mod h1:6UV4SZkVvmODfXKql4LCbaZUpF7HO2BX38FgBf9ZOLw=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 h1:n3GDfwqF2tzEkXlv5cuy4iy7LpKDtqDMcNLfZDu9rls=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY=
 github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 h1:5oE2WzJE56/mVveuDZPJESKlg/00AaS2pY2QZcnxg4M=
 github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10/go.mod h1:FHbKWQtRBYUz4vO5WBWjzMD2by126ny5y/1EoaWoLfI=
 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw=
@@ -112,10 +113,10 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10 h1:DBYTXwIG
 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.10/go.mod h1:wohMUQiFdzo0NtxbBg0mSRGZ4vL3n0dKjLTINdcIino=
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10 h1:KOxnQeWy5sXyS37fdKEvAsGHOr9fa/qvwxfJurR/BzE=
 github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.10/go.mod h1:jMx5INQFYFYB3lQD9W0D8Ohgq6Wnl7NYOJ2TQndbulI=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.48.0 h1:PJTdBMsyvra6FtED7JZtDpQrIAflYDHFoZAu/sKYkwU=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.48.0/go.mod h1:4qXHrG1Ne3VGIMZPCB8OjH/pLFO94sKABIusjh0KWPU=
-github.com/aws/aws-sdk-go-v2/service/sso v1.18.6 h1:dGrs+Q/WzhsiUKh82SfTVN66QzyulXuMDTV/G8ZxOac=
-github.com/aws/aws-sdk-go-v2/service/sso v1.18.6/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1 h1:5XNlsBsEvBZBMO6p82y+sqpWg8j5aBCe+5C2GBFgqBQ=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1/go.mod h1:4qXHrG1Ne3VGIMZPCB8OjH/pLFO94sKABIusjh0KWPU=
+github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 h1:eajuO3nykDPdYicLlP3AGgOyVN3MOlFmZv7WGTuJPow=
+github.com/aws/aws-sdk-go-v2/service/sso v1.18.7/go.mod h1:+mJNDdF+qiUlNKNC3fxn74WWNN+sOiGOEImje+3ScPM=
 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7 h1:QPMJf+Jw8E1l7zqhZmMlFw6w1NmfkfiSK8mS4zOx3BA=
 github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.7/go.mod h1:ykf3COxYI0UJmxcfcxcVuz7b6uADi1FkiUz6Eb7AgM8=
 github.com/aws/aws-sdk-go-v2/service/sts v1.26.7 h1:NzO4Vrau795RkUdSHKEwiR01FaGzGOH1EETJ+5QHnm0=
@@ -132,14 +133,14 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cheggaaa/pb/v3 v3.1.4 h1:DN8j4TVVdKu3WxVwcRKu0sG00IIU6FewoABZzXbRQeo=
-github.com/cheggaaa/pb/v3 v3.1.4/go.mod h1:6wVjILNBaXMs8c21qRiaUM8BR82erfgau1DQ4iUXmSA=
+github.com/cheggaaa/pb/v3 v3.1.5 h1:QuuUzeM2WsAqG2gMqtzaWithDJv0i+i6UlnwSCI4QLk=
+github.com/cheggaaa/pb/v3 v3.1.5/go.mod h1:CrxkeghYTXi1lQBEI7jSn+3svI3cuc19haAj6jM60XI=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k=
+github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY=
 github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -148,10 +149,10 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
 github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/digitalocean/godo v1.104.1 h1:SZNxjAsskM/su0YW9P8Wx3gU0W1Z13b6tZlYNpl5BnA=
+github.com/digitalocean/godo v1.106.0 h1:m5iErwl3xHovGFlawd50n54ntgXHt1BLsvU6BXsVxEU=
 github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
 github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
-github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE=
+github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
@@ -166,7 +167,7 @@ github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
 github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -188,7 +189,7 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
 github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ=
 github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
 github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
-github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
+github.com/go-resty/resty/v2 v2.10.0 h1:Qla4W/+TMmv0fOeeRqzEpXPLfTUnR5HZ1+lGs+CkiCo=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -262,19 +263,19 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
 github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
 github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
-github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
 github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas=
 github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU=
-github.com/gophercloud/gophercloud v1.7.0 h1:fyJGKh0LBvIZKLvBWvQdIgkaV5yTM3Jh9EYUh+UNCAs=
+github.com/gophercloud/gophercloud v1.8.0 h1:TM3Jawprb2NrdOnvcHhWJalmKmAmOGgfZElM/3oBYCk=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A=
-github.com/hashicorp/consul/api v1.25.1 h1:CqrdhYzc8XZuPnhIYZWH45toM0LB9ZeYr/gvpLVI3PE=
+github.com/hashicorp/consul/api v1.26.1 h1:5oSXOO5fboPZeW5SN+TdGFP/BILDgBm19OrPZ/pICIM=
 github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
 github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
@@ -283,6 +284,8 @@ github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJ
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
 github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
@@ -293,7 +296,7 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
 github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
 github.com/influxdata/influxdb v1.11.4 h1:H3pVW+/tWQ4lkHhZxVQ13Ov1hmhHYaAzz8L5aq3ZNtw=
 github.com/influxdata/influxdb v1.11.4/go.mod h1:VO6X2zlamfmEf+Esc9dR+7UQhdE/krspWNEZPwxCrp0=
-github.com/ionos-cloud/sdk-go/v6 v6.1.9 h1:Iq3VIXzeEbc8EbButuACgfLMiY5TPVWUPNrF+Vsddo4=
+github.com/ionos-cloud/sdk-go/v6 v6.1.10 h1:3815Q2Hw/wc4cJ8wD7bwfsmDsdfIEp80B7BQMj0YP2w=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@@ -314,8 +317,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
 github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
-github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/compress v1.17.5 h1:d4vBd+7CHydUqpFBgUEKkSdtSugf9YFmSkvUYPquI5E=
+github.com/klauspost/compress v1.17.5/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -327,7 +330,7 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/linode/linodego v1.23.0 h1:s0ReCZtuN9Z1IoUN9w1RLeYO1dMZUGPwOQ/IBFsBHtU=
+github.com/linode/linodego v1.25.0 h1:zYMz0lTasD503jBu3tSRhzEmXHQN1zptCw5o71ibyyU=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
@@ -337,7 +340,7 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
 github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
 github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE=
+github.com/miekg/dns v1.1.57 h1:Jzi7ApEIzwEPLHWRcafCN9LZSBbqQpxjt/wpgvg7wcM=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -391,11 +394,11 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
 github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
-github.com/prometheus/prometheus v0.48.1 h1:CTszphSNTXkuCG6O0IfpKdHcJkvvnAAE1GbELKS+NFk=
-github.com/prometheus/prometheus v0.48.1/go.mod h1:SRw624aMAxTfryAcP8rOjg4S/sHHaetx2lyJJ2nM83g=
+github.com/prometheus/prometheus v0.49.1 h1:90mDvjrFnca2m+0qPSIDr3y7iHPTAagOAElz7j+HtGk=
+github.com/prometheus/prometheus v0.49.1/go.mod h1:aDogiyqmv3aBIWDb5z5Sdcxuuf2BOfiJwOIm9JGpMnI=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
-github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/rivo/uniseg v0.4.6 h1:Sovz9sDSwbOz9tgUy8JpT+KgCkPYJEN/oYzlJiYTNLg=
+github.com/rivo/uniseg v0.4.6/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@@ -452,21 +455,23 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
 go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/collector/featuregate v1.0.1 h1:ok//hLSXttBbyu4sSV1pTx1nKdr5udSmrWy5sFMIIbM=
+go.opentelemetry.io/collector/featuregate v1.0.1/go.mod h1:QQXjP4etmJQhkQ20j4P/rapWuItYxoFozg/iIwuKnYg=
 go.opentelemetry.io/collector/pdata v1.0.1 h1:dGX2h7maA6zHbl5D3AsMnF1c3Nn+3EUftbVCLzeyNvA=
 go.opentelemetry.io/collector/pdata v1.0.1/go.mod h1:jutXeu0QOXYY8wcZ/hege+YAnSBP3+jpTqYU1+JTI5Y=
-go.opentelemetry.io/collector/semconv v0.92.0 h1:3+OGPPuVu4rtrz8qGbpbiw7eKKULj4iJaSDTV52HM40=
-go.opentelemetry.io/collector/semconv v0.92.0/go.mod h1:gZ0uzkXsN+J5NpiRcdp9xOhNGQDDui8Y62p15sKrlzo=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo=
-go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc=
-go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo=
-go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4=
-go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM=
+go.opentelemetry.io/collector/semconv v0.93.0 h1:eBlMcVNTwYYsVdAsCVDs4wvVYs75K1xcIDpqj16PG4c=
+go.opentelemetry.io/collector/semconv v0.93.0/go.mod h1:gZ0uzkXsN+J5NpiRcdp9xOhNGQDDui8Y62p15sKrlzo=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw=
+go.opentelemetry.io/otel v1.22.0 h1:xS7Ku+7yTFvDfDraDIJVpw7XPyuHlB9MCiqqX5mcJ6Y=
+go.opentelemetry.io/otel v1.22.0/go.mod h1:eoV4iAi3Ea8LkAEI9+GFT44O6T/D0GWAVFyZVCC6pMI=
+go.opentelemetry.io/otel/metric v1.22.0 h1:lypMQnGyJYeuYPhOM/bgjbFM6WE44W1/T45er4d8Hhg=
+go.opentelemetry.io/otel/metric v1.22.0/go.mod h1:evJGjVpZv0mQ5QBRJoBF64yMuOf4xCWdXjK8pzFvliY=
 go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8=
-go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc=
-go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ=
+go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx83XD0=
+go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo=
 go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
 go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -493,8 +498,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
-golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
+golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
+golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -689,7 +694,6 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
-golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
 google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
 google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
 google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -706,8 +710,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
 google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
 google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
 google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.156.0 h1:yloYcGbBtVYjLKQe4enCunxvwn3s2w/XPrrhVf6MsvQ=
-google.golang.org/api v0.156.0/go.mod h1:bUSmn4KFO0Q+69zo9CNIDp4Psi6BqM0np0CbzKRSiSY=
+google.golang.org/api v0.161.0 h1:oYzk/bs26WN10AV7iU7MVJVXBH8oCPS2hHyBiEeFoSU=
+google.golang.org/api v0.161.0/go.mod h1:0mu0TpK33qnydLvWqbImq2b1eQ5FHRSDCBzAxX9ZHyw=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -745,12 +749,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
 google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20240108191215-35c7eff3a6b1 h1:/IWabOtPziuXTEtI1KYCpM6Ss7vaAkeMxk+uXV/xvZs=
-google.golang.org/genproto v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k=
-google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 h1:OPXtXn7fNMaXwO3JvOmF1QyTc00jsSFFz1vXXBOdCDo=
-google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:B5xPO//w8qmBDjGReYLpR6UJPnkldGkCSMoH/2vxJeg=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 h1:gphdwh0npgs8elJ4T6J+DQJHPVF7RsuJHCfwztUb4J4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA=
+google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe h1:USL2DhxfgRchafRvt/wYyyQNzwgL7ZiURcozOE/Pkvo=
+google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro=
+google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe h1:0poefMBYvYbs7g5UkjS6HcxBPaTRAmznle9jnxYoAI8=
+google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe h1:bQnxqljG/wqi4NTXu2+DJ3n7APcEA882QZ1JvhQAq9o=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -764,8 +768,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
 google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
 google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU=
-google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM=
+google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0=
+google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -806,11 +810,11 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw=
-k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ=
-k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY=
+k8s.io/api v0.28.4 h1:8ZBrLjwosLl/NYgv1P7EQLqoO8MGQApnbgH8tu3BMzY=
+k8s.io/apimachinery v0.28.4 h1:zOSJe1mc+GxuMnFzD4Z/U1wst50X28ZNsn5bhgIIao8=
+k8s.io/client-go v0.28.4 h1:Np5ocjlZcTrkyRJ3+T3PkXDpe4UpatQxj85+xjaD2wY=
 k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
-k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
+k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
 k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
 k8s.io/utils v0.0.0-20230711102312-30195339c3c7 h1:ZgnF1KZsYxWIifwSNZFZgNtWE89WI5yiP5WwlfDoIyc=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go
index 540ad16ac..27a1970b9 100644
--- a/vendor/cloud.google.com/go/compute/internal/version.go
+++ b/vendor/cloud.google.com/go/compute/internal/version.go
@@ -15,4 +15,4 @@
 package internal
 
 // Version is the current tagged release of the library.
-const Version = "1.23.3"
+const Version = "1.23.4"
diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md
index c4cacb03f..43a179384 100644
--- a/vendor/cloud.google.com/go/iam/CHANGES.md
+++ b/vendor/cloud.google.com/go/iam/CHANGES.md
@@ -1,6 +1,13 @@
 # Changes
 
+## [1.1.6](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.5...iam/v1.1.6) (2024-01-30)
+
+
+### Bug Fixes
+
+* **iam:** Enable universe domain resolution options ([fd1d569](https://github.com/googleapis/google-cloud-go/commit/fd1d56930fa8a747be35a224611f4797b8aeb698))
+
 ## [1.1.5](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.4...iam/v1.1.5) (2023-11-01)
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
index 85346a891..b5243e612 100644
--- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
@@ -14,7 +14,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.31.0
+// 	protoc-gen-go v1.32.0
 // 	protoc        v4.23.2
 // source: google/iam/v1/iam_policy.proto
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
index 68f8d761f..3f854fe49 100644
--- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
@@ -14,7 +14,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.31.0
+// 	protoc-gen-go v1.32.0
 // 	protoc        v4.23.2
 // source: google/iam/v1/options.proto
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
index eefd1d0e5..dfc60661a 100644
--- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
@@ -14,7 +14,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.31.0
+// 	protoc-gen-go v1.32.0
 // 	protoc        v4.23.2
 // source: google/iam/v1/policy.proto
@@ -289,11 +289,11 @@ type Policy struct {
 	// Any operation that affects conditional role bindings must specify version
 	// `3`. This requirement applies to the following operations:
 	//
-	// * Getting a policy that includes a conditional role binding
-	// * Adding a conditional role binding to a policy
-	// * Changing a conditional role binding in a policy
-	// * Removing any role binding, with or without a condition, from a policy
-	//   that includes conditions
+	//   - Getting a policy that includes a conditional role binding
+	//   - Adding a conditional role binding to a policy
+	//   - Changing a conditional role binding in a policy
+	//   - Removing any role binding, with or without a condition, from a policy
+	//     that includes conditions
 	//
 	// **Important:** If you use IAM Conditions, you must include the `etag` field
 	// whenever you call `setIamPolicy`. If you omit this field, then IAM allows
@@ -407,47 +407,43 @@ type Binding struct {
 	// Specifies the principals requesting access for a Google Cloud resource.
// `members` can have the following values: // - // * `allUsers`: A special identifier that represents anyone who is - // on the internet; with or without a Google account. + // - `allUsers`: A special identifier that represents anyone who is + // on the internet; with or without a Google account. // - // * `allAuthenticatedUsers`: A special identifier that represents anyone - // who is authenticated with a Google account or a service account. + // - `allAuthenticatedUsers`: A special identifier that represents anyone + // who is authenticated with a Google account or a service account. // - // * `user:{emailid}`: An email address that represents a specific Google - // account. For example, `alice@example.com` . + // - `user:{emailid}`: An email address that represents a specific Google + // account. For example, `alice@example.com` . // + // - `serviceAccount:{emailid}`: An email address that represents a service + // account. For example, `my-other-app@appspot.gserviceaccount.com`. // - // * `serviceAccount:{emailid}`: An email address that represents a service - // account. For example, `my-other-app@appspot.gserviceaccount.com`. + // - `group:{emailid}`: An email address that represents a Google group. + // For example, `admins@example.com`. // - // * `group:{emailid}`: An email address that represents a Google group. - // For example, `admins@example.com`. + // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a user that has been recently deleted. For + // example, `alice@example.com?uid=123456789012345678901`. If the user is + // recovered, this value reverts to `user:{emailid}` and the recovered user + // retains the role in the binding. // - // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a user that has been recently deleted. For - // example, `alice@example.com?uid=123456789012345678901`. If the user is - // recovered, this value reverts to `user:{emailid}` and the recovered user - // retains the role in the binding. - // - // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus - // unique identifier) representing a service account that has been recently - // deleted. For example, - // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. - // If the service account is undeleted, this value reverts to - // `serviceAccount:{emailid}` and the undeleted service account retains the - // role in the binding. - // - // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique - // identifier) representing a Google group that has been recently - // deleted. For example, `admins@example.com?uid=123456789012345678901`. If - // the group is recovered, this value reverts to `group:{emailid}` and the - // recovered group retains the role in the binding. - // - // - // * `domain:{domain}`: The G Suite domain (primary) that represents all the - // users of that domain. For example, `google.com` or `example.com`. + // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus + // unique identifier) representing a service account that has been recently + // deleted. For example, + // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. + // If the service account is undeleted, this value reverts to + // `serviceAccount:{emailid}` and the undeleted service account retains the + // role in the binding. 
// + // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique + // identifier) representing a Google group that has been recently + // deleted. For example, `admins@example.com?uid=123456789012345678901`. If + // the group is recovered, this value reverts to `group:{emailid}` and the + // recovered group retains the role in the binding. // + // - `domain:{domain}`: The G Suite domain (primary) that represents all the + // users of that domain. For example, `google.com` or `example.com`. Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"` // The condition that is associated with this binding. // diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index 8b3fa6fc4..90007144d 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,6 +1,19 @@ # Changes +## [1.37.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.36.0...storage/v1.37.0) (2024-01-24) + + +### Features + +* **storage:** Add maxAttempts RetryOption ([#9215](https://github.com/googleapis/google-cloud-go/issues/9215)) ([e348cc5](https://github.com/googleapis/google-cloud-go/commit/e348cc5340e127b530e8ee4664fd995e6f038b2c)) +* **storage:** Support IncludeFoldersAsPrefixes ([#9211](https://github.com/googleapis/google-cloud-go/issues/9211)) ([98c9d71](https://github.com/googleapis/google-cloud-go/commit/98c9d7157306de5134547a67c084c248484c9a51)) + + +### Bug Fixes + +* **storage:** Migrate deprecated proto dep ([#9232](https://github.com/googleapis/google-cloud-go/issues/9232)) ([ebbb610](https://github.com/googleapis/google-cloud-go/commit/ebbb610e0f58035fd01ad7893971382d8bbd092f)), refs [#9189](https://github.com/googleapis/google-cloud-go/issues/9189) + ## [1.36.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.1...storage/v1.36.0) (2023-12-14) diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go index a51cf9c08..e9e959930 100644 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -420,6 +420,11 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q ctx = setUserProjectMetadata(ctx, s.userProject) } fetch := func(pageSize int, pageToken string) (token string, err error) { + // IncludeFoldersAsPrefixes is not supported for gRPC + // TODO: remove this when support is added in the proto. 
+ if it.query.IncludeFoldersAsPrefixes { + return "", status.Errorf(codes.Unimplemented, "storage: IncludeFoldersAsPrefixes is not supported in gRPC") + } var objects []*storagepb.Object var gitr *gapic.ObjectIterator err = run(it.ctx, func(ctx context.Context) error { diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go index 0e157e4ba..fe081b60b 100644 --- a/vendor/cloud.google.com/go/storage/http_client.go +++ b/vendor/cloud.google.com/go/storage/http_client.go @@ -348,6 +348,7 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q req.Versions(it.query.Versions) req.IncludeTrailingDelimiter(it.query.IncludeTrailingDelimiter) req.MatchGlob(it.query.MatchGlob) + req.IncludeFoldersAsPrefixes(it.query.IncludeFoldersAsPrefixes) if selection := it.query.toFieldSelection(); selection != "" { req.Fields("nextPageToken", googleapi.Field(selection)) } diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go index c6fd4b341..415b2b585 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go index 8159589ea..ed5089ac7 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go index 648199506..25b122a74 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go @@ -1,4 +1,4 @@ -// Copyright 2023 Google LLC +// Copyright 2024 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index 2d5cf890e..e49d8a532 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "1.36.0" +const Version = "1.37.0" diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go index dc79fd88b..1b52eb5d2 100644 --- a/vendor/cloud.google.com/go/storage/invoke.go +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -70,6 +70,9 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry return internal.Retry(ctx, bo, func() (stop bool, err error) { ctxWithHeaders := setInvocationHeaders(ctx, invocationID, attempts) err = call(ctxWithHeaders) + if retry.maxAttempts != nil && attempts >= *retry.maxAttempts { + return true, err + } attempts++ return !errorFunc(err), err }) diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index 78ecbf0e8..f047ef9cd 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -1620,6 +1620,11 @@ type Query struct { // for syntax details. When Delimiter is set in conjunction with MatchGlob, // it must be set to /. MatchGlob string + + // IncludeFoldersAsPrefixes includes Folders and Managed Folders in the set of + // prefixes returned by the query. Only applicable if Delimiter is set to /. + // IncludeFoldersAsPrefixes is not yet implemented in the gRPC API. + IncludeFoldersAsPrefixes bool } // attrToFieldMap maps the field names of ObjectAttrs to the underlying field @@ -2076,6 +2081,26 @@ func (wb *withBackoff) apply(config *retryConfig) { config.backoff = &wb.backoff } +// WithMaxAttempts configures the maximum number of times an API call can be made +// in the case of retryable errors. +// For example, if you set WithMaxAttempts(5), the operation will be attempted up to 5 +// times total (initial call plus 4 retries). +// Without this setting, operations will continue retrying indefinitely +// until either the context is canceled or a deadline is reached. +func WithMaxAttempts(maxAttempts int) RetryOption { + return &withMaxAttempts{ + maxAttempts: maxAttempts, + } +} + +type withMaxAttempts struct { + maxAttempts int +} + +func (wb *withMaxAttempts) apply(config *retryConfig) { + config.maxAttempts = &wb.maxAttempts +} + // RetryPolicy describes the available policies for which operations should be // retried. The default is `RetryIdempotent`. type RetryPolicy int @@ -2148,6 +2173,7 @@ type retryConfig struct { backoff *gax.Backoff policy RetryPolicy shouldRetry func(err error) bool + maxAttempts *int } func (r *retryConfig) clone() *retryConfig { @@ -2168,6 +2194,7 @@ func (r *retryConfig) clone() *retryConfig { backoff: bo, policy: r.policy, shouldRetry: r.shouldRetry, + maxAttempts: r.maxAttempts, } } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go new file mode 100644 index 000000000..187fe82b9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go @@ -0,0 +1,224 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
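Editor's note: before the vendored Azure files continue below, here is a brief illustrative sketch (not part of this patch) tying together the two storage features added above: capping retries with the new `WithMaxAttempts` option and listing folders via `Query.IncludeFoldersAsPrefixes`. The bucket name and the `listTopLevelPrefixes` helper are hypothetical.

```go
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/storage"
	"google.golang.org/api/iterator"
)

// listTopLevelPrefixes (hypothetical helper) prints the top-level prefixes of a
// bucket, including Folders and Managed Folders, retrying each call at most
// five times in total (one initial attempt plus four retries).
func listTopLevelPrefixes(ctx context.Context, bucket string) error {
	client, err := storage.NewClient(ctx)
	if err != nil {
		return err
	}
	defer client.Close()

	// Retryer applies the retry configuration to all calls made through this handle.
	b := client.Bucket(bucket).Retryer(storage.WithMaxAttempts(5))

	// IncludeFoldersAsPrefixes is only applicable when Delimiter is "/" and,
	// per the change above, is not yet supported over gRPC.
	it := b.Objects(ctx, &storage.Query{Delimiter: "/", IncludeFoldersAsPrefixes: true})
	for {
		attrs, err := it.Next()
		if err == iterator.Done {
			return nil
		}
		if err != nil {
			return err
		}
		if attrs.Prefix != "" {
			fmt.Println(attrs.Prefix)
		}
	}
}
```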
+ +package resource + +import ( + "fmt" + "strings" +) + +const ( + providersKey = "providers" + subscriptionsKey = "subscriptions" + resourceGroupsLowerKey = "resourcegroups" + locationsKey = "locations" + builtInResourceNamespace = "Microsoft.Resources" +) + +// RootResourceID defines the tenant as the root parent of all other ResourceID. +var RootResourceID = &ResourceID{ + Parent: nil, + ResourceType: TenantResourceType, + Name: "", +} + +// ResourceID represents a resource ID such as `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg`. +// Don't create this type directly, use ParseResourceID instead. +type ResourceID struct { + // Parent is the parent ResourceID of this instance. + // Can be nil if there is no parent. + Parent *ResourceID + + // SubscriptionID is the subscription ID in this resource ID. + // The value can be empty if the resource ID does not contain a subscription ID. + SubscriptionID string + + // ResourceGroupName is the resource group name in this resource ID. + // The value can be empty if the resource ID does not contain a resource group name. + ResourceGroupName string + + // Provider represents the provider name in this resource ID. + // This is only valid when the resource ID represents a resource provider. + // Example: `/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Insights` + Provider string + + // Location is the location in this resource ID. + // The value can be empty if the resource ID does not contain a location name. + Location string + + // ResourceType represents the type of this resource ID. + ResourceType ResourceType + + // Name is the resource name of this resource ID. + Name string + + isChild bool + stringValue string +} + +// ParseResourceID parses a string to an instance of ResourceID +func ParseResourceID(id string) (*ResourceID, error) { + if len(id) == 0 { + return nil, fmt.Errorf("invalid resource ID: id cannot be empty") + } + + if !strings.HasPrefix(id, "/") { + return nil, fmt.Errorf("invalid resource ID: resource id '%s' must start with '/'", id) + } + + parts := splitStringAndOmitEmpty(id, "/") + + if len(parts) < 2 { + return nil, fmt.Errorf("invalid resource ID: %s", id) + } + + if !strings.EqualFold(parts[0], subscriptionsKey) && !strings.EqualFold(parts[0], providersKey) { + return nil, fmt.Errorf("invalid resource ID: %s", id) + } + + return appendNext(RootResourceID, parts, id) +} + +// String returns the string of the ResourceID +func (id *ResourceID) String() string { + if len(id.stringValue) > 0 { + return id.stringValue + } + + if id.Parent == nil { + return "" + } + + builder := strings.Builder{} + builder.WriteString(id.Parent.String()) + + if id.isChild { + builder.WriteString(fmt.Sprintf("/%s", id.ResourceType.lastType())) + if len(id.Name) > 0 { + builder.WriteString(fmt.Sprintf("/%s", id.Name)) + } + } else { + builder.WriteString(fmt.Sprintf("/providers/%s/%s/%s", id.ResourceType.Namespace, id.ResourceType.Type, id.Name)) + } + + id.stringValue = builder.String() + + return id.stringValue +} + +func newResourceID(parent *ResourceID, resourceTypeName string, resourceName string) *ResourceID { + id := &ResourceID{} + id.init(parent, chooseResourceType(resourceTypeName, parent), resourceName, true) + return id +} + +func newResourceIDWithResourceType(parent *ResourceID, resourceType ResourceType, resourceName string) *ResourceID { + id := &ResourceID{} + id.init(parent, resourceType, resourceName, true) + return id +} + +func newResourceIDWithProvider(parent *ResourceID, 
providerNamespace, resourceTypeName, resourceName string) *ResourceID { + id := &ResourceID{} + id.init(parent, NewResourceType(providerNamespace, resourceTypeName), resourceName, false) + return id +} + +func chooseResourceType(resourceTypeName string, parent *ResourceID) ResourceType { + if strings.EqualFold(resourceTypeName, resourceGroupsLowerKey) { + return ResourceGroupResourceType + } else if strings.EqualFold(resourceTypeName, subscriptionsKey) && parent != nil && parent.ResourceType.String() == TenantResourceType.String() { + return SubscriptionResourceType + } + + return parent.ResourceType.AppendChild(resourceTypeName) +} + +func (id *ResourceID) init(parent *ResourceID, resourceType ResourceType, name string, isChild bool) { + if parent != nil { + id.Provider = parent.Provider + id.SubscriptionID = parent.SubscriptionID + id.ResourceGroupName = parent.ResourceGroupName + id.Location = parent.Location + } + + if resourceType.String() == SubscriptionResourceType.String() { + id.SubscriptionID = name + } + + if resourceType.lastType() == locationsKey { + id.Location = name + } + + if resourceType.String() == ResourceGroupResourceType.String() { + id.ResourceGroupName = name + } + + if resourceType.String() == ProviderResourceType.String() { + id.Provider = name + } + + if parent == nil { + id.Parent = RootResourceID + } else { + id.Parent = parent + } + id.isChild = isChild + id.ResourceType = resourceType + id.Name = name +} + +func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, error) { + if len(parts) == 0 { + return parent, nil + } + + if len(parts) == 1 { + // subscriptions and resourceGroups are not valid ids without their names + if strings.EqualFold(parts[0], subscriptionsKey) || strings.EqualFold(parts[0], resourceGroupsLowerKey) { + return nil, fmt.Errorf("invalid resource ID: %s", id) + } + + // resourceGroup must contain either child or provider resource type + if parent.ResourceType.String() == ResourceGroupResourceType.String() { + return nil, fmt.Errorf("invalid resource ID: %s", id) + } + + return newResourceID(parent, parts[0], ""), nil + } + + if strings.EqualFold(parts[0], providersKey) && (len(parts) == 2 || strings.EqualFold(parts[2], providersKey)) { + //provider resource can only be on a tenant or a subscription parent + if parent.ResourceType.String() != SubscriptionResourceType.String() && parent.ResourceType.String() != TenantResourceType.String() { + return nil, fmt.Errorf("invalid resource ID: %s", id) + } + + return appendNext(newResourceIDWithResourceType(parent, ProviderResourceType, parts[1]), parts[2:], id) + } + + if len(parts) > 3 && strings.EqualFold(parts[0], providersKey) { + return appendNext(newResourceIDWithProvider(parent, parts[1], parts[2], parts[3]), parts[4:], id) + } + + if len(parts) > 1 && !strings.EqualFold(parts[0], providersKey) { + return appendNext(newResourceID(parent, parts[0], parts[1]), parts[2:], id) + } + + return nil, fmt.Errorf("invalid resource ID: %s", id) +} + +func splitStringAndOmitEmpty(v, sep string) []string { + r := make([]string, 0) + for _, s := range strings.Split(v, sep) { + if len(s) == 0 { + continue + } + r = append(r, s) + } + + return r +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go new file mode 100644 index 000000000..ca03ac971 --- /dev/null +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_type.go @@ -0,0 +1,114 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package resource + +import ( + "fmt" + "strings" +) + +// SubscriptionResourceType is the ResourceType of a subscription +var SubscriptionResourceType = NewResourceType(builtInResourceNamespace, "subscriptions") + +// ResourceGroupResourceType is the ResourceType of a resource group +var ResourceGroupResourceType = NewResourceType(builtInResourceNamespace, "resourceGroups") + +// TenantResourceType is the ResourceType of a tenant +var TenantResourceType = NewResourceType(builtInResourceNamespace, "tenants") + +// ProviderResourceType is the ResourceType of a provider +var ProviderResourceType = NewResourceType(builtInResourceNamespace, "providers") + +// ResourceType represents an Azure resource type, e.g. "Microsoft.Network/virtualNetworks/subnets". +// Don't create this type directly, use ParseResourceType or NewResourceType instead. +type ResourceType struct { + // Namespace is the namespace of the resource type. + // e.g. "Microsoft.Network" in resource type "Microsoft.Network/virtualNetworks/subnets" + Namespace string + + // Type is the full type name of the resource type. + // e.g. "virtualNetworks/subnets" in resource type "Microsoft.Network/virtualNetworks/subnets" + Type string + + // Types is the slice of all the sub-types of this resource type. + // e.g. ["virtualNetworks", "subnets"] in resource type "Microsoft.Network/virtualNetworks/subnets" + Types []string + + stringValue string +} + +// String returns the string of the ResourceType +func (t ResourceType) String() string { + return t.stringValue +} + +// IsParentOf returns true when the receiver is the parent resource type of the child. +func (t ResourceType) IsParentOf(child ResourceType) bool { + if !strings.EqualFold(t.Namespace, child.Namespace) { + return false + } + if len(t.Types) >= len(child.Types) { + return false + } + for i := range t.Types { + if !strings.EqualFold(t.Types[i], child.Types[i]) { + return false + } + } + + return true +} + +// AppendChild creates an instance of ResourceType using the receiver as the parent with childType appended to it. +func (t ResourceType) AppendChild(childType string) ResourceType { + return NewResourceType(t.Namespace, fmt.Sprintf("%s/%s", t.Type, childType)) +} + +// NewResourceType creates an instance of ResourceType using a provider namespace +// such as "Microsoft.Network" and type such as "virtualNetworks/subnets". +func NewResourceType(providerNamespace, typeName string) ResourceType { + return ResourceType{ + Namespace: providerNamespace, + Type: typeName, + Types: splitStringAndOmitEmpty(typeName, "/"), + stringValue: fmt.Sprintf("%s/%s", providerNamespace, typeName), + } +} + +// ParseResourceType parses the ResourceType from a resource type string (e.g. Microsoft.Network/virtualNetworks/subnets) +// or a resource identifier string. +// e.g.
/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg/providers/Microsoft.Network/virtualNetworks/vnet/subnets/mySubnet) +func ParseResourceType(resourceIDOrType string) (ResourceType, error) { + // split the path into segments + parts := splitStringAndOmitEmpty(resourceIDOrType, "/") + + // There must be at least a namespace and type name + if len(parts) < 1 { + return ResourceType{}, fmt.Errorf("invalid resource ID or type: %s", resourceIDOrType) + } + + // if the type is just subscriptions, it is a built-in type in the Microsoft.Resources namespace + if len(parts) == 1 { + // Simple resource type + return NewResourceType(builtInResourceNamespace, parts[0]), nil + } else if strings.Contains(parts[0], ".") { + // Handle resource types (Microsoft.Compute/virtualMachines, Microsoft.Network/virtualNetworks/subnets) + // it is a full type name + return NewResourceType(parts[0], strings.Join(parts[1:], "/")), nil + } else { + // Check if ResourceID + id, err := ParseResourceID(resourceIDOrType) + if err != nil { + return ResourceType{}, err + } + return NewResourceType(id.ResourceType.Namespace, id.ResourceType.Type), nil + } +} + +func (t ResourceType) lastType() string { + return t.Types[len(t.Types)-1] +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go new file mode 100644 index 000000000..83cf91e3e --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy/policy.go @@ -0,0 +1,98 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package policy + +import ( + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" +) + +// BearerTokenOptions configures the bearer token policy's behavior. +type BearerTokenOptions struct { + // AuxiliaryTenants are additional tenant IDs for authenticating cross-tenant requests. + // The policy will add a token from each of these tenants to every request. The + // authenticating user or service principal must be a guest in these tenants, and the + // policy's credential must support multitenant authentication. + AuxiliaryTenants []string + + // Scopes contains the list of permission scopes required for the token. + Scopes []string +} + +// RegistrationOptions configures the registration policy's behavior. +// All zero-value fields will be initialized with their default values. +type RegistrationOptions struct { + policy.ClientOptions + + // MaxAttempts is the total number of times to attempt automatic registration + // in the event that an attempt fails. + // The default value is 3. + // Set to a value less than zero to disable the policy. + MaxAttempts int + + // PollingDelay is the amount of time to sleep between polling intervals. + // The default value is 15 seconds. + // A value less than zero means no delay between polling intervals (not recommended). + PollingDelay time.Duration + + // PollingDuration is the amount of time to wait before abandoning polling. + // The default value is 5 minutes. + // NOTE: Setting this to a small value might cause the policy to prematurely fail. + PollingDuration time.Duration +} + +// ClientOptions contains configuration settings for a client's pipeline. +type ClientOptions struct { + policy.ClientOptions + + // AuxiliaryTenants are additional tenant IDs for authenticating cross-tenant requests.
+ // The client will add a token from each of these tenants to every request. The + // authenticating user or service principal must be a guest in these tenants, and the + // client's credential must support multitenant authentication. + AuxiliaryTenants []string + + // DisableRPRegistration disables the auto-RP registration policy. Defaults to false. + DisableRPRegistration bool +} + +// Clone returns a deep copy of the current options. +func (o *ClientOptions) Clone() *ClientOptions { + if o == nil { + return nil + } + copiedOptions := *o + copiedOptions.Cloud.Services = copyMap(copiedOptions.Cloud.Services) + copiedOptions.Logging.AllowedHeaders = copyArray(copiedOptions.Logging.AllowedHeaders) + copiedOptions.Logging.AllowedQueryParams = copyArray(copiedOptions.Logging.AllowedQueryParams) + copiedOptions.Retry.StatusCodes = copyArray(copiedOptions.Retry.StatusCodes) + copiedOptions.PerRetryPolicies = copyArray(copiedOptions.PerRetryPolicies) + copiedOptions.PerCallPolicies = copyArray(copiedOptions.PerCallPolicies) + return &copiedOptions +} + +// copyMap returns a new map with all the key/value pairs in the src map +func copyMap[K comparable, V any](src map[K]V) map[K]V { + if src == nil { + return nil + } + copiedMap := make(map[K]V) + for k, v := range src { + copiedMap[k] = v + } + return copiedMap +} + +// copyArray returns a new array with all the elements in the src array +func copyArray[T any](src []T) []T { + if src == nil { + return nil + } + copiedArray := make([]T, len(src)) + copy(copiedArray, src) + return copiedArray +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go new file mode 100644 index 000000000..302c19cd4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/pipeline.go @@ -0,0 +1,65 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "errors" + "reflect" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" +) + +// NewPipeline creates a pipeline from connection options. Policies from ClientOptions are +// placed after policies from PipelineOptions. The telemetry policy, when enabled, will +// use the specified module and version info.
+func NewPipeline(module, version string, cred azcore.TokenCredential, plOpts azruntime.PipelineOptions, options *armpolicy.ClientOptions) (azruntime.Pipeline, error) { + if options == nil { + options = &armpolicy.ClientOptions{} + } + conf, err := getConfiguration(&options.ClientOptions) + if err != nil { + return azruntime.Pipeline{}, err + } + authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{ + AuxiliaryTenants: options.AuxiliaryTenants, + Scopes: []string{conf.Audience + "/.default"}, + }) + perRetry := make([]azpolicy.Policy, len(plOpts.PerRetry), len(plOpts.PerRetry)+1) + copy(perRetry, plOpts.PerRetry) + plOpts.PerRetry = append(perRetry, authPolicy, exported.PolicyFunc(httpTraceNamespacePolicy)) + if !options.DisableRPRegistration { + regRPOpts := armpolicy.RegistrationOptions{ClientOptions: options.ClientOptions} + regPolicy, err := NewRPRegistrationPolicy(cred, ®RPOpts) + if err != nil { + return azruntime.Pipeline{}, err + } + perCall := make([]azpolicy.Policy, len(plOpts.PerCall), len(plOpts.PerCall)+1) + copy(perCall, plOpts.PerCall) + plOpts.PerCall = append(perCall, regPolicy) + } + if plOpts.APIVersion.Name == "" { + plOpts.APIVersion.Name = "api-version" + } + return azruntime.NewPipeline(module, version, plOpts, &options.ClientOptions), nil +} + +func getConfiguration(o *azpolicy.ClientOptions) (cloud.ServiceConfiguration, error) { + c := cloud.AzurePublic + if !reflect.ValueOf(o.Cloud).IsZero() { + c = o.Cloud + } + if conf, ok := c.Services[cloud.ResourceManager]; ok && conf.Endpoint != "" && conf.Audience != "" { + return conf, nil + } else { + return conf, errors.New("provided Cloud field is missing Azure Resource Manager configuration") + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go new file mode 100644 index 000000000..54b3bb78d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go @@ -0,0 +1,145 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/internal/temporal" +) + +const headerAuxiliaryAuthorization = "x-ms-authorization-auxiliary" + +// acquiringResourceState holds data for an auxiliary token request +type acquiringResourceState struct { + ctx context.Context + p *BearerTokenPolicy + tenant string +} + +// acquireAuxToken acquires a token from an auxiliary tenant. Only one thread/goroutine at a time ever calls this function. +func acquireAuxToken(state acquiringResourceState) (newResource azcore.AccessToken, newExpiration time.Time, err error) { + tk, err := state.p.cred.GetToken(state.ctx, azpolicy.TokenRequestOptions{ + EnableCAE: true, + Scopes: state.p.scopes, + TenantID: state.tenant, + }) + if err != nil { + return azcore.AccessToken{}, time.Time{}, err + } + return tk, tk.ExpiresOn, nil +} + +// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential. 
+type BearerTokenPolicy struct { + auxResources map[string]*temporal.Resource[azcore.AccessToken, acquiringResourceState] + btp *azruntime.BearerTokenPolicy + cred azcore.TokenCredential + scopes []string +} + +// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens. +// cred: an azcore.TokenCredential implementation such as a credential object from azidentity +// opts: optional settings. Pass nil to accept default values; this is the same as passing a zero-value options. +func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTokenOptions) *BearerTokenPolicy { + if opts == nil { + opts = &armpolicy.BearerTokenOptions{} + } + p := &BearerTokenPolicy{cred: cred} + p.auxResources = make(map[string]*temporal.Resource[azcore.AccessToken, acquiringResourceState], len(opts.AuxiliaryTenants)) + for _, t := range opts.AuxiliaryTenants { + p.auxResources[t] = temporal.NewResource(acquireAuxToken) + } + p.scopes = make([]string, len(opts.Scopes)) + copy(p.scopes, opts.Scopes) + p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{ + AuthorizationHandler: azpolicy.AuthorizationHandler{ + OnChallenge: p.onChallenge, + OnRequest: p.onRequest, + }, + }) + return p +} + +func (b *BearerTokenPolicy) onChallenge(req *azpolicy.Request, res *http.Response, authNZ func(azpolicy.TokenRequestOptions) error) error { + challenge := res.Header.Get(shared.HeaderWWWAuthenticate) + claims, err := parseChallenge(challenge) + if err != nil { + // the challenge contains claims we can't parse + return err + } else if claims != "" { + // request a new token having the specified claims, send the request again + return authNZ(azpolicy.TokenRequestOptions{Claims: claims, EnableCAE: true, Scopes: b.scopes}) + } + // auth challenge didn't include claims, so this is a simple authorization failure + return azruntime.NewResponseError(res) +} + +// onRequest authorizes requests with one or more bearer tokens +func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolicy.TokenRequestOptions) error) error { + // authorize the request with a token for the primary tenant + err := authNZ(azpolicy.TokenRequestOptions{EnableCAE: true, Scopes: b.scopes}) + if err != nil || len(b.auxResources) == 0 { + return err + } + // add tokens for auxiliary tenants + as := acquiringResourceState{ + ctx: req.Raw().Context(), + p: b, + } + auxTokens := make([]string, 0, len(b.auxResources)) + for tenant, er := range b.auxResources { + as.tenant = tenant + auxTk, err := er.Get(as) + if err != nil { + return err + } + auxTokens = append(auxTokens, fmt.Sprintf("%s%s", shared.BearerTokenPrefix, auxTk.Token)) + } + req.Raw().Header.Set(headerAuxiliaryAuthorization, strings.Join(auxTokens, ", ")) + return nil +} + +// Do authorizes a request with a bearer token +func (b *BearerTokenPolicy) Do(req *azpolicy.Request) (*http.Response, error) { + return b.btp.Do(req) +} + +// parseChallenge parses claims from an authentication challenge issued by ARM so a client can request a token +// that will satisfy conditional access policies. It returns a non-nil error when the given value contains +// claims it can't parse. If the value contains no claims, it returns an empty string and a nil error. 
+func parseChallenge(wwwAuthenticate string) (string, error) { + claims := "" + var err error + for _, param := range strings.Split(wwwAuthenticate, ",") { + if _, after, found := strings.Cut(param, "claims="); found { + if claims != "" { + // The header contains multiple challenges, at least two of which specify claims. The specs allow this + // but it's unclear what a client should do in this case and there's as yet no concrete example of it. + err = fmt.Errorf("found multiple claims challenges in %q", wwwAuthenticate) + break + } + // trim stuff that would get an error from RawURLEncoding; claims may or may not be padded + claims = strings.Trim(after, `\"=`) + // we don't return this error because it's something unhelpful like "illegal base64 data at input byte 42" + if b, decErr := base64.RawURLEncoding.DecodeString(claims); decErr == nil { + claims = string(b) + } else { + err = fmt.Errorf("failed to parse claims from %q", wwwAuthenticate) + break + } + } + } + return claims, err +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go new file mode 100644 index 000000000..83e15949a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_register_rp.go @@ -0,0 +1,347 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + armpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + azpolicy "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const ( + // LogRPRegistration entries contain information specific to the automatic registration of an RP. + // Entries of this classification are written IFF the policy needs to take any action. + LogRPRegistration log.Event = "RPRegistration" +) + +// setDefaults sets any default values +func setDefaults(r *armpolicy.RegistrationOptions) { + if r.MaxAttempts == 0 { + r.MaxAttempts = 3 + } else if r.MaxAttempts < 0 { + r.MaxAttempts = 0 + } + if r.PollingDelay == 0 { + r.PollingDelay = 15 * time.Second + } else if r.PollingDelay < 0 { + r.PollingDelay = 0 + } + if r.PollingDuration == 0 { + r.PollingDuration = 5 * time.Minute + } +} + +// NewRPRegistrationPolicy creates a policy object configured using the specified options. +// The policy controls whether an unregistered resource provider should automatically be +// registered. See https://aka.ms/rps-not-found for more information.
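// Editor's note: the following usage sketch is illustrative and not part of the
// vendored source. It assumes a credential from the azidentity module; passing
// nil options accepts the defaults applied by setDefaults above.
//
//	cred, err := azidentity.NewDefaultAzureCredential(nil)
//	if err != nil {
//		// handle error
//	}
//	rpPolicy, err := NewRPRegistrationPolicy(cred, nil)
//	// rpPolicy can then be placed among a pipeline's PerCall policies, which
//	// is what NewPipeline does when RP registration isn't disabled.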
+func NewRPRegistrationPolicy(cred azcore.TokenCredential, o *armpolicy.RegistrationOptions) (azpolicy.Policy, error) { + if o == nil { + o = &armpolicy.RegistrationOptions{} + } + conf, err := getConfiguration(&o.ClientOptions) + if err != nil { + return nil, err + } + authPolicy := NewBearerTokenPolicy(cred, &armpolicy.BearerTokenOptions{Scopes: []string{conf.Audience + "/.default"}}) + p := &rpRegistrationPolicy{ + endpoint: conf.Endpoint, + pipeline: runtime.NewPipeline(shared.Module, shared.Version, runtime.PipelineOptions{PerRetry: []azpolicy.Policy{authPolicy}}, &o.ClientOptions), + options: *o, + } + // init the copy + setDefaults(&p.options) + return p, nil +} + +type rpRegistrationPolicy struct { + endpoint string + pipeline runtime.Pipeline + options armpolicy.RegistrationOptions +} + +func (r *rpRegistrationPolicy) Do(req *azpolicy.Request) (*http.Response, error) { + if r.options.MaxAttempts == 0 { + // policy is disabled + return req.Next() + } + const registeredState = "Registered" + var rp string + var resp *http.Response + for attempts := 0; attempts < r.options.MaxAttempts; attempts++ { + var err error + // make the original request + resp, err = req.Next() + // getting a 409 is the first indication that the RP might need to be registered, check error response + if err != nil || resp.StatusCode != http.StatusConflict { + return resp, err + } + var reqErr requestError + if err = runtime.UnmarshalAsJSON(resp, &reqErr); err != nil { + return resp, err + } + if reqErr.ServiceError == nil { + // missing service error info. just return the response + // to the caller so its error unmarshalling will kick in + return resp, err + } + if !isUnregisteredRPCode(reqErr.ServiceError.Code) { + // not a 409 due to unregistered RP. just return the response + // to the caller so its error unmarshalling will kick in + return resp, err + } + // RP needs to be registered. 
start by getting the subscription ID from the original request + subID, err := getSubscription(req.Raw().URL.Path) + if err != nil { + return resp, err + } + // now get the RP from the error + rp, err = getProvider(reqErr) + if err != nil { + return resp, err + } + logRegistrationExit := func(v interface{}) { + log.Writef(LogRPRegistration, "END registration for %s: %v", rp, v) + } + log.Writef(LogRPRegistration, "BEGIN registration for %s", rp) + // create client and make the registration request + // we use the scheme and host from the original request + rpOps := &providersOperations{ + p: r.pipeline, + u: r.endpoint, + subID: subID, + } + if _, err = rpOps.Register(&shared.ContextWithDeniedValues{Context: req.Raw().Context()}, rp); err != nil { + logRegistrationExit(err) + return resp, err + } + + // RP was registered, however we need to wait for the registration to complete + pollCtx, pollCancel := context.WithTimeout(&shared.ContextWithDeniedValues{Context: req.Raw().Context()}, r.options.PollingDuration) + var lastRegState string + for { + // get the current registration state + getResp, err := rpOps.Get(pollCtx, rp) + if err != nil { + pollCancel() + logRegistrationExit(err) + return resp, err + } + if getResp.Provider.RegistrationState != nil && !strings.EqualFold(*getResp.Provider.RegistrationState, lastRegState) { + // registration state has changed, or was updated for the first time + lastRegState = *getResp.Provider.RegistrationState + log.Writef(LogRPRegistration, "registration state is %s", lastRegState) + } + if strings.EqualFold(lastRegState, registeredState) { + // registration complete + pollCancel() + logRegistrationExit(lastRegState) + break + } + // wait before trying again + select { + case <-time.After(r.options.PollingDelay): + // continue polling + case <-pollCtx.Done(): + pollCancel() + logRegistrationExit(pollCtx.Err()) + return resp, pollCtx.Err() + } + } + // RP was successfully registered, retry the original request + err = req.RewindBody() + if err != nil { + return resp, err + } + } + // if we get here it means we exceeded the number of attempts + return resp, fmt.Errorf("exceeded attempts to register %s", rp) +} + +var unregisteredRPCodes = []string{ + "MissingSubscriptionRegistration", + "MissingRegistrationForResourceProvider", + "Subscription Not Registered", + "SubscriptionNotRegistered", +} + +func isUnregisteredRPCode(errorCode string) bool { + for _, code := range unregisteredRPCodes { + if strings.EqualFold(errorCode, code) { + return true + } + } + return false +} + +func getSubscription(path string) (string, error) { + parts := strings.Split(path, "/") + for i, v := range parts { + if v == "subscriptions" && (i+1) < len(parts) { + return parts[i+1], nil + } + } + return "", fmt.Errorf("failed to obtain subscription ID from %s", path) +} + +func getProvider(re requestError) (string, error) { + if len(re.ServiceError.Details) > 0 { + return re.ServiceError.Details[0].Target, nil + } + return "", errors.New("unexpected empty Details") +} + +// minimal error definitions to simplify detection +type requestError struct { + ServiceError *serviceError `json:"error"` +} + +type serviceError struct { + Code string `json:"code"` + Details []serviceErrorDetails `json:"details"` +} + +type serviceErrorDetails struct { + Code string `json:"code"` + Target string `json:"target"` +} + +/////////////////////////////////////////////////////////////////////////////////////////////// +// the following code was copied from module armresources, providers.go and 
models.go +// only the minimum amount of code was copied to get this working and some edits were made. +/////////////////////////////////////////////////////////////////////////////////////////////// + +type providersOperations struct { + p runtime.Pipeline + u string + subID string +} + +// Get - Gets the specified resource provider. +func (client *providersOperations) Get(ctx context.Context, resourceProviderNamespace string) (providerResponse, error) { + req, err := client.getCreateRequest(ctx, resourceProviderNamespace) + if err != nil { + return providerResponse{}, err + } + resp, err := client.p.Do(req) + if err != nil { + return providerResponse{}, err + } + result, err := client.getHandleResponse(resp) + if err != nil { + return providerResponse{}, err + } + return result, nil +} + +// getCreateRequest creates the Get request. +func (client *providersOperations) getCreateRequest(ctx context.Context, resourceProviderNamespace string) (*azpolicy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}" + urlPath = strings.ReplaceAll(urlPath, "{resourceProviderNamespace}", url.PathEscape(resourceProviderNamespace)) + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subID)) + req, err := runtime.NewRequest(ctx, http.MethodGet, runtime.JoinPaths(client.u, urlPath)) + if err != nil { + return nil, err + } + query := req.Raw().URL.Query() + query.Set("api-version", "2019-05-01") + req.Raw().URL.RawQuery = query.Encode() + return req, nil +} + +// getHandleResponse handles the Get response. +func (client *providersOperations) getHandleResponse(resp *http.Response) (providerResponse, error) { + if !runtime.HasStatusCode(resp, http.StatusOK) { + return providerResponse{}, exported.NewResponseError(resp) + } + result := providerResponse{RawResponse: resp} + err := runtime.UnmarshalAsJSON(resp, &result.Provider) + if err != nil { + return providerResponse{}, err + } + return result, err +} + +// Register - Registers a subscription with a resource provider. +func (client *providersOperations) Register(ctx context.Context, resourceProviderNamespace string) (providerResponse, error) { + req, err := client.registerCreateRequest(ctx, resourceProviderNamespace) + if err != nil { + return providerResponse{}, err + } + resp, err := client.p.Do(req) + if err != nil { + return providerResponse{}, err + } + result, err := client.registerHandleResponse(resp) + if err != nil { + return providerResponse{}, err + } + return result, nil +} + +// registerCreateRequest creates the Register request. +func (client *providersOperations) registerCreateRequest(ctx context.Context, resourceProviderNamespace string) (*azpolicy.Request, error) { + urlPath := "/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register" + urlPath = strings.ReplaceAll(urlPath, "{resourceProviderNamespace}", url.PathEscape(resourceProviderNamespace)) + urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subID)) + req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.u, urlPath)) + if err != nil { + return nil, err + } + query := req.Raw().URL.Query() + query.Set("api-version", "2019-05-01") + req.Raw().URL.RawQuery = query.Encode() + return req, nil +} + +// registerHandleResponse handles the Register response. 
+func (client *providersOperations) registerHandleResponse(resp *http.Response) (providerResponse, error) { + if !runtime.HasStatusCode(resp, http.StatusOK) { + return providerResponse{}, exported.NewResponseError(resp) + } + result := providerResponse{RawResponse: resp} + err := runtime.UnmarshalAsJSON(resp, &result.Provider) + if err != nil { + return providerResponse{}, err + } + return result, err +} + +// ProviderResponse is the response envelope for operations that return a Provider type. +type providerResponse struct { + // Resource provider information. + Provider *provider + + // RawResponse contains the underlying HTTP response. + RawResponse *http.Response +} + +// Provider - Resource provider information. +type provider struct { + // The provider ID. + ID *string `json:"id,omitempty"` + + // The namespace of the resource provider. + Namespace *string `json:"namespace,omitempty"` + + // The registration policy of the resource provider. + RegistrationPolicy *string `json:"registrationPolicy,omitempty"` + + // The registration state of the resource provider. + RegistrationState *string `json:"registrationState,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go new file mode 100644 index 000000000..6cea18424 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_trace_namespace.go @@ -0,0 +1,30 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package runtime + +import ( + "net/http" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing" +) + +// httpTraceNamespacePolicy is a policy that adds the az.namespace attribute to the current Span +func httpTraceNamespacePolicy(req *policy.Request) (resp *http.Response, err error) { + rawTracer := req.Raw().Context().Value(shared.CtxWithTracingTracer{}) + if tracer, ok := rawTracer.(tracing.Tracer); ok && tracer.Enabled() { + rt, err := resource.ParseResourceType(req.Raw().URL.Path) + if err == nil { + // add the namespace attribute to the current span + span := tracer.SpanFromContext(req.Raw().Context()) + span.SetAttributes(tracing.Attribute{Key: shared.TracingNamespaceAttrName, Value: rt.Namespace}) + } + } + return req.Next() +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go new file mode 100644 index 000000000..1400d4379 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/runtime.go @@ -0,0 +1,24 @@ +//go:build go1.16 +// +build go1.16 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. 
+ +package runtime + +import "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" + +func init() { + cloud.AzureChina.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{ + Audience: "https://management.core.chinacloudapi.cn", + Endpoint: "https://management.chinacloudapi.cn", + } + cloud.AzureGovernment.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{ + Audience: "https://management.core.usgovcloudapi.net", + Endpoint: "https://management.usgovcloudapi.net", + } + cloud.AzurePublic.Services[cloud.ResourceManager] = cloud.ServiceConfiguration{ + Audience: "https://management.core.windows.net/", + Endpoint: "https://management.azure.com", + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md index 7ea119ab3..71dcb5f3e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md @@ -1,5 +1,38 @@ # Release History +## 1.5.1 (2024-01-17) + +### Bugs Fixed +* `InteractiveBrowserCredential` handles `AdditionallyAllowedTenants` correctly + +## 1.5.0 (2024-01-16) + +### Breaking Changes +> These changes affect only code written against a beta version such as v1.5.0-beta.1 +* Removed persistent token caching. It will return in v1.6.0-beta.1 + +### Bugs Fixed +* Credentials now preserve MSAL headers e.g. X-Client-Sku + +### Other Changes +* Upgraded dependencies + +## 1.5.0-beta.2 (2023-11-07) + +### Features Added +* `DefaultAzureCredential` and `ManagedIdentityCredential` support Azure ML managed identity +* Added spans for distributed tracing. + +## 1.5.0-beta.1 (2023-10-10) + +### Features Added +* Optional persistent token caching for most credentials. Set `TokenCachePersistenceOptions` + on a credential's options to enable and configure this. See the package documentation for + this version and [TOKEN_CACHING.md](https://aka.ms/azsdk/go/identity/caching) for more + details. +* `AzureDeveloperCLICredential` authenticates with the Azure Developer CLI (`azd`). This + credential is also part of the `DefaultAzureCredential` authentication flow. + ## 1.4.0 (2023-10-10) ### Bugs Fixed @@ -94,14 +127,14 @@ ### Features Added * By default, credentials set client capability "CP1" to enable support for [Continuous Access Evaluation (CAE)](https://docs.microsoft.com/azure/active-directory/develop/app-resilience-continuous-access-evaluation). - This indicates to Azure Active Directory that your application can handle CAE claims challenges. + This indicates to Microsoft Entra ID that your application can handle CAE claims challenges. You can disable this behavior by setting the environment variable "AZURE_IDENTITY_DISABLE_CP1" to "true". * `InteractiveBrowserCredentialOptions.LoginHint` enables pre-populating the login prompt with a username ([#15599](https://github.com/Azure/azure-sdk-for-go/pull/15599)) * Service principal and user credentials support ADFS authentication on Azure Stack. Specify "adfs" as the credential's tenant. * Applications running in private or disconnected clouds can prevent credentials from - requesting Azure AD instance metadata by setting the `DisableInstanceDiscovery` + requesting Microsoft Entra instance metadata by setting the `DisableInstanceDiscovery` field on credential options. * Many credentials can now be configured to authenticate in multiple tenants. 
The options types for these credentials have an `AdditionallyAllowedTenants` field @@ -454,4 +487,4 @@ ## 0.1.0 (2020-07-23) ### Features Added -* Initial Release. Azure Identity library that provides Azure Active Directory token authentication support for the SDK. +* Initial Release. Azure Identity library that provides Microsoft Entra token authentication support for the SDK. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md index 4ac53eb7b..1a6492023 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md @@ -1,6 +1,6 @@ # Migrating from autorest/adal to azidentity -`azidentity` provides Azure Active Directory (Azure AD) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. +`azidentity` provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) authentication for the newest Azure SDK modules (`github.com/azure-sdk-for-go/sdk/...`). Older Azure SDK packages (`github.com/azure-sdk-for-go/services/...`) use types from `github.com/go-autorest/autorest/adal` instead. This guide shows common authentication code using `autorest/adal` and its equivalent using `azidentity`. @@ -18,7 +18,7 @@ This guide shows common authentication code using `autorest/adal` and its equiva ### `autorest/adal` -Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires an Azure AD endpoint and tenant: +Token providers require a token audience (resource identifier) and an instance of `adal.OAuthConfig`, which requires a Microsoft Entra endpoint and tenant: ```go import "github.com/Azure/go-autorest/autorest/adal" @@ -284,7 +284,7 @@ if err == nil { } ``` -Note that `azidentity` credentials use the Azure AD v2.0 endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/v2-permissions-and-consent). +Note that `azidentity` credentials use the Microsoft Entra endpoint, which requires OAuth 2 scopes instead of the resource identifiers `autorest/adal` expects. For more information, see [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/permissions-consent-overview). ## Use azidentity credentials with older packages diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md index da0baa9ad..b6ad2d39f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md @@ -1,9 +1,9 @@ # Azure Identity Client Module for Go -The Azure Identity module provides Azure Active Directory (Azure AD) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. 
+The Azure Identity module provides Microsoft Entra ID ([formerly Azure Active Directory](https://learn.microsoft.com/azure/active-directory/fundamentals/new-name)) token authentication support across the Azure SDK. It includes a set of `TokenCredential` implementations, which can be used with Azure SDK clients supporting token authentication. [![PkgGoDev](https://pkg.go.dev/badge/github.com/Azure/azure-sdk-for-go/sdk/azidentity)](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity) -| [Azure Active Directory documentation](https://docs.microsoft.com/azure/active-directory/) +| [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/) | [Source code](https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/azidentity) # Getting started @@ -35,6 +35,12 @@ signed in to the [Azure CLI](https://docs.microsoft.com/cli/azure). To sign in t When no default browser is available, `az login` will use the device code authentication flow. This can also be selected manually by running `az login --use-device-code`. +#### Authenticate via the Azure Developer CLI + +Developers coding outside of an IDE can also use the [Azure Developer CLI](https://aka.ms/azure-dev) to authenticate. Applications using the `DefaultAzureCredential` or the `AzureDeveloperCLICredential` can use the account logged in to the Azure Developer CLI to authenticate calls in their application when running locally. + +To authenticate with the Azure Developer CLI, run `azd auth login`. On a system with a default web browser, `azd` will launch the browser to authenticate. On systems without a default web browser, run `azd auth login --use-device-code` to use the device code authentication flow. + ## Key concepts ### Credentials @@ -44,9 +50,7 @@ service client to authenticate requests. Service clients across the Azure SDK accept a credential instance when they are constructed, and use that credential to authenticate requests. -The `azidentity` module focuses on OAuth authentication with Azure Active -Directory (AAD). It offers a variety of credential types capable of acquiring -an Azure AD access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types. +The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID. It offers a variety of credential types capable of acquiring a Microsoft Entra access token. See [Credential Types](#credential-types "Credential Types") for a list of this module's credential types. ### DefaultAzureCredential @@ -58,6 +62,7 @@ an Azure AD access token. See [Credential Types](#credential-types "Credential T 1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity. 1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it. 1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity. +1. **Azure Developer CLI** - If the developer has authenticated via the Azure Developer CLI `azd auth login` command, the `DefaultAzureCredential` will authenticate with that account. > Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. 
Developers who want more control or whose scenario isn't served by the default settings should use other credential types.

@@ -152,6 +157,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|Credential|Usage
|-|-
|[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI
+|[AzureDeveloperCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticate as the user signed in to the Azure Developer CLI

## Environment Variables

@@ -161,16 +167,16 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)

|variable name|value
|-|-
-|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application
-|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant
+|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application
+|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant
|`AZURE_CLIENT_SECRET`|one of the application's client secrets

#### Service principal with certificate

|variable name|value
|-|-
-|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application
-|`AZURE_TENANT_ID`|ID of the application's Azure Active Directory tenant
+|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application
+|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant
|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key
|`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any

@@ -178,22 +184,30 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)

|variable name|value
|-|-
-|`AZURE_CLIENT_ID`|ID of an Azure Active Directory application
+|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application
|`AZURE_USERNAME`|a username (usually an email address)
|`AZURE_PASSWORD`|that user's password

Configuration is attempted in the above order. For example, if values for a client secret and certificate are both present, the client secret will be used.

+## Token caching
+
+Token caching is an `azidentity` feature that allows apps to:
+
+* Cache tokens in memory (default) or on disk (opt-in).
+* Improve resilience and performance.
+* Reduce the number of requests made to Microsoft Entra ID to obtain access tokens.
+
+For more details, see the [token caching documentation](https://aka.ms/azsdk/go/identity/caching).
+
## Troubleshooting

### Error Handling

Credentials return an `error` when they fail to authenticate or lack data they require to authenticate. For guidance on resolving errors from specific credential types, see the [troubleshooting guide](https://aka.ms/azsdk/go/identity/troubleshoot).

-For more details on handling specific Azure Active Directory errors please refer to the
-Azure Active Directory
-[error code documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes).
+For more details on handling specific Microsoft Entra errors, see the Microsoft Entra [error code documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes).
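A minimal sketch of this error-handling flow, assuming a module that imports `azidentity` (the ARM management scope is an arbitrary example; SDK clients normally call `GetToken` for you):

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err) // construction fails fast, e.g. on malformed configuration
	}
	// Calling GetToken directly surfaces authentication errors without
	// involving a service client.
	_, err = cred.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err) // the message names each credential in the chain that failed, and why
	}
}
```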
### Logging

diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
new file mode 100644
index 000000000..c0d660146
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
@@ -0,0 +1,70 @@
+## Token caching in the Azure Identity client module
+
+*Token caching* is a feature provided by the Azure Identity library that allows apps to:
+
+- Improve their resilience and performance.
+- Reduce the number of requests made to Microsoft Entra ID to obtain access tokens.
+- Reduce the number of times the user is prompted to authenticate.
+
+When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID. Obtaining that token involves sending a request to Entra ID and may also involve prompting the user. Entra ID then validates the credentials provided in the request and issues an access token.
+
+Token caching, via the Azure Identity library, allows the app to store this access token [in memory](#in-memory-token-caching), where it's accessible to the current process, or [on disk](#persistent-token-caching), where it can be accessed across application or process invocations. The token can then be retrieved quickly and easily the next time the app needs to access the same resource. The app can avoid making another request to Entra ID, which reduces network traffic and improves resilience. Additionally, in scenarios where the app is authenticating users, token caching also avoids prompting the user each time new tokens are requested.
+
+### In-memory token caching
+
+*In-memory token caching* is the default option provided by the Azure Identity library. This caching approach allows apps to store access tokens in memory. With in-memory token caching, the library first determines if a valid access token for the requested resource is already stored in memory. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. If a valid token isn't found, the library will automatically acquire a token by sending a request to Entra ID. The in-memory token cache provided by the Azure Identity library is thread-safe.
+
+**Note:** When Azure Identity library credentials are used with Azure service libraries (for example, Azure Blob Storage), the in-memory token caching is active in the `Pipeline` layer as well. All `TokenCredential` implementations are supported there, including custom implementations external to the Azure Identity library.
+
+#### Caching cannot be disabled
+
+As there are many levels of caching, it's not possible to disable in-memory caching. However, the in-memory cache may be cleared by creating a new credential instance.
+
+### Persistent token caching
+
+> Only azidentity v1.5.0-beta versions support persistent token caching
+
+*Persistent disk token caching* is an opt-in feature in the Azure Identity library. The feature allows apps to cache access tokens in an encrypted, persistent storage mechanism. As indicated in the following table, the storage mechanism differs across operating systems.
+
+| Operating system | Storage mechanism                     |
+|------------------|---------------------------------------|
+| Linux            | kernel key retention service (keyctl) |
+| macOS            | Keychain                              |
+| Windows          | DPAPI                                 |
+
+By default, the token cache protects any data it persists using the user data protection APIs available on the current platform.
+However, there are cases where no data protection is available, and applications may choose to allow storing the token cache in an unencrypted state by setting `TokenCachePersistenceOptions.AllowUnencryptedStorage` to `true`. This allows a credential to fall back to unencrypted storage if it can't encrypt the cache. This storage method is significantly less secure and isn't recommended. In addition, an unencrypted cache isn't protected to the current user alone, so anyone with access to the machine could read its contents.
+
+With persistent disk token caching enabled, the library first determines if a valid access token for the requested resource is already stored in the persistent cache. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. Additionally, the tokens are preserved across app runs, which:
+
+- Makes the app more resilient to failures.
+- Helps the app continue to function during an Entra ID outage or disruption.
+- Avoids having to prompt users to authenticate each time the process is restarted.
+
+>IMPORTANT! The token cache contains sensitive data and **MUST** be protected to prevent compromising accounts. All application decisions regarding the persistence of the token cache must consider that a breach of its content will fully compromise all the accounts it contains.
+
+#### Example code
+
+See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#pkg-overview) for code examples demonstrating how to configure persistent caching and access cached data.
+
+### Credentials supporting token caching
+
+The following table indicates the state of in-memory and persistent caching in each credential type.
+
+**Note:** In-memory caching is activated by default. Persistent token caching needs to be enabled as shown in [this example](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#example-package-PersistentCache).
+ +| Credential | In-memory token caching | Persistent token caching | +|--------------------------------|---------------------------------------------------------------------|--------------------------| +| `AzureCLICredential` | Not Supported | Not Supported | +| `AzureDeveloperCLICredential` | Not Supported | Not Supported | +| `ClientAssertionCredential` | Supported | Supported | +| `ClientCertificateCredential` | Supported | Supported | +| `ClientSecretCredential` | Supported | Supported | +| `DefaultAzureCredential` | Supported if the target credential in the default chain supports it | Not Supported | +| `DeviceCodeCredential` | Supported | Supported | +| `EnvironmentCredential` | Supported | Not Supported | +| `InteractiveBrowserCredential` | Supported | Supported | +| `ManagedIdentityCredential` | Supported | Not Supported | +| `OnBehalfOfCredential` | Supported | Supported | +| `UsernamePasswordCredential` | Supported | Supported | +| `WorkloadIdentityCredential` | Supported | Supported | diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md index fef099813..832c599eb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md @@ -8,7 +8,8 @@ This troubleshooting guide covers failure investigation techniques, common error - [Permission issues](#permission-issues) - [Find relevant information in errors](#find-relevant-information-in-errors) - [Enable and configure logging](#enable-and-configure-logging) -- [Troubleshoot AzureCliCredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) +- [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) +- [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues) - [Troubleshoot ClientCertificateCredential authentication issues](#troubleshoot-clientcertificatecredential-authentication-issues) - [Troubleshoot ClientSecretCredential authentication issues](#troubleshoot-clientsecretcredential-authentication-issues) - [Troubleshoot DefaultAzureCredential authentication issues](#troubleshoot-defaultazurecredential-authentication-issues) @@ -23,7 +24,7 @@ This troubleshooting guide covers failure investigation techniques, common error ## Handle azidentity errors -Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Azure Active Directory (Azure AD). Depending on the application, these errors may or may not be recoverable. +Any service client method that makes a request to the service may return an error due to authentication failure. This is because the credential authenticates on the first call to the service and on any subsequent call that needs to refresh an access token. Authentication errors include a description of the failure and possibly an error message from Microsoft Entra ID. Depending on the application, these errors may or may not be recoverable. 
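Because authentication errors surface on the first `GetToken` call, one way to inspect them is with `errors.As`. A minimal sketch, assuming the exported `azidentity.AuthenticationFailedError` type; `classifyAuthError` is a hypothetical helper and the Key Vault scope is an arbitrary example:

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// classifyAuthError shows how an app might inspect an authentication
// failure returned by any azidentity credential.
func classifyAuthError(ctx context.Context, cred azcore.TokenCredential) error {
	_, err := cred.GetToken(ctx, policy.TokenRequestOptions{
		Scopes: []string{"https://vault.azure.net/.default"},
	})
	var authErr *azidentity.AuthenticationFailedError
	if errors.As(err, &authErr) && authErr.RawResponse != nil {
		// The raw response from Microsoft Entra ID carries the AADSTS error
		// code and correlation ID described below.
		fmt.Println("token request failed with status:", authErr.RawResponse.StatusCode)
	}
	return err
}
```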
### Permission issues @@ -31,7 +32,7 @@ Service client errors with a status code of 401 or 403 often indicate that authe ## Find relevant information in errors -Authentication errors can include responses from Azure AD and often contain information helpful in diagnosis. Consider the following error message: +Authentication errors can include responses from Microsoft Entra ID and often contain information helpful in diagnosis. Consider the following error message: ``` ClientSecretCredential authentication failed @@ -57,9 +58,9 @@ This error contains several pieces of information: - __Failing Credential Type__: The type of credential that failed to authenticate. This can be helpful when diagnosing issues with chained credential types such as `DefaultAzureCredential` or `ChainedTokenCredential`. -- __Azure AD Error Code and Message__: The error code and message returned by Azure AD. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/reference-aadsts-error-codes#aadsts-error-codes) has more information on AADSTS error codes. +- __Microsoft Entra ID Error Code and Message__: The error code and message returned by Microsoft Entra ID. This can give insight into the specific reason the request failed. For instance, in this case authentication failed because the provided client secret is incorrect. [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/reference-error-codes#aadsts-error-codes) has more information on AADSTS error codes. -- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Azure AD failures. +- __Correlation ID and Timestamp__: The correlation ID and timestamp identify the request in server-side logs. This information can be useful to support engineers diagnosing unexpected Microsoft Entra failures. ### Enable and configure logging @@ -96,17 +97,17 @@ azlog.SetEvents(azidentity.EventAuthentication) | Error Code | Issue | Mitigation | |---|---|---| -|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| -|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| -|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. 
To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| +|AADSTS7000215|An invalid client secret was provided.|Ensure the secret provided to the credential constructor is valid. If unsure, create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS7000222|An expired client secret was provided.|Create a new client secret using the Azure portal. Details on creating a new client secret are in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-2-create-a-new-application-secret).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| ## Troubleshoot ClientCertificateCredential authentication issues | Error Code | Description | Mitigation | |---|---|---| -|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Azure AD documentation](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| -|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Azure AD instructions](https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| +|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal#option-1-upload-a-certificate).| +|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal).| ## Troubleshoot UsernamePasswordCredential authentication issues @@ -172,7 +173,7 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio |"no azure identity found for request clientID"|The application attempted to authenticate before an identity was assigned to its pod|Verify the pod is labeled correctly. 
This also occurs when a correctly labeled pod authenticates before the identity is ready. To prevent initialization races, configure NMI to set the Retry-After header in its responses as described in [Pod Identity documentation](https://azure.github.io/aad-pod-identity/docs/configure/feature_flags/#set-retry-after-header-in-nmi-response).

-## Troubleshoot AzureCliCredential authentication issues
+## Troubleshoot AzureCLICredential authentication issues

| Error Message |Description| Mitigation |
|---|---|---|
@@ -195,6 +196,29 @@ az account get-access-token --output json --resource https://management.core.win

> This command's output will contain an access token and SHOULD NOT BE SHARED, to avoid compromising account security.

+
+## Troubleshoot AzureDeveloperCLICredential authentication issues
+
+| Error Message |Description| Mitigation |
+|---|---|---|
+|Azure Developer CLI not found on path|The Azure Developer CLI isn't installed or couldn't be found.|Install the Azure Developer CLI and ensure it's on the application's path.|
+|Please run "azd auth login"|No account is logged into the Azure Developer CLI, or the login has expired.|Run `azd auth login` to authenticate, then retry.|
+
+#### Verify the Azure Developer CLI can obtain tokens
+
+You can manually verify that the Azure Developer CLI is properly authenticated and can obtain tokens. First, use the `config` command to verify the account that is currently logged in to the Azure Developer CLI.
+
+```sh
+azd config list
+```
+
+Once you've verified the Azure Developer CLI is using the correct account, you can validate that it's able to obtain tokens for this account.
+
+```sh
+azd auth token --output json --scope https://management.core.windows.net/.default
+```
+>Note that the output of this command will contain a valid access token, and SHOULD NOT BE SHARED, to avoid compromising account security.
+
 ## Troubleshoot `WorkloadIdentityCredential` authentication issues
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
index 47e77f88e..173ce2b3c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
@@ -2,5 +2,5 @@
   "AssetsRepo": "Azure/azure-sdk-assets",
   "AssetsRepoPrefixPath": "go",
   "TagPrefix": "go/azidentity",
-  "Tag": "go/azidentity_6225ab0470"
+  "Tag": "go/azidentity_db4a26f583"
 }
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go
new file mode 100644
index 000000000..ada4d6501
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go
@@ -0,0 +1,95 @@
+//go:build go1.18
+// +build go1.18
+
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package azidentity
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
+)
+
+var supportedAuthRecordVersions = []string{"1.0"}
+
+// authenticationRecord is non-secret account information about an authenticated user that user credentials such as
+// [DeviceCodeCredential] and [InteractiveBrowserCredential] can use to access previously cached authentication
+// data. Call these credentials' Authenticate method to get an authenticationRecord for a user.
+type authenticationRecord struct {
+	// Authority is the URL of the authority that issued the token.
+ Authority string `json:"authority"` + + // ClientID is the ID of the application that authenticated the user. + ClientID string `json:"clientId"` + + // HomeAccountID uniquely identifies the account. + HomeAccountID string `json:"homeAccountId"` + + // TenantID identifies the tenant in which the user authenticated. + TenantID string `json:"tenantId"` + + // Username is the user's preferred username. + Username string `json:"username"` + + // Version of the AuthenticationRecord. + Version string `json:"version"` +} + +// UnmarshalJSON implements json.Unmarshaler for AuthenticationRecord +func (a *authenticationRecord) UnmarshalJSON(b []byte) error { + // Default unmarshaling is fine but we want to return an error if the record's version isn't supported i.e., we + // want to inspect the unmarshalled values before deciding whether to return an error. Unmarshaling a formally + // different type enables this by assigning all the fields without recursing into this method. + type r authenticationRecord + err := json.Unmarshal(b, (*r)(a)) + if err != nil { + return err + } + if a.Version == "" { + return errors.New("AuthenticationRecord must have a version") + } + for _, v := range supportedAuthRecordVersions { + if a.Version == v { + return nil + } + } + return fmt.Errorf("unsupported AuthenticationRecord version %q. This module supports %v", a.Version, supportedAuthRecordVersions) +} + +// account returns the AuthenticationRecord as an MSAL Account. The account is zero-valued when the AuthenticationRecord is zero-valued. +func (a *authenticationRecord) account() public.Account { + return public.Account{ + Environment: a.Authority, + HomeAccountID: a.HomeAccountID, + PreferredUsername: a.Username, + } +} + +func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) { + u, err := url.Parse(ar.IDToken.Issuer) + if err != nil { + return authenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer) + } + tenant := ar.IDToken.TenantID + if tenant == "" { + tenant = strings.Trim(u.Path, "/") + } + username := ar.IDToken.PreferredUsername + if username == "" { + username = ar.IDToken.UPN + } + return authenticationRecord{ + Authority: fmt.Sprintf("%s://%s", u.Scheme, u.Host), + ClientID: ar.IDToken.Audience, + HomeAccountID: ar.Account.HomeAccountID, + TenantID: tenant, + Username: username, + Version: "1.0", + }, nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index 10b742ce1..c3bcfb56c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -15,12 +15,12 @@ import ( "net/http" "net/url" "os" - "regexp" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" ) @@ -41,6 +41,10 @@ const ( organizationsTenantID = "organizations" developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" defaultSuffix = "/.default" + + traceNamespace = "Microsoft.Entra" + traceOpGetToken = "GetToken" + traceOpAuthenticate = "Authenticate" ) var ( @@ -49,6 +53,9 @@ var ( 
errInvalidTenantID = errors.New("invalid tenantID. You can locate your tenantID by following the instructions listed here: https://learn.microsoft.com/partner-center/find-ids-and-domain-names") ) +// tokenCachePersistenceOptions contains options for persistent token caching +type tokenCachePersistenceOptions = internal.TokenCachePersistenceOptions + // setAuthorityHost initializes the authority host for credentials. Precedence is: // 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user // 2. value of AZURE_AUTHORITY_HOST @@ -109,29 +116,20 @@ func resolveTenant(defaultTenant, specified, credName string, additionalTenants return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified) } -// validTenantID return true is it receives a valid tenantID, returns false otherwise +func alphanumeric(r rune) bool { + return ('0' <= r && r <= '9') || ('a' <= r && r <= 'z') || ('A' <= r && r <= 'Z') +} + func validTenantID(tenantID string) bool { - match, err := regexp.MatchString("^[0-9a-zA-Z-.]+$", tenantID) - if err != nil { - return false + for _, r := range tenantID { + if !(alphanumeric(r) || r == '.' || r == '-') { + return false + } } - return match + return true } -func newPipelineAdapter(opts *azcore.ClientOptions) pipelineAdapter { - pl := runtime.NewPipeline(component, version, runtime.PipelineOptions{}, opts) - return pipelineAdapter{pl: pl} -} - -type pipelineAdapter struct { - pl runtime.Pipeline -} - -func (p pipelineAdapter) CloseIdleConnections() { - // do nothing -} - -func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { +func doForClient(client *azcore.Client, r *http.Request) (*http.Response, error) { req, err := runtime.NewRequest(r.Context(), r.Method, r.URL.String()) if err != nil { return nil, err @@ -153,7 +151,18 @@ func (p pipelineAdapter) Do(r *http.Request) (*http.Response, error) { return nil, err } } - resp, err := p.pl.Do(req) + + // copy headers to the new request, ignoring any for which the new request has a value + h := req.Raw().Header + for key, vals := range r.Header { + if _, has := h[key]; !has { + for _, val := range vals { + h.Add(key, val) + } + } + } + + resp, err := client.Pipeline().Do(req) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go index 55a0d6543..43577ab3c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -14,7 +14,6 @@ import ( "fmt" "os" "os/exec" - "regexp" "runtime" "strings" "sync" @@ -25,13 +24,9 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/internal/log" ) -const ( - credNameAzureCLI = "AzureCLICredential" - timeoutCLIRequest = 10 * time.Second -) +const credNameAzureCLI = "AzureCLICredential" -// used by tests to fake invoking the CLI -type azureCLITokenProvider func(ctx context.Context, resource string, tenantID string) ([]byte, error) +type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscription string) ([]byte, error) // AzureCLICredentialOptions contains optional parameters for AzureCLICredential. 
type AzureCLICredentialOptions struct { @@ -39,17 +34,25 @@ type AzureCLICredentialOptions struct { // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the // logged in account can access. AdditionallyAllowedTenants []string + + // subscription is the name or ID of a subscription. Set this to acquire tokens for an account other + // than the Azure CLI's current account. + subscription string + // TenantID identifies the tenant the credential should authenticate in. // Defaults to the CLI's default tenant, which is typically the home tenant of the logged in user. TenantID string - tokenProvider azureCLITokenProvider + // inDefaultChain is true when the credential is part of DefaultAzureCredential + inDefaultChain bool + // tokenProvider is used by tests to fake invoking az + tokenProvider azTokenProvider } // init returns an instance of AzureCLICredentialOptions initialized with default values. func (o *AzureCLICredentialOptions) init() { if o.tokenProvider == nil { - o.tokenProvider = defaultTokenProvider + o.tokenProvider = defaultAzTokenProvider } } @@ -65,6 +68,14 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent if options != nil { cp = *options } + for _, r := range cp.subscription { + if !(alphanumeric(r) || r == '-' || r == '_' || r == ' ' || r == '.') { + return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.subscription) + } + } + if cp.TenantID != "" && !validTenantID(cp.TenantID) { + return nil, errInvalidTenantID + } cp.init() cp.AdditionallyAllowedTenants = resolveAdditionalTenants(cp.AdditionallyAllowedTenants) return &AzureCLICredential{mu: &sync.Mutex{}, opts: cp}, nil @@ -73,50 +84,51 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent // GetToken requests a token from the Azure CLI. This credential doesn't cache tokens, so every call invokes the CLI. // This method is called automatically by Azure SDK clients. 
func (c *AzureCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + at := azcore.AccessToken{} if len(opts.Scopes) != 1 { - return azcore.AccessToken{}, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") + return at, errors.New(credNameAzureCLI + ": GetToken() requires exactly one scope") + } + if !validScope(opts.Scopes[0]) { + return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureCLI, opts.Scopes[0]) } tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureCLI, c.opts.AdditionallyAllowedTenants) if err != nil { - return azcore.AccessToken{}, err + return at, err } - // pass the CLI an AAD v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes - opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} c.mu.Lock() defer c.mu.Unlock() - b, err := c.opts.tokenProvider(ctx, opts.Scopes[0], tenant) - if err != nil { - return azcore.AccessToken{}, err + b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant, c.opts.subscription) + if err == nil { + at, err = c.createAccessToken(b) } - at, err := c.createAccessToken(b) if err != nil { - return azcore.AccessToken{}, err + err = unavailableIfInChain(err, c.opts.inDefaultChain) + return at, err } msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureCLI, strings.Join(opts.Scopes, ", ")) log.Write(EventAuthentication, msg) return at, nil } -var defaultTokenProvider azureCLITokenProvider = func(ctx context.Context, resource string, tenantID string) ([]byte, error) { - match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) - if err != nil { - return nil, err - } - if !match { - return nil, fmt.Errorf(`%s: unexpected scope "%s". Only alphanumeric characters and ".", ";", "-", and "/" are allowed`, credNameAzureCLI, resource) - } - +// defaultAzTokenProvider invokes the Azure CLI to acquire a token. It assumes +// callers have verified that all string arguments are safe to pass to the CLI. +var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes []string, tenantID, subscription string) ([]byte, error) { + // pass the CLI a Microsoft Entra ID v1 resource because we don't know which CLI version is installed and older ones don't support v2 scopes + resource := strings.TrimSuffix(scopes[0], defaultSuffix) // set a default timeout for this authentication iff the application hasn't done so already var cancel context.CancelFunc if _, hasDeadline := ctx.Deadline(); !hasDeadline { - ctx, cancel = context.WithTimeout(ctx, timeoutCLIRequest) + ctx, cancel = context.WithTimeout(ctx, cliTimeout) defer cancel() } - commandLine := "az account get-access-token -o json --resource " + resource if tenantID != "" { commandLine += " --tenant " + tenantID } + if subscription != "" { + // subscription needs quotes because it may contain spaces + commandLine += ` --subscription "` + subscription + `"` + } var cliCmd *exec.Cmd if runtime.GOOS == "windows" { dir := os.Getenv("SYSTEMROOT") diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go new file mode 100644 index 000000000..cbe7c4c2d --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go @@ -0,0 +1,169 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. 
All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "os" + "os/exec" + "runtime" + "strings" + "sync" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/internal/log" +) + +const credNameAzureDeveloperCLI = "AzureDeveloperCLICredential" + +type azdTokenProvider func(ctx context.Context, scopes []string, tenant string) ([]byte, error) + +// AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential. +type AzureDeveloperCLICredentialOptions struct { + // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition + // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the + // logged in account can access. + AdditionallyAllowedTenants []string + + // TenantID identifies the tenant the credential should authenticate in. Defaults to the azd environment, + // which is the tenant of the selected Azure subscription. + TenantID string + + // inDefaultChain is true when the credential is part of DefaultAzureCredential + inDefaultChain bool + // tokenProvider is used by tests to fake invoking azd + tokenProvider azdTokenProvider +} + +// AzureDeveloperCLICredential authenticates as the identity logged in to the [Azure Developer CLI]. +// +// [Azure Developer CLI]: https://learn.microsoft.com/azure/developer/azure-developer-cli/overview +type AzureDeveloperCLICredential struct { + mu *sync.Mutex + opts AzureDeveloperCLICredentialOptions +} + +// NewAzureDeveloperCLICredential constructs an AzureDeveloperCLICredential. Pass nil to accept default options. +func NewAzureDeveloperCLICredential(options *AzureDeveloperCLICredentialOptions) (*AzureDeveloperCLICredential, error) { + cp := AzureDeveloperCLICredentialOptions{} + if options != nil { + cp = *options + } + if cp.TenantID != "" && !validTenantID(cp.TenantID) { + return nil, errInvalidTenantID + } + if cp.tokenProvider == nil { + cp.tokenProvider = defaultAzdTokenProvider + } + return &AzureDeveloperCLICredential{mu: &sync.Mutex{}, opts: cp}, nil +} + +// GetToken requests a token from the Azure Developer CLI. This credential doesn't cache tokens, so every call invokes azd. +// This method is called automatically by Azure SDK clients. 
+func (c *AzureDeveloperCLICredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
+	at := azcore.AccessToken{}
+	if len(opts.Scopes) == 0 {
+		return at, errors.New(credNameAzureDeveloperCLI + ": GetToken() requires at least one scope")
+	}
+	for _, scope := range opts.Scopes {
+		if !validScope(scope) {
+			return at, fmt.Errorf("%s.GetToken(): invalid scope %q", credNameAzureDeveloperCLI, scope)
+		}
+	}
+	tenant, err := resolveTenant(c.opts.TenantID, opts.TenantID, credNameAzureDeveloperCLI, c.opts.AdditionallyAllowedTenants)
+	if err != nil {
+		return at, err
+	}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	b, err := c.opts.tokenProvider(ctx, opts.Scopes, tenant)
+	if err == nil {
+		at, err = c.createAccessToken(b)
+	}
+	if err != nil {
+		err = unavailableIfInChain(err, c.opts.inDefaultChain)
+		return at, err
+	}
+	msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", credNameAzureDeveloperCLI, strings.Join(opts.Scopes, ", "))
+	log.Write(EventAuthentication, msg)
+	return at, nil
+}
+
+// defaultAzdTokenProvider invokes the Azure Developer CLI to acquire a token. It assumes
+// callers have verified that all string arguments are safe to pass to the CLI.
+var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes []string, tenant string) ([]byte, error) {
+	// set a default timeout for this authentication iff the application hasn't done so already
+	var cancel context.CancelFunc
+	if _, hasDeadline := ctx.Deadline(); !hasDeadline {
+		ctx, cancel = context.WithTimeout(ctx, cliTimeout)
+		defer cancel()
+	}
+	commandLine := "azd auth token -o json"
+	if tenant != "" {
+		commandLine += " --tenant-id " + tenant
+	}
+	for _, scope := range scopes {
+		commandLine += " --scope " + scope
+	}
+	var cliCmd *exec.Cmd
+	if runtime.GOOS == "windows" {
+		dir := os.Getenv("SYSTEMROOT")
+		if dir == "" {
+			return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, "environment variable 'SYSTEMROOT' has no value")
+		}
+		cliCmd = exec.CommandContext(ctx, "cmd.exe", "/c", commandLine)
+		cliCmd.Dir = dir
+	} else {
+		cliCmd = exec.CommandContext(ctx, "/bin/sh", "-c", commandLine)
+		cliCmd.Dir = "/bin"
+	}
+	cliCmd.Env = os.Environ()
+	var stderr bytes.Buffer
+	cliCmd.Stderr = &stderr
+	output, err := cliCmd.Output()
+	if err != nil {
+		msg := stderr.String()
+		var exErr *exec.ExitError
+		if (errors.As(err, &exErr) && exErr.ExitCode() == 127) || strings.HasPrefix(msg, "'azd' is not recognized") {
+			msg = "Azure Developer CLI not found on path"
+		} else if strings.Contains(msg, "azd auth login") {
+			msg = `please run "azd auth login" from a command prompt to authenticate before using this credential`
+		}
+		if msg == "" {
+			msg = err.Error()
+		}
+		return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, msg)
+	}
+	return output, nil
+}
+
+func (c *AzureDeveloperCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) {
+	t := struct {
+		AccessToken string `json:"token"`
+		ExpiresOn   string `json:"expiresOn"`
+	}{}
+	err := json.Unmarshal(tk, &t)
+	if err != nil {
+		return azcore.AccessToken{}, err
+	}
+	exp, err := time.Parse("2006-01-02T15:04:05Z", t.ExpiresOn)
+	if err != nil {
+		return azcore.AccessToken{}, fmt.Errorf("error parsing token expiration time %q: %v", t.ExpiresOn, err)
+	}
+	return azcore.AccessToken{
+		ExpiresOn: exp.UTC(),
+		Token:     t.AccessToken,
+	}, nil
+}
+
+var _ azcore.TokenCredential = (*AzureDeveloperCLICredential)(nil)
diff --git
a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index 9002ea0b0..d077682c5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -25,6 +25,7 @@ stages: - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml parameters: RunLiveTests: true + UsePipelineProxy: false ServiceDirectory: 'azidentity' CloudConfig: Public: diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go index 303d5fc09..fc3df68eb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -12,6 +12,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -20,9 +21,9 @@ const credNameAssertion = "ClientAssertionCredential" // ClientAssertionCredential authenticates an application with assertions provided by a callback function. // This credential is for advanced scenarios. [ClientCertificateCredential] has a more convenient API for // the most common assertion scenario, authenticating a service principal with a certificate. See -// [Azure AD documentation] for details of the assertion format. +// [Microsoft Entra ID documentation] for details of the assertion format. // -// [Azure AD documentation]: https://docs.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format +// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/active-directory-certificate-credentials#assertion-format type ClientAssertionCredential struct { client *confidentialClient } @@ -35,11 +36,15 @@ type ClientAssertionCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. 
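A minimal sketch of the constructor described above; `signAssertion` stands in for whatever application-specific code produces a signed JWT for the app registration, and the IDs are placeholders:

```go
package example

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// newAssertionCredential wires an application-supplied callback into
// ClientAssertionCredential.
func newAssertionCredential(signAssertion func(context.Context) (string, error)) (*azidentity.ClientAssertionCredential, error) {
	return azidentity.NewClientAssertionCredential(
		"<tenant ID>",
		"<client ID>",
		signAssertion, // must be thread safe; invoked whenever a new token is needed
		nil,           // nil accepts default options
	)
}
```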
@@ -56,9 +61,10 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c }, ) msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, } c, err := newConfidentialClient(tenantID, clientID, credNameAssertion, cred, msalOpts) if err != nil { @@ -67,9 +73,13 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c return &ClientAssertionCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *ClientAssertionCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameAssertion+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*ClientAssertionCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go index d3300e305..607533f48 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" "golang.org/x/crypto/pkcs12" ) @@ -29,15 +30,20 @@ type ClientCertificateCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // SendCertificateChain controls whether the credential sends the public certificate chain in the x5c // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. // Defaults to False. SendCertificateChain bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // ClientCertificateCredential authenticates a service principal with a certificate. 
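A minimal sketch of building this credential with `ParseCertificates` (documented later in this file); the path and IDs are placeholders:

```go
package example

import (
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// newCertCredential loads a PEM or PKCS12 file containing a certificate and
// private key, then constructs the credential from its contents.
func newCertCredential(certPath string) (*azidentity.ClientCertificateCredential, error) {
	data, err := os.ReadFile(certPath)
	if err != nil {
		return nil, err
	}
	// nil password suits an unencrypted file; pass the password bytes otherwise
	certs, key, err := azidentity.ParseCertificates(data, nil)
	if err != nil {
		return nil, err
	}
	return azidentity.NewClientCertificateCredential("<tenant ID>", "<client ID>", certs, key, nil)
}
```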
@@ -58,10 +64,11 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x return nil, err } msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - SendX5C: options.SendCertificateChain, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + SendX5C: options.SendCertificateChain, + tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, } c, err := newConfidentialClient(tenantID, clientID, credNameCert, cred, msalOpts) if err != nil { @@ -70,9 +77,13 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x return &ClientCertificateCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *ClientCertificateCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameCert+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } // ParseCertificates loads certificates and a private key, in PEM or PKCS12 format, for use with NewClientCertificateCredential. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go index d2ff7582b..9e6772e9b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -24,11 +25,15 @@ type ClientSecretCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // ClientSecretCredential authenticates an application with a client secret. 
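A minimal sketch of the constructor; it reads the same environment variables listed in the README's service-principal table, though any source for the IDs and secret works:

```go
package example

import (
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

// newSecretCredential authenticates a service principal with a client secret.
func newSecretCredential() (*azidentity.ClientSecretCredential, error) {
	return azidentity.NewClientSecretCredential(
		os.Getenv("AZURE_TENANT_ID"),
		os.Getenv("AZURE_CLIENT_ID"),
		os.Getenv("AZURE_CLIENT_SECRET"),
		nil, // nil accepts default options
	)
}
```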
@@ -46,20 +51,25 @@ func NewClientSecretCredential(tenantID string, clientID string, clientSecret st return nil, err } msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, } c, err := newConfidentialClient(tenantID, clientID, credNameSecret, cred, msalOpts) if err != nil { return nil, err } - return &ClientSecretCredential{c}, nil + return &ClientSecretCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *ClientSecretCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameSecret+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*ClientSecretCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go index 4853a9a00..854267bdb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -10,6 +10,7 @@ import ( "context" "errors" "fmt" + "net/http" "os" "strings" "sync" @@ -17,6 +18,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -28,6 +30,7 @@ type confidentialClientOptions struct { // Assertion for on-behalf-of authentication Assertion string DisableInstanceDiscovery, SendX5C bool + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // confidentialClient wraps the MSAL confidential client @@ -40,6 +43,7 @@ type confidentialClient struct { name string opts confidentialClientOptions region string + azClient *azcore.Client } func newConfidentialClient(tenantID, clientID, name string, cred confidential.Credential, opts confidentialClientOptions) (*confidentialClient, error) { @@ -50,6 +54,14 @@ func newConfidentialClient(tenantID, clientID, name string, cred confidential.Cr if err != nil { return nil, err } + client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ + Tracing: runtime.TracingOptions{ + Namespace: traceNamespace, + }, + }, &opts.ClientOptions) + if err != nil { + return nil, err + } opts.AdditionallyAllowedTenants = resolveAdditionalTenants(opts.AdditionallyAllowedTenants) return &confidentialClient{ caeMu: &sync.Mutex{}, @@ -62,6 +74,7 @@ func newConfidentialClient(tenantID, clientID, name string, cred confidential.Cr opts: opts, region: os.Getenv(azureRegionalAuthorityName), tenantID: tenantID, + 
azClient: client, }, nil } @@ -132,10 +145,15 @@ func (c *confidentialClient) client(ctx context.Context, tro policy.TokenRequest } func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClient, error) { + cache, err := internal.NewCache(c.opts.tokenCachePersistenceOptions, enableCAE) + if err != nil { + return nil, err + } authority := runtime.JoinPaths(c.host, c.tenantID) o := []confidential.Option{ confidential.WithAzureRegion(c.region), - confidential.WithHTTPClient(newPipelineAdapter(&c.opts.ClientOptions)), + confidential.WithCache(cache), + confidential.WithHTTPClient(c), } if enableCAE { o = append(o, confidential.WithClientCapabilities(cp1)) @@ -149,8 +167,18 @@ func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClie return confidential.New(authority, c.clientID, c.cred, o...) } -// resolveTenant returns the correct tenant for a token request given the client's +// resolveTenant returns the correct WithTenantID() argument for a token request given the client's // configuration, or an error when that configuration doesn't allow the specified tenant func (c *confidentialClient) resolveTenant(specified string) (string, error) { return resolveTenant(c.tenantID, specified, c.name, c.opts.AdditionallyAllowedTenants) } + +// these methods satisfy the MSAL ops.HTTPClient interface + +func (c *confidentialClient) CloseIdleConnections() { + // do nothing +} + +func (c *confidentialClient) Do(r *http.Request) (*http.Response, error) { + return doForClient(c.azClient, r) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 7647c60b1..35aeef867 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -30,7 +30,7 @@ type DefaultAzureCredentialOptions struct { // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS. AdditionallyAllowedTenants []string // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool @@ -49,6 +49,7 @@ type DefaultAzureCredentialOptions struct { // more control over its configuration. // - [ManagedIdentityCredential] // - [AzureCLICredential] +// - [AzureDeveloperCLICredential] // // Consult the documentation for these credential types for more information on how they authenticate. 
// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for @@ -117,6 +118,17 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err}) } + azdCred, err := NewAzureDeveloperCLICredential(&AzureDeveloperCLICredentialOptions{ + AdditionallyAllowedTenants: additionalTenants, + TenantID: options.TenantID, + }) + if err == nil { + creds = append(creds, azdCred) + } else { + errorMessages = append(errorMessages, credNameAzureDeveloperCLI+": "+err.Error()) + creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureDeveloperCLI, err: err}) + } + if len(errorMessages) > 0 { log.Writef(EventAuthentication, "NewDefaultAzureCredential failed to initialize some credentials:\n\t%s", strings.Join(errorMessages, "\n\t")) } @@ -129,7 +141,7 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default return &DefaultAzureCredential{chain: chain}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *DefaultAzureCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { return c.chain.GetToken(ctx, opts) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go new file mode 100644 index 000000000..d8b952f53 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/developer_credential_util.go @@ -0,0 +1,38 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package azidentity + +import ( + "errors" + "time" +) + +// cliTimeout is the default timeout for authentication attempts via CLI tools +const cliTimeout = 10 * time.Second + +// unavailableIfInChain returns err or, if the credential was invoked by DefaultAzureCredential, a +// credentialUnavailableError having the same message. This ensures DefaultAzureCredential will try +// the next credential in its chain (another developer credential). +func unavailableIfInChain(err error, inDefaultChain bool) error { + if err != nil && inDefaultChain { + var unavailableErr *credentialUnavailableError + if !errors.As(err, &unavailableErr) { + err = newCredentialUnavailableError(credNameAzureDeveloperCLI, err.Error()) + } + } + return err +} + +// validScope is for credentials authenticating via external tools. The authority validates scopes for all other credentials. +func validScope(scope string) bool { + for _, r := range scope { + if !(alphanumeric(r) || r == '.' 
|| r == '-' || r == '_' || r == '/' || r == ':') { + return false + } + } + return true +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index d245c269a..1b7a28370 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -12,6 +12,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameDeviceCode = "DeviceCodeCredential" @@ -23,19 +24,34 @@ type DeviceCodeCredentialOptions struct { // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. AdditionallyAllowedTenants []string + + // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // to enable the credential to use data from a previous authentication. + authenticationRecord authenticationRecord + // ClientID is the ID of the application users will authenticate to. // Defaults to the ID of an Azure development application. ClientID string + + // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, [DeviceCodeCredential.GetToken] will return [ErrAuthenticationRequired] when user + // interaction is necessary to acquire a token. + disableAutomaticAuthentication bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + + // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the // "organizations" tenant, which can authenticate work and school accounts. Required for single-tenant // applications. TenantID string + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions + // UserPrompt controls how the credential presents authentication instructions. The credential calls // this function with authentication details when it receives a device code. By default, the credential // prints these details to stdout. @@ -63,14 +79,14 @@ type DeviceCodeMessage struct { UserCode string `json:"user_code"` // VerificationURL is the URL at which the user must authenticate. VerificationURL string `json:"verification_uri"` - // Message is user instruction from Azure Active Directory. + // Message is user instruction from Microsoft Entra ID. Message string `json:"message"` } // DeviceCodeCredential acquires tokens for a user via the device code flow, which has the -// user browse to an Azure Active Directory URL, enter a code, and authenticate. 
It's useful +// user browse to a Microsoft Entra URL, enter a code, and authenticate. It's useful // for authenticating a user in an environment without a web browser, such as an SSH session. -// If a web browser is available, InteractiveBrowserCredential is more convenient because it +// If a web browser is available, [InteractiveBrowserCredential] is more convenient because it // automatically opens a browser to the login page. type DeviceCodeCredential struct { client *publicClient @@ -84,10 +100,13 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC } cp.init() msalOpts := publicClientOptions{ - AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, - ClientOptions: cp.ClientOptions, - DeviceCodePrompt: cp.UserPrompt, - DisableInstanceDiscovery: cp.DisableInstanceDiscovery, + AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + ClientOptions: cp.ClientOptions, + DeviceCodePrompt: cp.UserPrompt, + DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableInstanceDiscovery: cp.DisableInstanceDiscovery, + Record: cp.authenticationRecord, + TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, } c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameDeviceCode, msalOpts) if err != nil { @@ -97,10 +116,23 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC return &DeviceCodeCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. It will begin the device code flow and poll until the user completes authentication. +// Authenticate a user via the device code flow. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. +func (c *DeviceCodeCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.Authenticate(ctx, opts) + return tk, err +} + +// GetToken requests an access token from Microsoft Entra ID. It will begin the device code flow and poll until the user completes authentication. // This method is called automatically by Azure SDK clients. func (c *DeviceCodeCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*DeviceCodeCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go index 7ecd928e0..42f84875e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -25,7 +25,7 @@ type EnvironmentCredentialOptions struct { azcore.ClientOptions // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. 
It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool @@ -156,7 +156,7 @@ func NewEnvironmentCredential(options *EnvironmentCredentialOptions) (*Environme return nil, errors.New("incomplete environment variable configuration. Only AZURE_TENANT_ID and AZURE_CLIENT_ID are set") } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *EnvironmentCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { return c.cred.GetToken(ctx, opts) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go index e1a21e003..335d2b7dc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -18,6 +18,10 @@ import ( msal "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" ) +// errAuthenticationRequired indicates a credential's Authenticate method must be called to acquire a token +// because user interaction is required and the credential is configured not to automatically prompt the user. +var errAuthenticationRequired error = &credentialUnavailableError{"can't acquire a token without user interaction. Call Authenticate to interactively authenticate a user"} + // getResponseFromError retrieves the response carried by // an AuthenticationFailedError or MSAL CallErr, if any func getResponseFromError(err error) *http.Response { @@ -53,7 +57,13 @@ func (e *AuthenticationFailedError) Error() string { } msg := &bytes.Buffer{} fmt.Fprintf(msg, e.credType+" authentication failed\n") - fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + if e.RawResponse.Request != nil { + fmt.Fprintf(msg, "%s %s://%s%s\n", e.RawResponse.Request.Method, e.RawResponse.Request.URL.Scheme, e.RawResponse.Request.URL.Host, e.RawResponse.Request.URL.Path) + } else { + // this happens when the response is created from a custom HTTP transporter, + // which doesn't guarantee to bind the original request to the response + fmt.Fprintln(msg, "Request information not available") + } fmt.Fprintln(msg, "--------------------------------------------------------------------------------") fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) fmt.Fprintln(msg, "--------------------------------------------------------------------------------") @@ -74,6 +84,8 @@ func (e *AuthenticationFailedError) Error() string { switch e.credType { case credNameAzureCLI: anchor = "azure-cli" + case credNameAzureDeveloperCLI: + anchor = "azd" case credNameCert: anchor = "client-cert" case credNameSecret: diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work new file mode 100644 index 000000000..04ea962b4 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work @@ -0,0 +1,6 @@ +go 1.18 + +use ( + . 
+ ./cache +) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum new file mode 100644 index 000000000..7cd86b001 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum @@ -0,0 +1,39 @@ +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 h1:ODs3brnqQM99Tq1PffODpAViYv3Bf8zOg464MU7p5ew= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= +golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/tools 
v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index 08f3efbf3..bd8296983 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameBrowser = "InteractiveBrowserCredential" @@ -22,26 +23,40 @@ type InteractiveBrowserCredentialOptions struct { // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. AdditionallyAllowedTenants []string + + // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // to enable the credential to use data from a previous authentication. + authenticationRecord authenticationRecord + // ClientID is the ID of the application users will authenticate to. // Defaults to the ID of an Azure development application. ClientID string + // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, [InteractiveBrowserCredential.GetToken] will return [ErrAuthenticationRequired] when + // user interaction is necessary to acquire a token. + disableAutomaticAuthentication bool + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool // LoginHint pre-populates the account prompt with a username. Users may choose to authenticate a different account. LoginHint string - // RedirectURL is the URL Azure Active Directory will redirect to with the access token. This is required + + // RedirectURL is the URL Microsoft Entra ID will redirect to with the access token. This is required // only when setting ClientID, and must match a redirect URI in the application's registration. // Applications which have registered "http://localhost" as a redirect URI need not set this option. RedirectURL string - // TenantID is the Azure Active Directory tenant the credential authenticates in. Defaults to the + // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the // "organizations" tenant, which can authenticate work and school accounts. TenantID string + + // tokenCachePersistenceOptions enables persistent token caching when not nil. 
+ tokenCachePersistenceOptions *tokenCachePersistenceOptions } func (o *InteractiveBrowserCredentialOptions) init() { @@ -66,10 +81,14 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption } cp.init() msalOpts := publicClientOptions{ - ClientOptions: cp.ClientOptions, - DisableInstanceDiscovery: cp.DisableInstanceDiscovery, - LoginHint: cp.LoginHint, - RedirectURL: cp.RedirectURL, + AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + ClientOptions: cp.ClientOptions, + DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableInstanceDiscovery: cp.DisableInstanceDiscovery, + LoginHint: cp.LoginHint, + Record: cp.authenticationRecord, + RedirectURL: cp.RedirectURL, + TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, } c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameBrowser, msalOpts) if err != nil { @@ -78,9 +97,22 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption return &InteractiveBrowserCredential{client: c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// Authenticate a user via the default browser. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. +func (c *InteractiveBrowserCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.Authenticate(ctx, opts) + return tk, err +} + +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *InteractiveBrowserCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*InteractiveBrowserCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go new file mode 100644 index 000000000..b1b4d5c8b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go @@ -0,0 +1,18 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +// TokenCachePersistenceOptions contains options for persistent token caching +type TokenCachePersistenceOptions struct { + // AllowUnencryptedStorage controls whether the cache should fall back to storing its data in plain text + // when encryption isn't possible. Setting this true doesn't disable encryption. The cache always attempts + // encryption before falling back to plaintext storage. + AllowUnencryptedStorage bool + + // Name identifies the cache. Set this to isolate data from other applications. 
+ Name string +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go new file mode 100644 index 000000000..c1498b464 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go @@ -0,0 +1,31 @@ +//go:build go1.18 +// +build go1.18 + +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +import ( + "errors" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" +) + +var errMissingImport = errors.New("import github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache to enable persistent caching") + +// NewCache constructs a persistent token cache when "o" isn't nil. Applications that intend to +// use a persistent cache must first import the cache module, which will replace this function +// with a platform-specific implementation. +var NewCache = func(o *TokenCachePersistenceOptions, enableCAE bool) (cache.ExportReplace, error) { + if o == nil { + return nil, nil + } + return nil, errMissingImport +} + +// CacheFilePath returns the path to the cache file for the given name. +// Defining it in this package makes it available to azidentity tests. +var CacheFilePath = func(name string) (string, error) { + return "", errMissingImport +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index fdc3c1f67..7c25cb8bd 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -28,12 +28,14 @@ import ( const ( arcIMDSEndpoint = "IMDS_ENDPOINT" + defaultIdentityClientID = "DEFAULT_IDENTITY_CLIENT_ID" identityEndpoint = "IDENTITY_ENDPOINT" identityHeader = "IDENTITY_HEADER" identityServerThumbprint = "IDENTITY_SERVER_THUMBPRINT" headerMetadata = "Metadata" imdsEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" msiEndpoint = "MSI_ENDPOINT" + msiSecret = "MSI_SECRET" imdsAPIVersion = "2018-02-01" azureArcAPIVersion = "2019-08-15" serviceFabricAPIVersion = "2019-07-01-preview" @@ -47,6 +49,7 @@ type msiType int const ( msiTypeAppService msiType = iota msiTypeAzureArc + msiTypeAzureML msiTypeCloudShell msiTypeIMDS msiTypeServiceFabric @@ -55,7 +58,7 @@ const ( // managedIdentityClient provides the base for authenticating in managed identity environments // This type includes an runtime.Pipeline and TokenCredentialOptions. 
type managedIdentityClient struct { - pipeline runtime.Pipeline + azClient *azcore.Client msiType msiType endpoint string id ManagedIDKind @@ -135,13 +138,27 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag c.msiType = msiTypeAzureArc } } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { - env = "Cloud Shell" c.endpoint = endpoint - c.msiType = msiTypeCloudShell + if _, ok := os.LookupEnv(msiSecret); ok { + env = "Azure ML" + c.msiType = msiTypeAzureML + } else { + env = "Cloud Shell" + c.msiType = msiTypeCloudShell + } } else { setIMDSRetryOptionDefaults(&cp.Retry) } - c.pipeline = runtime.NewPipeline(component, version, runtime.PipelineOptions{}, &cp) + + client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ + Tracing: runtime.TracingOptions{ + Namespace: traceNamespace, + }, + }, &cp) + if err != nil { + return nil, err + } + c.azClient = client if log.Should(EventAuthentication) { log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env) @@ -168,7 +185,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi return azcore.AccessToken{}, err } - resp, err := c.pipeline.Do(msg) + resp, err := c.azClient.Pipeline().Do(msg) if err != nil { return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) } @@ -247,6 +264,8 @@ func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id Manage return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err) } return c.createAzureArcAuthRequest(ctx, id, scopes, key) + case msiTypeAzureML: + return c.createAzureMLAuthRequest(ctx, id, scopes) case msiTypeServiceFabric: return c.createServiceFabricAuthRequest(ctx, id, scopes) case msiTypeCloudShell: @@ -296,6 +315,29 @@ func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, return request, nil } +func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { + request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) + if err != nil { + return nil, err + } + request.Raw().Header.Set("secret", os.Getenv(msiSecret)) + q := request.Raw().URL.Query() + q.Add("api-version", "2017-09-01") + q.Add("resource", strings.Join(scopes, " ")) + q.Add("clientid", os.Getenv(defaultIdentityClientID)) + if id != nil { + if id.idKind() == miResourceID { + log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID") + q.Set("clientid", "") + q.Set(qpResID, id.String()) + } else { + q.Set("clientid", id.String()) + } + } + request.Raw().URL.RawQuery = q.Encode() + return request, nil +} + func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { request, err := runtime.NewRequest(ctx, http.MethodGet, c.endpoint) if err != nil { @@ -330,7 +372,7 @@ func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resour q.Add("resource", strings.Join(resources, " ")) request.Raw().URL.RawQuery = q.Encode() // send the initial request to get the short-lived secret key - response, err := c.pipeline.Do(request) + response, err := c.azClient.Pipeline().Do(request) if err != nil { return "", err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go 
b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go index 35c5e6725..dcd278bef 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -13,6 +13,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -67,8 +68,8 @@ type ManagedIdentityCredentialOptions struct { // ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. // This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a -// user-assigned identity. See Azure Active Directory documentation for more information about managed identities: -// https://docs.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview +// user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities: +// https://learn.microsoft.com/azure/active-directory/managed-identities-azure-resources/overview type ManagedIdentityCredential struct { client *confidentialClient mic *managedIdentityClient @@ -92,7 +93,9 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M clientID = options.ID.String() } // similarly, it's okay to give MSAL an incorrect tenant because MSAL won't use the value - c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{}) + c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{ + ClientOptions: options.ClientOptions, + }) if err != nil { return nil, err } @@ -101,13 +104,18 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M // GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + if len(opts.Scopes) != 1 { - err := fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity) + err = fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity) return azcore.AccessToken{}, err } - // managed identity endpoints require an AADv1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here + // managed identity endpoints require a Microsoft Entra ID v1 resource (i.e. 
token audience), not a v2 scope, so we remove "/.default" here opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} - return c.client.GetToken(ctx, opts) + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go index 2b360b681..5e67cf021 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/on_behalf_of_credential.go @@ -13,6 +13,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) @@ -21,9 +22,9 @@ const credNameOBO = "OnBehalfOfCredential" // OnBehalfOfCredential authenticates a service principal via the on-behalf-of flow. This is typically used by // middle-tier services that authorize requests to other services with a delegated user identity. Because this // is not an interactive authentication flow, an application using it must have admin consent for any delegated -// permissions before requesting tokens for them. See [Azure Active Directory documentation] for more details. +// permissions before requesting tokens for them. See [Microsoft Entra ID documentation] for more details. // -// [Azure Active Directory documentation]: https://docs.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow +// [Microsoft Entra ID documentation]: https://learn.microsoft.com/azure/active-directory/develop/v2-oauth2-on-behalf-of-flow type OnBehalfOfCredential struct { client *confidentialClient } @@ -36,11 +37,13 @@ type OnBehalfOfCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // SendCertificateChain applies only when the credential is configured to authenticate with a certificate. // This setting controls whether the credential sends the public certificate chain in the x5c header of each // token request's JWT. This is required for, and only used in, Subject Name/Issuer (SNI) authentication. @@ -84,9 +87,13 @@ func newOnBehalfOfCredential(tenantID, clientID, userAssertion string, cred conf return &OnBehalfOfCredential{c}, nil } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. 
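// A minimal usage sketch for the on-behalf-of flow documented above, assuming
// placeholder tenant/client/secret/assertion values and a hypothetical Graph
// scope; NewOnBehalfOfCredentialWithSecret is one of the package's provided
// constructors:
//
//	cred, err := azidentity.NewOnBehalfOfCredentialWithSecret(tenantID, clientID, userAssertion, clientSecret, nil)
//	if err != nil {
//		// handle the constructor error
//	}
//	tk, err := cred.GetToken(ctx, policy.TokenRequestOptions{
//		Scopes: []string{"https://graph.microsoft.com/.default"},
//	})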
func (o *OnBehalfOfCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return o.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameOBO+"."+traceOpGetToken, o.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := o.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*OnBehalfOfCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go index 6512d3e25..63c31190d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -8,38 +8,52 @@ package azidentity import ( "context" + "errors" "fmt" + "net/http" "strings" "sync" "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" + + // this import ensures well-known configurations in azcore/cloud have ARM audiences for Authenticate() + _ "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime" ) type publicClientOptions struct { azcore.ClientOptions - AdditionallyAllowedTenants []string - DeviceCodePrompt func(context.Context, DeviceCodeMessage) error - DisableInstanceDiscovery bool - LoginHint, RedirectURL string - Username, Password string + AdditionallyAllowedTenants []string + DeviceCodePrompt func(context.Context, DeviceCodeMessage) error + DisableAutomaticAuthentication bool + DisableInstanceDiscovery bool + LoginHint, RedirectURL string + Record authenticationRecord + TokenCachePersistenceOptions *tokenCachePersistenceOptions + Username, Password string } // publicClient wraps the MSAL public client type publicClient struct { - account public.Account cae, noCAE msalPublicClient caeMu, noCAEMu, clientMu *sync.Mutex clientID, tenantID string + defaultScope []string host string name string opts publicClientOptions + record authenticationRecord + azClient *azcore.Client } +var errScopeRequired = errors.New("authenticating in this environment requires specifying a scope in TokenRequestOptions") + func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*publicClient, error) { if !validTenantID(tenantID) { return nil, errInvalidTenantID @@ -48,19 +62,76 @@ func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*p if err != nil { return nil, err } + // if the application specified a cloud configuration, use its ARM audience as the default scope for Authenticate() + audience := o.Cloud.Services[cloud.ResourceManager].Audience + if audience == "" { + // no cloud configuration, or no ARM audience, specified; try to map the host to a well-known one (all of which have a trailing slash) + if !strings.HasSuffix(host, "/") { + host += "/" + } + switch host { + case cloud.AzureChina.ActiveDirectoryAuthorityHost: + audience = cloud.AzureChina.Services[cloud.ResourceManager].Audience + case cloud.AzureGovernment.ActiveDirectoryAuthorityHost: + audience = cloud.AzureGovernment.Services[cloud.ResourceManager].Audience + case cloud.AzurePublic.ActiveDirectoryAuthorityHost: + audience = 
cloud.AzurePublic.Services[cloud.ResourceManager].Audience + } + } + // if we didn't come up with an audience, the application will have to specify a scope for Authenticate() + var defaultScope []string + if audience != "" { + defaultScope = []string{audience + defaultSuffix} + } + client, err := azcore.NewClient(module, version, runtime.PipelineOptions{ + Tracing: runtime.TracingOptions{ + Namespace: traceNamespace, + }, + }, &o.ClientOptions) + if err != nil { + return nil, err + } o.AdditionallyAllowedTenants = resolveAdditionalTenants(o.AdditionallyAllowedTenants) return &publicClient{ - caeMu: &sync.Mutex{}, - clientID: clientID, - clientMu: &sync.Mutex{}, - host: host, - name: name, - noCAEMu: &sync.Mutex{}, - opts: o, - tenantID: tenantID, + caeMu: &sync.Mutex{}, + clientID: clientID, + clientMu: &sync.Mutex{}, + defaultScope: defaultScope, + host: host, + name: name, + noCAEMu: &sync.Mutex{}, + opts: o, + record: o.Record, + tenantID: tenantID, + azClient: client, }, nil } +func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (authenticationRecord, error) { + if tro == nil { + tro = &policy.TokenRequestOptions{} + } + if len(tro.Scopes) == 0 { + if p.defaultScope == nil { + return authenticationRecord{}, errScopeRequired + } + tro.Scopes = p.defaultScope + } + client, mu, err := p.client(*tro) + if err != nil { + return authenticationRecord{}, err + } + mu.Lock() + defer mu.Unlock() + _, err = p.reqToken(ctx, client, *tro) + if err == nil { + scope := strings.Join(tro.Scopes, ", ") + msg := fmt.Sprintf("%s.Authenticate() acquired a token for scope %q", p.name, scope) + log.Write(EventAuthentication, msg) + } + return p.record, err +} + // GetToken requests an access token from MSAL, checking the cache first. 
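// A configuration sketch for the default-scope mapping above, assuming an
// application targeting Azure Government (cloud.AzureGovernment and the
// azcore.ClientOptions Cloud field are standard azcore settings; the choice of
// credential type here is illustrative). With a cloud configuration set, the
// authenticate path can fall back to that cloud's ARM audience plus
// "/.default" when the caller supplies no scope:
//
//	opts := azidentity.InteractiveBrowserCredentialOptions{
//		ClientOptions: azcore.ClientOptions{Cloud: cloud.AzureGovernment},
//	}
//	cred, err := azidentity.NewInteractiveBrowserCredential(&opts)
//	if err != nil {
//		// handle the constructor error
//	}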
func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) { if len(tro.Scopes) < 1 { @@ -76,10 +147,13 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti } mu.Lock() defer mu.Unlock() - ar, err := client.AcquireTokenSilent(ctx, tro.Scopes, public.WithSilentAccount(p.account), public.WithClaims(tro.Claims), public.WithTenantID(tenant)) + ar, err := client.AcquireTokenSilent(ctx, tro.Scopes, public.WithSilentAccount(p.record.account()), public.WithClaims(tro.Claims), public.WithTenantID(tenant)) if err == nil { return p.token(ar, err) } + if p.opts.DisableAutomaticAuthentication { + return azcore.AccessToken{}, errAuthenticationRequired + } at, err := p.reqToken(ctx, client, tro) if err == nil { msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", p.name, strings.Join(ar.GrantedScopes, ", ")) @@ -148,9 +222,14 @@ func (p *publicClient) client(tro policy.TokenRequestOptions) (msalPublicClient, } func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) { + cache, err := internal.NewCache(p.opts.TokenCachePersistenceOptions, enableCAE) + if err != nil { + return nil, err + } o := []public.Option{ public.WithAuthority(runtime.JoinPaths(p.host, p.tenantID)), - public.WithHTTPClient(newPipelineAdapter(&p.opts.ClientOptions)), + public.WithCache(cache), + public.WithHTTPClient(p), } if enableCAE { o = append(o, public.WithClientCapabilities(cp1)) @@ -163,7 +242,7 @@ func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) { func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) { if err == nil { - p.account = ar.Account + p.record, err = newAuthenticationRecord(ar) } else { res := getResponseFromError(err) err = newAuthenticationFailedError(p.name, err.Error(), res, err) @@ -171,8 +250,24 @@ func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToke return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err } -// resolveTenant returns the correct tenant for a token request given the client's +// resolveTenant returns the correct WithTenantID() argument for a token request given the client's // configuration, or an error when that configuration doesn't allow the specified tenant func (p *publicClient) resolveTenant(specified string) (string, error) { - return resolveTenant(p.tenantID, specified, p.name, p.opts.AdditionallyAllowedTenants) + t, err := resolveTenant(p.tenantID, specified, p.name, p.opts.AdditionallyAllowedTenants) + if t == p.tenantID { + // callers pass this value to MSAL's WithTenantID(). 
There's no need to redundantly specify + // the client's default tenant and doing so is an error when that tenant is "organizations" + t = "" + } + return t, err +} + +// these methods satisfy the MSAL ops.HTTPClient interface + +func (p *publicClient) CloseIdleConnections() { + // do nothing +} + +func (p *publicClient) Do(r *http.Request) (*http.Response, error) { + return doForClient(p.azClient, r) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go index f787ec0ce..294ed81e9 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameUserPassword = "UsernamePasswordCredential" @@ -23,11 +24,19 @@ type UsernamePasswordCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + + // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // to enable the credential to use data from a previous authentication. + authenticationRecord authenticationRecord + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + + // tokenCachePersistenceOptions enables persistent token caching when not nil. + tokenCachePersistenceOptions *tokenCachePersistenceOptions } // UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication, @@ -45,11 +54,13 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st options = &UsernamePasswordCredentialOptions{} } opts := publicClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - Password: password, - Username: username, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + Password: password, + Record: options.authenticationRecord, + TokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + Username: username, } c, err := newPublicClient(tenantID, clientID, credNameUserPassword, opts) if err != nil { @@ -58,9 +69,22 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st return &UsernamePasswordCredential{client: c}, err } -// GetToken requests an access token from Azure Active Directory. This method is called automatically by Azure SDK clients. +// Authenticate the user. 
Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. +func (c *UsernamePasswordCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.Authenticate(ctx, opts) + return tk, err +} + +// GetToken requests an access token from Microsoft Entra ID. This method is called automatically by Azure SDK clients. func (c *UsernamePasswordCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return c.client.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := c.client.GetToken(ctx, opts) + return tk, err } var _ azcore.TokenCredential = (*UsernamePasswordCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 65e74e31e..e8caeea71 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -10,6 +10,9 @@ const ( // UserAgent is the string to be used in the user agent string when making requests. component = "azidentity" + // module is the fully qualified name of the module used in telemetry and distributed tracing. + module = "github.com/Azure/azure-sdk-for-go/sdk/" + component + // Version is the semantic version (see http://semver.org) of this module. - version = "v1.4.0" + version = "v1.5.1" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go index 7e016324d..3e43e788e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go @@ -15,6 +15,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" ) const credNameWorkloadIdentity = "WorkloadIdentityCredential" @@ -41,7 +42,7 @@ type WorkloadIdentityCredentialOptions struct { // ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID. ClientID string // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or - // private clouds such as Azure Stack. It determines whether the credential requests Azure AD instance metadata + // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool @@ -93,9 +94,13 @@ func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) ( return &w, nil } -// GetToken requests an access token from Azure Active Directory. Azure SDK clients call this method automatically. +// GetToken requests an access token from Microsoft Entra ID. Azure SDK clients call this method automatically. 
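// A configuration sketch for the workload identity flow above, assuming a
// standard Kubernetes projected-token mount (every literal below is a
// placeholder; when these options are unset the credential reads
// AZURE_CLIENT_ID, AZURE_TENANT_ID and AZURE_FEDERATED_TOKEN_FILE instead):
//
//	cred, err := azidentity.NewWorkloadIdentityCredential(&azidentity.WorkloadIdentityCredentialOptions{
//		ClientID:      "<client-id>",
//		TenantID:      "<tenant-id>",
//		TokenFilePath: "/var/run/secrets/azure/tokens/azure-identity-token",
//	})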
func (w *WorkloadIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { - return w.cred.GetToken(ctx, opts) + var err error + ctx, endSpan := runtime.StartSpan(ctx, credNameWorkloadIdentity+"."+traceOpGetToken, w.cred.client.azClient.Tracer(), nil) + defer func() { endSpan(err) }() + tk, err := w.cred.GetToken(ctx, opts) + return tk, err } // getAssertion returns the specified file's content, which is expected to be a Kubernetes service account token. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 0cd31f00e..31feb8b1c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,12 @@ +# v1.26.6 (2024-01-22) + +* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons. +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.26.5 (2024-01-18) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.26.4 (2024-01-16) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index 6de4538a4..51f6e3320 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.26.4" +const goModuleVersion = "1.26.6" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 6671a41c9..989c4ea39 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.16.16 (2024-01-18) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.16.15 (2024-01-16) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index c8bcd2dce..fe92184dc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.16.15" +const goModuleVersion = "1.16.16" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md index 4ab05fda6..fe53e9a5c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md @@ -1,3 +1,15 @@ +# v1.15.15 (2024-01-24) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.14 (2024-01-22) + +* **Dependency Update**: Updated to the latest SDK module versions + +# v1.15.13 (2024-01-18) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.15.12 (2024-01-16) * **Dependency Update**: Updated to the latest SDK module versions diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go index ab1332ac2..6fe1aa681 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go @@ -3,4 +3,4 @@ package manager // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.15.12" +const goModuleVersion = "1.15.15" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md index adbbf4adc..9a0b5e037 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.7.3 (2024-01-22) + +* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons. + # v1.7.2 (2023-12-08) * **Bug Fix**: Correct loading of [services *] sections into shared config. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go index f0673f3a0..1d68f8346 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -3,4 +3,4 @@ package ini // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.7.2" +const goModuleVersion = "1.7.3" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go index 661588c22..ed77d0835 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go @@ -67,12 +67,8 @@ func unquote(s string) string { // applies various legacy conversions to property values: // - remote wrapping single/doublequotes -// - expand escaped quote and newline sequences func legacyStrconv(s string) string { s = unquote(s) - s = strings.ReplaceAll(s, `\"`, `"`) - s = strings.ReplaceAll(s, `\'`, `'`) - s = strings.ReplaceAll(s, `\n`, "\n") return s } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md index f83de2a5e..89a5a0d52 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.48.1 (2024-01-24) + +* No change notes available for this release. + # v1.48.0 (2024-01-05) * **Feature**: Support smithy sigv4a trait for codegen. 
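An illustration of the shared config change noted in the ini and config changelogs above (the profile keys and values here are hypothetical):

    [default]
    literal_value = a\"b        # now kept as-is: a\"b (escape sequences are no longer expanded)
    quoted_value  = "us-west-2" # fully-quoted strings are still unwrapped, yielding us-west-2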
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go index 5e5f27b2d..db35814d3 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/api_client.go @@ -56,10 +56,10 @@ func New(options Options, optFns ...func(*Options)) *Client { resolveHTTPSignerV4(&options) - resolveHTTPSignerV4a(&options) - resolveEndpointResolverV2(&options) + resolveHTTPSignerV4a(&options) + resolveAuthSchemeResolver(&options) for _, fn := range optFns { @@ -68,8 +68,6 @@ func New(options Options, optFns ...func(*Options)) *Client { finalizeRetryMaxAttempts(&options) - resolveCredentialProvider(&options) - ignoreAnonymousAuth(&options) resolveExpressCredentials(&options) @@ -111,8 +109,6 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf finalizeClientEndpointResolverOptions(&options) - resolveCredentialProvider(&options) - finalizeOperationExpressCredentials(&options, *c) finalizeOperationEndpointAuthResolver(&options) @@ -496,33 +492,6 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error { return nil } -func resolveCredentialProvider(o *Options) { - if o.Credentials == nil { - return - } - - if _, ok := o.Credentials.(v4a.CredentialsProvider); ok { - return - } - - if aws.IsCredentialsProvider(o.Credentials, (*aws.AnonymousCredentials)(nil)) { - return - } - - o.Credentials = &v4a.SymmetricCredentialAdaptor{SymmetricProvider: o.Credentials} -} - -func swapWithCustomHTTPSignerMiddleware(stack *middleware.Stack, o Options) error { - mw := s3cust.NewSignHTTPRequestMiddleware(s3cust.SignHTTPRequestMiddlewareOptions{ - CredentialsProvider: o.Credentials, - V4Signer: o.HTTPSignerV4, - V4aSigner: o.httpSignerV4a, - LogSigning: o.ClientLogMode.IsSigning(), - }) - - return s3cust.RegisterSigningMiddleware(stack, mw) -} - type httpSignerV4a interface { SignHTTP(ctx context.Context, credentials v4a.Credentials, r *http.Request, payloadHash, service string, regionSet []string, signingTime time.Time, @@ -540,7 +509,6 @@ func newDefaultV4aSigner(o Options) *v4a.Signer { return v4a.NewSigner(func(so *v4a.SignerOptions) { so.Logger = o.Logger so.LogSigning = o.ClientLogMode.IsSigning() - so.DisableURIPathEscaping = true }) } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go index 77e3ee12f..bff6ac9a7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go @@ -3,4 +3,4 @@ package s3 // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.48.0" +const goModuleVersion = "1.48.1" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go index cc2bf9c13..756823cb7 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations/signer_wrapper.go @@ -182,15 +182,12 @@ func (p *PresignHTTPRequestMiddleware) HandleFinalize( switch signerVersion { case "aws.auth#sigv4a": - v4aCredentialProvider, ok := p.credentialsProvider.(v4a.CredentialsProvider) - if !ok { - return out, metadata, fmt.Errorf("invalid credential-provider provided for sigV4a Signer") - } - mw := 
v4a.NewPresignHTTPRequestMiddleware(v4a.PresignHTTPRequestMiddlewareOptions{ - CredentialsProvider: v4aCredentialProvider, - Presigner: p.v4aSigner, - LogSigning: p.logSigning, + CredentialsProvider: &v4a.SymmetricCredentialAdaptor{ + SymmetricProvider: p.credentialsProvider, + }, + Presigner: p.v4aSigner, + LogSigning: p.logSigning, }) return mw.HandleFinalize(ctx, in, next) case "aws.auth#sigv4": diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 9d5847a05..46dea1b50 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.18.7 (2024-01-18) + +* No change notes available for this release. + # v1.18.6 (2024-01-04) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index d2e5a8ab8..857a6af7e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.6" +const goModuleVersion = "1.18.7" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go index f044afde4..c8f7c09e4 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go @@ -283,6 +283,14 @@ var defaultPartitions = endpoints.Partitions{ Region: "il-central-1", }, }, + endpoints.EndpointKey{ + Region: "me-central-1", + }: endpoints.Endpoint{ + Hostname: "portal.sso.me-central-1.amazonaws.com", + CredentialScope: endpoints.CredentialScope{ + Region: "me-central-1", + }, + }, endpoints.EndpointKey{ Region: "me-south-1", }: endpoints.Endpoint{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index d8ee8b14e..6d5013fca 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -3450,6 +3450,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -3477,6 +3480,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -22901,6 +22907,14 @@ var awsPartition = partition{ Region: "il-central-1", }, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "portal.sso.me-central-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-central-1", + }, + }, endpointKey{ Region: "me-south-1", }: endpoint{ @@ -29084,6 +29098,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "us-east-1", }: endpoint{}, @@ -30366,6 +30383,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: 
"me-south-1", }: endpoint{}, @@ -33990,9 +34010,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -34014,9 +34043,18 @@ var awsPartition = partition{ endpointKey{ Region: "ui-ap-northeast-1", }: endpoint{}, + endpointKey{ + Region: "ui-ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ui-ap-southeast-1", + }: endpoint{}, endpointKey{ Region: "ui-ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ui-ca-central-1", + }: endpoint{}, endpointKey{ Region: "ui-eu-central-1", }: endpoint{}, @@ -35850,6 +35888,13 @@ var awscnPartition = partition{ }, }, }, + "quicksight": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + }, + }, "ram": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -43393,6 +43438,15 @@ var awsisoPartition = partition{ }, "datasync": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-iso-west-1", }: endpoint{ @@ -43402,6 +43456,15 @@ var awsisoPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "us-iso-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov", + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 8f8370e8a..4885dbc90 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.49.22" +const SDKVersion = "1.50.8" diff --git a/vendor/github.com/cheggaaa/pb/v3/termutil/term_win.go b/vendor/github.com/cheggaaa/pb/v3/termutil/term_win.go index 75c7e2c39..99397a0dc 100644 --- a/vendor/github.com/cheggaaa/pb/v3/termutil/term_win.go +++ b/vendor/github.com/cheggaaa/pb/v3/termutil/term_win.go @@ -138,9 +138,8 @@ func lockEcho() (err error) { } newState := oldState - const ENABLE_ECHO_INPUT = 0x0004 const ENABLE_LINE_INPUT = 0x0002 - newState = newState & (^(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT)) + newState = newState & (^ENABLE_LINE_INPUT) if _, _, e := syscall.Syscall(setConsoleMode.Addr(), 2, uintptr(syscall.Stdout), uintptr(newState), 0); e != 0 { err = fmt.Errorf("Can't set terminal settings: %v", e) return diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md index c9fb829dc..7ec5ac7ea 100644 --- a/vendor/github.com/google/uuid/CHANGELOG.md +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -1,5 +1,18 @@ # Changelog +## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16) + + +### Features + +* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3)) + + +### Bug Fixes + +* fix typo in version 7 uuid documentation 
([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06)) +* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6)) + ## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12) diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go index b404f4bec..dc60082d3 100644 --- a/vendor/github.com/google/uuid/hash.go +++ b/vendor/github.com/google/uuid/hash.go @@ -17,6 +17,12 @@ var ( NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) Nil UUID // empty UUID, all zeros + + // The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1. + Max = UUID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + } ) // NewHash returns a new UUID derived from the hash of space concatenated with diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go index ba9dd5eb6..3167b643d 100644 --- a/vendor/github.com/google/uuid/version7.go +++ b/vendor/github.com/google/uuid/version7.go @@ -44,7 +44,7 @@ func NewV7FromReader(r io.Reader) (UUID, error) { // makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6]) // uuid[8] already has the right version number (Variant is 10) -// see function NewV7 and NewV7FromReader +// see function NewV7 and NewV7FromReader func makeV7(uuid []byte) { /* 0 1 2 3 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | unix_ts_ms | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ - | unix_ts_ms | ver | rand_a | + | unix_ts_ms | ver | rand_a (12 bit seq) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |var| rand_b | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ @@ -61,7 +61,7 @@ func makeV7(uuid []byte) { */ _ = uuid[15] // bounds check - t := timeNow().UnixMilli() + t, s := getV7Time() uuid[0] = byte(t >> 40) uuid[1] = byte(t >> 32) @@ -70,6 +70,35 @@ func makeV7(uuid []byte) { uuid[4] = byte(t >> 8) uuid[5] = byte(t) - uuid[6] = 0x70 | (uuid[6] & 0x0F) - // uuid[8] has already has right version + uuid[6] = 0x70 | (0x0F & byte(s>>8)) + uuid[7] = byte(s) +} + +// lastV7time is the last time we returned, stored as: +// +// 52 bits of time in milliseconds since epoch +// 12 bits of (fractional nanoseconds) >> 8 +var lastV7time int64 + +const nanoPerMilli = 1000000 + +// getV7Time returns the time in milliseconds and nanoseconds / 256. +// The returned (milli << 12 + seq) is guaranteed to be greater than +// (milli << 12 + seq) returned by any previous call to getV7Time.
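+// For example, two calls landing within the same 256ns window would
+// compute identical (milli, seq) pairs on their own; the lastV7time
+// comparison below bumps the later result by one, so v7 UUIDs from this
+// process remain strictly ordered.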
+func getV7Time() (milli, seq int64) { + timeMu.Lock() + defer timeMu.Unlock() + + nano := timeNow().UnixNano() + milli = nano / nanoPerMilli + // Sequence number is between 0 and 3906 (nanoPerMilli>>8) + seq = (nano - milli*nanoPerMilli) >> 8 + now := milli<<12 + seq + if now <= lastV7time { + now = lastV7time + 1 + milli = now >> 12 + seq = now & 0xfff + } + lastV7time = now + return milli, seq } diff --git a/vendor/github.com/hashicorp/go-version/CHANGELOG.md b/vendor/github.com/hashicorp/go-version/CHANGELOG.md new file mode 100644 index 000000000..5f16dd140 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/CHANGELOG.md @@ -0,0 +1,45 @@ +# 1.6.0 (June 28, 2022) + +FEATURES: + +- Add `Prerelease` function to `Constraint` to return true if the version includes a prerelease field ([#100](https://github.com/hashicorp/go-version/pull/100)) + +# 1.5.0 (May 18, 2022) + +FEATURES: + +- Use `encoding` `TextMarshaler` & `TextUnmarshaler` instead of JSON equivalents ([#95](https://github.com/hashicorp/go-version/pull/95)) +- Add JSON handlers to allow parsing from/to JSON ([#93](https://github.com/hashicorp/go-version/pull/93)) + +# 1.4.0 (January 5, 2022) + +FEATURES: + + - Introduce `MustConstraints()` ([#87](https://github.com/hashicorp/go-version/pull/87)) + - `Constraints`: Introduce `Equals()` and `sort.Interface` methods ([#88](https://github.com/hashicorp/go-version/pull/88)) + +# 1.3.0 (March 31, 2021) + +Please note that CHANGELOG.md does not exist in the source code prior to this release. + +FEATURES: + - Add `Core` function to return a version without prerelease or metadata ([#85](https://github.com/hashicorp/go-version/pull/85)) + +# 1.2.1 (June 17, 2020) + +BUG FIXES: + - Prevent `Version.Equal` method from panicking on `nil` encounter ([#73](https://github.com/hashicorp/go-version/pull/73)) + +# 1.2.0 (April 23, 2019) + +FEATURES: + - Add `GreaterThanOrEqual` and `LessThanOrEqual` helper methods ([#53](https://github.com/hashicorp/go-version/pull/53)) + +# 1.1.0 (Jan 07, 2019) + +FEATURES: + - Add `NewSemver` constructor ([#45](https://github.com/hashicorp/go-version/pull/45)) + +# 1.0.0 (August 24, 2018) + +Initial release. diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE new file mode 100644 index 000000000..c33dcc7c9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/LICENSE @@ -0,0 +1,354 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. “Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. 
“Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md new file mode 100644 index 000000000..4d2505090 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/README.md @@ -0,0 +1,66 @@ +# Versioning Library for Go +[![Build Status](https://circleci.com/gh/hashicorp/go-version/tree/main.svg?style=svg)](https://circleci.com/gh/hashicorp/go-version/tree/main) +[![GoDoc](https://godoc.org/github.com/hashicorp/go-version?status.svg)](https://godoc.org/github.com/hashicorp/go-version) + +go-version is a library for parsing versions and version constraints, +and verifying versions against a set of constraints. go-version +can sort a collection of versions properly, handles prerelease/beta +versions, can increment versions, etc. + +Versions used with go-version must follow [SemVer](http://semver.org/). + +## Installation and Usage + +Package documentation can be found on +[GoDoc](http://godoc.org/github.com/hashicorp/go-version). + +Installation can be done with a normal `go get`: + +``` +$ go get github.com/hashicorp/go-version +``` + +#### Version Parsing and Comparison + +```go +v1, err := version.NewVersion("1.2") +v2, err := version.NewVersion("1.5+metadata") + +// Comparison example. There is also GreaterThan, Equal, and just +// a simple Compare that returns an int allowing easy >=, <=, etc. +if v1.LessThan(v2) { + fmt.Printf("%s is less than %s", v1, v2) +} +``` + +#### Version Constraints + +```go +v1, err := version.NewVersion("1.2") + +// Constraints example. +constraints, err := version.NewConstraint(">= 1.0, < 1.4") +if constraints.Check(v1) { + fmt.Printf("%s satisfies constraints %s", v1, constraints) +} +``` + +#### Version Sorting + +```go +versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} +versions := make([]*version.Version, len(versionsRaw)) +for i, raw := range versionsRaw { + v, _ := version.NewVersion(raw) + versions[i] = v +} + +// After this, the versions are properly sorted +sort.Sort(version.Collection(versions)) +``` + +## Issues and Contributing + +If you find an issue with this library, please report an issue. If you'd +like, we welcome any contributions. Fork this library and submit a pull +request. diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go new file mode 100644 index 000000000..da5d1aca1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/constraint.go @@ -0,0 +1,296 @@ +package version + +import ( + "fmt" + "reflect" + "regexp" + "sort" + "strings" +) + +// Constraint represents a single constraint for a version, such as +// ">= 1.0". +type Constraint struct { + f constraintFunc + op operator + check *Version + original string +} + +func (c *Constraint) Equals(con *Constraint) bool { + return c.op == con.op && c.check.Equal(con.check) +} + +// Constraints is a slice of constraints. We make a custom type so that +// we can add methods to it. 
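+// For example, NewConstraint(">= 1.0, < 1.4") yields a two-element
+// Constraints value whose Check passes "1.2" and rejects "1.4".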
+type Constraints []*Constraint + +type constraintFunc func(v, c *Version) bool + +var constraintOperators map[string]constraintOperation + +type constraintOperation struct { + op operator + f constraintFunc +} + +var constraintRegexp *regexp.Regexp + +func init() { + constraintOperators = map[string]constraintOperation{ + "": {op: equal, f: constraintEqual}, + "=": {op: equal, f: constraintEqual}, + "!=": {op: notEqual, f: constraintNotEqual}, + ">": {op: greaterThan, f: constraintGreaterThan}, + "<": {op: lessThan, f: constraintLessThan}, + ">=": {op: greaterThanEqual, f: constraintGreaterThanEqual}, + "<=": {op: lessThanEqual, f: constraintLessThanEqual}, + "~>": {op: pessimistic, f: constraintPessimistic}, + } + + ops := make([]string, 0, len(constraintOperators)) + for k := range constraintOperators { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegexp = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + VersionRegexpRaw)) +} + +// NewConstraint will parse one or more constraints from the given +// constraint string. The string must be a comma-separated list of +// constraints. +func NewConstraint(v string) (Constraints, error) { + vs := strings.Split(v, ",") + result := make([]*Constraint, len(vs)) + for i, single := range vs { + c, err := parseSingle(single) + if err != nil { + return nil, err + } + + result[i] = c + } + + return Constraints(result), nil +} + +// MustConstraints is a helper that wraps a call to a function +// returning (Constraints, error) and panics if error is non-nil. +func MustConstraints(c Constraints, err error) Constraints { + if err != nil { + panic(err) + } + + return c +} + +// Check tests if a version satisfies all the constraints. +func (cs Constraints) Check(v *Version) bool { + for _, c := range cs { + if !c.Check(v) { + return false + } + } + + return true +} + +// Equals compares Constraints with other Constraints +// for equality. This may not represent logical equivalence +// of compared constraints. +// e.g. even though '>0.1,>0.2' is logically equivalent +// to '>0.2' it is *NOT* treated as equal. +// +// Missing operator is treated as equal to '=', whitespaces +// are ignored and constraints are sorted before comaparison. +func (cs Constraints) Equals(c Constraints) bool { + if len(cs) != len(c) { + return false + } + + // make copies to retain order of the original slices + left := make(Constraints, len(cs)) + copy(left, cs) + sort.Stable(left) + right := make(Constraints, len(c)) + copy(right, c) + sort.Stable(right) + + // compare sorted slices + for i, con := range left { + if !con.Equals(right[i]) { + return false + } + } + + return true +} + +func (cs Constraints) Len() int { + return len(cs) +} + +func (cs Constraints) Less(i, j int) bool { + if cs[i].op < cs[j].op { + return true + } + if cs[i].op > cs[j].op { + return false + } + + return cs[i].check.LessThan(cs[j].check) +} + +func (cs Constraints) Swap(i, j int) { + cs[i], cs[j] = cs[j], cs[i] +} + +// Returns the string format of the constraints +func (cs Constraints) String() string { + csStr := make([]string, len(cs)) + for i, c := range cs { + csStr[i] = c.String() + } + + return strings.Join(csStr, ",") +} + +// Check tests if a constraint is validated by the given version. +func (c *Constraint) Check(v *Version) bool { + return c.f(v, c.check) +} + +// Prerelease returns true if the version underlying this constraint +// contains a prerelease field. 
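+// For example, a constraint parsed from "> 1.0.0-beta" reports
+// Prerelease() == true, while one parsed from "> 1.0.0" reports false.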
+func (c *Constraint) Prerelease() bool { + return len(c.check.Prerelease()) > 0 +} + +func (c *Constraint) String() string { + return c.original +} + +func parseSingle(v string) (*Constraint, error) { + matches := constraintRegexp.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed constraint: %s", v) + } + + check, err := NewVersion(matches[2]) + if err != nil { + return nil, err + } + + cop := constraintOperators[matches[1]] + + return &Constraint{ + f: cop.f, + op: cop.op, + check: check, + original: v, + }, nil +} + +func prereleaseCheck(v, c *Version) bool { + switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { + case cPre && vPre: + // A constraint with a pre-release can only match a pre-release version + // with the same base segments. + return reflect.DeepEqual(c.Segments64(), v.Segments64()) + + case !cPre && vPre: + // A constraint without a pre-release can only match a version without a + // pre-release. + return false + + case cPre && !vPre: + // OK, except with the pessimistic operator + case !cPre && !vPre: + // OK + } + return true +} + +//------------------------------------------------------------------- +// Constraint functions +//------------------------------------------------------------------- + +type operator rune + +const ( + equal operator = '=' + notEqual operator = '≠' + greaterThan operator = '>' + lessThan operator = '<' + greaterThanEqual operator = '≥' + lessThanEqual operator = '≤' + pessimistic operator = '~' +) + +func constraintEqual(v, c *Version) bool { + return v.Equal(c) +} + +func constraintNotEqual(v, c *Version) bool { + return !v.Equal(c) +} + +func constraintGreaterThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == 1 +} + +func constraintLessThan(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) == -1 +} + +func constraintGreaterThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) >= 0 +} + +func constraintLessThanEqual(v, c *Version) bool { + return prereleaseCheck(v, c) && v.Compare(c) <= 0 +} + +func constraintPessimistic(v, c *Version) bool { + // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases + if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { + return false + } + + // If the version being checked is naturally less than the constraint, then there + // is no way for the version to be valid against the constraint + if v.LessThan(c) { + return false + } + // We'll use this more than once, so grab the length now so it's a little cleaner + // to write the later checks + cs := len(c.segments) + + // If the version being checked has less specificity than the constraint, then there + // is no way for the version to be valid against the constraint + if cs > len(v.segments) { + return false + } + + // Check the segments in the constraint against those in the version. If the version + // being checked, at any point, does not have the same values in each index of the + // constraints segments, then it cannot be valid against the constraint. + for i := 0; i < c.si-1; i++ { + if v.segments[i] != c.segments[i] { + return false + } + } + + // Check the last part of the segment in the constraint. 
If the version segment at + // this index is less than the constraints segment at this index, then it cannot + // be valid against the constraint + if c.segments[cs-1] > v.segments[cs-1] { + return false + } + + // If nothing has rejected the version by now, it's valid + return true +} diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go new file mode 100644 index 000000000..e87df6990 --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version.go @@ -0,0 +1,407 @@ +package version + +import ( + "bytes" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// The compiled regular expression used to test the validity of a version. +var ( + versionRegexp *regexp.Regexp + semverRegexp *regexp.Regexp +) + +// The raw regular expression string used for testing the validity +// of a version. +const ( + VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` + + // SemverRegexpRaw requires a separator between version and prerelease + SemverRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + + `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + + `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + + `?` +) + +// Version represents a single version. +type Version struct { + metadata string + pre string + segments []int64 + si int + original string +} + +func init() { + versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") + semverRegexp = regexp.MustCompile("^" + SemverRegexpRaw + "$") +} + +// NewVersion parses the given version and returns a new +// Version. +func NewVersion(v string) (*Version, error) { + return newVersion(v, versionRegexp) +} + +// NewSemver parses the given version and returns a new +// Version that adheres strictly to SemVer specs +// https://semver.org/ +func NewSemver(v string) (*Version, error) { + return newVersion(v, semverRegexp) +} + +func newVersion(v string, pattern *regexp.Regexp) (*Version, error) { + matches := pattern.FindStringSubmatch(v) + if matches == nil { + return nil, fmt.Errorf("Malformed version: %s", v) + } + segmentsStr := strings.Split(matches[1], ".") + segments := make([]int64, len(segmentsStr)) + for i, str := range segmentsStr { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return nil, fmt.Errorf( + "Error parsing version: %s", err) + } + + segments[i] = val + } + + // Even though we could support more than three segments, if we + // got less than three, pad it with 0s. This is to cover the basic + // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum + for i := len(segments); i < 3; i++ { + segments = append(segments, 0) + } + + pre := matches[7] + if pre == "" { + pre = matches[4] + } + + return &Version{ + metadata: matches[10], + pre: pre, + segments: segments, + si: len(segmentsStr), + original: v, + }, nil +} + +// Must is a helper that wraps a call to a function returning (*Version, error) +// and panics if error is non-nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + + return v +} + +// Compare compares this version to another version. This +// returns -1, 0, or 1 if this version is smaller, equal, +// or larger than the other version, respectively. +// +// If you want boolean results, use the LessThan, Equal, +// GreaterThan, GreaterThanOrEqual or LessThanOrEqual methods. 
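+//
+// For example, comparing "1.2.0" against "1.2.0-beta" returns 1: when
+// the numeric segments match, a version without prerelease data sorts
+// after one that carries it.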
+func (v *Version) Compare(other *Version) int { + // A quick, efficient equality check + if v.String() == other.String() { + return 0 + } + + segmentsSelf := v.Segments64() + segmentsOther := other.Segments64() + + // If the segments are the same, we must compare on prerelease info + if reflect.DeepEqual(segmentsSelf, segmentsOther) { + preSelf := v.Prerelease() + preOther := other.Prerelease() + if preSelf == "" && preOther == "" { + return 0 + } + if preSelf == "" { + return 1 + } + if preOther == "" { + return -1 + } + + return comparePrereleases(preSelf, preOther) + } + + // Get the highest specificity (hS), or if they're equal, just use segmentSelf length + lenSelf := len(segmentsSelf) + lenOther := len(segmentsOther) + hS := lenSelf + if lenSelf < lenOther { + hS = lenOther + } + // Compare the segments + // Because a constraint could have more/less specificity than the version it's + // checking, we need to account for a lopsided or jagged comparison + for i := 0; i < hS; i++ { + if i > lenSelf-1 { + // This means Self had the lower specificity + // Check to see if the remaining segments in Other are all zeros + if !allZero(segmentsOther[i:]) { + // if not, it means that Other has to be greater than Self + return -1 + } + break + } else if i > lenOther-1 { + // this means Other had the lower specificity + // Check to see if the remaining segments in Self are all zeros - + if !allZero(segmentsSelf[i:]) { + //if not, it means that Self has to be greater than Other + return 1 + } + break + } + lhs := segmentsSelf[i] + rhs := segmentsOther[i] + if lhs == rhs { + continue + } else if lhs < rhs { + return -1 + } + // Otherwis, rhs was > lhs, they're not equal + return 1 + } + + // if we got this far, they're equal + return 0 +} + +func allZero(segs []int64) bool { + for _, s := range segs { + if s != 0 { + return false + } + } + return true +} + +func comparePart(preSelf string, preOther string) int { + if preSelf == preOther { + return 0 + } + + var selfInt int64 + selfNumeric := true + selfInt, err := strconv.ParseInt(preSelf, 10, 64) + if err != nil { + selfNumeric = false + } + + var otherInt int64 + otherNumeric := true + otherInt, err = strconv.ParseInt(preOther, 10, 64) + if err != nil { + otherNumeric = false + } + + // if a part is empty, we use the other to decide + if preSelf == "" { + if otherNumeric { + return -1 + } + return 1 + } + + if preOther == "" { + if selfNumeric { + return 1 + } + return -1 + } + + if selfNumeric && !otherNumeric { + return -1 + } else if !selfNumeric && otherNumeric { + return 1 + } else if !selfNumeric && !otherNumeric && preSelf > preOther { + return 1 + } else if selfInt > otherInt { + return 1 + } + + return -1 +} + +func comparePrereleases(v string, other string) int { + // the same pre release! 
+ if v == other { + return 0 + } + + // split both pre releases for analyse their parts + selfPreReleaseMeta := strings.Split(v, ".") + otherPreReleaseMeta := strings.Split(other, ".") + + selfPreReleaseLen := len(selfPreReleaseMeta) + otherPreReleaseLen := len(otherPreReleaseMeta) + + biggestLen := otherPreReleaseLen + if selfPreReleaseLen > otherPreReleaseLen { + biggestLen = selfPreReleaseLen + } + + // loop for parts to find the first difference + for i := 0; i < biggestLen; i = i + 1 { + partSelfPre := "" + if i < selfPreReleaseLen { + partSelfPre = selfPreReleaseMeta[i] + } + + partOtherPre := "" + if i < otherPreReleaseLen { + partOtherPre = otherPreReleaseMeta[i] + } + + compare := comparePart(partSelfPre, partOtherPre) + // if parts are equals, continue the loop + if compare != 0 { + return compare + } + } + + return 0 +} + +// Core returns a new version constructed from only the MAJOR.MINOR.PATCH +// segments of the version, without prerelease or metadata. +func (v *Version) Core() *Version { + segments := v.Segments64() + segmentsOnly := fmt.Sprintf("%d.%d.%d", segments[0], segments[1], segments[2]) + return Must(NewVersion(segmentsOnly)) +} + +// Equal tests if two versions are equal. +func (v *Version) Equal(o *Version) bool { + if v == nil || o == nil { + return v == o + } + + return v.Compare(o) == 0 +} + +// GreaterThan tests if this version is greater than another version. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// GreaterThanOrEqual tests if this version is greater than or equal to another version. +func (v *Version) GreaterThanOrEqual(o *Version) bool { + return v.Compare(o) >= 0 +} + +// LessThan tests if this version is less than another version. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// LessThanOrEqual tests if this version is less than or equal to another version. +func (v *Version) LessThanOrEqual(o *Version) bool { + return v.Compare(o) <= 0 +} + +// Metadata returns any metadata that was part of the version +// string. +// +// Metadata is anything that comes after the "+" in the version. +// For example, with "1.2.3+beta", the metadata is "beta". +func (v *Version) Metadata() string { + return v.metadata +} + +// Prerelease returns any prerelease data that is part of the version, +// or blank if there is no prerelease data. +// +// Prerelease information is anything that comes after the "-" in the +// version (but before any metadata). For example, with "1.2.3-beta", +// the prerelease information is "beta". +func (v *Version) Prerelease() string { + return v.pre +} + +// Segments returns the numeric segments of the version as a slice of ints. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments() []int { + segmentSlice := make([]int, len(v.segments)) + for i, v := range v.segments { + segmentSlice[i] = int(v) + } + return segmentSlice +} + +// Segments64 returns the numeric segments of the version as a slice of int64s. +// +// This excludes any metadata or pre-release information. For example, +// for a version "1.2.3-beta", segments will return a slice of +// 1, 2, 3. +func (v *Version) Segments64() []int64 { + result := make([]int64, len(v.segments)) + copy(result, v.segments) + return result +} + +// String returns the full version string included pre-release +// and metadata information. 
+// +// This value is rebuilt according to the parsed segments and other +// information. Therefore, ambiguities in the version string such as +// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and +// missing parts (1.0 => 1.0.0) will be made into a canonicalized form +// as shown in the parenthesized examples. +func (v *Version) String() string { + var buf bytes.Buffer + fmtParts := make([]string, len(v.segments)) + for i, s := range v.segments { + // We can ignore err here since we've pre-parsed the values in segments + str := strconv.FormatInt(s, 10) + fmtParts[i] = str + } + fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original parsed version as-is, including any +// potential whitespace, `v` prefix, etc. +func (v *Version) Original() string { + return v.original +} + +// UnmarshalText implements encoding.TextUnmarshaler interface. +func (v *Version) UnmarshalText(b []byte) error { + temp, err := NewVersion(string(b)) + if err != nil { + return err + } + + *v = *temp + + return nil +} + +// MarshalText implements encoding.TextMarshaler interface. +func (v *Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go new file mode 100644 index 000000000..cc888d43e --- /dev/null +++ b/vendor/github.com/hashicorp/go-version/version_collection.go @@ -0,0 +1,17 @@ +package version + +// Collection is a type that implements the sort.Interface interface +// so that versions can be sorted. +type Collection []*Version + +func (v Collection) Len() int { + return len(v) +} + +func (v Collection) Less(i, j int) bool { + return v[i].LessThan(v[j]) +} + +func (v Collection) Swap(i, j int) { + v[i], v[j] = v[j], v[i] +} diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 7e83f583c..c918f11d8 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,18 @@ This package provides various compression algorithms. # changelog +* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4) + * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887 + * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886 + * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892 + * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890 + * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891 + +* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3) + * fse: Fix max header size https://github.com/klauspost/compress/pull/881 + * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877 + * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883 + * Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2) * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 @@ -554,7 +566,7 @@ For direct deflate use, NewStatelessWriter and StatelessDeflate are available. 
S A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: -``` +```go // replace 'ioutil.Discard' with your output. gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) if err != nil { diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index de912e187..66d1657d2 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -212,7 +212,7 @@ func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error { // Should only be used after a start/reset. func (d *compressor) fillWindow(b []byte) { // Do not fill window if we are in store-only or huffman mode. - if d.level <= 0 { + if d.level <= 0 && d.level > -MinCustomWindowSize { return } if d.fast != nil { diff --git a/vendor/github.com/klauspost/compress/internal/race/norace.go b/vendor/github.com/klauspost/compress/internal/race/norace.go new file mode 100644 index 000000000..affbbbb59 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/race/norace.go @@ -0,0 +1,13 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !race + +package race + +func ReadSlice[T any](s []T) { +} + +func WriteSlice[T any](s []T) { +} diff --git a/vendor/github.com/klauspost/compress/internal/race/race.go b/vendor/github.com/klauspost/compress/internal/race/race.go new file mode 100644 index 000000000..f5e240dcd --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/race/race.go @@ -0,0 +1,26 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build race + +package race + +import ( + "runtime" + "unsafe" +) + +func ReadSlice[T any](s []T) { + if len(s) == 0 { + return + } + runtime.RaceReadRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0]))) +} + +func WriteSlice[T any](s []T) { + if len(s) == 0 { + return + } + runtime.RaceWriteRange(unsafe.Pointer(&s[0]), len(s)*int(unsafe.Sizeof(s[0]))) +} diff --git a/vendor/github.com/klauspost/compress/s2/decode.go b/vendor/github.com/klauspost/compress/s2/decode.go index 6c7feafcc..264ffd0a9 100644 --- a/vendor/github.com/klauspost/compress/s2/decode.go +++ b/vendor/github.com/klauspost/compress/s2/decode.go @@ -10,6 +10,8 @@ import ( "errors" "fmt" "strconv" + + "github.com/klauspost/compress/internal/race" ) var ( @@ -63,6 +65,10 @@ func Decode(dst, src []byte) ([]byte, error) { } else { dst = make([]byte, dLen) } + + race.WriteSlice(dst) + race.ReadSlice(src[s:]) + if s2Decode(dst, src[s:]) != 0 { return nil, ErrCorrupt } diff --git a/vendor/github.com/klauspost/compress/s2/encode_amd64.go b/vendor/github.com/klauspost/compress/s2/encode_amd64.go index ebc332ad5..4f45206a4 100644 --- a/vendor/github.com/klauspost/compress/s2/encode_amd64.go +++ b/vendor/github.com/klauspost/compress/s2/encode_amd64.go @@ -3,6 +3,8 @@ package s2 +import "github.com/klauspost/compress/internal/race" + const hasAmd64Asm = true // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. 
It @@ -14,6 +16,9 @@ const hasAmd64Asm = true // len(dst) >= MaxEncodedLen(len(src)) && // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlock(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + const ( // Use 12 bit table when less than... limit12B = 16 << 10 @@ -50,6 +55,9 @@ func encodeBlock(dst, src []byte) (d int) { // len(dst) >= MaxEncodedLen(len(src)) && // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetter(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + const ( // Use 12 bit table when less than... limit12B = 16 << 10 @@ -86,6 +94,9 @@ func encodeBlockBetter(dst, src []byte) (d int) { // len(dst) >= MaxEncodedLen(len(src)) && // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockSnappy(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + const ( // Use 12 bit table when less than... limit12B = 16 << 10 @@ -121,6 +132,9 @@ func encodeBlockSnappy(dst, src []byte) (d int) { // len(dst) >= MaxEncodedLen(len(src)) && // minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize func encodeBlockBetterSnappy(dst, src []byte) (d int) { + race.ReadSlice(src) + race.WriteSlice(dst) + const ( // Use 12 bit table when less than... limit12B = 16 << 10 diff --git a/vendor/github.com/klauspost/compress/s2/reader.go b/vendor/github.com/klauspost/compress/s2/reader.go index 2f01a3987..46ead58fe 100644 --- a/vendor/github.com/klauspost/compress/s2/reader.go +++ b/vendor/github.com/klauspost/compress/s2/reader.go @@ -104,12 +104,14 @@ func ReaderIgnoreStreamIdentifier() ReaderOption { // For each chunk with the ID, the callback is called with the content. // Any returned non-nil error will abort decompression. // Only one callback per ID is supported, latest sent will be used. +// You can peek the stream, triggering the callback, by doing a Read with a 0 +// byte buffer. func ReaderSkippableCB(id uint8, fn func(r io.Reader) error) ReaderOption { return func(r *Reader) error { if id < 0x80 || id > 0xfd { return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfd (inclusive)") } - r.skippableCB[id] = fn + r.skippableCB[id-0x80] = fn return nil } } @@ -128,7 +130,7 @@ type Reader struct { err error decoded []byte buf []byte - skippableCB [0x80]func(r io.Reader) error + skippableCB [0xff - 0x80]func(r io.Reader) error blockStart int64 // Uncompressed offset at start of current. index *Index @@ -201,7 +203,7 @@ func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { // The supplied slice does not need to be the size of the read. func (r *Reader) skippable(tmp []byte, n int, allowEOF bool, id uint8) (ok bool) { if id < 0x80 { - r.err = fmt.Errorf("interbal error: skippable id < 0x80") + r.err = fmt.Errorf("internal error: skippable id < 0x80") return false } if fn := r.skippableCB[id-0x80]; fn != nil { @@ -1048,15 +1050,17 @@ func (r *Reader) ReadByte() (byte, error) { } // SkippableCB will register a callback for chunks with the specified ID. -// ID must be a Reserved skippable chunks ID, 0x80-0xfe (inclusive). +// ID must be a Reserved skippable chunks ID, 0x80-0xfd (inclusive). // For each chunk with the ID, the callback is called with the content. // Any returned non-nil error will abort decompression. // Only one callback per ID is supported, latest sent will be used. // Sending a nil function will disable previous callbacks. 
+// You can peek the stream, triggering the callback, by doing a Read with a 0 +// byte buffer. func (r *Reader) SkippableCB(id uint8, fn func(r io.Reader) error) error { - if id < 0x80 || id > chunkTypePadding { + if id < 0x80 || id >= chunkTypePadding { return fmt.Errorf("ReaderSkippableCB: Invalid id provided, must be 0x80-0xfe (inclusive)") } - r.skippableCB[id] = fn + r.skippableCB[id-0x80] = fn return nil } diff --git a/vendor/github.com/klauspost/compress/s2/s2.go b/vendor/github.com/klauspost/compress/s2/s2.go index dae3f731f..72bcb4945 100644 --- a/vendor/github.com/klauspost/compress/s2/s2.go +++ b/vendor/github.com/klauspost/compress/s2/s2.go @@ -37,6 +37,8 @@ package s2 import ( "bytes" "hash/crc32" + + "github.com/klauspost/compress/internal/race" ) /* @@ -112,6 +114,8 @@ var crcTable = crc32.MakeTable(crc32.Castagnoli) // crc implements the checksum specified in section 3 of // https://github.com/google/snappy/blob/master/framing_format.txt func crc(b []byte) uint32 { + race.ReadSlice(b) + c := crc32.Update(0, crcTable, b) return c>>15 | c<<17 + 0xa282ead8 } diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go index 089cd36d8..bba66a876 100644 --- a/vendor/github.com/klauspost/compress/s2/writer.go +++ b/vendor/github.com/klauspost/compress/s2/writer.go @@ -13,6 +13,8 @@ import ( "io" "runtime" "sync" + + "github.com/klauspost/compress/internal/race" ) const ( @@ -271,7 +273,7 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) { return fmt.Errorf("skippable block excessed maximum size") } var header [4]byte - chunkLen := 4 + len(data) + chunkLen := len(data) header[0] = id header[1] = uint8(chunkLen >> 0) header[2] = uint8(chunkLen >> 8) @@ -282,7 +284,7 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) { if err = w.err(err); err != nil { return err } - if n != len(data) { + if n != len(b) { return w.err(io.ErrShortWrite) } w.written += int64(n) @@ -303,9 +305,7 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) { if err := write(header[:]); err != nil { return err } - if err := write(data); err != nil { - return err - } + return write(data) } // Create output... @@ -385,6 +385,8 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) { buf = buf[len(uncompressed):] // Get an output buffer. obuf := w.buffers.Get().([]byte)[:len(uncompressed)+obufHeaderLen] + race.WriteSlice(obuf) + output := make(chan result) // Queue output now, so we keep order. w.output <- output @@ -393,6 +395,8 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) { } w.uncompWritten += int64(len(uncompressed)) go func() { + race.ReadSlice(uncompressed) + checksum := crc(uncompressed) // Set to uncompressed. diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod index 2263853fc..5a4412f90 100644 --- a/vendor/github.com/klauspost/compress/s2sx.mod +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -1,4 +1,4 @@ module github.com/klauspost/compress -go 1.16 +go 1.19 diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go index f6a240970..6a5a2988b 100644 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -95,42 +95,54 @@ type Header struct { // If there isn't enough input, io.ErrUnexpectedEOF is returned. 
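Putting the corrected skippable-chunk handling together, here is a minimal sketch (not part of the patch; chunk payloads are illustrative) that round-trips a user-defined chunk, valid IDs being 0x80-0xfd, through the s2 writer and reader. The zero-byte `Read` peek is the behavior documented above.

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/s2"
)

func main() {
	var buf bytes.Buffer

	w := s2.NewWriter(&buf)
	// User-defined skippable chunk, written before the payload.
	if err := w.AddSkippableBlock(0x80, []byte(`{"origin":"example"}`)); err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("payload")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	r := s2.NewReader(&buf, s2.ReaderSkippableCB(0x80, func(cr io.Reader) error {
		meta, err := io.ReadAll(cr)
		fmt.Printf("metadata chunk: %s\n", meta)
		return err
	}))

	// A Read with a 0 byte buffer peeks the stream and fires the callback.
	_, _ = r.Read(nil)

	data, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("payload: %s\n", data)
}
```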
// The FirstBlock.OK will indicate if enough information was available to decode the first block header. func (h *Header) Decode(in []byte) error { + _, err := h.DecodeAndStrip(in) + return err +} + +// DecodeAndStrip will decode the header from the beginning of the stream +// and on success return the remaining bytes. +// This will decode the frame header and the first block header if enough bytes are provided. +// It is recommended to provide at least HeaderMaxSize bytes. +// If the frame header cannot be read an error will be returned. +// If there isn't enough input, io.ErrUnexpectedEOF is returned. +// The FirstBlock.OK will indicate if enough information was available to decode the first block header. +func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { *h = Header{} if len(in) < 4 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } h.HeaderSize += 4 b, in := in[:4], in[4:] if string(b) != frameMagic { if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { - return ErrMagicMismatch + return nil, ErrMagicMismatch } if len(in) < 4 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } h.HeaderSize += 4 h.Skippable = true h.SkippableID = int(b[0] & 0xf) h.SkippableSize = binary.LittleEndian.Uint32(in) - return nil + return in[4:], nil } // Read Window_Descriptor // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor if len(in) < 1 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } fhd, in := in[0], in[1:] h.HeaderSize++ h.SingleSegment = fhd&(1<<5) != 0 h.HasCheckSum = fhd&(1<<2) != 0 if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") + return nil, errors.New("reserved bit set on frame header") } if !h.SingleSegment { if len(in) < 1 { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } var wd byte wd, in = in[0], in[1:] @@ -148,7 +160,7 @@ func (h *Header) Decode(in []byte) error { size = 4 } if len(in) < int(size) { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } b, in = in[:size], in[size:] h.HeaderSize += int(size) @@ -178,7 +190,7 @@ func (h *Header) Decode(in []byte) error { if fcsSize > 0 { h.HasFCS = true if len(in) < fcsSize { - return io.ErrUnexpectedEOF + return nil, io.ErrUnexpectedEOF } b, in = in[:fcsSize], in[fcsSize:] h.HeaderSize += int(fcsSize) @@ -199,7 +211,7 @@ func (h *Header) Decode(in []byte) error { // Frame Header done, we will not fail from now on. if len(in) < 3 { - return nil + return in, nil } tmp := in[:3] bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) @@ -209,7 +221,7 @@ func (h *Header) Decode(in []byte) error { cSize := int(bh >> 3) switch blockType { case blockTypeReserved: - return nil + return in, nil case blockTypeRLE: h.FirstBlock.Compressed = true h.FirstBlock.DecompressedSize = cSize @@ -225,5 +237,25 @@ func (h *Header) Decode(in []byte) error { } h.FirstBlock.OK = true - return nil + return in, nil +} + +// AppendTo will append the encoded header to the dst slice. +// There is no error checking performed on the header values. +func (h *Header) AppendTo(dst []byte) ([]byte, error) { + if h.Skippable { + magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} + magic[0] |= byte(h.SkippableID & 0xf) + dst = append(dst, magic[:]...) 
+ f := h.SkippableSize + return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil + } + f := frameHeader{ + ContentSize: h.FrameContentSize, + WindowSize: uint32(h.WindowSize), + SingleSegment: h.SingleSegment, + Checksum: h.HasCheckSum, + DictID: h.DictionaryID, + } + return f.appendTo(dst), nil } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index faaf81921..20671dcb9 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -94,7 +94,7 @@ func WithEncoderConcurrency(n int) EOption { // The value must be a power of two between MinWindowSize and MaxWindowSize. // A larger value will enable better compression but allocate more memory and, // for above-default values, take considerably longer. -// The default value is determined by the compression level. +// The default value is determined by the compression level and max 8MB. func WithWindowSize(n int) EOption { return func(o *encoderOptions) error { switch { @@ -232,9 +232,9 @@ func WithEncoderLevel(l EncoderLevel) EOption { case SpeedDefault: o.windowSize = 8 << 20 case SpeedBetterCompression: - o.windowSize = 16 << 20 + o.windowSize = 8 << 20 case SpeedBestCompression: - o.windowSize = 32 << 20 + o.windowSize = 8 << 20 } } if !o.customALEntropy { diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go index 2f5d5ed45..667ca0679 100644 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go @@ -76,7 +76,7 @@ func (f frameHeader) appendTo(dst []byte) []byte { if f.SingleSegment { dst = append(dst, uint8(f.ContentSize)) } - // Unless SingleSegment is set, framessizes < 256 are nto stored. + // Unless SingleSegment is set, framessizes < 256 are not stored. 
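A short sketch of the new `Header.DecodeAndStrip` (the frame is produced with `EncodeAll` purely for illustration):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	frame := enc.EncodeAll([]byte("hello header"), nil)
	_ = enc.Close()

	var h zstd.Header
	// DecodeAndStrip decodes the frame header (and the first block
	// header, if enough bytes are present) and returns the rest.
	remain, err := h.DecodeAndStrip(frame)
	if err != nil {
		panic(err)
	}
	fmt.Printf("header size: %d, single segment: %v, first block OK: %v, remaining: %d bytes\n",
		h.HeaderSize, h.SingleSegment, h.FirstBlock.OK, len(remain))
}
```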
case 1: f.ContentSize -= 256 dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go index 332e51fe4..8adfebb02 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go @@ -20,10 +20,9 @@ func (s *fseDecoder) buildDtable() error { if v == -1 { s.dt[highThreshold].setAddBits(uint8(i)) highThreshold-- - symbolNext[i] = 1 - } else { - symbolNext[i] = uint16(v) + v = 1 } + symbolNext[i] = uint16(v) } } @@ -35,10 +34,12 @@ func (s *fseDecoder) buildDtable() error { for ss, v := range s.norm[:s.symbolLen] { for i := 0; i < int(v); i++ { s.dt[position].setAddBits(uint8(ss)) - position = (position + step) & tableMask - for position > highThreshold { + for { // lowprob area position = (position + step) & tableMask + if position <= highThreshold { + break + } } } } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 974b99725..5b06174b8 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -157,8 +157,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -177,8 +176,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -197,8 +195,7 @@ sequenceDecs_decode_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -459,8 +456,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R14 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -479,8 +475,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R14 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -499,8 +494,7 @@ sequenceDecs_decode_56_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R14 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R14*1), CX MOVQ DX, R15 MOVQ CX, BX @@ -772,11 +766,10 @@ sequenceDecs_decode_bmi2_fill_2_end: BZHIQ R14, R15, R15 // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -784,11 +777,10 @@ sequenceDecs_decode_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -796,10 +788,9 @@ sequenceDecs_decode_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -1032,11 +1023,10 @@ sequenceDecs_decode_56_bmi2_fill_end: BZHIQ R14, R15, R15 // Update Offset State - BZHIQ 
R8, R15, CX - SHRXQ R8, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R15, CX + SHRXQ R8, R15, R15 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -1044,11 +1034,10 @@ sequenceDecs_decode_56_bmi2_fill_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - MOVQ $0x00001010, R14 - BEXTRQ R14, DI, DI - ADDQ CX, DI + BZHIQ DI, R15, CX + SHRXQ DI, R15, R15 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -1056,10 +1045,9 @@ sequenceDecs_decode_56_bmi2_fill_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R15, CX - MOVQ $0x00001010, R14 - BEXTRQ R14, SI, SI - ADDQ CX, SI + BZHIQ SI, R15, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -1967,8 +1955,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -1987,8 +1974,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -2007,8 +1993,7 @@ sequenceDecs_decodeSync_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -2514,11 +2499,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: BZHIQ R13, R14, R14 // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -2526,11 +2510,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI + BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -2538,10 +2521,9 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX @@ -3055,8 +3037,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Literal Length State MOVBQZX DI, R13 - SHRQ $0x10, DI - MOVWQZX DI, DI + SHRL $0x10, DI LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3075,8 +3056,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Match Length State MOVBQZX R8, R13 - SHRQ $0x10, R8 - MOVWQZX R8, R8 + SHRL $0x10, R8 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3095,8 +3075,7 @@ sequenceDecs_decodeSync_safe_amd64_ll_update_zero: // Update Offset State MOVBQZX R9, R13 - SHRQ $0x10, R9 - MOVWQZX R9, R9 + SHRL $0x10, R9 LEAQ (BX)(R13*1), CX MOVQ DX, R14 MOVQ CX, BX @@ -3704,11 +3683,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: BZHIQ R13, R14, R14 // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, R8, R8 - ADDQ CX, R8 + BZHIQ R8, R14, CX + SHRXQ R8, R14, R14 + SHRL $0x10, R8 + ADDQ CX, R8 // Load ctx.ofTable MOVQ ctx+16(FP), CX @@ -3716,11 +3694,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: MOVQ (CX)(R8*8), R8 // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - MOVQ $0x00001010, R13 - BEXTRQ R13, DI, DI - ADDQ CX, DI + 
BZHIQ DI, R14, CX + SHRXQ DI, R14, R14 + SHRL $0x10, DI + ADDQ CX, DI // Load ctx.mlTable MOVQ ctx+16(FP), CX @@ -3728,10 +3705,9 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: MOVQ (CX)(DI*8), DI // Update Literal Length State - BZHIQ SI, R14, CX - MOVQ $0x00001010, R13 - BEXTRQ R13, SI, SI - ADDQ CX, SI + BZHIQ SI, R14, CX + SHRL $0x10, SI + ADDQ CX, SI // Load ctx.llTable MOVQ ctx+16(FP), CX diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index f0b359d8e..b832ac9a1 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -19,6 +19,7 @@ import ( "net/url" "os" "path/filepath" + "sort" "strings" "time" @@ -143,18 +144,21 @@ var ( ScrapeInterval: model.Duration(1 * time.Minute), ScrapeTimeout: model.Duration(10 * time.Second), EvaluationInterval: model.Duration(1 * time.Minute), + // When native histogram feature flag is enabled, ScrapeProtocols default + // changes to DefaultNativeHistogramScrapeProtocols. + ScrapeProtocols: DefaultScrapeProtocols, } // DefaultScrapeConfig is the default scrape configuration. DefaultScrapeConfig = ScrapeConfig{ - // ScrapeTimeout and ScrapeInterval default to the configured - // globals. + // ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals. ScrapeClassicHistograms: false, MetricsPath: "/metrics", Scheme: "http", HonorLabels: false, HonorTimestamps: true, HTTPClientConfig: config.DefaultHTTPClientConfig, + EnableCompression: true, } // DefaultAlertmanagerConfig is the default alertmanager configuration. @@ -260,7 +264,7 @@ func (c Config) String() string { return string(b) } -// ScrapeConfigs returns the scrape configurations. +// GetScrapeConfigs returns the scrape configurations. func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs)) @@ -385,6 +389,11 @@ type GlobalConfig struct { ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"` // The default timeout when scraping targets. ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"` + // The protocols to negotiate during a scrape. It tells clients what + // protocol are accepted by Prometheus and with what weight (most wanted is first). + // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, + // OpenMetricsText1.0.0, PrometheusText0.0.4. + ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"` // How frequently to evaluate rules by default. EvaluationInterval model.Duration `yaml:"evaluation_interval,omitempty"` // File to which PromQL queries are logged. @@ -414,6 +423,68 @@ type GlobalConfig struct { KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` } +// ScrapeProtocol represents supported protocol for scraping metrics. +type ScrapeProtocol string + +// Validate returns error if given scrape protocol is not supported. 
+func (s ScrapeProtocol) Validate() error { + if _, ok := ScrapeProtocolsHeaders[s]; !ok { + return fmt.Errorf("unknown scrape protocol %v, supported: %v", + s, func() (ret []string) { + for k := range ScrapeProtocolsHeaders { + ret = append(ret, string(k)) + } + sort.Strings(ret) + return ret + }()) + } + return nil +} + +var ( + PrometheusProto ScrapeProtocol = "PrometheusProto" + PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4" + OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1" + OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0" + + ScrapeProtocolsHeaders = map[ScrapeProtocol]string{ + PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + PrometheusText0_0_4: "text/plain;version=0.0.4", + OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1", + OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0", + } + + DefaultScrapeProtocols = []ScrapeProtocol{ + OpenMetricsText1_0_0, + OpenMetricsText0_0_1, + PrometheusText0_0_4, + } + DefaultNativeHistogramScrapeProtocols = []ScrapeProtocol{ + PrometheusProto, + OpenMetricsText1_0_0, + OpenMetricsText0_0_1, + PrometheusText0_0_4, + } +) + +// validateAcceptScrapeProtocols return errors if we see problems with accept scrape protocols option. +func validateAcceptScrapeProtocols(sps []ScrapeProtocol) error { + if len(sps) == 0 { + return errors.New("scrape_protocols cannot be empty") + } + dups := map[string]struct{}{} + for _, sp := range sps { + if _, ok := dups[strings.ToLower(string(sp))]; ok { + return fmt.Errorf("duplicated protocol in scrape_protocols, got %v", sps) + } + if err := sp.Validate(); err != nil { + return fmt.Errorf("scrape_protocols: %w", err) + } + dups[strings.ToLower(string(sp))] = struct{}{} + } + return nil +} + // SetDirectory joins any relative file paths with dir. func (c *GlobalConfig) SetDirectory(dir string) { c.QueryLogFile = config.JoinDir(dir, c.QueryLogFile) @@ -459,6 +530,14 @@ func (c *GlobalConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { if gc.EvaluationInterval == 0 { gc.EvaluationInterval = DefaultGlobalConfig.EvaluationInterval } + + if gc.ScrapeProtocols == nil { + gc.ScrapeProtocols = DefaultGlobalConfig.ScrapeProtocols + } + if err := validateAcceptScrapeProtocols(gc.ScrapeProtocols); err != nil { + return fmt.Errorf("%w for global config", err) + } + *c = *gc return nil } @@ -469,7 +548,8 @@ func (c *GlobalConfig) isZero() bool { c.ScrapeInterval == 0 && c.ScrapeTimeout == 0 && c.EvaluationInterval == 0 && - c.QueryLogFile == "" + c.QueryLogFile == "" && + c.ScrapeProtocols == nil } type ScrapeConfigs struct { @@ -492,12 +572,19 @@ type ScrapeConfig struct { ScrapeInterval model.Duration `yaml:"scrape_interval,omitempty"` // The timeout for scraping targets of this config. ScrapeTimeout model.Duration `yaml:"scrape_timeout,omitempty"` + // The protocols to negotiate during a scrape. It tells clients what + // protocol are accepted by Prometheus and with what preference (most wanted is first). + // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, + // OpenMetricsText1.0.0, PrometheusText0.0.4. + ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"` // Whether to scrape a classic histogram that is also exposed as a native histogram. ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"` // The HTTP resource path on which to fetch metrics from targets. 
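For illustration, a small sketch exercising the scrape-protocol names and validation added above (output formatting is incidental):

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/config"
)

func main() {
	// Known protocols validate cleanly and map to Accept header values.
	for _, sp := range config.DefaultScrapeProtocols {
		fmt.Printf("%-22s -> Accept: %s\n", sp, config.ScrapeProtocolsHeaders[sp])
	}

	// Values are case sensitive; unknown names are rejected.
	bad := config.ScrapeProtocol("prometheusproto")
	if err := bad.Validate(); err != nil {
		fmt.Println("error:", err)
	}
}
```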
MetricsPath string `yaml:"metrics_path,omitempty"` // The URL scheme with which to fetch metrics from targets. Scheme string `yaml:"scheme,omitempty"` + // Indicator whether to request compressed response from the target. + EnableCompression bool `yaml:"enable_compression"` // An uncompressed response body larger than this many bytes will cause the // scrape to fail. 0 means no limit. BodySizeLimit units.Base2Bytes `yaml:"body_size_limit,omitempty"` @@ -579,6 +666,7 @@ func (c *ScrapeConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return nil } +// Validate validates scrape config, but also fills relevant default values from global config if needed. func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { if c == nil { return errors.New("empty or null scrape config section") @@ -620,6 +708,13 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { c.KeepDroppedTargets = globalConfig.KeepDroppedTargets } + if c.ScrapeProtocols == nil { + c.ScrapeProtocols = globalConfig.ScrapeProtocols + } + if err := validateAcceptScrapeProtocols(c.ScrapeProtocols); err != nil { + return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName) + } + return nil } diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index 4d6027691..86439d2c9 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -92,7 +92,7 @@ type Provider struct { newSubs map[string]struct{} } -// Discoverer return the Discoverer of the provider +// Discoverer return the Discoverer of the provider. func (p *Provider) Discoverer() Discoverer { return p.d } diff --git a/vendor/github.com/prometheus/prometheus/model/exemplar/exemplar.go b/vendor/github.com/prometheus/prometheus/model/exemplar/exemplar.go index 2e39cf689..08f55374e 100644 --- a/vendor/github.com/prometheus/prometheus/model/exemplar/exemplar.go +++ b/vendor/github.com/prometheus/prometheus/model/exemplar/exemplar.go @@ -48,3 +48,18 @@ func (e Exemplar) Equals(e2 Exemplar) bool { return e.Value == e2.Value } + +// Sort first by timestamp, then value, then labels. 
+func Compare(a, b Exemplar) int { + if a.Ts < b.Ts { + return -1 + } else if a.Ts > b.Ts { + return 1 + } + if a.Value < b.Value { + return -1 + } else if a.Value > b.Value { + return 1 + } + return labels.Compare(a.Labels, b.Labels) +} diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go index 41873278c..e090c2be2 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go @@ -94,8 +94,8 @@ func (h *FloatHistogram) CopyToSchema(targetSchema int32) *FloatHistogram { Sum: h.Sum, } - c.PositiveSpans, c.PositiveBuckets = mergeToSchema(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema) - c.NegativeSpans, c.NegativeBuckets = mergeToSchema(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema) + c.PositiveSpans, c.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false, false) + c.NegativeSpans, c.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false, false) return &c } @@ -231,11 +231,8 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { // resulting histogram might have buckets with a population of zero or directly // adjacent spans (offset=0). To normalize those, call the Compact method. // -// The method reconciles differences in the zero threshold and in the schema, -// but the schema of the other histogram must be ≥ the schema of the receiving -// histogram (i.e. must have an equal or higher resolution). This means that the -// schema of the receiving histogram won't change. Its zero threshold, however, -// will change if needed. The other histogram will not be modified in any case. +// The method reconciles differences in the zero threshold and in the schema, and +// changes them if needed. The other histogram will not be modified in any case. // // This method returns a pointer to the receiving histogram for convenience. 
func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { @@ -268,17 +265,32 @@ func (h *FloatHistogram) Add(other *FloatHistogram) *FloatHistogram { h.Count += other.Count h.Sum += other.Sum - otherPositiveSpans := other.PositiveSpans - otherPositiveBuckets := other.PositiveBuckets - otherNegativeSpans := other.NegativeSpans - otherNegativeBuckets := other.NegativeBuckets - if other.Schema != h.Schema { - otherPositiveSpans, otherPositiveBuckets = mergeToSchema(other.PositiveSpans, other.PositiveBuckets, other.Schema, h.Schema) - otherNegativeSpans, otherNegativeBuckets = mergeToSchema(other.NegativeSpans, other.NegativeBuckets, other.Schema, h.Schema) + var ( + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets + + otherPositiveSpans = other.PositiveSpans + otherPositiveBuckets = other.PositiveBuckets + otherNegativeSpans = other.NegativeSpans + otherNegativeBuckets = other.NegativeBuckets + ) + + switch { + case other.Schema < h.Schema: + hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true) + hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true) + h.Schema = other.Schema + + case other.Schema > h.Schema: + otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false, false) + otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false) } - h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) - h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, false, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + return h } @@ -289,17 +301,32 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { h.Count -= other.Count h.Sum -= other.Sum - otherPositiveSpans := other.PositiveSpans - otherPositiveBuckets := other.PositiveBuckets - otherNegativeSpans := other.NegativeSpans - otherNegativeBuckets := other.NegativeBuckets - if other.Schema != h.Schema { - otherPositiveSpans, otherPositiveBuckets = mergeToSchema(other.PositiveSpans, other.PositiveBuckets, other.Schema, h.Schema) - otherNegativeSpans, otherNegativeBuckets = mergeToSchema(other.NegativeSpans, other.NegativeBuckets, other.Schema, h.Schema) + var ( + hPositiveSpans = h.PositiveSpans + hPositiveBuckets = h.PositiveBuckets + hNegativeSpans = h.NegativeSpans + hNegativeBuckets = h.NegativeBuckets + + otherPositiveSpans = other.PositiveSpans + otherPositiveBuckets = other.PositiveBuckets + otherNegativeSpans = other.NegativeSpans + otherNegativeBuckets = other.NegativeBuckets + ) + + switch { + case other.Schema < h.Schema: + hPositiveSpans, hPositiveBuckets = reduceResolution(hPositiveSpans, hPositiveBuckets, h.Schema, other.Schema, false, true) + hNegativeSpans, hNegativeBuckets = reduceResolution(hNegativeSpans, hNegativeBuckets, h.Schema, other.Schema, false, true) 
+ h.Schema = other.Schema + + case other.Schema > h.Schema: + otherPositiveSpans, otherPositiveBuckets = reduceResolution(otherPositiveSpans, otherPositiveBuckets, other.Schema, h.Schema, false, false) + otherNegativeSpans, otherNegativeBuckets = reduceResolution(otherNegativeSpans, otherNegativeBuckets, other.Schema, h.Schema, false, false) } - h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.PositiveSpans, h.PositiveBuckets, otherPositiveSpans, otherPositiveBuckets) - h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, h.NegativeSpans, h.NegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + h.PositiveSpans, h.PositiveBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hPositiveSpans, hPositiveBuckets, otherPositiveSpans, otherPositiveBuckets) + h.NegativeSpans, h.NegativeBuckets = addBuckets(h.Schema, h.ZeroThreshold, true, hNegativeSpans, hNegativeBuckets, otherNegativeSpans, otherNegativeBuckets) + return h } @@ -307,13 +334,17 @@ func (h *FloatHistogram) Sub(other *FloatHistogram) *FloatHistogram { // Exact match is when there are no new buckets (even empty) and no missing buckets, // and all the bucket values match. Spans can have different empty length spans in between, // but they must represent the same bucket layout to match. +// Sum, Count, ZeroCount and bucket values are compared based on their bit patterns +// because this method is about data equality rather than mathematical equality. func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { if h2 == nil { return false } if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || - h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum { + math.Float64bits(h.ZeroCount) != math.Float64bits(h2.ZeroCount) || + math.Float64bits(h.Count) != math.Float64bits(h2.Count) || + math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) { return false } @@ -324,16 +355,44 @@ func (h *FloatHistogram) Equals(h2 *FloatHistogram) bool { return false } - if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { + if !floatBucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { return false } - if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + if !floatBucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { return false } return true } +// Size returns the total size of the FloatHistogram, which includes the size of the pointer +// to FloatHistogram, all its fields, and all elements contained in slices. +// NOTE: this is only valid for 64 bit architectures. +func (h *FloatHistogram) Size() int { + // Size of each slice separately. + posSpanSize := len(h.PositiveSpans) * 8 // 8 bytes (int32 + uint32). + negSpanSize := len(h.NegativeSpans) * 8 // 8 bytes (int32 + uint32). + posBucketSize := len(h.PositiveBuckets) * 8 // 8 bytes (float64). + negBucketSize := len(h.NegativeBuckets) * 8 // 8 bytes (float64). + + // Total size of the struct. + + // fh is 8 bytes. + // fh.CounterResetHint is 4 bytes (1 byte bool + 3 bytes padding). + // fh.Schema is 4 bytes. + // fh.ZeroThreshold is 8 bytes. + // fh.ZeroCount is 8 bytes. + // fh.Count is 8 bytes. + // fh.Sum is 8 bytes. + // fh.PositiveSpans is 24 bytes. + // fh.NegativeSpans is 24 bytes. + // fh.PositiveBuckets is 24 bytes. + // fh.NegativeBuckets is 24 bytes. 
+ structSize := 144 + + return structSize + posSpanSize + negSpanSize + posBucketSize + negBucketSize +} + // Compact eliminates empty buckets at the beginning and end of each span, then // merges spans that are consecutive or at most maxEmptyBuckets apart, and // finally splits spans that contain more consecutive empty buckets than @@ -434,25 +493,25 @@ func (h *FloatHistogram) DetectReset(previous *FloatHistogram) bool { } currIt := h.floatBucketIterator(true, h.ZeroThreshold, h.Schema) prevIt := previous.floatBucketIterator(true, h.ZeroThreshold, h.Schema) - if detectReset(currIt, prevIt) { + if detectReset(&currIt, &prevIt) { return true } currIt = h.floatBucketIterator(false, h.ZeroThreshold, h.Schema) prevIt = previous.floatBucketIterator(false, h.ZeroThreshold, h.Schema) - return detectReset(currIt, prevIt) + return detectReset(&currIt, &prevIt) } -func detectReset(currIt, prevIt BucketIterator[float64]) bool { +func detectReset(currIt, prevIt *floatBucketIterator) bool { if !prevIt.Next() { return false // If no buckets in previous histogram, nothing can be reset. } - prevBucket := prevIt.At() + prevBucket := prevIt.strippedAt() if !currIt.Next() { // No bucket in current, but at least one in previous // histogram. Check if any of those are non-zero, in which case // this is a reset. for { - if prevBucket.Count != 0 { + if prevBucket.count != 0 { return true } if !prevIt.Next() { @@ -460,10 +519,10 @@ func detectReset(currIt, prevIt BucketIterator[float64]) bool { } } } - currBucket := currIt.At() + currBucket := currIt.strippedAt() for { // Forward currIt until we find the bucket corresponding to prevBucket. - for currBucket.Index < prevBucket.Index { + for currBucket.index < prevBucket.index { if !currIt.Next() { // Reached end of currIt early, therefore // previous histogram has a bucket that the @@ -471,7 +530,7 @@ func detectReset(currIt, prevIt BucketIterator[float64]) bool { // remaining buckets in the previous histogram // are unpopulated, this is a reset. for { - if prevBucket.Count != 0 { + if prevBucket.count != 0 { return true } if !prevIt.Next() { @@ -479,18 +538,18 @@ func detectReset(currIt, prevIt BucketIterator[float64]) bool { } } } - currBucket = currIt.At() + currBucket = currIt.strippedAt() } - if currBucket.Index > prevBucket.Index { + if currBucket.index > prevBucket.index { // Previous histogram has a bucket the current one does // not have. If it's populated, it's a reset. - if prevBucket.Count != 0 { + if prevBucket.count != 0 { return true } } else { // We have reached corresponding buckets in both iterators. // We can finally compare the counts. - if currBucket.Count < prevBucket.Count { + if currBucket.count < prevBucket.count { return true } } @@ -498,35 +557,39 @@ func detectReset(currIt, prevIt BucketIterator[float64]) bool { // Reached end of prevIt without finding offending buckets. return false } - prevBucket = prevIt.At() + prevBucket = prevIt.strippedAt() } } // PositiveBucketIterator returns a BucketIterator to iterate over all positive // buckets in ascending order (starting next to the zero bucket and going up). func (h *FloatHistogram) PositiveBucketIterator() BucketIterator[float64] { - return h.floatBucketIterator(true, 0, h.Schema) + it := h.floatBucketIterator(true, 0, h.Schema) + return &it } // NegativeBucketIterator returns a BucketIterator to iterate over all negative // buckets in descending order (starting next to the zero bucket and going // down). 
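The byte accounting above can be sanity-checked with a tiny sketch: one positive span (8 bytes) and two positive buckets (16 bytes) on top of the 144-byte struct give 168.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          0,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{1, 2},
	}
	// 144 bytes for the struct itself, plus one 8-byte span
	// and two 8-byte buckets: 144 + 8 + 16 = 168.
	fmt.Println(h.Size())
}
```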
func (h *FloatHistogram) NegativeBucketIterator() BucketIterator[float64] { - return h.floatBucketIterator(false, 0, h.Schema) + it := h.floatBucketIterator(false, 0, h.Schema) + return &it } // PositiveReverseBucketIterator returns a BucketIterator to iterate over all // positive buckets in descending order (starting at the highest bucket and // going down towards the zero bucket). func (h *FloatHistogram) PositiveReverseBucketIterator() BucketIterator[float64] { - return newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) + it := newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) + return &it } // NegativeReverseBucketIterator returns a BucketIterator to iterate over all // negative buckets in ascending order (starting at the lowest bucket and going // up towards the zero bucket). func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] { - return newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) + it := newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) + return &it } // AllBucketIterator returns a BucketIterator to iterate over all negative, @@ -537,8 +600,8 @@ func (h *FloatHistogram) NegativeReverseBucketIterator() BucketIterator[float64] func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { return &allFloatBucketIterator{ h: h, - leftIter: h.NegativeReverseBucketIterator(), - rightIter: h.PositiveBucketIterator(), + leftIter: newReverseFloatBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false), + rightIter: h.floatBucketIterator(true, 0, h.Schema), state: -1, } } @@ -551,12 +614,37 @@ func (h *FloatHistogram) AllBucketIterator() BucketIterator[float64] { func (h *FloatHistogram) AllReverseBucketIterator() BucketIterator[float64] { return &allFloatBucketIterator{ h: h, - leftIter: h.PositiveReverseBucketIterator(), - rightIter: h.NegativeBucketIterator(), + leftIter: newReverseFloatBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true), + rightIter: h.floatBucketIterator(false, 0, h.Schema), state: -1, } } +// Validate validates consistency between span and bucket slices. Also, buckets are checked +// against negative values. +// We do not check for h.Count being at least as large as the sum of the +// counts in the buckets because floating point precision issues can +// create false positives here. +func (h *FloatHistogram) Validate() error { + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return fmt.Errorf("negative side: %w", err) + } + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("positive side: %w", err) + } + var nCount, pCount float64 + err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false) + if err != nil { + return fmt.Errorf("negative side: %w", err) + } + err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false) + if err != nil { + return fmt.Errorf("positive side: %w", err) + } + + return nil +} + // zeroCountForLargerThreshold returns what the histogram's zero count would be // if the ZeroThreshold had the provided larger (or equal) value. If the // provided value is less than the histogram's ZeroThreshold, the method panics. @@ -683,17 +771,18 @@ func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 { // targetSchema prior to iterating (without mutating FloatHistogram). 
func (h *FloatHistogram) floatBucketIterator( positive bool, absoluteStartValue float64, targetSchema int32, -) *floatBucketIterator { +) floatBucketIterator { if targetSchema > h.Schema { panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema)) } - i := &floatBucketIterator{ + i := floatBucketIterator{ baseBucketIterator: baseBucketIterator[float64, float64]{ schema: h.Schema, positive: positive, }, - targetSchema: targetSchema, - absoluteStartValue: absoluteStartValue, + targetSchema: targetSchema, + absoluteStartValue: absoluteStartValue, + boundReachedStartValue: absoluteStartValue == 0, } if positive { i.spans = h.PositiveSpans @@ -705,11 +794,11 @@ func (h *FloatHistogram) floatBucketIterator( return i } -// reverseFloatbucketiterator is a low-level constructor for reverse bucket iterators. +// reverseFloatBucketIterator is a low-level constructor for reverse bucket iterators. func newReverseFloatBucketIterator( spans []Span, buckets []float64, schema int32, positive bool, -) *reverseFloatBucketIterator { - r := &reverseFloatBucketIterator{ +) reverseFloatBucketIterator { + r := reverseFloatBucketIterator{ baseBucketIterator: baseBucketIterator[float64, float64]{ schema: schema, spans: spans, @@ -737,6 +826,8 @@ type floatBucketIterator struct { targetSchema int32 // targetSchema is the schema to merge to and must be ≤ schema. origIdx int32 // The bucket index within the original schema. absoluteStartValue float64 // Never return buckets with an upper bound ≤ this value. + + boundReachedStartValue bool // Has getBound reached absoluteStartValue already? } func (i *floatBucketIterator) At() Bucket[float64] { @@ -749,74 +840,92 @@ func (i *floatBucketIterator) Next() bool { return false } - // Copy all of these into local variables so that we can forward to the - // next bucket and then roll back if needed. - origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan - span := i.spans[spansIdx] - firstPass := true - i.currCount = 0 - -mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema. - for { + if i.schema == i.targetSchema { + // Fast path for the common case. + span := i.spans[i.spansIdx] if i.bucketsIdx == 0 { // Seed origIdx for the first bucket. - origIdx = span.Offset + i.currIdx = span.Offset } else { - origIdx++ + i.currIdx++ } - for idxInSpan >= span.Length { + + for i.idxInSpan >= span.Length { // We have exhausted the current span and have to find a new // one. We even handle pathologic spans of length 0 here. - idxInSpan = 0 - spansIdx++ - if spansIdx >= len(i.spans) { - if firstPass { - return false + i.idxInSpan = 0 + i.spansIdx++ + if i.spansIdx >= len(i.spans) { + return false + } + span = i.spans[i.spansIdx] + i.currIdx += span.Offset + } + + i.currCount = i.buckets[i.bucketsIdx] + i.idxInSpan++ + i.bucketsIdx++ + } else { + // Copy all of these into local variables so that we can forward to the + // next bucket and then roll back if needed. + origIdx, spansIdx, idxInSpan := i.origIdx, i.spansIdx, i.idxInSpan + span := i.spans[spansIdx] + firstPass := true + i.currCount = 0 + + mergeLoop: // Merge together all buckets from the original schema that fall into one bucket in the targetSchema. + for { + if i.bucketsIdx == 0 { + // Seed origIdx for the first bucket. + origIdx = span.Offset + } else { + origIdx++ + } + for idxInSpan >= span.Length { + // We have exhausted the current span and have to find a new + // one. We even handle pathologic spans of length 0 here. 
+ idxInSpan = 0 + spansIdx++ + if spansIdx >= len(i.spans) { + if firstPass { + return false + } + break mergeLoop } + span = i.spans[spansIdx] + origIdx += span.Offset + } + currIdx := targetIdx(origIdx, i.schema, i.targetSchema) + switch { + case firstPass: + i.currIdx = currIdx + firstPass = false + case currIdx != i.currIdx: + // Reached next bucket in targetSchema. + // Do not actually forward to the next bucket, but break out. + break mergeLoop + } + i.currCount += i.buckets[i.bucketsIdx] + idxInSpan++ + i.bucketsIdx++ + i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan + if i.schema == i.targetSchema { + // Don't need to test the next bucket for mergeability + // if we have no schema change anyway. break mergeLoop } - span = i.spans[spansIdx] - origIdx += span.Offset - } - currIdx := i.targetIdx(origIdx) - switch { - case firstPass: - i.currIdx = currIdx - firstPass = false - case currIdx != i.currIdx: - // Reached next bucket in targetSchema. - // Do not actually forward to the next bucket, but break out. - break mergeLoop - } - i.currCount += i.buckets[i.bucketsIdx] - idxInSpan++ - i.bucketsIdx++ - i.origIdx, i.spansIdx, i.idxInSpan = origIdx, spansIdx, idxInSpan - if i.schema == i.targetSchema { - // Don't need to test the next bucket for mergeability - // if we have no schema change anyway. - break mergeLoop } } + // Skip buckets before absoluteStartValue. // TODO(beorn7): Maybe do something more efficient than this recursive call. - if getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue { + if !i.boundReachedStartValue && getBound(i.currIdx, i.targetSchema) <= i.absoluteStartValue { return i.Next() } + i.boundReachedStartValue = true return true } -// targetIdx returns the bucket index within i.targetSchema for the given bucket -// index within i.schema. -func (i *floatBucketIterator) targetIdx(idx int32) int32 { - if i.schema == i.targetSchema { - // Fast path for the common case. The below would yield the same - // result, just with more effort. - return idx - } - return ((idx - 1) >> (i.schema - i.targetSchema)) + 1 -} - type reverseFloatBucketIterator struct { baseBucketIterator[float64, float64] idxInSpan int32 // Changed from uint32 to allow negative values for exhaustion detection. @@ -843,8 +952,9 @@ func (i *reverseFloatBucketIterator) Next() bool { } type allFloatBucketIterator struct { - h *FloatHistogram - leftIter, rightIter BucketIterator[float64] + h *FloatHistogram + leftIter reverseFloatBucketIterator + rightIter floatBucketIterator // -1 means we are iterating negative buckets. // 0 means it is time for the zero bucket. // 1 means we are iterating positive buckets. @@ -910,69 +1020,6 @@ func targetIdx(idx, originSchema, targetSchema int32) int32 { return ((idx - 1) >> (originSchema - targetSchema)) + 1 } -// mergeToSchema is used to merge a FloatHistogram's Spans and Buckets (no matter if -// positive or negative) from the original schema to the target schema. -// The target schema must be smaller than the original schema. -func mergeToSchema(originSpans []Span, originBuckets []float64, originSchema, targetSchema int32) ([]Span, []float64) { - var ( - targetSpans []Span // The spans in the target schema. - targetBuckets []float64 // The buckets in the target schema. - bucketIdx int32 // The index of bucket in the origin schema. - lastTargetBucketIdx int32 // The index of the last added target bucket. - origBucketIdx int // The position of a bucket in originBuckets slice. 
- ) - - for _, span := range originSpans { - // Determine the index of the first bucket in this span. - bucketIdx += span.Offset - for j := 0; j < int(span.Length); j++ { - // Determine the index of the bucket in the target schema from the index in the original schema. - targetBucketIdx := targetIdx(bucketIdx, originSchema, targetSchema) - - switch { - case len(targetSpans) == 0: - // This is the first span in the targetSpans. - span := Span{ - Offset: targetBucketIdx, - Length: 1, - } - targetSpans = append(targetSpans, span) - targetBuckets = append(targetBuckets, originBuckets[0]) - lastTargetBucketIdx = targetBucketIdx - - case lastTargetBucketIdx == targetBucketIdx: - // The current bucket has to be merged into the same target bucket as the previous bucket. - targetBuckets[len(targetBuckets)-1] += originBuckets[origBucketIdx] - - case (lastTargetBucketIdx + 1) == targetBucketIdx: - // The current bucket has to go into a new target bucket, - // and that bucket is next to the previous target bucket, - // so we add it to the current target span. - targetSpans[len(targetSpans)-1].Length++ - targetBuckets = append(targetBuckets, originBuckets[origBucketIdx]) - lastTargetBucketIdx++ - - case (lastTargetBucketIdx + 1) < targetBucketIdx: - // The current bucket has to go into a new target bucket, - // and that bucket is separated by a gap from the previous target bucket, - // so we need to add a new target span. - span := Span{ - Offset: targetBucketIdx - lastTargetBucketIdx - 1, - Length: 1, - } - targetSpans = append(targetSpans, span) - targetBuckets = append(targetBuckets, originBuckets[origBucketIdx]) - lastTargetBucketIdx = targetBucketIdx - } - - bucketIdx++ - origBucketIdx++ - } - } - - return targetSpans, targetBuckets -} - // addBuckets adds the buckets described by spansB/bucketsB to the buckets described by spansA/bucketsA, // creating missing buckets in spansA/bucketsA as needed. // It returns the resulting spans/buckets (which must be used instead of the original spansA/bucketsA, @@ -986,8 +1033,8 @@ func addBuckets( spansB []Span, bucketsB []float64, ) ([]Span, []float64) { var ( - iSpan int = -1 - iBucket int = -1 + iSpan = -1 + iBucket = -1 iInSpan int32 indexA int32 indexB int32 @@ -1020,17 +1067,16 @@ func addBuckets( spansA[0].Length++ spansA[0].Offset-- goto nextLoop - } else { - spansA = append(spansA, Span{}) - copy(spansA[1:], spansA) - spansA[0] = Span{Offset: indexB, Length: 1} - if len(spansA) > 1 { - // Convert the absolute offset in the formerly - // first span to a relative offset. - spansA[1].Offset -= indexB + 1 - } - goto nextLoop } + spansA = append(spansA, Span{}) + copy(spansA[1:], spansA) + spansA[0] = Span{Offset: indexB, Length: 1} + if len(spansA) > 1 { + // Convert the absolute offset in the formerly + // first span to a relative offset. + spansA[1].Offset -= indexB + 1 + } + goto nextLoop } else if spansA[0].Offset == indexB { // Just add to first bucket. bucketsA[0] += bucketB @@ -1048,48 +1094,47 @@ func addBuckets( iInSpan += deltaIndex bucketsA[iBucket] += bucketB break - } else { - deltaIndex -= remainingInSpan - iBucket += int(remainingInSpan) - iSpan++ - if iSpan == len(spansA) || deltaIndex < spansA[iSpan].Offset { - // Bucket is in gap behind previous span (or there are no further spans). - bucketsA = append(bucketsA, 0) - copy(bucketsA[iBucket+1:], bucketsA[iBucket:]) - bucketsA[iBucket] = bucketB - switch { - case deltaIndex == 0: - // Directly after previous span, extend previous span. 
- if iSpan < len(spansA) { - spansA[iSpan].Offset-- - } - iSpan-- - iInSpan = int32(spansA[iSpan].Length) - spansA[iSpan].Length++ - goto nextLoop - case iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1: - // Directly before next span, extend next span. - iInSpan = 0 + } + deltaIndex -= remainingInSpan + iBucket += int(remainingInSpan) + iSpan++ + if iSpan == len(spansA) || deltaIndex < spansA[iSpan].Offset { + // Bucket is in gap behind previous span (or there are no further spans). + bucketsA = append(bucketsA, 0) + copy(bucketsA[iBucket+1:], bucketsA[iBucket:]) + bucketsA[iBucket] = bucketB + switch { + case deltaIndex == 0: + // Directly after previous span, extend previous span. + if iSpan < len(spansA) { spansA[iSpan].Offset-- - spansA[iSpan].Length++ - goto nextLoop - default: - // No next span, or next span is not directly adjacent to new bucket. - // Add new span. - iInSpan = 0 - if iSpan < len(spansA) { - spansA[iSpan].Offset -= deltaIndex + 1 - } - spansA = append(spansA, Span{}) - copy(spansA[iSpan+1:], spansA[iSpan:]) - spansA[iSpan] = Span{Length: 1, Offset: deltaIndex} - goto nextLoop } - } else { - // Try start of next span. - deltaIndex -= spansA[iSpan].Offset + iSpan-- + iInSpan = int32(spansA[iSpan].Length) + spansA[iSpan].Length++ + goto nextLoop + case iSpan < len(spansA) && deltaIndex == spansA[iSpan].Offset-1: + // Directly before next span, extend next span. iInSpan = 0 + spansA[iSpan].Offset-- + spansA[iSpan].Length++ + goto nextLoop + default: + // No next span, or next span is not directly adjacent to new bucket. + // Add new span. + iInSpan = 0 + if iSpan < len(spansA) { + spansA[iSpan].Offset -= deltaIndex + 1 + } + spansA = append(spansA, Span{}) + copy(spansA[iSpan+1:], spansA[iSpan:]) + spansA[iSpan] = Span{Length: 1, Offset: deltaIndex} + goto nextLoop } + } else { + // Try start of next span. + deltaIndex -= spansA[iSpan].Offset + iInSpan = 0 } } @@ -1102,3 +1147,29 @@ func addBuckets( return spansA, bucketsA } + +func floatBucketsMatch(b1, b2 []float64) bool { + if len(b1) != len(b2) { + return false + } + for i, b := range b1 { + if math.Float64bits(b) != math.Float64bits(b2[i]) { + return false + } + } + return true +} + +// ReduceResolution reduces the float histogram's spans, buckets into target schema. +// The target schema must be smaller than the current float histogram's schema. 
+func (h *FloatHistogram) ReduceResolution(targetSchema int32) *FloatHistogram { + if targetSchema >= h.Schema { + panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) + } + + h.PositiveSpans, h.PositiveBuckets = reduceResolution(h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, false, true) + h.NegativeSpans, h.NegativeBuckets = reduceResolution(h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, false, true) + + h.Schema = targetSchema + return h +} diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go index dad54cb06..7e1cc4b60 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/generic.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/generic.go @@ -14,11 +14,20 @@ package histogram import ( + "errors" "fmt" "math" "strings" ) +var ( + ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") + ErrHistogramCountMismatch = errors.New("histogram's observation count should equal the number of observations found in the buckets (in absence of NaN)") + ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") + ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") + ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") +) + // BucketCount is a type constraint for the count in a bucket, which can be // float64 (for type FloatHistogram) or uint64 (for type Histogram). type BucketCount interface { @@ -31,7 +40,7 @@ type BucketCount interface { // absolute counts directly). Go type parameters don't allow type // specialization. Therefore, where special treatment of deltas between buckets // vs. absolute counts is important, this information has to be provided as a -// separate boolean parameter "deltaBuckets" +// separate boolean parameter "deltaBuckets". type InternalBucketCount interface { float64 | int64 } @@ -53,6 +62,13 @@ type Bucket[BC BucketCount] struct { Index int32 } +// strippedBucket is Bucket without bound values (which are expensive to calculate +// and not used in certain use cases). +type strippedBucket[BC BucketCount] struct { + count BC + index int32 +} + // String returns a string representation of a Bucket, using the usual // mathematical notation of '['/']' for inclusive bounds and '('/')' for // non-inclusive bounds. @@ -101,13 +117,12 @@ type baseBucketIterator[BC BucketCount, IBC InternalBucketCount] struct { currIdx int32 // The actual bucket index. } -func (b baseBucketIterator[BC, IBC]) At() Bucket[BC] { +func (b *baseBucketIterator[BC, IBC]) At() Bucket[BC] { return b.at(b.schema) } -// at is an internal version of the exported At to enable using a different -// schema. -func (b baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] { +// at is an internal version of the exported At to enable using a different schema. +func (b *baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] { bucket := Bucket[BC]{ Count: BC(b.currCount), Index: b.currIdx, @@ -124,6 +139,14 @@ func (b baseBucketIterator[BC, IBC]) at(schema int32) Bucket[BC] { return bucket } +// strippedAt returns current strippedBucket (which lacks bucket bounds but is cheaper to compute). 
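A minimal sketch of the in-place `ReduceResolution` (values are illustrative): going from schema 1 to schema 0 merges adjacent bucket pairs, so counts 1, 2, 3, 4 become 3 and 7.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Schema:          1,
		PositiveSpans:   []histogram.Span{{Offset: 1, Length: 4}},
		PositiveBuckets: []float64{1, 2, 3, 4},
	}
	// Halving the schema merges pairs of buckets, reusing the slices.
	h.ReduceResolution(0)
	fmt.Println(h.Schema, h.PositiveSpans, h.PositiveBuckets)
	// Output: 0 [{1 2}] [3 7]
}
```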
+func (b *baseBucketIterator[BC, IBC]) strippedAt() strippedBucket[BC] { + return strippedBucket[BC]{ + count: BC(b.currCount), + index: b.currIdx, + } +} + // compactBuckets is a generic function used by both Histogram.Compact and // FloatHistogram.Compact. Set deltaBuckets to true if the provided buckets are // deltas. Set it to false if the buckets contain absolute counts. @@ -333,16 +356,41 @@ func compactBuckets[IBC InternalBucketCount](buckets []IBC, spans []Span, maxEmp return buckets, spans } -func bucketsMatch[IBC InternalBucketCount](b1, b2 []IBC) bool { - if len(b1) != len(b2) { - return false - } - for i, b := range b1 { - if b != b2[i] { - return false +func checkHistogramSpans(spans []Span, numBuckets int) error { + var spanBuckets int + for n, span := range spans { + if n > 0 && span.Offset < 0 { + return fmt.Errorf("span number %d with offset %d: %w", n+1, span.Offset, ErrHistogramSpanNegativeOffset) } + spanBuckets += int(span.Length) } - return true + if spanBuckets != numBuckets { + return fmt.Errorf("spans need %d buckets, have %d buckets: %w", spanBuckets, numBuckets, ErrHistogramSpansBucketsMismatch) + } + return nil +} + +func checkHistogramBuckets[BC BucketCount, IBC InternalBucketCount](buckets []IBC, count *BC, deltas bool) error { + if len(buckets) == 0 { + return nil + } + + var last IBC + for i := 0; i < len(buckets); i++ { + var c IBC + if deltas { + c = last + buckets[i] + } else { + c = buckets[i] + } + if c < 0 { + return fmt.Errorf("bucket number %d has observation count of %v: %w", i+1, c, ErrHistogramNegativeBucketCount) + } + last = c + *count += BC(c) + } + + return nil } func getBound(idx, schema int32) float64 { @@ -552,3 +600,106 @@ var exponentialBounds = [][]float64{ 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698, }, } + +// reduceResolution reduces the input spans, buckets in origin schema to the spans, buckets in target schema. +// The target schema must be smaller than the original schema. +// Set deltaBuckets to true if the provided buckets are +// deltas. Set it to false if the buckets contain absolute counts. +// Set inplace to true to reuse input slices and avoid allocations (otherwise +// new slices will be allocated for result). +func reduceResolution[IBC InternalBucketCount]( + originSpans []Span, + originBuckets []IBC, + originSchema, + targetSchema int32, + deltaBuckets bool, + inplace bool, +) ([]Span, []IBC) { + var ( + targetSpans []Span // The spans in the target schema. + targetBuckets []IBC // The bucket counts in the target schema. + bucketIdx int32 // The index of bucket in the origin schema. + bucketCountIdx int // The position of a bucket in origin bucket count slice `originBuckets`. + targetBucketIdx int32 // The index of bucket in the target schema. + lastBucketCount IBC // The last visited bucket's count in the origin schema. + lastTargetBucketIdx int32 // The index of the last added target bucket. + lastTargetBucketCount IBC + ) + + if inplace { + // Slice reuse is safe because when reducing the resolution, + // target slices don't grow faster than origin slices are being read. + targetSpans = originSpans[:0] + targetBuckets = originBuckets[:0] + } + + for _, span := range originSpans { + // Determine the index of the first bucket in this span. + bucketIdx += span.Offset + for j := 0; j < int(span.Length); j++ { + // Determine the index of the bucket in the target schema from the index in the original schema. 
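+ // Editor's note: targetIdx is a helper defined alongside getBound in
+ // this file; it presumably maps origin index i to
+ // ((i-1)>>(originSchema-targetSchema))+1, i.e. each target bucket
+ // absorbs 2^(originSchema-targetSchema) adjacent origin buckets.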
+ targetBucketIdx = targetIdx(bucketIdx, originSchema, targetSchema) + + switch { + case len(targetSpans) == 0: + // This is the first span in the targetSpans. + span := Span{ + Offset: targetBucketIdx, + Length: 1, + } + targetSpans = append(targetSpans, span) + targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx]) + lastTargetBucketIdx = targetBucketIdx + lastBucketCount = originBuckets[bucketCountIdx] + lastTargetBucketCount = originBuckets[bucketCountIdx] + + case lastTargetBucketIdx == targetBucketIdx: + // The current bucket has to be merged into the same target bucket as the previous bucket. + if deltaBuckets { + lastBucketCount += originBuckets[bucketCountIdx] + targetBuckets[len(targetBuckets)-1] += lastBucketCount + lastTargetBucketCount += lastBucketCount + } else { + targetBuckets[len(targetBuckets)-1] += originBuckets[bucketCountIdx] + } + + case (lastTargetBucketIdx + 1) == targetBucketIdx: + // The current bucket has to go into a new target bucket, + // and that bucket is next to the previous target bucket, + // so we add it to the current target span. + targetSpans[len(targetSpans)-1].Length++ + lastTargetBucketIdx++ + if deltaBuckets { + lastBucketCount += originBuckets[bucketCountIdx] + targetBuckets = append(targetBuckets, lastBucketCount-lastTargetBucketCount) + lastTargetBucketCount = lastBucketCount + } else { + targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx]) + } + + case (lastTargetBucketIdx + 1) < targetBucketIdx: + // The current bucket has to go into a new target bucket, + // and that bucket is separated by a gap from the previous target bucket, + // so we need to add a new target span. + span := Span{ + Offset: targetBucketIdx - lastTargetBucketIdx - 1, + Length: 1, + } + targetSpans = append(targetSpans, span) + lastTargetBucketIdx = targetBucketIdx + if deltaBuckets { + lastBucketCount += originBuckets[bucketCountIdx] + targetBuckets = append(targetBuckets, lastBucketCount-lastTargetBucketCount) + lastTargetBucketCount = lastBucketCount + } else { + targetBuckets = append(targetBuckets, originBuckets[bucketCountIdx]) + } + } + + bucketIdx++ + bucketCountIdx++ + } + } + + return targetSpans, targetBuckets +} diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go index 6d425307c..fb0185a63 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go @@ -17,6 +17,8 @@ import ( "fmt" "math" "strings" + + "golang.org/x/exp/slices" ) // CounterResetHint contains the known information about a counter reset, @@ -148,13 +150,15 @@ func (h *Histogram) ZeroBucket() Bucket[uint64] { // PositiveBucketIterator returns a BucketIterator to iterate over all positive // buckets in ascending order (starting next to the zero bucket and going up). func (h *Histogram) PositiveBucketIterator() BucketIterator[uint64] { - return newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) + it := newRegularBucketIterator(h.PositiveSpans, h.PositiveBuckets, h.Schema, true) + return &it } // NegativeBucketIterator returns a BucketIterator to iterate over all negative // buckets in descending order (starting next to the zero bucket and going down). 
func (h *Histogram) NegativeBucketIterator() BucketIterator[uint64] { - return newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) + it := newRegularBucketIterator(h.NegativeSpans, h.NegativeBuckets, h.Schema, false) + return &it } // CumulativeBucketIterator returns a BucketIterator to iterate over a @@ -172,13 +176,16 @@ func (h *Histogram) CumulativeBucketIterator() BucketIterator[uint64] { // Exact match is when there are no new buckets (even empty) and no missing buckets, // and all the bucket values match. Spans can have different empty length spans in between, // but they must represent the same bucket layout to match. +// Sum is compared based on its bit pattern because this method +// is about data equality rather than mathematical equality. func (h *Histogram) Equals(h2 *Histogram) bool { if h2 == nil { return false } if h.Schema != h2.Schema || h.ZeroThreshold != h2.ZeroThreshold || - h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || h.Sum != h2.Sum { + h.ZeroCount != h2.ZeroCount || h.Count != h2.Count || + math.Float64bits(h.Sum) != math.Float64bits(h2.Sum) { return false } @@ -189,10 +196,10 @@ func (h *Histogram) Equals(h2 *Histogram) bool { return false } - if !bucketsMatch(h.PositiveBuckets, h2.PositiveBuckets) { + if !slices.Equal(h.PositiveBuckets, h2.PositiveBuckets) { return false } - if !bucketsMatch(h.NegativeBuckets, h2.NegativeBuckets) { + if !slices.Equal(h.NegativeBuckets, h2.NegativeBuckets) { return false } @@ -321,18 +328,56 @@ func (h *Histogram) ToFloat() *FloatHistogram { } } +// Validate validates consistency between span and bucket slices. Also, buckets are checked +// against negative values. +// For histograms that have not observed any NaN values (based on IsNaN(h.Sum) check), a +// strict h.Count = nCount + pCount + h.ZeroCount check is performed. +// Otherwise, only a lower bound check will be done (h.Count >= nCount + pCount + h.ZeroCount), +// because NaN observations do not increment the values of buckets (but they do increment +// the total h.Count). 
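+// A minimal caller-side sketch (editor's illustration, not upstream code):
+//
+//	if err := h.Validate(); err != nil {
+//		// The sentinel errors are wrapped with %w, so callers can classify
+//		// the failure, e.g. errors.Is(err, ErrHistogramCountMismatch).
+//		return fmt.Errorf("invalid native histogram: %w", err)
+//	}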
+func (h *Histogram) Validate() error { + if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { + return fmt.Errorf("negative side: %w", err) + } + if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { + return fmt.Errorf("positive side: %w", err) + } + var nCount, pCount uint64 + err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) + if err != nil { + return fmt.Errorf("negative side: %w", err) + } + err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true) + if err != nil { + return fmt.Errorf("positive side: %w", err) + } + + sumOfBuckets := nCount + pCount + h.ZeroCount + if math.IsNaN(h.Sum) { + if sumOfBuckets > h.Count { + return fmt.Errorf("%d observations found in buckets, but the Count field is %d: %w", sumOfBuckets, h.Count, ErrHistogramCountNotBigEnough) + } + } else { + if sumOfBuckets != h.Count { + return fmt.Errorf("%d observations found in buckets, but the Count field is %d: %w", sumOfBuckets, h.Count, ErrHistogramCountMismatch) + } + } + + return nil +} + type regularBucketIterator struct { baseBucketIterator[uint64, int64] } -func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) *regularBucketIterator { +func newRegularBucketIterator(spans []Span, buckets []int64, schema int32, positive bool) regularBucketIterator { i := baseBucketIterator[uint64, int64]{ schema: schema, spans: spans, buckets: buckets, positive: positive, } - return ®ularBucketIterator{i} + return regularBucketIterator{i} } func (r *regularBucketIterator) Next() bool { @@ -448,3 +493,20 @@ func (c *cumulativeBucketIterator) At() Bucket[uint64] { Index: c.currIdx - 1, } } + +// ReduceResolution reduces the histogram's spans, buckets into target schema. +// The target schema must be smaller than the current histogram's schema. +func (h *Histogram) ReduceResolution(targetSchema int32) *Histogram { + if targetSchema >= h.Schema { + panic(fmt.Errorf("cannot reduce resolution from schema %d to %d", h.Schema, targetSchema)) + } + + h.PositiveSpans, h.PositiveBuckets = reduceResolution( + h.PositiveSpans, h.PositiveBuckets, h.Schema, targetSchema, true, true, + ) + h.NegativeSpans, h.NegativeBuckets = reduceResolution( + h.NegativeSpans, h.NegativeBuckets, h.Schema, targetSchema, true, true, + ) + h.Schema = targetSchema + return h +} diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go b/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go new file mode 100644 index 000000000..9e9a711c2 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go @@ -0,0 +1,52 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package histogram + +// GenerateBigTestHistograms generates a slice of histograms with given number of buckets each. 
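+// Editor's note on the observation count used below: with every bucket delta
+// set to 1, the absolute counts on one side are 1, 2, ..., bucketsPerSide,
+// summing to bucketsPerSide*(bucketsPerSide+1)/2; the negative and positive
+// sides together therefore contribute bucketsPerSide*(1+bucketsPerSide)
+// observations, matching the observationCount that is added to Count.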
+func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram { + numSpans := numBuckets / 10 + bucketsPerSide := numBuckets / 2 + spanLength := uint32(bucketsPerSide / numSpans) + // Given all bucket deltas are 1, sum bucketsPerSide + 1. + observationCount := bucketsPerSide * (1 + bucketsPerSide) + + var histograms []*Histogram + for i := 0; i < numHistograms; i++ { + h := &Histogram{ + Count: uint64(i + observationCount), + ZeroCount: uint64(i), + ZeroThreshold: 1e-128, + Sum: 18.4 * float64(i+1), + Schema: 2, + NegativeSpans: make([]Span, numSpans), + PositiveSpans: make([]Span, numSpans), + NegativeBuckets: make([]int64, bucketsPerSide), + PositiveBuckets: make([]int64, bucketsPerSide), + } + + for j := 0; j < numSpans; j++ { + s := Span{Offset: 1, Length: spanLength} + h.NegativeSpans[j] = s + h.PositiveSpans[j] = s + } + + for j := 0; j < bucketsPerSide; j++ { + h.NegativeBuckets[j] = 1 + h.PositiveBuckets[j] = 1 + } + + histograms = append(histograms, h) + } + return histograms +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go index 3dc3049b1..bf67224bb 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go @@ -17,32 +17,12 @@ package labels import ( "bytes" - "encoding/json" - "strconv" "strings" "github.com/cespare/xxhash/v2" - "github.com/prometheus/common/model" "golang.org/x/exp/slices" ) -// Well-known label names used by Prometheus components. -const ( - MetricName = "__name__" - AlertName = "alertname" - BucketLabel = "le" - InstanceName = "instance" - - labelSep = '\xfe' -) - -var seps = []byte{'\xff'} - -// Label is a key/value pair of strings. -type Label struct { - Name, Value string -} - // Labels is a sorted set of labels. Order has to be guaranteed upon // instantiation. type Labels []Label @@ -51,23 +31,6 @@ func (ls Labels) Len() int { return len(ls) } func (ls Labels) Swap(i, j int) { ls[i], ls[j] = ls[j], ls[i] } func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name } -func (ls Labels) String() string { - var b bytes.Buffer - - b.WriteByte('{') - for i, l := range ls { - if i > 0 { - b.WriteByte(',') - b.WriteByte(' ') - } - b.WriteString(l.Name) - b.WriteByte('=') - b.WriteString(strconv.Quote(l.Value)) - } - b.WriteByte('}') - return b.String() -} - // Bytes returns ls as a byte slice. // It uses an byte invalid character as a separator and so should not be used for printing. func (ls Labels) Bytes(buf []byte) []byte { @@ -84,40 +47,6 @@ func (ls Labels) Bytes(buf []byte) []byte { return b.Bytes() } -// MarshalJSON implements json.Marshaler. -func (ls Labels) MarshalJSON() ([]byte, error) { - return json.Marshal(ls.Map()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (ls *Labels) UnmarshalJSON(b []byte) error { - var m map[string]string - - if err := json.Unmarshal(b, &m); err != nil { - return err - } - - *ls = FromMap(m) - return nil -} - -// MarshalYAML implements yaml.Marshaler. -func (ls Labels) MarshalYAML() (interface{}, error) { - return ls.Map(), nil -} - -// UnmarshalYAML implements yaml.Unmarshaler. -func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { - var m map[string]string - - if err := unmarshal(&m); err != nil { - return err - } - - *ls = FromMap(m) - return nil -} - // MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. 
// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. func (ls Labels) MatchLabels(on bool, names ...string) Labels { @@ -318,19 +247,6 @@ func (ls Labels) WithoutEmpty() Labels { return ls } -// IsValid checks if the metric name or label names are valid. -func (ls Labels) IsValid() bool { - for _, l := range ls { - if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { - return false - } - if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { - return false - } - } - return true -} - // Equal returns whether the two label sets are equal. func Equal(ls, o Labels) bool { if len(ls) != len(o) { @@ -344,15 +260,6 @@ func Equal(ls, o Labels) bool { return true } -// Map returns a string map of the labels. -func (ls Labels) Map() map[string]string { - m := make(map[string]string, len(ls)) - for _, l := range ls { - m[l.Name] = l.Value - } - return m -} - // EmptyLabels returns n empty Labels value, for convenience. func EmptyLabels() Labels { return Labels{} @@ -368,15 +275,6 @@ func New(ls ...Label) Labels { return set } -// FromMap returns new sorted Labels from the given map. -func FromMap(m map[string]string) Labels { - l := make([]Label, 0, len(m)) - for k, v := range m { - l = append(l, Label{Name: k, Value: v}) - } - return New(l...) -} - // FromStrings creates new labels from pairs of strings. func FromStrings(ss ...string) Labels { if len(ss)%2 != 0 { @@ -460,118 +358,6 @@ func (ls Labels) ReleaseStrings(release func(string)) { } } -// Builder allows modifying Labels. -type Builder struct { - base Labels - del []string - add []Label -} - -// NewBuilder returns a new LabelsBuilder. -func NewBuilder(base Labels) *Builder { - b := &Builder{ - del: make([]string, 0, 5), - add: make([]Label, 0, 5), - } - b.Reset(base) - return b -} - -// Reset clears all current state for the builder. -func (b *Builder) Reset(base Labels) { - b.base = base - b.del = b.del[:0] - b.add = b.add[:0] - for _, l := range b.base { - if l.Value == "" { - b.del = append(b.del, l.Name) - } - } -} - -// Del deletes the label of the given name. -func (b *Builder) Del(ns ...string) *Builder { - for _, n := range ns { - for i, a := range b.add { - if a.Name == n { - b.add = append(b.add[:i], b.add[i+1:]...) - } - } - b.del = append(b.del, n) - } - return b -} - -// Keep removes all labels from the base except those with the given names. -func (b *Builder) Keep(ns ...string) *Builder { -Outer: - for _, l := range b.base { - for _, n := range ns { - if l.Name == n { - continue Outer - } - } - b.del = append(b.del, l.Name) - } - return b -} - -// Set the name/value pair as a label. A value of "" means delete that label. -func (b *Builder) Set(n, v string) *Builder { - if v == "" { - // Empty labels are the same as missing labels. - return b.Del(n) - } - for i, a := range b.add { - if a.Name == n { - b.add[i].Value = v - return b - } - } - b.add = append(b.add, Label{Name: n, Value: v}) - - return b -} - -func (b *Builder) Get(n string) string { - // Del() removes entries from .add but Set() does not remove from .del, so check .add first. - for _, a := range b.add { - if a.Name == n { - return a.Value - } - } - if slices.Contains(b.del, n) { - return "" - } - return b.base.Get(n) -} - -// Range calls f on each label in the Builder. -func (b *Builder) Range(f func(l Label)) { - // Stack-based arrays to avoid heap allocation in most cases. 
- var addStack [128]Label - var delStack [128]string - // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). - origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) - b.base.Range(func(l Label) { - if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { - f(l) - } - }) - for _, a := range origAdd { - f(a) - } -} - -func contains(s []Label, n string) bool { - for _, a := range s { - if a.Name == n { - return true - } - } - return false -} - // Labels returns the labels from the builder. // If no modifications were made, the original labels are returned. func (b *Builder) Labels() Labels { @@ -617,6 +403,13 @@ func (b *ScratchBuilder) Add(name, value string) { b.add = append(b.add, Label{Name: name, Value: value}) } +// Add a name/value pair, using []byte instead of string. +// The '-tags stringlabels' version of this function is unsafe, hence the name. +// This version is safe - it copies the strings immediately - but we keep the same name so everything compiles. +func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { + b.add = append(b.add, Label{Name: string(name), Value: string(value)}) +} + // Sort the labels added so far by name. func (b *ScratchBuilder) Sort() { slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go new file mode 100644 index 000000000..2a722b84c --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go @@ -0,0 +1,235 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package labels + +import ( + "bytes" + "encoding/json" + "strconv" + + "github.com/prometheus/common/model" + "golang.org/x/exp/slices" +) + +const ( + MetricName = "__name__" + AlertName = "alertname" + BucketLabel = "le" + InstanceName = "instance" + + labelSep = '\xfe' +) + +var seps = []byte{'\xff'} + +// Label is a key/value pair of strings. +type Label struct { + Name, Value string +} + +func (ls Labels) String() string { + var b bytes.Buffer + + b.WriteByte('{') + i := 0 + ls.Range(func(l Label) { + if i > 0 { + b.WriteByte(',') + b.WriteByte(' ') + } + b.WriteString(l.Name) + b.WriteByte('=') + b.WriteString(strconv.Quote(l.Value)) + i++ + }) + b.WriteByte('}') + return b.String() +} + +// MarshalJSON implements json.Marshaler. +func (ls Labels) MarshalJSON() ([]byte, error) { + return json.Marshal(ls.Map()) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (ls *Labels) UnmarshalJSON(b []byte) error { + var m map[string]string + + if err := json.Unmarshal(b, &m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// MarshalYAML implements yaml.Marshaler. +func (ls Labels) MarshalYAML() (interface{}, error) { + return ls.Map(), nil +} + +// UnmarshalYAML implements yaml.Unmarshaler. 
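+// Editor's note: decoding goes through a map[string]string, so duplicate
+// names collapse and ordering is rebuilt by FromMap, which sorts via New.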
+func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { + var m map[string]string + + if err := unmarshal(&m); err != nil { + return err + } + + *ls = FromMap(m) + return nil +} + +// IsValid checks if the metric name or label names are valid. +func (ls Labels) IsValid() bool { + err := ls.Validate(func(l Label) error { + if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { + return strconv.ErrSyntax + } + if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { + return strconv.ErrSyntax + } + return nil + }) + return err == nil +} + +// Map returns a string map of the labels. +func (ls Labels) Map() map[string]string { + m := make(map[string]string) + ls.Range(func(l Label) { + m[l.Name] = l.Value + }) + return m +} + +// FromMap returns new sorted Labels from the given map. +func FromMap(m map[string]string) Labels { + l := make([]Label, 0, len(m)) + for k, v := range m { + l = append(l, Label{Name: k, Value: v}) + } + return New(l...) +} + +// Builder allows modifying Labels. +type Builder struct { + base Labels + del []string + add []Label +} + +// NewBuilder returns a new LabelsBuilder. +func NewBuilder(base Labels) *Builder { + b := &Builder{ + del: make([]string, 0, 5), + add: make([]Label, 0, 5), + } + b.Reset(base) + return b +} + +// Reset clears all current state for the builder. +func (b *Builder) Reset(base Labels) { + b.base = base + b.del = b.del[:0] + b.add = b.add[:0] + b.base.Range(func(l Label) { + if l.Value == "" { + b.del = append(b.del, l.Name) + } + }) +} + +// Del deletes the label of the given name. +func (b *Builder) Del(ns ...string) *Builder { + for _, n := range ns { + for i, a := range b.add { + if a.Name == n { + b.add = append(b.add[:i], b.add[i+1:]...) + } + } + b.del = append(b.del, n) + } + return b +} + +// Keep removes all labels from the base except those with the given names. +func (b *Builder) Keep(ns ...string) *Builder { + b.base.Range(func(l Label) { + for _, n := range ns { + if l.Name == n { + return + } + } + b.del = append(b.del, l.Name) + }) + return b +} + +// Set the name/value pair as a label. A value of "" means delete that label. +func (b *Builder) Set(n, v string) *Builder { + if v == "" { + // Empty labels are the same as missing labels. + return b.Del(n) + } + for i, a := range b.add { + if a.Name == n { + b.add[i].Value = v + return b + } + } + b.add = append(b.add, Label{Name: n, Value: v}) + + return b +} + +func (b *Builder) Get(n string) string { + // Del() removes entries from .add but Set() does not remove from .del, so check .add first. + for _, a := range b.add { + if a.Name == n { + return a.Value + } + } + if slices.Contains(b.del, n) { + return "" + } + return b.base.Get(n) +} + +// Range calls f on each label in the Builder. +func (b *Builder) Range(f func(l Label)) { + // Stack-based arrays to avoid heap allocation in most cases. + var addStack [128]Label + var delStack [128]string + // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). + origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) 
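+	// Editor's note: the 128-entry stack arrays above cover the common case;
+	// with more labels, append simply falls back to a heap allocation, so
+	// correctness does not depend on this bound.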
+ b.base.Range(func(l Label) { + if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { + f(l) + } + }) + for _, a := range origAdd { + f(a) + } +} + +func contains(s []Label, n string) bool { + for _, a := range s { + if a.Name == n { + return true + } + } + return false +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go index cc6bfcc70..d79a83679 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go @@ -16,33 +16,14 @@ package labels import ( - "bytes" - "encoding/json" "reflect" - "strconv" "strings" "unsafe" "github.com/cespare/xxhash/v2" - "github.com/prometheus/common/model" "golang.org/x/exp/slices" ) -// Well-known label names used by Prometheus components. -const ( - MetricName = "__name__" - AlertName = "alertname" - BucketLabel = "le" - InstanceName = "instance" -) - -var seps = []byte{'\xff'} - -// Label is a key/value pair of strings. -type Label struct { - Name, Value string -} - // Labels is implemented by a single flat string holding name/value pairs. // Each name and value is preceded by its length in varint encoding. // Names are in order. @@ -77,26 +58,6 @@ func decodeString(data string, index int) (string, int) { return data[index : index+size], index + size } -func (ls Labels) String() string { - var b bytes.Buffer - - b.WriteByte('{') - for i := 0; i < len(ls.data); { - if i > 0 { - b.WriteByte(',') - b.WriteByte(' ') - } - var name, value string - name, i = decodeString(ls.data, i) - value, i = decodeString(ls.data, i) - b.WriteString(name) - b.WriteByte('=') - b.WriteString(strconv.Quote(value)) - } - b.WriteByte('}') - return b.String() -} - // Bytes returns ls as a byte slice. // It uses non-printing characters and so should not be used for printing. func (ls Labels) Bytes(buf []byte) []byte { @@ -109,45 +70,11 @@ func (ls Labels) Bytes(buf []byte) []byte { return buf } -// MarshalJSON implements json.Marshaler. -func (ls Labels) MarshalJSON() ([]byte, error) { - return json.Marshal(ls.Map()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (ls *Labels) UnmarshalJSON(b []byte) error { - var m map[string]string - - if err := json.Unmarshal(b, &m); err != nil { - return err - } - - *ls = FromMap(m) - return nil -} - -// MarshalYAML implements yaml.Marshaler. -func (ls Labels) MarshalYAML() (interface{}, error) { - return ls.Map(), nil -} - // IsZero implements yaml.IsZeroer - if we don't have this then 'omitempty' fields are always omitted. func (ls Labels) IsZero() bool { return len(ls.data) == 0 } -// UnmarshalYAML implements yaml.Unmarshaler. -func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { - var m map[string]string - - if err := unmarshal(&m); err != nil { - return err - } - - *ls = FromMap(m) - return nil -} - // MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean. // If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false. // TODO: This is only used in printing an error message @@ -364,37 +291,11 @@ func (ls Labels) WithoutEmpty() Labels { return ls } -// IsValid checks if the metric name or label names are valid. 
-func (ls Labels) IsValid() bool { - err := ls.Validate(func(l Label) error { - if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { - return strconv.ErrSyntax - } - if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { - return strconv.ErrSyntax - } - return nil - }) - return err == nil -} - // Equal returns whether the two label sets are equal. func Equal(ls, o Labels) bool { return ls.data == o.data } -// Map returns a string map of the labels. -func (ls Labels) Map() map[string]string { - m := make(map[string]string, len(ls.data)/10) - for i := 0; i < len(ls.data); { - var lName, lValue string - lName, i = decodeString(ls.data, i) - lValue, i = decodeString(ls.data, i) - m[lName] = lValue - } - return m -} - // EmptyLabels returns an empty Labels value, for convenience. func EmptyLabels() Labels { return Labels{} @@ -420,15 +321,6 @@ func New(ls ...Label) Labels { return Labels{data: yoloString(buf)} } -// FromMap returns new sorted Labels from the given map. -func FromMap(m map[string]string) Labels { - l := make([]Label, 0, len(m)) - for k, v := range m { - l = append(l, Label{Name: k, Value: v}) - } - return New(l...) -} - // FromStrings creates new labels from pairs of strings. func FromStrings(ss ...string) Labels { if len(ss)%2 != 0 { @@ -547,124 +439,6 @@ func (ls Labels) ReleaseStrings(release func(string)) { release(ls.data) } -// Builder allows modifying Labels. -type Builder struct { - base Labels - del []string - add []Label -} - -// NewBuilder returns a new LabelsBuilder. -func NewBuilder(base Labels) *Builder { - b := &Builder{ - del: make([]string, 0, 5), - add: make([]Label, 0, 5), - } - b.Reset(base) - return b -} - -// Reset clears all current state for the builder. -func (b *Builder) Reset(base Labels) { - b.base = base - b.del = b.del[:0] - b.add = b.add[:0] - for i := 0; i < len(base.data); { - var lName, lValue string - lName, i = decodeString(base.data, i) - lValue, i = decodeString(base.data, i) - if lValue == "" { - b.del = append(b.del, lName) - } - } -} - -// Del deletes the label of the given name. -func (b *Builder) Del(ns ...string) *Builder { - for _, n := range ns { - for i, a := range b.add { - if a.Name == n { - b.add = append(b.add[:i], b.add[i+1:]...) - } - } - b.del = append(b.del, n) - } - return b -} - -// Keep removes all labels from the base except those with the given names. -func (b *Builder) Keep(ns ...string) *Builder { -Outer: - for i := 0; i < len(b.base.data); { - var lName string - lName, i = decodeString(b.base.data, i) - _, i = decodeString(b.base.data, i) - for _, n := range ns { - if lName == n { - continue Outer - } - } - b.del = append(b.del, lName) - } - return b -} - -// Set the name/value pair as a label. A value of "" means delete that label. -func (b *Builder) Set(n, v string) *Builder { - if v == "" { - // Empty labels are the same as missing labels. - return b.Del(n) - } - for i, a := range b.add { - if a.Name == n { - b.add[i].Value = v - return b - } - } - b.add = append(b.add, Label{Name: n, Value: v}) - - return b -} - -func (b *Builder) Get(n string) string { - // Del() removes entries from .add but Set() does not remove from .del, so check .add first. - for _, a := range b.add { - if a.Name == n { - return a.Value - } - } - if slices.Contains(b.del, n) { - return "" - } - return b.base.Get(n) -} - -// Range calls f on each label in the Builder. -func (b *Builder) Range(f func(l Label)) { - // Stack-based arrays to avoid heap allocation in most cases. 
- var addStack [128]Label - var delStack [128]string - // Take a copy of add and del, so they are unaffected by calls to Set() or Del(). - origAdd, origDel := append(addStack[:0], b.add...), append(delStack[:0], b.del...) - b.base.Range(func(l Label) { - if !slices.Contains(origDel, l.Name) && !contains(origAdd, l.Name) { - f(l) - } - }) - for _, a := range origAdd { - f(a) - } -} - -func contains(s []Label, n string) bool { - for _, a := range s { - if a.Name == n { - return true - } - } - return false -} - // Labels returns the labels from the builder. // If no modifications were made, the original labels are returned. func (b *Builder) Labels() Labels { @@ -829,6 +603,12 @@ func (b *ScratchBuilder) Add(name, value string) { b.add = append(b.add, Label{Name: name, Value: value}) } +// Add a name/value pair, using []byte instead of string to reduce memory allocations. +// The values must remain live until Labels() is called. +func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { + b.add = append(b.add, Label{Name: yoloString(name), Value: yoloString(value)}) +} + // Sort the labels added so far by name. func (b *ScratchBuilder) Sort() { slices.SortFunc(b.add, func(a, b Label) int { return strings.Compare(a.Name, b.Name) }) diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go index 38903afc9..2f5fdbc3b 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/interface.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/interface.go @@ -16,6 +16,8 @@ package textparse import ( "mime" + "github.com/gogo/protobuf/types" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -64,6 +66,11 @@ type Parser interface { // retrieved (including the case where no exemplars exist at all). Exemplar(l *exemplar.Exemplar) bool + // CreatedTimestamp writes the created timestamp of the current sample + // into the passed timestamp. It returns false if no created timestamp + // exists or if the metric type does not support created timestamps. + CreatedTimestamp(ct *types.Timestamp) bool + // Next advances the parser to the next sample. It returns false if no // more samples were read or an error occurred. Next() (Entry, error) diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go index 5623e6833..bb5075544 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/openmetricsparse.go @@ -24,6 +24,8 @@ import ( "strings" "unicode/utf8" + "github.com/gogo/protobuf/types" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -211,6 +213,11 @@ func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool { return true } +// CreatedTimestamp returns false because OpenMetricsParser does not support created timestamps (yet). +func (p *OpenMetricsParser) CreatedTimestamp(_ *types.Timestamp) bool { + return false +} + // nextToken returns the next token from the openMetricsLexer. 
func (p *OpenMetricsParser) nextToken() token { tok := p.l.Lex() diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go index 04c295dd0..b3fa2d8a6 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/promparse.go @@ -26,6 +26,8 @@ import ( "unicode/utf8" "unsafe" + "github.com/gogo/protobuf/types" + "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" @@ -245,6 +247,11 @@ func (p *PromParser) Exemplar(*exemplar.Exemplar) bool { return false } +// CreatedTimestamp returns false because PromParser does not support created timestamps. +func (p *PromParser) CreatedTimestamp(_ *types.Timestamp) bool { + return false +} + // nextToken returns the next token from the promlexer. It skips over tabs // and spaces. func (p *PromParser) nextToken() token { diff --git a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go index fbb84a2bd..23afb5c59 100644 --- a/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go +++ b/vendor/github.com/prometheus/prometheus/model/textparse/protobufparse.go @@ -16,6 +16,7 @@ package textparse import ( "bytes" "encoding/binary" + "errors" "fmt" "io" "math" @@ -23,7 +24,7 @@ import ( "unicode/utf8" "github.com/gogo/protobuf/proto" - "github.com/pkg/errors" + "github.com/gogo/protobuf/types" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/exemplar" @@ -147,9 +148,15 @@ func (p *ProtobufParser) Series() ([]byte, *int64, float64) { if ts != 0 { return p.metricBytes.Bytes(), &ts, v } - // Nasty hack: Assume that ts==0 means no timestamp. That's not true in - // general, but proto3 has no distinction between unset and - // default. Need to avoid in the final format. + // TODO(beorn7): We assume here that ts==0 means no timestamp. That's + // not true in general, but proto3 originally has no distinction between + // unset and default. At a later stage, the `optional` keyword was + // (re-)introduced in proto3, but gogo-protobuf never got updated to + // support it. (Note that setting `[(gogoproto.nullable) = true]` for + // the `timestamp_ms` field doesn't help, either.) We plan to migrate + // away from gogo-protobuf to an actively maintained protobuf + // implementation. Once that's done, we can simply use the `optional` + // keyword and check for the unset state explicitly. return p.metricBytes.Bytes(), nil, v } @@ -310,22 +317,28 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { exProto = m.GetCounter().GetExemplar() case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: bb := m.GetHistogram().GetBucket() + isClassic := p.state == EntrySeries if p.fieldPos < 0 { - if p.state == EntrySeries { + if isClassic { return false // At _count or _sum. } p.fieldPos = 0 // Start at 1st bucket for native histograms. } for p.fieldPos < len(bb) { exProto = bb[p.fieldPos].GetExemplar() - if p.state == EntrySeries { + if isClassic { break } p.fieldPos++ - if exProto != nil { - break + // We deliberately drop exemplars with no timestamp only for native histograms. + if exProto != nil && (isClassic || exProto.GetTimestamp() != nil) { + break // Found a classic histogram exemplar or a native histogram exemplar with a timestamp. 
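+			// Editor's note: the timestamp requirement is presumably because a
+			// native histogram series carries all its exemplars together, so an
+			// exemplar without a timestamp could not be ordered against others.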
} } + // If the last exemplar for native histograms has no timestamp, ignore it. + if !isClassic && exProto.GetTimestamp() == nil { + return false + } default: return false } @@ -347,6 +360,24 @@ func (p *ProtobufParser) Exemplar(ex *exemplar.Exemplar) bool { return true } +func (p *ProtobufParser) CreatedTimestamp(ct *types.Timestamp) bool { + var foundCT *types.Timestamp + switch p.mf.GetType() { + case dto.MetricType_COUNTER: + foundCT = p.mf.GetMetric()[p.metricPos].GetCounter().GetCreatedTimestamp() + case dto.MetricType_SUMMARY: + foundCT = p.mf.GetMetric()[p.metricPos].GetSummary().GetCreatedTimestamp() + case dto.MetricType_HISTOGRAM, dto.MetricType_GAUGE_HISTOGRAM: + foundCT = p.mf.GetMetric()[p.metricPos].GetHistogram().GetCreatedTimestamp() + default: + } + if foundCT == nil { + return false + } + *ct = *foundCT + return true +} + // Next advances the parser to the next "sample" (emulating the behavior of a // text format parser). It returns (EntryInvalid, io.EOF) if no samples were // read. @@ -371,10 +402,10 @@ func (p *ProtobufParser) Next() (Entry, error) { // into metricBytes and validate only name, help, and type for now. name := p.mf.GetName() if !model.IsValidMetricName(model.LabelValue(name)) { - return EntryInvalid, errors.Errorf("invalid metric name: %s", name) + return EntryInvalid, fmt.Errorf("invalid metric name: %s", name) } if help := p.mf.GetHelp(); !utf8.ValidString(help) { - return EntryInvalid, errors.Errorf("invalid help for metric %q: %s", name, help) + return EntryInvalid, fmt.Errorf("invalid help for metric %q: %s", name, help) } switch p.mf.GetType() { case dto.MetricType_COUNTER, @@ -385,7 +416,7 @@ func (p *ProtobufParser) Next() (Entry, error) { dto.MetricType_UNTYPED: // All good. default: - return EntryInvalid, errors.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType()) + return EntryInvalid, fmt.Errorf("unknown metric type for metric %q: %s", name, p.mf.GetType()) } p.metricBytes.Reset() p.metricBytes.WriteString(name) @@ -438,7 +469,7 @@ func (p *ProtobufParser) Next() (Entry, error) { return EntryInvalid, err } default: - return EntryInvalid, errors.Errorf("invalid protobuf parsing state: %d", p.state) + return EntryInvalid, fmt.Errorf("invalid protobuf parsing state: %d", p.state) } return p.state, nil } @@ -451,13 +482,13 @@ func (p *ProtobufParser) updateMetricBytes() error { b.WriteByte(model.SeparatorByte) n := lp.GetName() if !model.LabelName(n).IsValid() { - return errors.Errorf("invalid label name: %s", n) + return fmt.Errorf("invalid label name: %s", n) } b.WriteString(n) b.WriteByte(model.SeparatorByte) v := lp.GetValue() if !utf8.ValidString(v) { - return errors.Errorf("invalid label value: %s", v) + return fmt.Errorf("invalid label value: %s", v) } b.WriteString(v) } @@ -532,7 +563,7 @@ func readDelimited(b []byte, mf *dto.MetricFamily) (n int, err error) { } totalLength := varIntLength + int(messageLength) if totalLength > len(b) { - return 0, errors.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b)) + return 0, fmt.Errorf("protobufparse: insufficient length of buffer, expected at least %d bytes, got %d bytes", totalLength, len(b)) } mf.Reset() return totalLength, mf.Unmarshal(b[varIntLength:totalLength]) diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go index 05d25747b..702ee62fc 100644 --- 
a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.pb.go @@ -411,7 +411,7 @@ type Histogram struct { SampleCount uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount,proto3" json:"sample_count,omitempty"` SampleCountFloat float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat,proto3" json:"sample_count_float,omitempty"` SampleSum float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum,proto3" json:"sample_sum,omitempty"` - // Buckets for the conventional histogram. + // Buckets for the classic histogram. Bucket []Bucket `protobuf:"bytes,3,rep,name=bucket,proto3" json:"bucket"` CreatedTimestamp *types.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"` // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8. @@ -878,6 +878,7 @@ type MetricFamily struct { Help string `protobuf:"bytes,2,opt,name=help,proto3" json:"help,omitempty"` Type MetricType `protobuf:"varint,3,opt,name=type,proto3,enum=io.prometheus.client.MetricType" json:"type,omitempty"` Metric []Metric `protobuf:"bytes,4,rep,name=metric,proto3" json:"metric"` + Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -944,6 +945,13 @@ func (m *MetricFamily) GetMetric() []Metric { return nil } +func (m *MetricFamily) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + func init() { proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") @@ -965,68 +973,68 @@ func init() { } var fileDescriptor_d1e5ddb18987a258 = []byte{ - // 963 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x6e, 0x1b, 0x45, - 0x14, 0xee, 0x76, 0xfd, 0x93, 0x3d, 0x8e, 0x93, 0xcd, 0x60, 0x55, 0xab, 0x40, 0x62, 0xb3, 0x12, - 0x52, 0x40, 0xc8, 0x16, 0x50, 0x04, 0x2a, 0x45, 0x22, 0x69, 0xd3, 0x14, 0x15, 0xb7, 0x65, 0x6c, - 0x5f, 0x94, 0x9b, 0xd5, 0xd8, 0x9e, 0xac, 0x57, 0xec, 0xee, 0x2c, 0xfb, 0x53, 0x11, 0xee, 0x79, - 0x06, 0x5e, 0x01, 0xf1, 0x1c, 0x08, 0xf5, 0x92, 0x07, 0x40, 0x08, 0xe5, 0x49, 0xd0, 0xfc, 0xed, - 0x3a, 0xd5, 0xba, 0x90, 0xf6, 0x6e, 0xe6, 0xf3, 0x77, 0xce, 0x7c, 0xe7, 0x9b, 0xf1, 0x39, 0x0b, - 0x6e, 0xc0, 0x46, 0x49, 0xca, 0x22, 0x9a, 0xaf, 0x68, 0x91, 0x8d, 0x16, 0x61, 0x40, 0xe3, 0x7c, - 0x14, 0xd1, 0x3c, 0x0d, 0x16, 0xd9, 0x30, 0x49, 0x59, 0xce, 0x50, 0x2f, 0x60, 0xc3, 0x8a, 0x33, - 0x94, 0x9c, 0xfd, 0x9e, 0xcf, 0x7c, 0x26, 0x08, 0x23, 0xbe, 0x92, 0xdc, 0xfd, 0xbe, 0xcf, 0x98, - 0x1f, 0xd2, 0x91, 0xd8, 0xcd, 0x8b, 0xf3, 0x51, 0x1e, 0x44, 0x34, 0xcb, 0x49, 0x94, 0x48, 0x82, - 0xfb, 0x29, 0x58, 0xdf, 0x90, 0x39, 0x0d, 0x9f, 0x92, 0x20, 0x45, 0x08, 0x1a, 0x31, 0x89, 0xa8, - 0x63, 0x0c, 0x8c, 0x23, 0x0b, 0x8b, 0x35, 0xea, 0x41, 0xf3, 0x39, 0x09, 0x0b, 0xea, 0xdc, 0x14, - 0xa0, 0xdc, 0xb8, 0x07, 0xd0, 0x3c, 0x23, 0x85, 0xbf, 0xf6, 0x33, 0x8f, 0x31, 0xf4, 0xcf, 0xbf, - 0x19, 0xd0, 0xbe, 0xc7, 0x8a, 0x38, 0xa7, 0x69, 0x3d, 0x03, 0xdd, 0x81, 0x2d, 0xfa, 0x23, 0x8d, - 0x92, 0x90, 0xa4, 0x22, 0x73, 0xe7, 0xe3, 0xc3, 0x61, 0x5d, 0x5d, 0xc3, 0x53, 0xc5, 0xc2, 0x25, - 0x1f, 0x8d, 0x61, 0x6f, 0x91, 0x52, 0x92, 0xd3, 0xa5, 0x57, 0x96, 0xe3, 0x98, 0x22, 0xc9, 0xfe, - 0x50, 0x16, 0x3c, 0xd4, 0x05, 
0x0f, 0xa7, 0x9a, 0x71, 0xd2, 0x78, 0xf1, 0x77, 0xdf, 0xc0, 0xb6, - 0x0a, 0x2d, 0x71, 0xf7, 0x2e, 0x6c, 0x7d, 0x5b, 0x90, 0x38, 0x0f, 0x42, 0x8a, 0xf6, 0x61, 0xeb, - 0x07, 0xb5, 0x56, 0x7a, 0xcb, 0xfd, 0x55, 0x27, 0xca, 0x52, 0xff, 0x32, 0xa0, 0x3d, 0x29, 0xa2, - 0x88, 0xa4, 0x17, 0xe8, 0x5d, 0xd8, 0xce, 0x48, 0x94, 0x84, 0xd4, 0x5b, 0xf0, 0xe2, 0x45, 0x86, - 0x06, 0xee, 0x48, 0x4c, 0xf8, 0x81, 0x0e, 0x00, 0x14, 0x25, 0x2b, 0x22, 0x95, 0xc9, 0x92, 0xc8, - 0xa4, 0x88, 0xd0, 0x57, 0x6b, 0xe7, 0x9b, 0x03, 0x73, 0xb3, 0x2d, 0x5a, 0xb1, 0xa8, 0xea, 0xc6, - 0x9a, 0xca, 0x5a, 0x73, 0x1a, 0xaf, 0x6d, 0x4e, 0x1f, 0xda, 0xb3, 0x38, 0xbf, 0x48, 0xe8, 0x72, - 0xc3, 0x55, 0xff, 0xde, 0x04, 0xeb, 0x61, 0x90, 0xe5, 0xcc, 0x4f, 0x49, 0xf4, 0x7f, 0x1c, 0xf8, - 0x10, 0xd0, 0x3a, 0xc5, 0x3b, 0x0f, 0x19, 0xc9, 0x85, 0x42, 0x03, 0xdb, 0x6b, 0xc4, 0x07, 0x1c, - 0xff, 0x2f, 0xbf, 0xee, 0x40, 0x6b, 0x5e, 0x2c, 0xbe, 0xa7, 0xb9, 0x72, 0xeb, 0x9d, 0x7a, 0xb7, - 0x4e, 0x04, 0x47, 0x79, 0xa5, 0x22, 0xea, 0x9d, 0xda, 0x7d, 0x5d, 0xa7, 0xd0, 0x2d, 0x68, 0x65, - 0x8b, 0x15, 0x8d, 0x88, 0xd3, 0x1c, 0x18, 0x47, 0x7b, 0x58, 0xed, 0xd0, 0x7b, 0xb0, 0xf3, 0x13, - 0x4d, 0x99, 0x97, 0xaf, 0x52, 0x9a, 0xad, 0x58, 0xb8, 0x74, 0x5a, 0xa2, 0x8a, 0x2e, 0x47, 0xa7, - 0x1a, 0xe4, 0x85, 0x0a, 0x9a, 0xf4, 0xad, 0x2d, 0x7c, 0xb3, 0x38, 0x22, 0x5d, 0x3b, 0x02, 0xbb, - 0xfa, 0x59, 0x79, 0xb6, 0x25, 0xf2, 0xec, 0x94, 0x24, 0xe9, 0xd8, 0x23, 0xe8, 0xc6, 0xd4, 0x27, - 0x79, 0xf0, 0x9c, 0x7a, 0x59, 0x42, 0x62, 0xc7, 0x12, 0xce, 0x0c, 0x5e, 0xe5, 0xcc, 0x24, 0x21, - 0xb1, 0x72, 0x67, 0x5b, 0x07, 0x73, 0x8c, 0x8b, 0x2f, 0x93, 0x2d, 0x69, 0x98, 0x13, 0x07, 0x06, - 0xe6, 0x11, 0xc2, 0xe5, 0x11, 0xf7, 0x39, 0x78, 0x85, 0x26, 0x0b, 0xe8, 0x0c, 0x4c, 0x5e, 0xa3, - 0x46, 0x65, 0x11, 0x8f, 0xa0, 0x9b, 0xb0, 0x2c, 0xa8, 0xa4, 0x6d, 0x5f, 0x4f, 0x9a, 0x0e, 0xd6, - 0xd2, 0xca, 0x64, 0x52, 0x5a, 0x57, 0x4a, 0xd3, 0x68, 0x29, 0xad, 0xa4, 0x49, 0x69, 0x3b, 0x52, - 0x9a, 0x46, 0x85, 0x34, 0xf7, 0x0f, 0x03, 0x5a, 0xf2, 0x40, 0xf4, 0x3e, 0xd8, 0x8b, 0x22, 0x2a, - 0xc2, 0xf5, 0x72, 0xe4, 0x3b, 0xde, 0xad, 0x70, 0x59, 0xd0, 0x6d, 0xb8, 0xf5, 0x32, 0xf5, 0xca, - 0x7b, 0xee, 0xbd, 0x14, 0x20, 0x6f, 0xa8, 0x0f, 0x9d, 0x22, 0x49, 0x68, 0xea, 0xcd, 0x59, 0x11, - 0x2f, 0xd5, 0xa3, 0x06, 0x01, 0x9d, 0x70, 0xe4, 0x4a, 0x73, 0x34, 0xaf, 0xd7, 0x1c, 0xdd, 0xbb, - 0x00, 0x95, 0x71, 0xfc, 0x51, 0xb2, 0xf3, 0xf3, 0x8c, 0xca, 0x0a, 0xf6, 0xb0, 0xda, 0x71, 0x3c, - 0xa4, 0xb1, 0x9f, 0xaf, 0xc4, 0xe9, 0x5d, 0xac, 0x76, 0xee, 0x2f, 0x06, 0x6c, 0xe9, 0xa4, 0xe8, - 0x0b, 0x68, 0x86, 0x7c, 0x36, 0x38, 0x86, 0xb8, 0xa6, 0x7e, 0xbd, 0x86, 0x72, 0x7c, 0xa8, 0x5b, - 0x92, 0x31, 0xf5, 0xdd, 0x12, 0x7d, 0x0e, 0xd6, 0x35, 0x5a, 0x36, 0xae, 0xc8, 0xee, 0xcf, 0x26, - 0xb4, 0xc6, 0x62, 0x0e, 0xbe, 0x99, 0xae, 0x8f, 0xa0, 0xe9, 0xf3, 0xc9, 0xa5, 0xa6, 0xce, 0xdb, - 0xf5, 0xc1, 0x62, 0xb8, 0x61, 0xc9, 0x44, 0x9f, 0x41, 0x7b, 0x21, 0x87, 0x99, 0x92, 0x7c, 0x50, - 0x1f, 0xa4, 0x26, 0x1e, 0xd6, 0x6c, 0x1e, 0x98, 0xc9, 0xd1, 0xa0, 0x3a, 0xf0, 0x86, 0x40, 0x35, - 0x3f, 0xb0, 0x66, 0xf3, 0xc0, 0x42, 0x76, 0x5d, 0xd1, 0x4c, 0x36, 0x06, 0xaa, 0xd6, 0x8c, 0x35, - 0x1b, 0x7d, 0x09, 0xd6, 0x4a, 0x37, 0x63, 0xd1, 0x44, 0x36, 0xda, 0x53, 0xf6, 0x6c, 0x5c, 0x45, - 0xf0, 0xf6, 0x5d, 0x3a, 0xee, 0x45, 0x99, 0xe8, 0x54, 0x26, 0xee, 0x94, 0xd8, 0x38, 0x73, 0x7f, - 0x35, 0x60, 0x5b, 0xde, 0xc3, 0x03, 0x12, 0x05, 0xe1, 0x45, 0xed, 0x47, 0x03, 0x82, 0xc6, 0x8a, - 0x86, 0x89, 0xfa, 0x66, 0x10, 0x6b, 0x74, 0x1b, 0x1a, 0x5c, 0xa3, 0xb0, 0x70, 0x67, 0xd3, 0x7f, - 0x5e, 0x66, 0x9e, 0x5e, 0x24, 0x14, 0x0b, 0x36, 0x6f, 
0xf0, 0xf2, 0xeb, 0xc7, 0x69, 0xbc, 0xaa, - 0xc1, 0xcb, 0x38, 0xdd, 0xe0, 0x65, 0xc4, 0x07, 0x73, 0x80, 0x2a, 0x1f, 0xea, 0x40, 0xfb, 0xde, - 0x93, 0xd9, 0xe3, 0xe9, 0x29, 0xb6, 0x6f, 0x20, 0x0b, 0x9a, 0x67, 0xc7, 0xb3, 0xb3, 0x53, 0xdb, - 0xe0, 0xf8, 0x64, 0x36, 0x1e, 0x1f, 0xe3, 0x67, 0xf6, 0x4d, 0xbe, 0x99, 0x3d, 0x9e, 0x3e, 0x7b, - 0x7a, 0x7a, 0xdf, 0x36, 0x51, 0x17, 0xac, 0x87, 0x5f, 0x4f, 0xa6, 0x4f, 0xce, 0xf0, 0xf1, 0xd8, - 0x6e, 0xa0, 0xb7, 0x60, 0x57, 0xc4, 0x78, 0x15, 0xd8, 0x3c, 0x71, 0x5f, 0x5c, 0x1e, 0x1a, 0x7f, - 0x5e, 0x1e, 0x1a, 0xff, 0x5c, 0x1e, 0x1a, 0xdf, 0xf5, 0x02, 0xe6, 0x55, 0xe2, 0x3c, 0x29, 0x6e, - 0xde, 0x12, 0x2f, 0xfb, 0x93, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x68, 0x3f, 0xd9, 0x07, 0xdd, - 0x09, 0x00, 0x00, + // 969 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x8e, 0xdb, 0x44, + 0x14, 0xae, 0x9b, 0x5f, 0x9f, 0x6c, 0x76, 0xbd, 0x43, 0x54, 0x59, 0x0b, 0xbb, 0x09, 0x96, 0x90, + 0x16, 0x84, 0x12, 0x01, 0x45, 0xa0, 0x52, 0x24, 0x76, 0xdb, 0x6d, 0x8a, 0x4a, 0xda, 0x32, 0x49, + 0x2e, 0xca, 0x8d, 0x35, 0x49, 0x66, 0x1d, 0x0b, 0xdb, 0x63, 0xec, 0x71, 0xc5, 0x72, 0xcf, 0x33, + 0xf0, 0x02, 0x3c, 0x06, 0xe2, 0x12, 0xf5, 0x92, 0x2b, 0x2e, 0x11, 0xda, 0x27, 0x41, 0xf3, 0x63, + 0x3b, 0x5b, 0x39, 0x0b, 0x0b, 0x77, 0x33, 0x5f, 0xbe, 0x73, 0xe6, 0x3b, 0xdf, 0x4c, 0xce, 0x31, + 0x38, 0x3e, 0x1b, 0xc5, 0x09, 0x0b, 0x29, 0x5f, 0xd3, 0x2c, 0x1d, 0x2d, 0x03, 0x9f, 0x46, 0x7c, + 0x14, 0x52, 0x9e, 0xf8, 0xcb, 0x74, 0x18, 0x27, 0x8c, 0x33, 0xd4, 0xf3, 0xd9, 0xb0, 0xe4, 0x0c, + 0x15, 0xe7, 0xa0, 0xe7, 0x31, 0x8f, 0x49, 0xc2, 0x48, 0xac, 0x14, 0xf7, 0xa0, 0xef, 0x31, 0xe6, + 0x05, 0x74, 0x24, 0x77, 0x8b, 0xec, 0x7c, 0xc4, 0xfd, 0x90, 0xa6, 0x9c, 0x84, 0xb1, 0x22, 0x38, + 0x1f, 0x83, 0xf9, 0x15, 0x59, 0xd0, 0xe0, 0x39, 0xf1, 0x13, 0x84, 0xa0, 0x1e, 0x91, 0x90, 0xda, + 0xc6, 0xc0, 0x38, 0x36, 0xb1, 0x5c, 0xa3, 0x1e, 0x34, 0x5e, 0x92, 0x20, 0xa3, 0xf6, 0x6d, 0x09, + 0xaa, 0x8d, 0x73, 0x08, 0x8d, 0x31, 0xc9, 0xbc, 0x8d, 0x9f, 0x45, 0x8c, 0x91, 0xff, 0xfc, 0xb3, + 0x01, 0xad, 0x07, 0x2c, 0x8b, 0x38, 0x4d, 0xaa, 0x19, 0xe8, 0x1e, 0xb4, 0xe9, 0xf7, 0x34, 0x8c, + 0x03, 0x92, 0xc8, 0xcc, 0x9d, 0x0f, 0x8f, 0x86, 0x55, 0x75, 0x0d, 0xcf, 0x34, 0x0b, 0x17, 0x7c, + 0x34, 0x86, 0xfd, 0x65, 0x42, 0x09, 0xa7, 0x2b, 0xb7, 0x28, 0xc7, 0xae, 0xc9, 0x24, 0x07, 0x43, + 0x55, 0xf0, 0x30, 0x2f, 0x78, 0x38, 0xcb, 0x19, 0xd8, 0xd2, 0x41, 0x05, 0xe2, 0xdc, 0x87, 0xf6, + 0xd7, 0x19, 0x89, 0xb8, 0x1f, 0x50, 0x74, 0x00, 0xed, 0xef, 0xf4, 0x5a, 0x2b, 0x2d, 0xf6, 0x57, + 0x3d, 0x28, 0x8a, 0xfc, 0xc3, 0x80, 0xd6, 0x34, 0x0b, 0x43, 0x92, 0x5c, 0xa0, 0xb7, 0x61, 0x27, + 0x25, 0x61, 0x1c, 0x50, 0x77, 0x29, 0xca, 0x96, 0x19, 0xea, 0xb8, 0xa3, 0x30, 0xe9, 0x04, 0x3a, + 0x04, 0xd0, 0x94, 0x34, 0x0b, 0x75, 0x26, 0x53, 0x21, 0xd3, 0x2c, 0x44, 0x5f, 0x6c, 0x9c, 0x5f, + 0x1b, 0xd4, 0xb6, 0x1b, 0x92, 0x2b, 0x3e, 0xad, 0xbf, 0xfa, 0xb3, 0x7f, 0x6b, 0x43, 0x65, 0xa5, + 0x2d, 0xf5, 0xff, 0x60, 0x4b, 0x1f, 0x5a, 0xf3, 0x88, 0x5f, 0xc4, 0x74, 0xb5, 0xe5, 0x7a, 0x7f, + 0x6d, 0x80, 0xf9, 0xd8, 0x4f, 0x39, 0xf3, 0x12, 0x12, 0xfe, 0x9b, 0xda, 0xdf, 0x07, 0xb4, 0x49, + 0x71, 0xcf, 0x03, 0x46, 0xb8, 0xd4, 0x66, 0x60, 0x6b, 0x83, 0xf8, 0x48, 0xe0, 0xff, 0xe4, 0xd4, + 0x3d, 0x68, 0x2e, 0xb2, 0xe5, 0xb7, 0x94, 0x6b, 0x9f, 0xde, 0xaa, 0xf6, 0xe9, 0x54, 0x72, 0xb4, + 0x4b, 0x3a, 0xa2, 0xda, 0xa3, 0xbd, 0x9b, 0x7b, 0x84, 0xee, 0x40, 0x33, 0x5d, 0xae, 0x69, 0x48, + 0xec, 0xc6, 0xc0, 0x38, 0xde, 0xc7, 0x7a, 0x87, 0xde, 0x81, 0xdd, 0x1f, 0x68, 0xc2, 0x5c, 0xbe, + 0x4e, 0x68, 
0xba, 0x66, 0xc1, 0xca, 0x6e, 0x4a, 0xfd, 0x5d, 0x81, 0xce, 0x72, 0x50, 0x94, 0x28, + 0x69, 0xca, 0xb1, 0x96, 0x74, 0xcc, 0x14, 0x88, 0xf2, 0xeb, 0x18, 0xac, 0xf2, 0x67, 0xed, 0x56, + 0x5b, 0xe6, 0xd9, 0x2d, 0x48, 0xca, 0xab, 0x27, 0xd0, 0x8d, 0xa8, 0x47, 0xb8, 0xff, 0x92, 0xba, + 0x69, 0x4c, 0x22, 0xdb, 0x94, 0x9e, 0x0c, 0xae, 0xf3, 0x64, 0x1a, 0x93, 0x48, 0xfb, 0xb2, 0x93, + 0x07, 0x0b, 0x4c, 0x88, 0x2f, 0x92, 0xad, 0x68, 0xc0, 0x89, 0x0d, 0x83, 0xda, 0x31, 0xc2, 0xc5, + 0x11, 0x0f, 0x05, 0x78, 0x85, 0xa6, 0x0a, 0xe8, 0x0c, 0x6a, 0xa2, 0xc6, 0x1c, 0x55, 0x45, 0x3c, + 0x81, 0x6e, 0xcc, 0x52, 0xbf, 0x94, 0xb6, 0x73, 0x33, 0x69, 0x79, 0x70, 0x2e, 0xad, 0x48, 0xa6, + 0xa4, 0x75, 0x95, 0xb4, 0x1c, 0x2d, 0xa4, 0x15, 0x34, 0x25, 0x6d, 0x57, 0x49, 0xcb, 0x51, 0x29, + 0xcd, 0xf9, 0xcd, 0x80, 0xa6, 0x3a, 0x10, 0xbd, 0x0b, 0xd6, 0x32, 0x0b, 0xb3, 0x60, 0xb3, 0x1c, + 0xf5, 0x82, 0xf7, 0x4a, 0x5c, 0x15, 0x74, 0x17, 0xee, 0xbc, 0x4e, 0xbd, 0xf2, 0x92, 0x7b, 0xaf, + 0x05, 0xa8, 0x1b, 0xea, 0x43, 0x27, 0x8b, 0x63, 0x9a, 0xb8, 0x0b, 0x96, 0x45, 0x2b, 0xfd, 0x9c, + 0x41, 0x42, 0xa7, 0x02, 0xb9, 0xd2, 0x0a, 0x6b, 0x37, 0x6b, 0x85, 0xce, 0x7d, 0x80, 0xd2, 0x38, + 0xf1, 0x28, 0xd9, 0xf9, 0x79, 0x4a, 0x55, 0x05, 0xfb, 0x58, 0xef, 0x04, 0x1e, 0xd0, 0xc8, 0xe3, + 0x6b, 0x79, 0x7a, 0x17, 0xeb, 0x9d, 0xf3, 0x93, 0x01, 0xed, 0x3c, 0x29, 0xfa, 0x0c, 0x1a, 0x81, + 0x98, 0x04, 0xb6, 0x21, 0xaf, 0xa9, 0x5f, 0xad, 0xa1, 0x18, 0x16, 0xfa, 0x96, 0x54, 0x4c, 0x75, + 0x87, 0x44, 0x9f, 0x82, 0x79, 0x93, 0x06, 0x5d, 0x92, 0x9d, 0x1f, 0x6b, 0xd0, 0x9c, 0xc8, 0xa9, + 0xf7, 0xff, 0x74, 0x7d, 0x00, 0x0d, 0x4f, 0xcc, 0x29, 0x3d, 0x63, 0xde, 0xac, 0x0e, 0x96, 0xa3, + 0x0c, 0x2b, 0x26, 0xfa, 0x04, 0x5a, 0x4b, 0x35, 0xba, 0xb4, 0xe4, 0xc3, 0xea, 0x20, 0x3d, 0xdf, + 0x70, 0xce, 0x16, 0x81, 0xa9, 0x1a, 0x07, 0xba, 0xeb, 0x6e, 0x09, 0xd4, 0x33, 0x03, 0xe7, 0x6c, + 0x11, 0x98, 0xa9, 0x7e, 0x2b, 0x9b, 0xc9, 0xd6, 0x40, 0xdd, 0x94, 0x71, 0xce, 0x46, 0x9f, 0x83, + 0xb9, 0xce, 0xdb, 0xb0, 0x6c, 0x22, 0x5b, 0xed, 0x29, 0xba, 0x35, 0x2e, 0x23, 0x44, 0xe3, 0x2e, + 0x1c, 0x77, 0xc3, 0x54, 0x76, 0xaa, 0x1a, 0xee, 0x14, 0xd8, 0x24, 0x75, 0x7e, 0x31, 0x60, 0x47, + 0xdd, 0xc3, 0x23, 0x12, 0xfa, 0xc1, 0x45, 0xe5, 0x27, 0x02, 0x82, 0xfa, 0x9a, 0x06, 0xb1, 0xfe, + 0x42, 0x90, 0x6b, 0x74, 0x17, 0xea, 0x42, 0xa3, 0xb4, 0x70, 0x77, 0xdb, 0x7f, 0x5e, 0x65, 0x9e, + 0x5d, 0xc4, 0x14, 0x4b, 0xb6, 0x68, 0xed, 0xea, 0x5b, 0xc7, 0xae, 0x5f, 0xd7, 0xda, 0x55, 0x5c, + 0xde, 0xda, 0x55, 0x84, 0x50, 0x91, 0x45, 0x3e, 0x97, 0x16, 0x9a, 0x58, 0xae, 0xdf, 0x5b, 0x00, + 0x94, 0x67, 0xa0, 0x0e, 0xb4, 0x1e, 0x3c, 0x9b, 0x3f, 0x9d, 0x9d, 0x61, 0xeb, 0x16, 0x32, 0xa1, + 0x31, 0x3e, 0x99, 0x8f, 0xcf, 0x2c, 0x43, 0xe0, 0xd3, 0xf9, 0x64, 0x72, 0x82, 0x5f, 0x58, 0xb7, + 0xc5, 0x66, 0xfe, 0x74, 0xf6, 0xe2, 0xf9, 0xd9, 0x43, 0xab, 0x86, 0xba, 0x60, 0x3e, 0xfe, 0x72, + 0x3a, 0x7b, 0x36, 0xc6, 0x27, 0x13, 0xab, 0x8e, 0xde, 0x80, 0x3d, 0x19, 0xe3, 0x96, 0x60, 0xe3, + 0xd4, 0x79, 0x75, 0x79, 0x64, 0xfc, 0x7e, 0x79, 0x64, 0xfc, 0x75, 0x79, 0x64, 0x7c, 0xd3, 0xf3, + 0x99, 0x5b, 0x0a, 0x76, 0x95, 0xe0, 0x45, 0x53, 0xbe, 0xf6, 0x8f, 0xfe, 0x0e, 0x00, 0x00, 0xff, + 0xff, 0x6d, 0x53, 0xc5, 0x1e, 0xdf, 0x09, 0x00, 0x00, } func (m *LabelPair) Marshal() (dAtA []byte, err error) { @@ -1756,6 +1764,13 @@ func (m *MetricFamily) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Unit) > 0 { + i -= len(m.Unit) + copy(dAtA[i:], m.Unit) + i = encodeVarintMetrics(dAtA, i, uint64(len(m.Unit))) + i-- + dAtA[i] = 0x2a + } if 
len(m.Metric) > 0 { for iNdEx := len(m.Metric) - 1; iNdEx >= 0; iNdEx-- { { @@ -2129,6 +2144,10 @@ func (m *MetricFamily) Size() (n int) { n += 1 + l + sovMetrics(uint64(l)) } } + l = len(m.Unit) + if l > 0 { + n += 1 + l + sovMetrics(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -4075,6 +4094,38 @@ func (m *MetricFamily) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMetrics + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMetrics + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMetrics + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipMetrics(dAtA[iNdEx:]) diff --git a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto index 8e225bb3b..06dcc34af 100644 --- a/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto +++ b/vendor/github.com/prometheus/prometheus/prompb/io/prometheus/client/metrics.proto @@ -52,7 +52,7 @@ message Counter { double value = 1; Exemplar exemplar = 2; - google.protobuf.Timestamp created_timestamp = 3 [(gogoproto.nullable) = true]; + google.protobuf.Timestamp created_timestamp = 3; } message Quantile { @@ -65,7 +65,7 @@ message Summary { double sample_sum = 2; repeated Quantile quantile = 3 [(gogoproto.nullable) = false]; - google.protobuf.Timestamp created_timestamp = 4 [(gogoproto.nullable) = true]; + google.protobuf.Timestamp created_timestamp = 4; } message Untyped { @@ -76,10 +76,10 @@ message Histogram { uint64 sample_count = 1; double sample_count_float = 4; // Overrides sample_count if > 0. double sample_sum = 2; - // Buckets for the conventional histogram. + // Buckets for the classic histogram. repeated Bucket bucket = 3 [(gogoproto.nullable) = false]; // Ordered in increasing order of upper_bound, +Inf bucket is optional. - google.protobuf.Timestamp created_timestamp = 15 [(gogoproto.nullable) = true]; + google.protobuf.Timestamp created_timestamp = 15; // Everything below here is for native histograms (also known as sparse histograms). // Native histograms are an experimental feature without stability guarantees. 
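For context on the generated code above (this note and the sketch below are illustrative, not part of the vendored patch): a protobuf key byte is (field_number << 3) | wire_type. The new MetricFamily unit string is field 5 with the length-delimited wire type 2, so its key byte is (5<<3)|2 = 0x2a, which is exactly the tag that the hand-rolled marshal and unmarshal hunks check for. The helper below is hypothetical and writes forward for clarity; the gogo-generated MarshalToSizedBuffer above emits the same bytes in reverse.

package main

import "fmt"

// appendStringField encodes a protobuf string field: one key byte, a
// varint-encoded length, then the raw string bytes.
func appendStringField(dst []byte, fieldNum int, s string) []byte {
	dst = append(dst, byte(fieldNum<<3|2)) // field 5, wire type 2 -> 0x2a
	n := len(s)
	for n >= 0x80 { // varint-encode the length, 7 bits at a time
		dst = append(dst, byte(n)|0x80)
		n >>= 7
	}
	dst = append(dst, byte(n))
	return append(dst, s...)
}

func main() {
	fmt.Printf("%% x\n", appendStringField(nil, 5, "seconds"))
	// Output: 2a 07 73 65 63 6f 6e 64 73
}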
@@ -153,4 +153,5 @@ message MetricFamily { string help = 2; MetricType type = 3; repeated Metric metric = 4 [(gogoproto.nullable) = false]; -} \ No newline at end of file + string unit = 5; +} diff --git a/vendor/github.com/prometheus/prometheus/scrape/manager.go b/vendor/github.com/prometheus/prometheus/scrape/manager.go index 427b9f2be..3b70e48a1 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/manager.go +++ b/vendor/github.com/prometheus/prometheus/scrape/manager.go @@ -14,6 +14,7 @@ package scrape import ( + "errors" "fmt" "hash/fnv" "reflect" @@ -22,7 +23,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" @@ -32,82 +32,23 @@ import ( "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/util/osutil" + "github.com/prometheus/prometheus/util/pool" ) -var targetMetadataCache = newMetadataMetricsCollector() - -// MetadataMetricsCollector is a Custom Collector for the metadata cache metrics. -type MetadataMetricsCollector struct { - CacheEntries *prometheus.Desc - CacheBytes *prometheus.Desc - - scrapeManager *Manager -} - -func newMetadataMetricsCollector() *MetadataMetricsCollector { - return &MetadataMetricsCollector{ - CacheEntries: prometheus.NewDesc( - "prometheus_target_metadata_cache_entries", - "Total number of metric metadata entries in the cache", - []string{"scrape_job"}, - nil, - ), - CacheBytes: prometheus.NewDesc( - "prometheus_target_metadata_cache_bytes", - "The number of bytes that are currently used for storing metric metadata in the cache", - []string{"scrape_job"}, - nil, - ), - } -} - -func (mc *MetadataMetricsCollector) registerManager(m *Manager) { - mc.scrapeManager = m -} - -// Describe sends the metrics descriptions to the channel. -func (mc *MetadataMetricsCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- mc.CacheEntries - ch <- mc.CacheBytes -} - -// Collect creates and sends the metrics for the metadata cache. -func (mc *MetadataMetricsCollector) Collect(ch chan<- prometheus.Metric) { - if mc.scrapeManager == nil { - return - } - - for tset, targets := range mc.scrapeManager.TargetsActive() { - var size, length int - for _, t := range targets { - size += t.MetadataSize() - length += t.MetadataLength() - } - - ch <- prometheus.MustNewConstMetric( - mc.CacheEntries, - prometheus.GaugeValue, - float64(length), - tset, - ) - - ch <- prometheus.MustNewConstMetric( - mc.CacheBytes, - prometheus.GaugeValue, - float64(size), - tset, - ) - } -} - -// NewManager is the Manager constructor -func NewManager(o *Options, logger log.Logger, app storage.Appendable) *Manager { +// NewManager is the Manager constructor. 
+func NewManager(o *Options, logger log.Logger, app storage.Appendable, registerer prometheus.Registerer) (*Manager, error) { if o == nil { o = &Options{} } if logger == nil { logger = log.NewNopLogger() } + + sm, err := newScrapeMetrics(registerer) + if err != nil { + return nil, fmt.Errorf("failed to create scrape manager due to error: %w", err) + } + m := &Manager{ append: app, opts: o, @@ -116,10 +57,13 @@ func NewManager(o *Options, logger log.Logger, app storage.Appendable) *Manager scrapePools: make(map[string]*scrapePool), graceShut: make(chan struct{}), triggerReload: make(chan struct{}, 1), + metrics: sm, + buffers: pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }), } - targetMetadataCache.registerManager(m) - return m + m.metrics.setTargetMetadataCacheGatherer(m) + + return m, nil } // Options are the configuration parameters to the scrape manager. @@ -132,9 +76,6 @@ type Options struct { // Option to enable the experimental in-memory metadata storage and append // metadata to the WAL. EnableMetadataStorage bool - // Option to enable protobuf negotiation with the client. Note that the client can already - // send protobuf without needing to enable this. - EnableProtobufNegotiation bool // Option to increase the interval used by scrape manager to throttle target groups updates. DiscoveryReloadInterval model.Duration @@ -155,8 +96,11 @@ type Manager struct { scrapeConfigs map[string]*config.ScrapeConfig scrapePools map[string]*scrapePool targetSets map[string][]*targetgroup.Group + buffers *pool.Pool triggerReload chan struct{} + + metrics *scrapeMetrics } // Run receives and saves target set updates and triggers the scraping loops reloading. @@ -214,8 +158,10 @@ func (m *Manager) reload() { level.Error(m.logger).Log("msg", "error reloading target set", "err", "invalid config id:"+setName) continue } - sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, log.With(m.logger, "scrape_pool", setName), m.opts) + m.metrics.targetScrapePools.Inc() + sp, err := newScrapePool(scrapeConfig, m.append, m.offsetSeed, log.With(m.logger, "scrape_pool", setName), m.buffers, m.opts, m.metrics) if err != nil { + m.metrics.targetScrapePoolsFailed.Inc() level.Error(m.logger).Log("msg", "error creating new scrape pool", "err", err, "scrape_pool", setName) continue } diff --git a/vendor/github.com/prometheus/prometheus/scrape/metrics.go b/vendor/github.com/prometheus/prometheus/scrape/metrics.go new file mode 100644 index 000000000..d74143185 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/scrape/metrics.go @@ -0,0 +1,307 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package scrape + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" +) + +type scrapeMetrics struct { + // Used by Manager. + targetMetadataCache *MetadataMetricsCollector + targetScrapePools prometheus.Counter + targetScrapePoolsFailed prometheus.Counter + + // Used by scrapePool. 
+	targetReloadIntervalLength           *prometheus.SummaryVec
+	targetScrapePoolReloads              prometheus.Counter
+	targetScrapePoolReloadsFailed        prometheus.Counter
+	targetScrapePoolSyncsCounter         *prometheus.CounterVec
+	targetScrapePoolExceededTargetLimit  prometheus.Counter
+	targetScrapePoolTargetLimit          *prometheus.GaugeVec
+	targetScrapePoolTargetsAdded         *prometheus.GaugeVec
+	targetSyncIntervalLength             *prometheus.SummaryVec
+	targetSyncFailed                     *prometheus.CounterVec
+
+	// Used by targetScraper.
+	targetScrapeExceededBodySizeLimit prometheus.Counter
+
+	// Used by scrapeCache.
+	targetScrapeCacheFlushForced prometheus.Counter
+
+	// Used by scrapeLoop.
+	targetIntervalLength                   *prometheus.SummaryVec
+	targetScrapeSampleLimit                prometheus.Counter
+	targetScrapeSampleDuplicate            prometheus.Counter
+	targetScrapeSampleOutOfOrder           prometheus.Counter
+	targetScrapeSampleOutOfBounds          prometheus.Counter
+	targetScrapeExemplarOutOfOrder         prometheus.Counter
+	targetScrapePoolExceededLabelLimits    prometheus.Counter
+	targetScrapeNativeHistogramBucketLimit prometheus.Counter
+}
+
+func newScrapeMetrics(reg prometheus.Registerer) (*scrapeMetrics, error) {
+	sm := &scrapeMetrics{}
+
+	// Manager metrics.
+	sm.targetMetadataCache = &MetadataMetricsCollector{
+		CacheEntries: prometheus.NewDesc(
+			"prometheus_target_metadata_cache_entries",
+			"Total number of metric metadata entries in the cache",
+			[]string{"scrape_job"},
+			nil,
+		),
+		CacheBytes: prometheus.NewDesc(
+			"prometheus_target_metadata_cache_bytes",
+			"The number of bytes that are currently used for storing metric metadata in the cache",
+			[]string{"scrape_job"},
+			nil,
+		),
+		// TargetsGatherer is set later because of a circular dependency:
+		// newScrapeMetrics() is called from NewManager(), and the new Manager
+		// itself is the TargetsGatherer.
+	}
+
+	sm.targetScrapePools = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrape_pools_total",
+			Help: "Total number of scrape pool creation attempts.",
+		},
+	)
+	sm.targetScrapePoolsFailed = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrape_pools_failed_total",
+			Help: "Total number of scrape pool creations that failed.",
+		},
+	)
+
+	// Used by scrapePool.
+ sm.targetReloadIntervalLength = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "prometheus_target_reload_length_seconds", + Help: "Actual interval to reload the scrape pool with a given configuration.", + Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001}, + }, + []string{"interval"}, + ) + sm.targetScrapePoolReloads = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pool_reloads_total", + Help: "Total number of scrape pool reloads.", + }, + ) + sm.targetScrapePoolReloadsFailed = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pool_reloads_failed_total", + Help: "Total number of failed scrape pool reloads.", + }, + ) + sm.targetScrapePoolExceededTargetLimit = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pool_exceeded_target_limit_total", + Help: "Total number of times scrape pools hit the target limit, during sync or config reload.", + }, + ) + sm.targetScrapePoolTargetLimit = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "prometheus_target_scrape_pool_target_limit", + Help: "Maximum number of targets allowed in this scrape pool.", + }, + []string{"scrape_job"}, + ) + sm.targetScrapePoolTargetsAdded = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "prometheus_target_scrape_pool_targets", + Help: "Current number of targets in this scrape pool.", + }, + []string{"scrape_job"}, + ) + sm.targetScrapePoolSyncsCounter = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "prometheus_target_scrape_pool_sync_total", + Help: "Total number of syncs that were executed on a scrape pool.", + }, + []string{"scrape_job"}, + ) + sm.targetSyncIntervalLength = prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "prometheus_target_sync_length_seconds", + Help: "Actual interval to sync the scrape pool.", + Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001}, + }, + []string{"scrape_job"}, + ) + sm.targetSyncFailed = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "prometheus_target_sync_failed_total", + Help: "Total number of target sync failures.", + }, + []string{"scrape_job"}, + ) + + // Used by targetScraper. + sm.targetScrapeExceededBodySizeLimit = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_exceeded_body_size_limit_total", + Help: "Total number of scrapes that hit the body size limit", + }, + ) + + // Used by scrapeCache. + sm.targetScrapeCacheFlushForced = prometheus.NewCounter( + prometheus.CounterOpts{ + Name: "prometheus_target_scrapes_cache_flush_forced_total", + Help: "How many times a scrape cache was flushed due to getting big while scrapes are failing.", + }, + ) + + // Used by scrapeLoop. 
+	sm.targetIntervalLength = prometheus.NewSummaryVec(
+		prometheus.SummaryOpts{
+			Name:       "prometheus_target_interval_length_seconds",
+			Help:       "Actual intervals between scrapes.",
+			Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001},
+		},
+		[]string{"interval"},
+	)
+	sm.targetScrapeSampleLimit = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrapes_exceeded_sample_limit_total",
+			Help: "Total number of scrapes that hit the sample limit and were rejected.",
+		},
+	)
+	sm.targetScrapeSampleDuplicate = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrapes_sample_duplicate_timestamp_total",
+			Help: "Total number of samples rejected due to duplicate timestamps but different values.",
+		},
+	)
+	sm.targetScrapeSampleOutOfOrder = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrapes_sample_out_of_order_total",
+			Help: "Total number of samples rejected due to not being in the expected order.",
+		},
+	)
+	sm.targetScrapeSampleOutOfBounds = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrapes_sample_out_of_bounds_total",
+			Help: "Total number of samples rejected due to timestamp falling outside of the time bounds.",
+		},
+	)
+	sm.targetScrapePoolExceededLabelLimits = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrape_pool_exceeded_label_limits_total",
+			Help: "Total number of times scrape pools hit the label limits, during sync or config reload.",
+		},
+	)
+	sm.targetScrapeNativeHistogramBucketLimit = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total",
+			Help: "Total number of scrapes that hit the native histogram bucket limit and were rejected.",
+		},
+	)
+	sm.targetScrapeExemplarOutOfOrder = prometheus.NewCounter(
+		prometheus.CounterOpts{
+			Name: "prometheus_target_scrapes_exemplar_out_of_order_total",
+			Help: "Total number of exemplars rejected due to not being in the expected order.",
+		},
+	)
+
+	for _, collector := range []prometheus.Collector{
+		// Used by Manager.
+		sm.targetMetadataCache,
+		sm.targetScrapePools,
+		sm.targetScrapePoolsFailed,
+		// Used by scrapePool.
+		sm.targetReloadIntervalLength,
+		sm.targetScrapePoolReloads,
+		sm.targetScrapePoolReloadsFailed,
+		sm.targetSyncIntervalLength,
+		sm.targetScrapePoolSyncsCounter,
+		sm.targetScrapePoolExceededTargetLimit,
+		sm.targetScrapePoolTargetLimit,
+		sm.targetScrapePoolTargetsAdded,
+		sm.targetSyncFailed,
+		// Used by targetScraper.
+		sm.targetScrapeExceededBodySizeLimit,
+		// Used by scrapeCache.
+		sm.targetScrapeCacheFlushForced,
+		// Used by scrapeLoop.
+		sm.targetIntervalLength,
+		sm.targetScrapeSampleLimit,
+		sm.targetScrapeSampleDuplicate,
+		sm.targetScrapeSampleOutOfOrder,
+		sm.targetScrapeSampleOutOfBounds,
+		sm.targetScrapeExemplarOutOfOrder,
+		sm.targetScrapePoolExceededLabelLimits,
+		sm.targetScrapeNativeHistogramBucketLimit,
+	} {
+		err := reg.Register(collector)
+		if err != nil {
+			return nil, fmt.Errorf("failed to register scrape metrics: %w", err)
+		}
+	}
+	return sm, nil
+}
+
+func (sm *scrapeMetrics) setTargetMetadataCacheGatherer(gatherer TargetsGatherer) {
+	sm.targetMetadataCache.TargetsGatherer = gatherer
+}
+
+type TargetsGatherer interface {
+	TargetsActive() map[string][]*Target
+}
+
+// MetadataMetricsCollector is a custom Collector for the metadata cache metrics.
+type MetadataMetricsCollector struct { + CacheEntries *prometheus.Desc + CacheBytes *prometheus.Desc + TargetsGatherer TargetsGatherer +} + +// Describe sends the metrics descriptions to the channel. +func (mc *MetadataMetricsCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- mc.CacheEntries + ch <- mc.CacheBytes +} + +// Collect creates and sends the metrics for the metadata cache. +func (mc *MetadataMetricsCollector) Collect(ch chan<- prometheus.Metric) { + if mc.TargetsGatherer == nil { + return + } + + for tset, targets := range mc.TargetsGatherer.TargetsActive() { + var size, length int + for _, t := range targets { + size += t.MetadataSize() + length += t.MetadataLength() + } + + ch <- prometheus.MustNewConstMetric( + mc.CacheEntries, + prometheus.GaugeValue, + float64(length), + tset, + ) + + ch <- prometheus.MustNewConstMetric( + mc.CacheBytes, + prometheus.GaugeValue, + float64(size), + tset, + ) + } +} diff --git a/vendor/github.com/prometheus/prometheus/scrape/scrape.go b/vendor/github.com/prometheus/prometheus/scrape/scrape.go index 5db309008..29f98979b 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/scrape.go +++ b/vendor/github.com/prometheus/prometheus/scrape/scrape.go @@ -18,19 +18,19 @@ import ( "bytes" "compress/gzip" "context" + "errors" "fmt" "io" "math" "net/http" "reflect" "strconv" + "strings" "sync" "time" "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" config_util "github.com/prometheus/common/config" "github.com/prometheus/common/model" "github.com/prometheus/common/version" @@ -60,172 +60,6 @@ var AlignScrapeTimestamps = true var errNameLabelMandatory = fmt.Errorf("missing metric name (%s label)", labels.MetricName) -var ( - targetIntervalLength = prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Name: "prometheus_target_interval_length_seconds", - Help: "Actual intervals between scrapes.", - Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001}, - }, - []string{"interval"}, - ) - targetReloadIntervalLength = prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Name: "prometheus_target_reload_length_seconds", - Help: "Actual interval to reload the scrape pool with a given configuration.", - Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001}, - }, - []string{"interval"}, - ) - targetScrapePools = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_target_scrape_pools_total", - Help: "Total number of scrape pool creation attempts.", - }, - ) - targetScrapePoolsFailed = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_target_scrape_pools_failed_total", - Help: "Total number of scrape pool creations that failed.", - }, - ) - targetScrapePoolReloads = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_target_scrape_pool_reloads_total", - Help: "Total number of scrape pool reloads.", - }, - ) - targetScrapePoolReloadsFailed = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_target_scrape_pool_reloads_failed_total", - Help: "Total number of failed scrape pool reloads.", - }, - ) - targetScrapePoolExceededTargetLimit = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_target_scrape_pool_exceeded_target_limit_total", - Help: "Total number of times scrape pools hit the target limit, during sync or config reload.", - }, - ) - targetScrapePoolTargetLimit = prometheus.NewGaugeVec( - 
prometheus.GaugeOpts{ - Name: "prometheus_target_scrape_pool_target_limit", - Help: "Maximum number of targets allowed in this scrape pool.", - }, - []string{"scrape_job"}, - ) - targetScrapePoolTargetsAdded = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "prometheus_target_scrape_pool_targets", - Help: "Current number of targets in this scrape pool.", - }, - []string{"scrape_job"}, - ) - targetSyncIntervalLength = prometheus.NewSummaryVec( - prometheus.SummaryOpts{ - Name: "prometheus_target_sync_length_seconds", - Help: "Actual interval to sync the scrape pool.", - Objectives: map[float64]float64{0.01: 0.001, 0.05: 0.005, 0.5: 0.05, 0.90: 0.01, 0.99: 0.001}, - }, - []string{"scrape_job"}, - ) - targetScrapePoolSyncsCounter = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_target_scrape_pool_sync_total", - Help: "Total number of syncs that were executed on a scrape pool.", - }, - []string{"scrape_job"}, - ) - targetScrapeExceededBodySizeLimit = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_target_scrapes_exceeded_body_size_limit_total", - Help: "Total number of scrapes that hit the body size limit", - }, - ) - targetScrapeSampleLimit = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_target_scrapes_exceeded_sample_limit_total", - Help: "Total number of scrapes that hit the sample limit and were rejected.", - }, - ) - targetScrapeSampleDuplicate = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_target_scrapes_sample_duplicate_timestamp_total", - Help: "Total number of samples rejected due to duplicate timestamps but different values.", - }, - ) - targetScrapeSampleOutOfOrder = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_target_scrapes_sample_out_of_order_total", - Help: "Total number of samples rejected due to not being out of the expected order.", - }, - ) - targetScrapeSampleOutOfBounds = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_target_scrapes_sample_out_of_bounds_total", - Help: "Total number of samples rejected due to timestamp falling outside of the time bounds.", - }, - ) - targetScrapeCacheFlushForced = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_target_scrapes_cache_flush_forced_total", - Help: "How many times a scrape cache was flushed due to getting big while scrapes are failing.", - }, - ) - targetScrapeExemplarOutOfOrder = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_target_scrapes_exemplar_out_of_order_total", - Help: "Total number of exemplar rejected due to not being out of the expected order.", - }, - ) - targetScrapePoolExceededLabelLimits = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_target_scrape_pool_exceeded_label_limits_total", - Help: "Total number of times scrape pools hit the label limits, during sync or config reload.", - }, - ) - targetSyncFailed = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "prometheus_target_sync_failed_total", - Help: "Total number of target sync failures.", - }, - []string{"scrape_job"}, - ) - targetScrapeNativeHistogramBucketLimit = prometheus.NewCounter( - prometheus.CounterOpts{ - Name: "prometheus_target_scrapes_exceeded_native_histogram_bucket_limit_total", - Help: "Total number of scrapes that hit the native histogram bucket limit and were rejected.", - }, - ) -) - -func init() { - prometheus.MustRegister( - targetIntervalLength, - targetReloadIntervalLength, - targetScrapePools, - 
targetScrapePoolsFailed, - targetScrapePoolReloads, - targetScrapePoolReloadsFailed, - targetSyncIntervalLength, - targetScrapePoolSyncsCounter, - targetScrapeExceededBodySizeLimit, - targetScrapeSampleLimit, - targetScrapeSampleDuplicate, - targetScrapeSampleOutOfOrder, - targetScrapeSampleOutOfBounds, - targetScrapePoolExceededTargetLimit, - targetScrapePoolTargetLimit, - targetScrapePoolTargetsAdded, - targetScrapeCacheFlushForced, - targetMetadataCache, - targetScrapeExemplarOutOfOrder, - targetScrapePoolExceededLabelLimits, - targetSyncFailed, - targetScrapeNativeHistogramBucketLimit, - ) -} - // scrapePool manages scrapes for sets of targets. type scrapePool struct { appendable storage.Appendable @@ -251,7 +85,7 @@ type scrapePool struct { noDefaultPort bool - enableProtobufNegotiation bool + metrics *scrapeMetrics } type labelLimits struct { @@ -274,45 +108,42 @@ type scrapeLoopOptions struct { scrapeClassicHistograms bool mrc []*relabel.Config cache *scrapeCache + enableCompression bool } const maxAheadTime = 10 * time.Minute -// returning an empty label set is interpreted as "drop" +// returning an empty label set is interpreted as "drop". type labelsMutator func(labels.Labels) labels.Labels -func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, options *Options) (*scrapePool, error) { - targetScrapePools.Inc() +func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed uint64, logger log.Logger, buffers *pool.Pool, options *Options, metrics *scrapeMetrics) (*scrapePool, error) { if logger == nil { logger = log.NewNopLogger() } client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, options.HTTPClientOptions...) if err != nil { - targetScrapePoolsFailed.Inc() - return nil, errors.Wrap(err, "error creating HTTP client") + return nil, fmt.Errorf("error creating HTTP client: %w", err) } - buffers := pool.New(1e3, 100e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) - ctx, cancel := context.WithCancel(context.Background()) sp := &scrapePool{ - cancel: cancel, - appendable: app, - config: cfg, - client: client, - activeTargets: map[uint64]*Target{}, - loops: map[uint64]loop{}, - logger: logger, - httpOpts: options.HTTPClientOptions, - noDefaultPort: options.NoDefaultPort, - enableProtobufNegotiation: options.EnableProtobufNegotiation, + cancel: cancel, + appendable: app, + config: cfg, + client: client, + activeTargets: map[uint64]*Target{}, + loops: map[uint64]loop{}, + logger: logger, + metrics: metrics, + httpOpts: options.HTTPClientOptions, + noDefaultPort: options.NoDefaultPort, } sp.newLoop = func(opts scrapeLoopOptions) loop { // Update the targets retrieval function for metadata to a new scrape cache. 
cache := opts.cache if cache == nil { - cache = newScrapeCache() + cache = newScrapeCache(metrics) } opts.target.SetMetadataStore(cache) @@ -330,6 +161,7 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed offsetSeed, opts.honorTimestamps, opts.trackTimestampsStaleness, + opts.enableCompression, opts.sampleLimit, opts.bucketLimit, opts.labelLimits, @@ -340,9 +172,10 @@ func newScrapePool(cfg *config.ScrapeConfig, app storage.Appendable, offsetSeed options.EnableMetadataStorage, opts.target, options.PassMetadataInContext, + metrics, ) } - targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) + sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) return sp, nil } @@ -397,11 +230,11 @@ func (sp *scrapePool) stop() { sp.client.CloseIdleConnections() if sp.config != nil { - targetScrapePoolSyncsCounter.DeleteLabelValues(sp.config.JobName) - targetScrapePoolTargetLimit.DeleteLabelValues(sp.config.JobName) - targetScrapePoolTargetsAdded.DeleteLabelValues(sp.config.JobName) - targetSyncIntervalLength.DeleteLabelValues(sp.config.JobName) - targetSyncFailed.DeleteLabelValues(sp.config.JobName) + sp.metrics.targetScrapePoolSyncsCounter.DeleteLabelValues(sp.config.JobName) + sp.metrics.targetScrapePoolTargetLimit.DeleteLabelValues(sp.config.JobName) + sp.metrics.targetScrapePoolTargetsAdded.DeleteLabelValues(sp.config.JobName) + sp.metrics.targetSyncIntervalLength.DeleteLabelValues(sp.config.JobName) + sp.metrics.targetSyncFailed.DeleteLabelValues(sp.config.JobName) } } @@ -411,13 +244,13 @@ func (sp *scrapePool) stop() { func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { sp.mtx.Lock() defer sp.mtx.Unlock() - targetScrapePoolReloads.Inc() + sp.metrics.targetScrapePoolReloads.Inc() start := time.Now() client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, cfg.JobName, sp.httpOpts...) 
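	// (Illustrative aside, not part of the vendored code.) With this refactor
	// the []byte buffer pool is created once in NewManager and shared by every
	// scrape pool, instead of each newScrapePool call allocating its own.
	// util/pool buckets slices by capacity; a scrape loop borrows a buffer
	// sized to the previous scrape body and returns it afterwards, roughly:
	//
	//	buf := buffers.Get(lastScrapeSize).([]byte)
	//	// ... read the scrape response into buf ...
	//	buffers.Put(buf)
	//
	// Sharing one pool lets concurrently scraping jobs reuse each other's
	// buffers rather than each warming up a private pool.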
if err != nil { - targetScrapePoolReloadsFailed.Inc() - return errors.Wrap(err, "error creating HTTP client") + sp.metrics.targetScrapePoolReloadsFailed.Inc() + return fmt.Errorf("error creating HTTP client: %w", err) } reuseCache := reusableCache(sp.config, cfg) @@ -425,7 +258,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { oldClient := sp.client sp.client = client - targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) + sp.metrics.targetScrapePoolTargetLimit.WithLabelValues(sp.config.JobName).Set(float64(sp.config.TargetLimit)) var ( wg sync.WaitGroup @@ -441,6 +274,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { } honorLabels = sp.config.HonorLabels honorTimestamps = sp.config.HonorTimestamps + enableCompression = sp.config.EnableCompression trackTimestampsStaleness = sp.config.TrackTimestampsStaleness mrc = sp.config.MetricRelabelConfigs ) @@ -454,17 +288,20 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { oldLoop.disableEndOfRunStalenessMarkers() cache = oc } else { - cache = newScrapeCache() + cache = newScrapeCache(sp.metrics) } t := sp.activeTargets[fp] interval, timeout, err := t.intervalAndTimeout(interval, timeout) - acceptHeader := scrapeAcceptHeader - if sp.enableProtobufNegotiation { - acceptHeader = scrapeAcceptHeaderWithProtobuf - } var ( - s = &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader} + s = &targetScraper{ + Target: t, + client: sp.client, + timeout: timeout, + bodySizeLimit: bodySizeLimit, + acceptHeader: acceptHeader(cfg.ScrapeProtocols), + acceptEncodingHeader: acceptEncodingHeader(enableCompression), + } newLoop = sp.newLoop(scrapeLoopOptions{ target: t, scraper: s, @@ -473,6 +310,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { labelLimits: labelLimits, honorLabels: honorLabels, honorTimestamps: honorTimestamps, + enableCompression: enableCompression, trackTimestampsStaleness: trackTimestampsStaleness, mrc: mrc, cache: cache, @@ -500,7 +338,7 @@ func (sp *scrapePool) reload(cfg *config.ScrapeConfig) error { wg.Wait() oldClient.CloseIdleConnections() - targetReloadIntervalLength.WithLabelValues(interval.String()).Observe( + sp.metrics.targetReloadIntervalLength.WithLabelValues(interval.String()).Observe( time.Since(start).Seconds(), ) return nil @@ -524,7 +362,7 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { for _, err := range failures { level.Error(sp.logger).Log("msg", "Creating target failed", "err", err) } - targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures))) + sp.metrics.targetSyncFailed.WithLabelValues(sp.config.JobName).Add(float64(len(failures))) for _, t := range targets { // Replicate .Labels().IsEmpty() with a loop here to avoid generating garbage. 
nonEmpty := false @@ -543,10 +381,10 @@ func (sp *scrapePool) Sync(tgs []*targetgroup.Group) { sp.targetMtx.Unlock() sp.sync(all) - targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe( + sp.metrics.targetSyncIntervalLength.WithLabelValues(sp.config.JobName).Observe( time.Since(start).Seconds(), ) - targetScrapePoolSyncsCounter.WithLabelValues(sp.config.JobName).Inc() + sp.metrics.targetScrapePoolSyncsCounter.WithLabelValues(sp.config.JobName).Inc() } // sync takes a list of potentially duplicated targets, deduplicates them, starts @@ -567,6 +405,7 @@ func (sp *scrapePool) sync(targets []*Target) { } honorLabels = sp.config.HonorLabels honorTimestamps = sp.config.HonorTimestamps + enableCompression = sp.config.EnableCompression trackTimestampsStaleness = sp.config.TrackTimestampsStaleness mrc = sp.config.MetricRelabelConfigs scrapeClassicHistograms = sp.config.ScrapeClassicHistograms @@ -582,11 +421,15 @@ func (sp *scrapePool) sync(targets []*Target) { // for every target. var err error interval, timeout, err = t.intervalAndTimeout(interval, timeout) - acceptHeader := scrapeAcceptHeader - if sp.enableProtobufNegotiation { - acceptHeader = scrapeAcceptHeaderWithProtobuf + s := &targetScraper{ + Target: t, + client: sp.client, + timeout: timeout, + bodySizeLimit: bodySizeLimit, + acceptHeader: acceptHeader(sp.config.ScrapeProtocols), + acceptEncodingHeader: acceptEncodingHeader(enableCompression), + metrics: sp.metrics, } - s := &targetScraper{Target: t, client: sp.client, timeout: timeout, bodySizeLimit: bodySizeLimit, acceptHeader: acceptHeader} l := sp.newLoop(scrapeLoopOptions{ target: t, scraper: s, @@ -595,6 +438,7 @@ func (sp *scrapePool) sync(targets []*Target) { labelLimits: labelLimits, honorLabels: honorLabels, honorTimestamps: honorTimestamps, + enableCompression: enableCompression, trackTimestampsStaleness: trackTimestampsStaleness, mrc: mrc, interval: interval, @@ -638,7 +482,7 @@ func (sp *scrapePool) sync(targets []*Target) { sp.targetMtx.Unlock() - targetScrapePoolTargetsAdded.WithLabelValues(sp.config.JobName).Set(float64(len(uniqueLoops))) + sp.metrics.targetScrapePoolTargetsAdded.WithLabelValues(sp.config.JobName).Set(float64(len(uniqueLoops))) forcedErr := sp.refreshTargetLimitErr() for _, l := range sp.loops { l.setForcedError(forcedErr) @@ -662,7 +506,7 @@ func (sp *scrapePool) refreshTargetLimitErr() error { return nil } if l := len(sp.activeTargets); l > int(sp.config.TargetLimit) { - targetScrapePoolExceededTargetLimit.Inc() + sp.metrics.targetScrapePoolExceededTargetLimit.Inc() return fmt.Errorf("target_limit exceeded (number of targets: %d, limit: %d)", l, sp.config.TargetLimit) } return nil @@ -808,16 +652,36 @@ type targetScraper struct { gzipr *gzip.Reader buf *bufio.Reader - bodySizeLimit int64 - acceptHeader string + bodySizeLimit int64 + acceptHeader string + acceptEncodingHeader string + + metrics *scrapeMetrics } var errBodySizeLimit = errors.New("body size limit exceeded") -const ( - scrapeAcceptHeader = `application/openmetrics-text;version=1.0.0,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1` - scrapeAcceptHeaderWithProtobuf = `application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited,application/openmetrics-text;version=1.0.0;q=0.8,application/openmetrics-text;version=0.0.1;q=0.75,text/plain;version=0.0.4;q=0.5,*/*;q=0.1` -) +// acceptHeader transforms preference from the options into specific header values as +// 
https://www.rfc-editor.org/rfc/rfc9110.html#name-accept defines. +// No validation is here, we expect scrape protocols to be validated already. +func acceptHeader(sps []config.ScrapeProtocol) string { + var vals []string + weight := len(config.ScrapeProtocolsHeaders) + 1 + for _, sp := range sps { + vals = append(vals, fmt.Sprintf("%s;q=0.%d", config.ScrapeProtocolsHeaders[sp], weight)) + weight-- + } + // Default match anything. + vals = append(vals, fmt.Sprintf("*/*;q=0.%d", weight)) + return strings.Join(vals, ",") +} + +func acceptEncodingHeader(enableCompression bool) string { + if enableCompression { + return "gzip" + } + return "identity" +} var UserAgent = fmt.Sprintf("Prometheus/%s", version.Version) @@ -828,7 +692,7 @@ func (s *targetScraper) scrape(ctx context.Context) (*http.Response, error) { return nil, err } req.Header.Add("Accept", s.acceptHeader) - req.Header.Add("Accept-Encoding", "gzip") + req.Header.Add("Accept-Encoding", s.acceptEncodingHeader) req.Header.Set("User-Agent", UserAgent) req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", strconv.FormatFloat(s.timeout.Seconds(), 'f', -1, 64)) @@ -845,7 +709,7 @@ func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w }() if resp.StatusCode != http.StatusOK { - return "", errors.Errorf("server returned HTTP status %s", resp.Status) + return "", fmt.Errorf("server returned HTTP status %s", resp.Status) } if s.bodySizeLimit <= 0 { @@ -857,7 +721,7 @@ func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w return "", err } if n >= s.bodySizeLimit { - targetScrapeExceededBodySizeLimit.Inc() + s.metrics.targetScrapeExceededBodySizeLimit.Inc() return "", errBodySizeLimit } return resp.Header.Get("Content-Type"), nil @@ -883,7 +747,7 @@ func (s *targetScraper) readResponse(ctx context.Context, resp *http.Response, w return "", err } if n >= s.bodySizeLimit { - targetScrapeExceededBodySizeLimit.Inc() + s.metrics.targetScrapeExceededBodySizeLimit.Inc() return "", errBodySizeLimit } return resp.Header.Get("Content-Type"), nil @@ -914,6 +778,7 @@ type scrapeLoop struct { offsetSeed uint64 honorTimestamps bool trackTimestampsStaleness bool + enableCompression bool forcedErr error forcedErrMtx sync.Mutex sampleLimit int @@ -937,6 +802,8 @@ type scrapeLoop struct { reportExtraMetrics bool appendMetadataToWAL bool + + metrics *scrapeMetrics } // scrapeCache tracks mappings of exposed metric strings to label sets and @@ -964,6 +831,8 @@ type scrapeCache struct { metaMtx sync.Mutex metadata map[string]*metaEntry + + metrics *scrapeMetrics } // metaEntry holds meta information about a metric. @@ -979,13 +848,14 @@ func (m *metaEntry) size() int { return len(m.Help) + len(m.Unit) + len(m.Type) } -func newScrapeCache() *scrapeCache { +func newScrapeCache(metrics *scrapeMetrics) *scrapeCache { return &scrapeCache{ series: map[string]*cacheEntry{}, droppedSeries: map[string]*uint64{}, seriesCur: map[uint64]labels.Labels{}, seriesPrev: map[uint64]labels.Labels{}, metadata: map[string]*metaEntry{}, + metrics: metrics, } } @@ -1004,7 +874,7 @@ func (c *scrapeCache) iterDone(flushCache bool) { // since the last scrape, and allow an additional 1000 in case // initial scrapes all fail. 
flushCache = true - targetScrapeCacheFlushForced.Inc() + c.metrics.targetScrapeCacheFlushForced.Inc() } if flushCache { @@ -1199,6 +1069,7 @@ func newScrapeLoop(ctx context.Context, offsetSeed uint64, honorTimestamps bool, trackTimestampsStaleness bool, + enableCompression bool, sampleLimit int, bucketLimit int, labelLimits *labelLimits, @@ -1209,6 +1080,7 @@ func newScrapeLoop(ctx context.Context, appendMetadataToWAL bool, target *Target, passMetadataInContext bool, + metrics *scrapeMetrics, ) *scrapeLoop { if l == nil { l = log.NewNopLogger() @@ -1217,7 +1089,7 @@ func newScrapeLoop(ctx context.Context, buffers = pool.New(1e3, 1e6, 3, func(sz int) interface{} { return make([]byte, 0, sz) }) } if cache == nil { - cache = newScrapeCache() + cache = newScrapeCache(metrics) } appenderCtx := ctx @@ -1245,6 +1117,7 @@ func newScrapeLoop(ctx context.Context, appenderCtx: appenderCtx, honorTimestamps: honorTimestamps, trackTimestampsStaleness: trackTimestampsStaleness, + enableCompression: enableCompression, sampleLimit: sampleLimit, bucketLimit: bucketLimit, labelLimits: labelLimits, @@ -1253,6 +1126,7 @@ func newScrapeLoop(ctx context.Context, scrapeClassicHistograms: scrapeClassicHistograms, reportExtraMetrics: reportExtraMetrics, appendMetadataToWAL: appendMetadataToWAL, + metrics: metrics, } sl.ctx, sl.cancel = context.WithCancel(ctx) @@ -1332,7 +1206,7 @@ func (sl *scrapeLoop) scrapeAndReport(last, appendTime time.Time, errc chan<- er // Only record after the first scrape. if !last.IsZero() { - targetIntervalLength.WithLabelValues(sl.interval.String()).Observe( + sl.metrics.targetIntervalLength.WithLabelValues(sl.interval.String()).Observe( time.Since(last).Seconds(), ) } @@ -1547,6 +1421,8 @@ func (sl *scrapeLoop) append(app storage.Appender, b []byte, contentType string, metadataChanged bool ) + exemplars := make([]exemplar.Exemplar, 1) + // updateMetadata updates the current iteration's metadata object and the // metadataChanged value if we have metadata in the scrape cache AND the // labelset is for a new series or the metadata for this series has just @@ -1673,7 +1549,7 @@ loop: // If any label limits is exceeded the scrape should fail. if err = verifyLabelLimits(lset, sl.labelLimits); err != nil { - targetScrapePoolExceededLabelLimits.Inc() + sl.metrics.targetScrapePoolExceededLabelLimits.Inc() break loop } @@ -1692,7 +1568,7 @@ loop: } sampleAdded, err = sl.checkAddError(ce, met, parsedTimestamp, err, &sampleLimitErr, &bucketLimitErr, &appErrs) if err != nil { - if err != storage.ErrNotFound { + if !errors.Is(err, storage.ErrNotFound) { level.Debug(sl.l).Log("msg", "Unexpected error", "series", string(met), "err", err) } break loop @@ -1712,18 +1588,46 @@ loop: // Increment added even if there's an error so we correctly report the // number of samples remaining after relabeling. added++ - + exemplars = exemplars[:0] // Reset and reuse the exemplar slice. for hasExemplar := p.Exemplar(&e); hasExemplar; hasExemplar = p.Exemplar(&e) { if !e.HasTs { + if isHistogram { + // We drop exemplars for native histograms if they don't have a timestamp. + // Missing timestamps are deliberately not supported as we want to start + // enforcing timestamps for exemplars as otherwise proper deduplication + // is inefficient and purely based on heuristics: we cannot distinguish + // between repeated exemplars and new instances with the same values. + // This is done silently without logs as it is not an error but out of spec. + // This does not affect classic histograms so that behaviour is unchanged. 
+ e = exemplar.Exemplar{} // Reset for next time round loop. + continue + } e.Ts = t } + exemplars = append(exemplars, e) + e = exemplar.Exemplar{} // Reset for next time round loop. + } + // Sort so that checking for duplicates / out of order is more efficient during validation. + slices.SortFunc(exemplars, exemplar.Compare) + outOfOrderExemplars := 0 + for _, e := range exemplars { _, exemplarErr := app.AppendExemplar(ref, lset, e) - exemplarErr = sl.checkAddExemplarError(exemplarErr, e, &appErrs) - if exemplarErr != nil { + switch { + case exemplarErr == nil: + // Do nothing. + case errors.Is(exemplarErr, storage.ErrOutOfOrderExemplar): + outOfOrderExemplars++ + default: // Since exemplar storage is still experimental, we don't fail the scrape on ingestion errors. level.Debug(sl.l).Log("msg", "Error while adding exemplar in AddExemplar", "exemplar", fmt.Sprintf("%+v", e), "err", exemplarErr) } - e = exemplar.Exemplar{} // reset for next time round loop + } + if outOfOrderExemplars > 0 && outOfOrderExemplars == len(exemplars) { + // Only report out of order exemplars if all are out of order, otherwise this was a partial update + // to some existing set of exemplars. + appErrs.numExemplarOutOfOrder += outOfOrderExemplars + level.Debug(sl.l).Log("msg", "Out of order exemplars", "count", outOfOrderExemplars, "latest", fmt.Sprintf("%+v", exemplars[len(exemplars)-1])) + sl.metrics.targetScrapeExemplarOutOfOrder.Add(float64(outOfOrderExemplars)) } if sl.appendMetadataToWAL && metadataChanged { @@ -1738,14 +1642,14 @@ loop: err = sampleLimitErr } // We only want to increment this once per scrape, so this is Inc'd outside the loop. - targetScrapeSampleLimit.Inc() + sl.metrics.targetScrapeSampleLimit.Inc() } if bucketLimitErr != nil { if err == nil { err = bucketLimitErr // If sample limit is hit, that error takes precedence. } // We only want to increment this once per scrape, so this is Inc'd outside the loop. - targetScrapeNativeHistogramBucketLimit.Inc() + sl.metrics.targetScrapeNativeHistogramBucketLimit.Inc() } if appErrs.numOutOfOrder > 0 { level.Warn(sl.l).Log("msg", "Error on ingesting out-of-order samples", "num_dropped", appErrs.numOutOfOrder) @@ -1763,8 +1667,8 @@ loop: sl.cache.forEachStale(func(lset labels.Labels) bool { // Series no longer exposed, mark it stale. _, err = app.Append(0, lset, defTime, math.Float64frombits(value.StaleNaN)) - switch errors.Cause(err) { - case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp: + switch { + case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): // Do not count these in logging, as this is expected if a target // goes away and comes back again with a new scrape loop. err = nil @@ -1778,35 +1682,35 @@ loop: // Adds samples to the appender, checking the error, and then returns the # of samples added, // whether the caller should continue to process more samples, and any sample or bucket limit errors. 
func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err error, sampleLimitErr, bucketLimitErr *error, appErrs *appendErrors) (bool, error) { - switch errors.Cause(err) { - case nil: + switch { + case err == nil: if (tp == nil || sl.trackTimestampsStaleness) && ce != nil { sl.cache.trackStaleness(ce.hash, ce.lset) } return true, nil - case storage.ErrNotFound: + case errors.Is(err, storage.ErrNotFound): return false, storage.ErrNotFound - case storage.ErrOutOfOrderSample: + case errors.Is(err, storage.ErrOutOfOrderSample): appErrs.numOutOfOrder++ level.Debug(sl.l).Log("msg", "Out of order sample", "series", string(met)) - targetScrapeSampleOutOfOrder.Inc() + sl.metrics.targetScrapeSampleOutOfOrder.Inc() return false, nil - case storage.ErrDuplicateSampleForTimestamp: + case errors.Is(err, storage.ErrDuplicateSampleForTimestamp): appErrs.numDuplicates++ level.Debug(sl.l).Log("msg", "Duplicate sample for timestamp", "series", string(met)) - targetScrapeSampleDuplicate.Inc() + sl.metrics.targetScrapeSampleDuplicate.Inc() return false, nil - case storage.ErrOutOfBounds: + case errors.Is(err, storage.ErrOutOfBounds): appErrs.numOutOfBounds++ level.Debug(sl.l).Log("msg", "Out of bounds metric", "series", string(met)) - targetScrapeSampleOutOfBounds.Inc() + sl.metrics.targetScrapeSampleOutOfBounds.Inc() return false, nil - case errSampleLimit: + case errors.Is(err, errSampleLimit): // Keep on parsing output if we hit the limit, so we report the correct // total number of samples scraped. *sampleLimitErr = err return false, nil - case errBucketLimit: + case errors.Is(err, errBucketLimit): // Keep on parsing output if we hit the limit, so we report the correct // total number of samples scraped. *bucketLimitErr = err @@ -1816,20 +1720,6 @@ func (sl *scrapeLoop) checkAddError(ce *cacheEntry, met []byte, tp *int64, err e } } -func (sl *scrapeLoop) checkAddExemplarError(err error, e exemplar.Exemplar, appErrs *appendErrors) error { - switch errors.Cause(err) { - case storage.ErrNotFound: - return storage.ErrNotFound - case storage.ErrOutOfOrderExemplar: - appErrs.numExemplarOutOfOrder++ - level.Debug(sl.l).Log("msg", "Out of order exemplar", "exemplar", fmt.Sprintf("%+v", e)) - targetScrapeExemplarOutOfOrder.Inc() - return nil - default: - return err - } -} - // The constants are suffixed with the invalid \xff unicode rune to avoid collisions // with scraped metrics in the cache. var ( @@ -1932,13 +1822,13 @@ func (sl *scrapeLoop) addReportSample(app storage.Appender, s []byte, t int64, v } ref, err := app.Append(ref, lset, t, v) - switch errors.Cause(err) { - case nil: + switch { + case err == nil: if !ok { sl.cache.addRef(s, ref, lset, lset.Hash()) } return nil - case storage.ErrOutOfOrderSample, storage.ErrDuplicateSampleForTimestamp: + case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): // Do not log here, as this is expected if a target goes away and comes back // again with a new scrape loop. 
return nil diff --git a/vendor/github.com/prometheus/prometheus/scrape/target.go b/vendor/github.com/prometheus/prometheus/scrape/target.go index 8b745a9c4..62f662693 100644 --- a/vendor/github.com/prometheus/prometheus/scrape/target.go +++ b/vendor/github.com/prometheus/prometheus/scrape/target.go @@ -14,6 +14,7 @@ package scrape import ( + "errors" "fmt" "hash/fnv" "net" @@ -22,7 +23,6 @@ import ( "sync" "time" - "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/config" @@ -145,9 +145,7 @@ func (t *Target) SetMetadataStore(s MetricMetadataStore) { func (t *Target) hash() uint64 { h := fnv.New64a() - //nolint: errcheck h.Write([]byte(fmt.Sprintf("%016d", t.labels.Hash()))) - //nolint: errcheck h.Write([]byte(t.URL().String())) return h.Sum64() @@ -198,7 +196,7 @@ func (t *Target) DiscoveredLabels() labels.Labels { return t.discoveredLabels.Copy() } -// SetDiscoveredLabels sets new DiscoveredLabels +// SetDiscoveredLabels sets new DiscoveredLabels. func (t *Target) SetDiscoveredLabels(l labels.Labels) { t.mtx.Lock() defer t.mtx.Unlock() @@ -291,12 +289,12 @@ func (t *Target) intervalAndTimeout(defaultInterval, defaultDuration time.Durati intervalLabel := t.labels.Get(model.ScrapeIntervalLabel) interval, err := model.ParseDuration(intervalLabel) if err != nil { - return defaultInterval, defaultDuration, errors.Errorf("Error parsing interval label %q: %v", intervalLabel, err) + return defaultInterval, defaultDuration, fmt.Errorf("Error parsing interval label %q: %w", intervalLabel, err) } timeoutLabel := t.labels.Get(model.ScrapeTimeoutLabel) timeout, err := model.ParseDuration(timeoutLabel) if err != nil { - return defaultInterval, defaultDuration, errors.Errorf("Error parsing timeout label %q: %v", timeoutLabel, err) + return defaultInterval, defaultDuration, fmt.Errorf("Error parsing timeout label %q: %w", timeoutLabel, err) } return time.Duration(interval), time.Duration(timeout), nil @@ -368,13 +366,19 @@ type bucketLimitAppender struct { func (app *bucketLimitAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels, t int64, h *histogram.Histogram, fh *histogram.FloatHistogram) (storage.SeriesRef, error) { if h != nil { - if len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit { - return 0, errBucketLimit + for len(h.PositiveBuckets)+len(h.NegativeBuckets) > app.limit { + if h.Schema == -4 { + return 0, errBucketLimit + } + h = h.ReduceResolution(h.Schema - 1) } } if fh != nil { - if len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit { - return 0, errBucketLimit + for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > app.limit { + if fh.Schema == -4 { + return 0, errBucketLimit + } + fh = fh.ReduceResolution(fh.Schema - 1) } } ref, err := app.Appender.AppendHistogram(ref, lset, t, h, fh) @@ -446,7 +450,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort case "https": addr += ":443" default: - return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("invalid scheme: %q", cfg.Scheme) + return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("invalid scheme: %q", cfg.Scheme) } lb.Set(model.AddressLabel, addr) } @@ -473,7 +477,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort interval := lb.Get(model.ScrapeIntervalLabel) intervalDuration, err := model.ParseDuration(interval) if err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("error parsing scrape interval: %v", err) + return labels.EmptyLabels(), 
labels.EmptyLabels(), fmt.Errorf("error parsing scrape interval: %w", err) } if time.Duration(intervalDuration) == 0 { return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape interval cannot be 0") @@ -482,14 +486,14 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort timeout := lb.Get(model.ScrapeTimeoutLabel) timeoutDuration, err := model.ParseDuration(timeout) if err != nil { - return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("error parsing scrape timeout: %v", err) + return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("error parsing scrape timeout: %w", err) } if time.Duration(timeoutDuration) == 0 { return labels.EmptyLabels(), labels.EmptyLabels(), errors.New("scrape timeout cannot be 0") } if timeoutDuration > intervalDuration { - return labels.EmptyLabels(), labels.EmptyLabels(), errors.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval) + return labels.EmptyLabels(), labels.EmptyLabels(), fmt.Errorf("scrape timeout cannot be greater than scrape interval (%q > %q)", timeout, interval) } // Meta labels are deleted after relabelling. Other internal labels propagate to @@ -509,7 +513,7 @@ func PopulateLabels(lb *labels.Builder, cfg *config.ScrapeConfig, noDefaultPort err = res.Validate(func(l labels.Label) error { // Check label values are valid, drop the target if not. if !model.LabelValue(l.Value).IsValid() { - return errors.Errorf("invalid label value for %q: %q", l.Name, l.Value) + return fmt.Errorf("invalid label value for %q: %q", l.Name, l.Value) } return nil }) @@ -538,7 +542,7 @@ func TargetsFromGroup(tg *targetgroup.Group, cfg *config.ScrapeConfig, noDefault lset, origLabels, err := PopulateLabels(lb, cfg, noDefaultPort) if err != nil { - failures = append(failures, errors.Wrapf(err, "instance %d in group %s", i, tg)) + failures = append(failures, fmt.Errorf("instance %d in group %s: %w", i, tg, err)) } if !lset.IsEmpty() || !origLabels.IsEmpty() { targets = append(targets, NewTarget(lset, origLabels, cfg.Params)) diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index 79ca658eb..2b1b6a63e 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -37,16 +37,12 @@ var ( // ErrTooOldSample is when out of order support is enabled but the sample is outside the time window allowed. ErrTooOldSample = errors.New("too old sample") // ErrDuplicateSampleForTimestamp is when the sample has same timestamp but different value. 
- ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp") - ErrOutOfOrderExemplar = errors.New("out of order exemplar") - ErrDuplicateExemplar = errors.New("duplicate exemplar") - ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) - ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0") - ErrNativeHistogramsDisabled = fmt.Errorf("native histograms are disabled") - ErrHistogramCountNotBigEnough = errors.New("histogram's observation count should be at least the number of observations found in the buckets") - ErrHistogramNegativeBucketCount = errors.New("histogram has a bucket whose observation count is negative") - ErrHistogramSpanNegativeOffset = errors.New("histogram has a span whose offset is negative") - ErrHistogramSpansBucketsMismatch = errors.New("histogram spans specify different number of buckets than provided") + ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp") + ErrOutOfOrderExemplar = errors.New("out of order exemplar") + ErrDuplicateExemplar = errors.New("duplicate exemplar") + ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) + ErrExemplarsDisabled = fmt.Errorf("exemplar storage is disabled or max exemplars is less than or equal to 0") + ErrNativeHistogramsDisabled = fmt.Errorf("native histograms are disabled") ) // SeriesRef is a generic series reference. In prometheus it is either a @@ -327,7 +323,7 @@ func (s testSeriesSet) At() Series { return s.series } func (s testSeriesSet) Err() error { return nil } func (s testSeriesSet) Warnings() annotations.Annotations { return nil } -// TestSeriesSet returns a mock series set +// TestSeriesSet returns a mock series set. func TestSeriesSet(series Series) SeriesSet { return testSeriesSet{series: series} } diff --git a/vendor/github.com/prometheus/prometheus/storage/merge.go b/vendor/github.com/prometheus/prometheus/storage/merge.go index 1e29374e5..b4ebb440f 100644 --- a/vendor/github.com/prometheus/prometheus/storage/merge.go +++ b/vendor/github.com/prometheus/prometheus/storage/merge.go @@ -473,10 +473,10 @@ func ChainSampleIteratorFromSeries(it chunkenc.Iterator, series []Series) chunke return csi } -func ChainSampleIteratorFromMetas(it chunkenc.Iterator, chunks []chunks.Meta) chunkenc.Iterator { - csi := getChainSampleIterator(it, len(chunks)) - for i, c := range chunks { - csi.iterators[i] = c.Chunk.Iterator(csi.iterators[i]) +func ChainSampleIteratorFromIterables(it chunkenc.Iterator, iterables []chunkenc.Iterable) chunkenc.Iterator { + csi := getChainSampleIterator(it, len(iterables)) + for i, c := range iterables { + csi.iterators[i] = c.Iterator(csi.iterators[i]) } return csi } @@ -497,9 +497,14 @@ func (c *chainSampleIterator) Seek(t int64) chunkenc.ValueType { c.consecutive = false c.h = samplesIteratorHeap{} for _, iter := range c.iterators { - if iter.Seek(t) != chunkenc.ValNone { - heap.Push(&c.h, iter) + if iter.Seek(t) == chunkenc.ValNone { + if iter.Err() != nil { + // If any iterator is reporting an error, abort. + return chunkenc.ValNone + } + continue } + heap.Push(&c.h, iter) } if len(c.h) > 0 { c.curr = heap.Pop(&c.h).(chunkenc.Iterator) @@ -571,7 +576,13 @@ func (c *chainSampleIterator) Next() chunkenc.ValueType { // So, we don't call Next() on it here. 
c.curr = c.iterators[0] for _, iter := range c.iterators[1:] { - if iter.Next() != chunkenc.ValNone { + if iter.Next() == chunkenc.ValNone { + if iter.Err() != nil { + // If any iterator is reporting an error, abort. + // If c.iterators[0] is reporting an error, we'll handle that below. + return chunkenc.ValNone + } + } else { heap.Push(&c.h, iter) } } @@ -583,7 +594,19 @@ func (c *chainSampleIterator) Next() chunkenc.ValueType { for { currValueType = c.curr.Next() - if currValueType != chunkenc.ValNone { + + if currValueType == chunkenc.ValNone { + if c.curr.Err() != nil { + // Abort if we've hit an error. + return chunkenc.ValNone + } + + if len(c.h) == 0 { + // No iterator left to iterate. + c.curr = nil + return chunkenc.ValNone + } + } else { currT = c.curr.AtT() if currT == c.lastT { // Ignoring sample for the same timestamp. @@ -603,10 +626,6 @@ func (c *chainSampleIterator) Next() chunkenc.ValueType { } // Current iterator does not hold the smallest timestamp. heap.Push(&c.h, c.curr) - } else if len(c.h) == 0 { - // No iterator left to iterate. - c.curr = nil - return chunkenc.ValNone } c.curr = heap.Pop(&c.h).(chunkenc.Iterator) @@ -841,6 +860,9 @@ func (c *concatenatingChunkIterator) Next() bool { c.curr = c.iterators[c.idx].At() return true } + if c.iterators[c.idx].Err() != nil { + return false + } c.idx++ return c.Next() } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go b/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go index cb4587b02..20d48d008 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/azuread/azuread.go @@ -43,7 +43,7 @@ const ( IngestionPublicAudience = "https://monitor.azure.com//.default" ) -// ManagedIdentityConfig is used to store managed identity config values +// ManagedIdentityConfig is used to store managed identity config values. type ManagedIdentityConfig struct { // ClientID is the clientId of the managed identity that is being used to authenticate. ClientID string `yaml:"client_id,omitempty"` @@ -62,7 +62,7 @@ type OAuthConfig struct { } // AzureADConfig is used to store the config values. -type AzureADConfig struct { // nolint:revive +type AzureADConfig struct { //nolint:revive // exported. // ManagedIdentity is the managed identity that is being used to authenticate. ManagedIdentity *ManagedIdentityConfig `yaml:"managed_identity,omitempty"` @@ -235,7 +235,7 @@ func newManagedIdentityTokenCredential(clientOpts *azcore.ClientOptions, managed return azidentity.NewManagedIdentityCredential(opts) } -// newOAuthTokenCredential returns new OAuth token credential +// newOAuthTokenCredential returns new OAuth token credential. func newOAuthTokenCredential(clientOpts *azcore.ClientOptions, oAuthConfig *OAuthConfig) (azcore.TokenCredential, error) { opts := &azidentity.ClientSecretCredentialOptions{ClientOptions: *clientOpts} return azidentity.NewClientSecretCredential(oAuthConfig.TenantID, oAuthConfig.ClientID, oAuthConfig.ClientSecret, opts) @@ -326,7 +326,7 @@ func getAudience(cloud string) (string, error) { } } -// getCloudConfiguration returns the cloud Configuration which contains AAD endpoint for different clouds +// getCloudConfiguration returns the cloud Configuration which contains AAD endpoint for different clouds. 
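The merge.go changes above make chainSampleIterator abort as soon as any sub-iterator reports an error, instead of treating ValNone as plain exhaustion. A small sketch of the chunkenc iterator contract this relies on: Next() and Seek() return ValNone both at end of data and on failure, and only Err() tells the two apart.

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	c := chunkenc.NewXORChunk()
	app, err := c.Appender()
	if err != nil {
		panic(err)
	}
	app.Append(1000, 1.5) // (timestamp ms, value)
	app.Append(2000, 2.5)

	it := c.Iterator(nil)
	for it.Next() != chunkenc.ValNone {
		t, v := it.At()
		fmt.Println(t, v)
	}
	// ValNone alone is ambiguous; Err() distinguishes a clean end of
	// data from an aborted iteration.
	if err := it.Err(); err != nil {
		fmt.Println("iteration failed:", err)
	}
}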
func getCloudConfiguration(c string) (cloud.Configuration, error) { switch strings.ToLower(c) { case strings.ToLower(AzureChina): diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go index 4c190f2a4..67035cd8e 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/codec.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/codec.go @@ -475,7 +475,7 @@ func (c *concreteSeriesIterator) At() (t int64, v float64) { return s.Timestamp, s.Value } -// AtHistogram implements chunkenc.Iterator +// AtHistogram implements chunkenc.Iterator. func (c *concreteSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { if c.curValType != chunkenc.ValHistogram { panic("iterator is not on an integer histogram sample") @@ -484,7 +484,7 @@ func (c *concreteSeriesIterator) AtHistogram() (int64, *histogram.Histogram) { return h.Timestamp, HistogramProtoToHistogram(h) } -// AtFloatHistogram implements chunkenc.Iterator +// AtFloatHistogram implements chunkenc.Iterator. func (c *concreteSeriesIterator) AtFloatHistogram() (int64, *histogram.FloatHistogram) { switch c.curValType { case chunkenc.ValHistogram: @@ -547,7 +547,7 @@ func (c *concreteSeriesIterator) Err() error { } // validateLabelsAndMetricName validates the label names/values and metric names returned from remote read, -// also making sure that there are no labels with duplicate names +// also making sure that there are no labels with duplicate names. func validateLabelsAndMetricName(ls []prompb.Label) error { for i, l := range ls { if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) { @@ -752,7 +752,7 @@ func spansToSpansProto(s []histogram.Span) []prompb.BucketSpan { return spans } -// LabelProtosToMetric unpack a []*prompb.Label to a model.Metric +// LabelProtosToMetric unpack a []*prompb.Label to a model.Metric. 
func LabelProtosToMetric(labelPairs []*prompb.Label) model.Metric { metric := make(model.Metric, len(labelPairs)) for _, l := range labelPairs { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go index cfcb5652e..b44ba869f 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_label.go @@ -1,11 +1,20 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package normalize +package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" import ( "strings" "unicode" + + "go.opentelemetry.io/collector/featuregate" +) + +var dropSanitizationGate = featuregate.GlobalRegistry().MustRegister( + "pkg.translator.prometheus.PermissiveLabelSanitization", + featuregate.StageAlpha, + featuregate.WithRegisterDescription("Controls whether to change labels starting with '_' to 'key_'."), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8950"), ) // Normalizes the specified label to follow Prometheus label names standard @@ -27,6 +36,8 @@ func NormalizeLabel(label string) string { // If label starts with a number, prepend with "key_" if unicode.IsDigit(rune(label[0])) { label = "key_" + label + } else if strings.HasPrefix(label, "_") && !strings.HasPrefix(label, "__") && !dropSanitizationGate.IsEnabled() { + label = "key" + label } return label diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go index 0b62a28f2..72fc04cea 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/normalize_name.go @@ -1,12 +1,13 @@ // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 -package normalize +package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" import ( "strings" "unicode" + "go.opentelemetry.io/collector/featuregate" "go.opentelemetry.io/collector/pdata/pmetric" ) @@ -16,6 +17,7 @@ import ( // Prometheus best practices for units: https://prometheus.io/docs/practices/naming/#base-units // OpenMetrics specification for units: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#units-and-base-units var unitMap = map[string]string{ + // Time "d": "days", "h": "hours", @@ -35,11 +37,6 @@ var unitMap = map[string]string{ "MBy": "megabytes", "GBy": "gigabytes", "TBy": "terabytes", - "B": "bytes", - "KB": "kilobytes", - "MB": "megabytes", - "GB": "gigabytes", - "TB": "terabytes", // SI "m": "meters", @@ -54,7 +51,6 @@ var unitMap = map[string]string{ "Hz": "hertz", "1": "", "%": "percent", - "$": "dollars", } // The map that translates the "per" unit @@ -69,7 +65,14 @@ var perUnitMap = map[string]string{ "y": "year", } -// Build a Prometheus-compliant metric name for the specified metric +var normalizeNameGate = featuregate.GlobalRegistry().MustRegister( + "pkg.translator.prometheus.NormalizeName", + featuregate.StageBeta, + 
featuregate.WithRegisterDescription("Controls whether metrics names are automatically normalized to follow Prometheus naming convention"), + featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector-contrib/issues/8950"), +) + +// BuildCompliantName builds a Prometheus-compliant metric name for the specified metric // // Metric name is prefixed with specified namespace and underscore (if any). // Namespace is not cleaned up. Make sure specified namespace follows Prometheus @@ -77,7 +80,33 @@ var perUnitMap = map[string]string{ // // See rules at https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels // and https://prometheus.io/docs/practices/naming/#metric-and-label-naming -func BuildPromCompliantName(metric pmetric.Metric, namespace string) string { +func BuildCompliantName(metric pmetric.Metric, namespace string, addMetricSuffixes bool) string { + var metricName string + + // Full normalization following standard Prometheus naming conventions + if addMetricSuffixes && normalizeNameGate.IsEnabled() { + return normalizeName(metric, namespace) + } + + // Simple case (no full normalization, no units, etc.), we simply trim out forbidden chars + metricName = RemovePromForbiddenRunes(metric.Name()) + + // Namespace? + if namespace != "" { + return namespace + "_" + metricName + } + + // Metric name starts with a digit? Prefix it with an underscore + if metricName != "" && unicode.IsDigit(rune(metricName[0])) { + metricName = "_" + metricName + } + + return metricName +} + +// Build a normalized name for the specified metric +func normalizeName(metric pmetric.Metric, namespace string) string { + // Split metric name in "tokens" (remove all non-alphanumeric) nameTokens := strings.FieldsFunc( metric.Name(), diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/unit_to_ucum.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/unit_to_ucum.go new file mode 100644 index 000000000..b2f2c4f3a --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus/unit_to_ucum.go @@ -0,0 +1,90 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package prometheus // import "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus" + +import "strings" + +var wordToUCUM = map[string]string{ + + // Time + "days": "d", + "hours": "h", + "minutes": "min", + "seconds": "s", + "milliseconds": "ms", + "microseconds": "us", + "nanoseconds": "ns", + + // Bytes + "bytes": "By", + "kibibytes": "KiBy", + "mebibytes": "MiBy", + "gibibytes": "GiBy", + "tibibytes": "TiBy", + "kilobytes": "KBy", + "megabytes": "MBy", + "gigabytes": "GBy", + "terabytes": "TBy", + + // SI + "meters": "m", + "volts": "V", + "amperes": "A", + "joules": "J", + "watts": "W", + "grams": "g", + + // Misc + "celsius": "Cel", + "hertz": "Hz", + "ratio": "1", + "percent": "%", +} + +// The map that translates the "per" unit +// Example: per_second (singular) => /s +var perWordToUCUM = map[string]string{ + "second": "s", + "minute": "m", + "hour": "h", + "day": "d", + "week": "w", + "month": "mo", + "year": "y", +} + +// UnitWordToUCUM converts english unit words to UCUM units: +// https://ucum.org/ucum#section-Alphabetic-Index-By-Symbol +// It also handles rates, such as meters_per_second, by translating the first +// word to UCUM, and the "per" word to UCUM. It joins them with a "/" between. 
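BuildCompliantName above replaces BuildPromCompliantName and gates full normalization behind both the addMetricSuffixes argument and the pkg.translator.prometheus.NormalizeName feature gate (beta, so enabled by default). A usage sketch against the renamed translator package; the exact output strings depend on the gate state and are deliberately not asserted here:

package main

import (
	"fmt"

	prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

func main() {
	m := pmetric.NewMetric()
	m.SetName("system.filesystem.usage")
	m.SetUnit("By")
	m.SetEmptyGauge()

	// Without suffixes only forbidden runes are replaced; with suffixes
	// (and the NormalizeName gate on) the unit is appended as well.
	fmt.Println(prometheustranslator.BuildCompliantName(m, "", false))
	fmt.Println(prometheustranslator.BuildCompliantName(m, "myapp", true))

	// Label sanitization from normalize_label.go: names starting with a
	// digit (or a single leading underscore) get a "key" prefix.
	fmt.Println(prometheustranslator.NormalizeLabel("0count"))
}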
+func UnitWordToUCUM(unit string) string { + unitTokens := strings.SplitN(unit, "_per_", 2) + if len(unitTokens) == 0 { + return "" + } + ucumUnit := wordToUCUMOrDefault(unitTokens[0]) + if len(unitTokens) > 1 && unitTokens[1] != "" { + ucumUnit += "/" + perWordToUCUMOrDefault(unitTokens[1]) + } + return ucumUnit +} + +// wordToUCUMOrDefault retrieves the Prometheus "basic" unit corresponding to +// the specified "basic" unit. Returns the specified unit if not found in +// wordToUCUM. +func wordToUCUMOrDefault(unit string) string { + if promUnit, ok := wordToUCUM[unit]; ok { + return promUnit + } + return unit +} + +// perWordToUCUMOrDefault retrieve the Prometheus "per" unit corresponding to +// the specified "per" unit. Returns the specified unit if not found in perWordToUCUM. +func perWordToUCUMOrDefault(perUnit string) string { + if promPerUnit, ok := perWordToUCUM[perUnit]; ok { + return promPerUnit + } + return perUnit +} diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go index 6080686e7..49ad5672b 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/helper.go @@ -71,8 +71,8 @@ func (a ByLabelName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } // creates a new TimeSeries in the map if not found and returns the time series signature. // tsMap will be unmodified if either labels or sample is nil, but can still be modified if the exemplar is nil. func addSample(tsMap map[string]*prompb.TimeSeries, sample *prompb.Sample, labels []prompb.Label, - datatype string, -) string { + datatype string) string { + if sample == nil || labels == nil || tsMap == nil { return "" } @@ -132,7 +132,14 @@ func addExemplar(tsMap map[string]*prompb.TimeSeries, bucketBounds []bucketBound // the label slice should not contain duplicate label names; this method sorts the slice by label name before creating // the signature. func timeSeriesSignature(datatype string, labels *[]prompb.Label) string { + length := len(datatype) + + for _, lb := range *labels { + length += 2 + len(lb.GetName()) + len(lb.GetValue()) + } + b := strings.Builder{} + b.Grow(length) b.WriteString(datatype) sort.Sort(ByLabelName(*labels)) @@ -151,8 +158,22 @@ func timeSeriesSignature(datatype string, labels *[]prompb.Label) string { // Unpaired string value is ignored. String pairs overwrites OTLP labels if collision happens, and the overwrite is // logged. Resultant label names are sanitized. func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externalLabels map[string]string, extras ...string) []prompb.Label { + serviceName, haveServiceName := resource.Attributes().Get(conventions.AttributeServiceName) + instance, haveInstanceID := resource.Attributes().Get(conventions.AttributeServiceInstanceID) + + // Calculate the maximum possible number of labels we could return so we can preallocate l + maxLabelCount := attributes.Len() + len(externalLabels) + len(extras)/2 + + if haveServiceName { + maxLabelCount++ + } + + if haveInstanceID { + maxLabelCount++ + } + // map ensures no duplicate label name - l := map[string]prompb.Label{} + l := make(map[string]string, maxLabelCount) // Ensure attributes are sorted by key for consistent merging of keys which // collide when sanitized. 
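The new unit_to_ucum.go above is the inverse of the unitMap/perUnitMap tables: it folds English unit words back to UCUM, splitting on "_per_" for rates and passing unknown words through unchanged. For example:

package main

import (
	"fmt"

	prometheustranslator "github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheus"
)

func main() {
	fmt.Println(prometheustranslator.UnitWordToUCUM("seconds"))           // s
	fmt.Println(prometheustranslator.UnitWordToUCUM("meters_per_second")) // m/s
	fmt.Println(prometheustranslator.UnitWordToUCUM("widgets"))           // widgets (unknown, passed through)
}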
@@ -164,35 +185,25 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa sort.Stable(ByLabelName(labels)) for _, label := range labels { - finalKey := prometheustranslator.NormalizeLabel(label.Name) + var finalKey = prometheustranslator.NormalizeLabel(label.Name) if existingLabel, alreadyExists := l[finalKey]; alreadyExists { - existingLabel.Value = existingLabel.Value + ";" + label.Value - l[finalKey] = existingLabel + l[finalKey] = existingLabel + ";" + label.Value } else { - l[finalKey] = prompb.Label{ - Name: finalKey, - Value: label.Value, - } + l[finalKey] = label.Value } } // Map service.name + service.namespace to job - if serviceName, ok := resource.Attributes().Get(conventions.AttributeServiceName); ok { + if haveServiceName { val := serviceName.AsString() if serviceNamespace, ok := resource.Attributes().Get(conventions.AttributeServiceNamespace); ok { val = fmt.Sprintf("%s/%s", serviceNamespace.AsString(), val) } - l[model.JobLabel] = prompb.Label{ - Name: model.JobLabel, - Value: val, - } + l[model.JobLabel] = val } // Map service.instance.id to instance - if instance, ok := resource.Attributes().Get(conventions.AttributeServiceInstanceID); ok { - l[model.InstanceLabel] = prompb.Label{ - Name: model.InstanceLabel, - Value: instance.AsString(), - } + if haveInstanceID { + l[model.InstanceLabel] = instance.AsString() } for key, value := range externalLabels { // External labels have already been sanitized @@ -200,10 +211,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa // Skip external labels if they are overridden by metric attributes continue } - l[key] = prompb.Label{ - Name: key, - Value: value, - } + l[key] = value } for i := 0; i < len(extras); i += 2 { @@ -219,15 +227,12 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa if !(len(name) > 4 && name[:2] == "__" && name[len(name)-2:] == "__") { name = prometheustranslator.NormalizeLabel(name) } - l[name] = prompb.Label{ - Name: name, - Value: extras[i+1], - } + l[name] = extras[i+1] } s := make([]prompb.Label, 0, len(l)) - for _, lb := range l { - s = append(s, lb) + for k, v := range l { + s = append(s, prompb.Label{Name: k, Value: v}) } return s @@ -236,6 +241,7 @@ func createAttributes(resource pcommon.Resource, attributes pcommon.Map, externa // isValidAggregationTemporality checks whether an OTel metric has a valid // aggregation temporality for conversion to a Prometheus metric. 
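The createAttributes refactor above stores label values in a preallocated map[string]string keyed by the sanitized name, joining colliding values with ";". A toy mirror of that collision handling (not the vendored function, which sorts the attributes first and uses NormalizeLabel):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Input is assumed pre-sorted by label name, as in the real code.
	attrs := [][2]string{{"http.method", "GET"}, {"http_method", "get"}}
	sanitize := func(s string) string { // crude stand-in for NormalizeLabel
		return strings.ReplaceAll(s, ".", "_")
	}
	l := make(map[string]string, len(attrs))
	for _, kv := range attrs {
		key := sanitize(kv[0])
		if existing, ok := l[key]; ok {
			// Two attribute names sanitized to the same key: merge values.
			l[key] = existing + ";" + kv[1]
		} else {
			l[key] = kv[1]
		}
	}
	fmt.Println(l) // map[http_method:GET;get]
}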
func isValidAggregationTemporality(metric pmetric.Metric) bool { + //exhaustive:enforce switch metric.Type() { case pmetric.MetricTypeGauge, pmetric.MetricTypeSummary: return true @@ -254,7 +260,22 @@ func isValidAggregationTemporality(metric pmetric.Metric) bool { func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, tsMap map[string]*prompb.TimeSeries) { timestamp := convertTimeStamp(pt.Timestamp()) // sum, count, and buckets of the histogram should append suffix to baseName - baseName := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace) + baseName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes) + baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels) + + createLabels := func(nameSuffix string, extras ...string) []prompb.Label { + extraLabelCount := len(extras) / 2 + labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name + copy(labels, baseLabels) + + for extrasIdx := 0; extrasIdx < extraLabelCount; extrasIdx++ { + labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]}) + } + + labels = append(labels, prompb.Label{Name: nameStr, Value: baseName + nameSuffix}) + + return labels + } // If the sum is unset, it indicates the _sum metric point should be // omitted @@ -268,7 +289,7 @@ func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon sum.Value = math.Float64frombits(value.StaleNaN) } - sumlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+sumStr) + sumlabels := createLabels(sumStr) addSample(tsMap, sum, sumlabels, metric.Type().String()) } @@ -282,7 +303,7 @@ func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon count.Value = math.Float64frombits(value.StaleNaN) } - countlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+countStr) + countlabels := createLabels(countStr) addSample(tsMap, count, countlabels, metric.Type().String()) // cumulative count for conversion to cumulative histogram @@ -304,7 +325,7 @@ func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon bucket.Value = math.Float64frombits(value.StaleNaN) } boundStr := strconv.FormatFloat(bound, 'f', -1, 64) - labels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+bucketStr, leStr, boundStr) + labels := createLabels(bucketStr, leStr, boundStr) sig := addSample(tsMap, bucket, labels, metric.Type().String()) bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: bound}) @@ -318,7 +339,7 @@ func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon } else { infBucket.Value = float64(pt.Count()) } - infLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+bucketStr, leStr, pInfStr) + infLabels := createLabels(bucketStr, leStr, pInfStr) sig := addSample(tsMap, infBucket, infLabels, metric.Type().String()) bucketBounds = append(bucketBounds, bucketBoundsData{sig: sig, bound: math.Inf(1)}) @@ -327,14 +348,8 @@ func addSingleHistogramDataPoint(pt pmetric.HistogramDataPoint, resource pcommon // add _created time series if needed startTimestamp := pt.StartTimestamp() if settings.ExportCreatedMetric && startTimestamp != 0 { - createdLabels := createAttributes( - resource, - pt.Attributes(), - 
settings.ExternalLabels, - nameStr, - baseName+createdSuffix, - ) - addCreatedTimeSeriesIfNeeded(tsMap, createdLabels, startTimestamp, metric.Type().String()) + labels := createLabels(createdSuffix) + addCreatedTimeSeriesIfNeeded(tsMap, labels, startTimestamp, metric.Type().String()) } } @@ -402,6 +417,7 @@ func getPromExemplars[T exemplarType](pt T) []prompb.Exemplar { func mostRecentTimestampInMetric(metric pmetric.Metric) pcommon.Timestamp { var ts pcommon.Timestamp // handle individual metric based on type + //exhaustive:enforce switch metric.Type() { case pmetric.MetricTypeGauge: dataPoints := metric.Gauge().DataPoints() @@ -441,11 +457,26 @@ func maxTimestamp(a, b pcommon.Timestamp) pcommon.Timestamp { // addSingleSummaryDataPoint converts pt to len(QuantileValues) + 2 samples. func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Resource, metric pmetric.Metric, settings Settings, - tsMap map[string]*prompb.TimeSeries, -) { + tsMap map[string]*prompb.TimeSeries) { timestamp := convertTimeStamp(pt.Timestamp()) // sum and count of the summary should append suffix to baseName - baseName := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace) + baseName := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes) + baseLabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels) + + createLabels := func(name string, extras ...string) []prompb.Label { + extraLabelCount := len(extras) / 2 + labels := make([]prompb.Label, len(baseLabels), len(baseLabels)+extraLabelCount+1) // +1 for name + copy(labels, baseLabels) + + for extrasIdx := 0; extrasIdx < extraLabelCount; extrasIdx++ { + labels = append(labels, prompb.Label{Name: extras[extrasIdx], Value: extras[extrasIdx+1]}) + } + + labels = append(labels, prompb.Label{Name: nameStr, Value: name}) + + return labels + } + // treat sum as a sample in an individual TimeSeries sum := &prompb.Sample{ Value: pt.Sum(), @@ -454,7 +485,7 @@ func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Res if pt.Flags().NoRecordedValue() { sum.Value = math.Float64frombits(value.StaleNaN) } - sumlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+sumStr) + sumlabels := createLabels(baseName + sumStr) addSample(tsMap, sum, sumlabels, metric.Type().String()) // treat count as a sample in an individual TimeSeries @@ -465,7 +496,7 @@ func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Res if pt.Flags().NoRecordedValue() { count.Value = math.Float64frombits(value.StaleNaN) } - countlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName+countStr) + countlabels := createLabels(baseName + countStr) addSample(tsMap, count, countlabels, metric.Type().String()) // process each percentile/quantile @@ -479,20 +510,14 @@ func addSingleSummaryDataPoint(pt pmetric.SummaryDataPoint, resource pcommon.Res quantile.Value = math.Float64frombits(value.StaleNaN) } percentileStr := strconv.FormatFloat(qt.Quantile(), 'f', -1, 64) - qtlabels := createAttributes(resource, pt.Attributes(), settings.ExternalLabels, nameStr, baseName, quantileStr, percentileStr) + qtlabels := createLabels(baseName, quantileStr, percentileStr) addSample(tsMap, quantile, qtlabels, metric.Type().String()) } // add _created time series if needed startTimestamp := pt.StartTimestamp() if settings.ExportCreatedMetric && startTimestamp != 0 { - createdLabels := createAttributes( - 
resource, - pt.Attributes(), - settings.ExternalLabels, - nameStr, - baseName+createdSuffix, - ) + createdLabels := createLabels(baseName + createdSuffix) addCreatedTimeSeriesIfNeeded(tsMap, createdLabels, startTimestamp, metric.Type().String()) } } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go index 9a4ec6e11..3c7494a6b 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/histograms.go @@ -60,15 +60,20 @@ func addSingleExponentialHistogramDataPoint( // to Prometheus Native Histogram. func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prompb.Histogram, error) { scale := p.Scale() - if scale < -4 || scale > 8 { + if scale < -4 { return prompb.Histogram{}, fmt.Errorf("cannot convert exponential to native histogram."+ - " Scale must be <= 8 and >= -4, was %d", scale) - // TODO: downscale to 8 if scale > 8 + " Scale must be >= -4, was %d", scale) } - pSpans, pDeltas := convertBucketsLayout(p.Positive()) - nSpans, nDeltas := convertBucketsLayout(p.Negative()) + var scaleDown int32 + if scale > 8 { + scaleDown = scale - 8 + scale = 8 + } + + pSpans, pDeltas := convertBucketsLayout(p.Positive(), scaleDown) + nSpans, nDeltas := convertBucketsLayout(p.Negative(), scaleDown) h := prompb.Histogram{ Schema: scale, @@ -106,17 +111,19 @@ func exponentialToNativeHistogram(p pmetric.ExponentialHistogramDataPoint) (prom // The bucket indexes conversion was adjusted, since OTel exp. histogram bucket // index 0 corresponds to the range (1, base] while Prometheus bucket index 0 // to the range (base 1]. -func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets) ([]prompb.BucketSpan, []int64) { +// +// scaleDown is the factor by which the buckets are scaled down. In other words 2^scaleDown buckets will be merged into one. +func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets, scaleDown int32) ([]prompb.BucketSpan, []int64) { bucketCounts := buckets.BucketCounts() if bucketCounts.Len() == 0 { return nil, nil } var ( - spans []prompb.BucketSpan - deltas []int64 - prevCount int64 - nextBucketIdx int32 + spans []prompb.BucketSpan + deltas []int64 + count int64 + prevCount int64 ) appendDelta := func(count int64) { @@ -125,34 +132,67 @@ func convertBucketsLayout(buckets pmetric.ExponentialHistogramDataPointBuckets) prevCount = count } - for i := 0; i < bucketCounts.Len(); i++ { - count := int64(bucketCounts.At(i)) + // Let the compiler figure out that this is const during this function by + // moving it into a local variable. + numBuckets := bucketCounts.Len() + + // The offset is scaled and adjusted by 1 as described above. + bucketIdx := buckets.Offset()>>scaleDown + 1 + spans = append(spans, prompb.BucketSpan{ + Offset: bucketIdx, + Length: 0, + }) + + for i := 0; i < numBuckets; i++ { + // The offset is scaled and adjusted by 1 as described above. + nextBucketIdx := (int32(i)+buckets.Offset())>>scaleDown + 1 + if bucketIdx == nextBucketIdx { // We have not collected enough buckets to merge yet. + count += int64(bucketCounts.At(i)) + continue + } if count == 0 { + count = int64(bucketCounts.At(i)) continue } - // The offset is adjusted by 1 as described above. 
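The scale clamp above replaces the hard "scale must be <= 8" error: exponential histograms with an OTel scale above Prometheus' maximum schema of 8 are now downscaled by scaleDown = scale - 8, merging 2^scaleDown adjacent buckets. A toy sketch of the index arithmetic only (Go's >> on signed ints is arithmetic, i.e. it floors, which is what the merge needs for negative offsets):

package main

import "fmt"

func main() {
	// scale 10 -> schema 8 means scaleDown = 2: each group of 2^2 = 4
	// neighbouring source buckets collapses into one target bucket at
	// index idx >> scaleDown.
	const scaleDown = 2
	src := map[int32]int64{-3: 1, 0: 2, 1: 4, 2: 8, 5: 16}
	dst := map[int32]int64{}
	for idx, count := range src {
		dst[idx>>scaleDown] += count
	}
	fmt.Println(dst) // bucket -3 lands in -1, buckets 0..2 in 0, bucket 5 in 1
}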
- bucketIdx := int32(i) + buckets.Offset() + 1 - delta := bucketIdx - nextBucketIdx - if i == 0 || delta > 2 { - // We have to create a new span, either because we are - // at the very beginning, or because we have found a gap + gap := nextBucketIdx - bucketIdx - 1 + if gap > 2 { + // We have to create a new span, because we have found a gap // of more than two buckets. The constant 2 is copied from the logic in // https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296 spans = append(spans, prompb.BucketSpan{ - Offset: delta, + Offset: gap, Length: 0, }) } else { // We have found a small gap (or no gap at all). // Insert empty buckets as needed. - for j := int32(0); j < delta; j++ { + for j := int32(0); j < gap; j++ { appendDelta(0) } } appendDelta(count) - nextBucketIdx = bucketIdx + 1 + count = int64(bucketCounts.At(i)) + bucketIdx = nextBucketIdx } + // Need to use the last item's index. The offset is scaled and adjusted by 1 as described above. + gap := (int32(numBuckets)+buckets.Offset()-1)>>scaleDown + 1 - bucketIdx + if gap > 2 { + // We have to create a new span, because we have found a gap + // of more than two buckets. The constant 2 is copied from the logic in + // https://github.com/prometheus/client_golang/blob/27f0506d6ebbb117b6b697d0552ee5be2502c5f2/prometheus/histogram.go#L1296 + spans = append(spans, prompb.BucketSpan{ + Offset: gap, + Length: 0, + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < gap; j++ { + appendDelta(0) + } + } + appendDelta(count) return spans, deltas } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go index 34ee762dd..6a5a65604 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/metrics_to_prw.go @@ -22,6 +22,7 @@ type Settings struct { ExternalLabels map[string]string DisableTargetInfo bool ExportCreatedMetric bool + AddMetricSuffixes bool } // FromMetrics converts pmetric.Metrics to prometheus remote write format. @@ -51,6 +52,7 @@ func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*promp } // handle individual metric based on type + //exhaustive:enforce switch metric.Type() { case pmetric.MetricTypeGauge: dataPoints := metric.Gauge().DataPoints() @@ -81,7 +83,7 @@ func FromMetrics(md pmetric.Metrics, settings Settings) (tsMap map[string]*promp if dataPoints.Len() == 0 { errs = multierr.Append(errs, fmt.Errorf("empty data points. 
%s is dropped", metric.Name())) } - name := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace) + name := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes) for x := 0; x < dataPoints.Len(); x++ { errs = multierr.Append( errs, diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go index 3a5d201dd..c8e59694b 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/otlptranslator/prometheusremotewrite/number_data_points.go @@ -27,7 +27,7 @@ func addSingleGaugeNumberDataPoint( settings Settings, series map[string]*prompb.TimeSeries, ) { - name := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace) + name := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes) labels := createAttributes( resource, pt.Attributes(), @@ -60,7 +60,7 @@ func addSingleSumNumberDataPoint( settings Settings, series map[string]*prompb.TimeSeries, ) { - name := prometheustranslator.BuildPromCompliantName(metric, settings.Namespace) + name := prometheustranslator.BuildCompliantName(metric, settings.Namespace, settings.AddMetricSuffixes) labels := createAttributes( resource, pt.Attributes(), diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go index e2702c9f7..3a99e3360 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/read_handler.go @@ -15,6 +15,7 @@ package remote import ( "context" + "errors" "net/http" "strings" "sync" @@ -169,7 +170,8 @@ func (h *readHandler) remoteReadSamples( } return nil }(); err != nil { - if httpErr, ok := err.(HTTPError); ok { + var httpErr HTTPError + if errors.As(err, &httpErr) { http.Error(w, httpErr.Error(), httpErr.Status()) return } @@ -241,7 +243,8 @@ func (h *readHandler) remoteReadStreamedXORChunks(ctx context.Context, w http.Re } return nil }(); err != nil { - if httpErr, ok := err.(HTTPError); ok { + var httpErr HTTPError + if errors.As(err, &httpErr) { http.Error(w, httpErr.Error(), httpErr.Status()) return } diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go index b6533f927..758ba3cc9 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/storage.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/storage.go @@ -77,10 +77,7 @@ func NewStorage(l log.Logger, reg prometheus.Registerer, stCallback startTimeCal } func (s *Storage) Notify() { - for _, q := range s.rws.queues { - // These should all be non blocking - q.watcher.Notify() - } + s.rws.Notify() } // ApplyConfig updates the state as the new config requires. 
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write.go b/vendor/github.com/prometheus/prometheus/storage/remote/write.go index 4b0a24901..237f8caa9 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write.go @@ -121,6 +121,16 @@ func (rws *WriteStorage) run() { } } +func (rws *WriteStorage) Notify() { + rws.mtx.Lock() + defer rws.mtx.Unlock() + + for _, q := range rws.queues { + // These should all be non blocking + q.watcher.Notify() + } +} + // ApplyConfig updates the state as the new config requires. // Only stop & create queues which have changes. func (rws *WriteStorage) ApplyConfig(conf *config.Config) error { diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go index 6c0cd8a29..9891c6aae 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/write_handler.go @@ -66,9 +66,9 @@ func (h *writeHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } err = h.write(r.Context(), req) - switch err { - case nil: - case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp: + switch { + case err == nil: + case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): // Indicated an out of order sample is a bad request to prevent retries. http.Error(w, err.Error(), http.StatusBadRequest) return @@ -207,7 +207,9 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - prwMetricsMap, errs := otlptranslator.FromMetrics(req.Metrics(), otlptranslator.Settings{}) + prwMetricsMap, errs := otlptranslator.FromMetrics(req.Metrics(), otlptranslator.Settings{ + AddMetricSuffixes: true, + }) if errs != nil { level.Warn(h.logger).Log("msg", "Error translating OTLP metrics to Prometheus write request", "err", errs) } @@ -222,9 +224,9 @@ func (h *otlpWriteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { Timeseries: prwMetrics, }) - switch err { - case nil: - case storage.ErrOutOfOrderSample, storage.ErrOutOfBounds, storage.ErrDuplicateSampleForTimestamp: + switch { + case err == nil: + case errors.Is(err, storage.ErrOutOfOrderSample), errors.Is(err, storage.ErrOutOfBounds), errors.Is(err, storage.ErrDuplicateSampleForTimestamp): // Indicated an out of order sample is a bad request to prevent retries. http.Error(w, err.Error(), http.StatusBadRequest) return diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index 13a389970..a586536b1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -17,6 +17,7 @@ package tsdb import ( "context" "encoding/json" + "fmt" "io" "os" "path/filepath" @@ -116,8 +117,19 @@ type ChunkWriter interface { // ChunkReader provides reading access of serialized time series data. type ChunkReader interface { - // Chunk returns the series data chunk with the given reference. - Chunk(meta chunks.Meta) (chunkenc.Chunk, error) + // ChunkOrIterable returns the series data for the given chunks.Meta. + // Either a single chunk will be returned, or an iterable. 
+ // A single chunk should be returned if chunks.Meta maps to a chunk that + // already exists and doesn't need modifications. + // An iterable should be returned if chunks.Meta maps to a subset of the + // samples in a stored chunk, or multiple chunks. (E.g. OOOHeadChunkReader + // could return an iterable where multiple histogram samples have counter + // resets. There can only be one counter reset per histogram chunk so + // multiple chunks would be created from the iterable in this case.) + // Only one of chunk or iterable should be returned. In some cases you may + // always expect a chunk to be returned. You can check that iterable is nil + // in those cases. + ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) // Close releases all underlying resources of the reader. Close() error @@ -238,7 +250,7 @@ func readMetaFile(dir string) (*BlockMeta, int64, error) { return nil, 0, err } if m.Version != metaVersion1 { - return nil, 0, errors.Errorf("unexpected meta file version %d", m.Version) + return nil, 0, fmt.Errorf("unexpected meta file version %d", m.Version) } return &m, int64(len(b)), nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go index e7ff5b165..0126f1fbd 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go @@ -14,11 +14,10 @@ package chunkenc import ( + "fmt" "math" "sync" - "github.com/pkg/errors" - "github.com/prometheus/prometheus/model/histogram" ) @@ -68,6 +67,8 @@ const ( // Chunk holds a sequence of sample pairs that can be iterated over and appended to. type Chunk interface { + Iterable + // Bytes returns the underlying byte slice of the chunk. Bytes() []byte @@ -77,11 +78,6 @@ type Chunk interface { // Appender returns an appender to append samples to the chunk. Appender() (Appender, error) - // The iterator passed as argument is for re-use. - // Depending on implementation, the iterator can - // be re-used or a new iterator can be allocated. - Iterator(Iterator) Iterator - // NumSamples returns the number of samples in the chunk. NumSamples() int @@ -93,6 +89,13 @@ type Chunk interface { Compact() } +type Iterable interface { + // The iterator passed as argument is for re-use. + // Depending on implementation, the iterator can + // be re-used or a new iterator can be allocated. + Iterator(Iterator) Iterator +} + // Appender adds sample pairs to a chunk. type Appender interface { Append(int64, float64) @@ -185,6 +188,19 @@ func (v ValueType) ChunkEncoding() Encoding { } } +func (v ValueType) NewChunk() (Chunk, error) { + switch v { + case ValFloat: + return NewXORChunk(), nil + case ValHistogram: + return NewHistogramChunk(), nil + case ValFloatHistogram: + return NewFloatHistogramChunk(), nil + default: + return nil, fmt.Errorf("value type %v unsupported", v) + } +} + // MockSeriesIterator returns an iterator for a mock series with custom timeStamps and values. 
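The ChunkReader change above turns Chunk(meta) into ChunkOrIterable(meta), which returns exactly one of a chunk or an iterable. A sketch of the consumer side, assuming only the contract stated in the comment (one of the two return values is always nil):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

// iterate handles both legs of the new contract: a ready-made chunk, or
// an iterable that may span what used to be several chunks.
func iterate(chk chunkenc.Chunk, iterable chunkenc.Iterable, err error) error {
	if err != nil {
		return err
	}
	var it chunkenc.Iterator
	if iterable != nil {
		it = iterable.Iterator(nil)
	} else {
		it = chk.Iterator(nil)
	}
	for it.Next() == chunkenc.ValFloat { // float samples only in this sketch
		t, v := it.At()
		fmt.Println(t, v)
	}
	return it.Err()
}

func main() {
	c := chunkenc.NewXORChunk()
	app, _ := c.Appender()
	app.Append(1000, 42)
	if err := iterate(c, nil, nil); err != nil {
		panic(err)
	}
}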
func MockSeriesIterator(timestamps []int64, values []float64) Iterator { return &mockSeriesIterator{ @@ -293,7 +309,7 @@ func (p *pool) Get(e Encoding, b []byte) (Chunk, error) { c.b.count = 0 return c, nil } - return nil, errors.Errorf("invalid chunk encoding %q", e) + return nil, fmt.Errorf("invalid chunk encoding %q", e) } func (p *pool) Put(c Chunk) error { @@ -332,7 +348,7 @@ func (p *pool) Put(c Chunk) error { sh.b.count = 0 p.floatHistogram.Put(c) default: - return errors.Errorf("invalid chunk encoding %q", c.Encoding()) + return fmt.Errorf("invalid chunk encoding %q", c.Encoding()) } return nil } @@ -349,7 +365,7 @@ func FromData(e Encoding, d []byte) (Chunk, error) { case EncFloatHistogram: return &FloatHistogramChunk{b: bstream{count: 0, stream: d}}, nil } - return nil, errors.Errorf("invalid chunk encoding %q", e) + return nil, fmt.Errorf("invalid chunk encoding %q", e) } // NewEmptyChunk returns an empty chunk for the given encoding. @@ -362,5 +378,5 @@ func NewEmptyChunk(e Encoding) (Chunk, error) { case EncFloatHistogram: return NewFloatHistogramChunk(), nil } - return nil, errors.Errorf("invalid chunk encoding %q", e) + return nil, fmt.Errorf("invalid chunk encoding %q", e) } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go index 505d11245..3d76cdf65 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/float_histogram.go @@ -103,7 +103,7 @@ func (c *FloatHistogramChunk) Appender() (Appender, error) { // To get an appender, we must know the state it would have if we had // appended all existing data from scratch. We iterate through the end // and populate via the iterator's state. - for it.Next() == ValFloatHistogram { // nolint:revive + for it.Next() == ValFloatHistogram { } if err := it.Err(); err != nil { return nil, err @@ -605,10 +605,10 @@ func (a *FloatHistogramAppender) AppendFloatHistogram(prev *FloatHistogramAppend pForwardInserts, nForwardInserts, okToAppend, counterReset := a.appendable(h) if !okToAppend || counterReset { if appendOnly { - if !okToAppend { - return nil, false, a, fmt.Errorf("float histogram schema change") + if counterReset { + return nil, false, a, fmt.Errorf("float histogram counter reset") } - return nil, false, a, fmt.Errorf("float histogram counter reset") + return nil, false, a, fmt.Errorf("float histogram schema change") } newChunk := NewFloatHistogramChunk() app, err := newChunk.Appender() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go index 847d89376..ac84e7a1e 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go @@ -114,7 +114,7 @@ func (c *HistogramChunk) Appender() (Appender, error) { // To get an appender, we must know the state it would have if we had // appended all existing data from scratch. We iterate through the end // and populate via the iterator's state. 
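The new ValueType.NewChunk helper added to chunk.go above centralizes the value-type-to-encoding switch that call sites previously hand-rolled. A short usage sketch:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
)

func main() {
	for _, vt := range []chunkenc.ValueType{
		chunkenc.ValFloat, chunkenc.ValHistogram, chunkenc.ValFloatHistogram,
	} {
		c, err := vt.NewChunk()
		if err != nil {
			panic(err) // only unsupported types (e.g. ValNone) error out
		}
		fmt.Printf("%v -> encoding %v\n", vt, c.Encoding())
	}
}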
- for it.Next() == ValHistogram { // nolint:revive + for it.Next() == ValHistogram { } if err := it.Err(); err != nil { return nil, err @@ -640,10 +640,10 @@ func (a *HistogramAppender) AppendHistogram(prev *HistogramAppender, t int64, h pForwardInserts, nForwardInserts, okToAppend, counterReset := a.appendable(h) if !okToAppend || counterReset { if appendOnly { - if !okToAppend { - return nil, false, a, fmt.Errorf("histogram schema change") + if counterReset { + return nil, false, a, fmt.Errorf("histogram counter reset") } - return nil, false, a, fmt.Errorf("histogram counter reset") + return nil, false, a, fmt.Errorf("histogram schema change") } newChunk := NewHistogramChunk() app, err := newChunk.Appender() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go index cda1080a8..70f129b95 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram_meta.go @@ -284,7 +284,7 @@ loop: // cover an entirely different set of buckets. The function returns the // “forward” inserts to expand 'a' to also cover all the buckets exclusively // covered by 'b', and it returns the “backward” inserts to expand 'b' to also -// cover all the buckets exclusively covered by 'a' +// cover all the buckets exclusively covered by 'a'. func expandSpansBothWays(a, b []histogram.Span) (forward, backward []Insert, mergedSpans []histogram.Span) { ai := newBucketIterator(a) bi := newBucketIterator(b) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go index 449f9fbac..b43574dcb 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/varbit.go @@ -14,9 +14,8 @@ package chunkenc import ( + "fmt" "math/bits" - - "github.com/pkg/errors" ) // putVarbitInt writes an int64 using varbit encoding with a bit bucketing @@ -109,7 +108,7 @@ func readVarbitInt(b *bstreamReader) (int64, error) { val = int64(bits) default: - return 0, errors.Errorf("invalid bit pattern %b", d) + return 0, fmt.Errorf("invalid bit pattern %b", d) } if sz != 0 { @@ -215,7 +214,7 @@ func readVarbitUint(b *bstreamReader) (uint64, error) { return 0, err } default: - return 0, errors.Errorf("invalid bit pattern %b", d) + return 0, fmt.Errorf("invalid bit pattern %b", d) } if sz != 0 { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go index 089a51a64..d54e5dbab 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go @@ -99,7 +99,7 @@ func (c *XORChunk) Appender() (Appender, error) { // To get an appender we must know the state it would have if we had // appended all existing data from scratch. // We iterate through the end and populate via the iterator's state. 
- for it.Next() != ValNone { // nolint:revive + for it.Next() != ValNone { } if err := it.Err(); err != nil { return nil, err diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go index 05fd24a06..f22285a0c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go @@ -24,8 +24,6 @@ import ( "path/filepath" "strconv" - "github.com/pkg/errors" - "github.com/prometheus/prometheus/tsdb/chunkenc" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/prometheus/prometheus/tsdb/fileutil" @@ -93,13 +91,14 @@ func (p HeadChunkRef) Unpack() (HeadSeriesRef, HeadChunkID) { // // Example: // assume a memSeries.firstChunkID=7 and memSeries.mmappedChunks=[p5,p6,p7,p8,p9]. -// | HeadChunkID value | refers to ... | -// |-------------------|----------------------------------------------------------------------------------------| -// | 0-6 | chunks that have been compacted to blocks, these won't return data for queries in Head | -// | 7-11 | memSeries.mmappedChunks[i] where i is 0 to 4. | -// | 12 | *memChunk{next: nil} -// | 13 | *memChunk{next: ^} -// | 14 | memSeries.headChunks -> *memChunk{next: ^} +// +// | HeadChunkID value | refers to ... | +// |-------------------|----------------------------------------------------------------------------------------| +// | 0-6 | chunks that have been compacted to blocks, these won't return data for queries in Head | +// | 7-11 | memSeries.mmappedChunks[i] where i is 0 to 4. | +// | 12 | *memChunk{next: nil} +// | 13 | *memChunk{next: ^} +// | 14 | memSeries.headChunks -> *memChunk{next: ^} type HeadChunkID uint64 // BlockChunkRef refers to a chunk within a persisted block. @@ -118,11 +117,16 @@ func (b BlockChunkRef) Unpack() (int, int) { return sgmIndex, chkStart } -// Meta holds information about a chunk of data. +// Meta holds information about one or more chunks. +// For examples of when chunks.Meta could refer to multiple chunks, see +// ChunkReader.ChunkOrIterable(). type Meta struct { // Ref and Chunk hold either a reference that can be used to retrieve // chunk data or the data itself. - // If Chunk is nil, call ChunkReader.Chunk(Meta.Ref) to get the chunk and assign it to the Chunk field + // If Chunk is nil, call ChunkReader.ChunkOrIterable(Meta.Ref) to get the + // chunk and assign it to the Chunk field. If an iterable is returned from + // that method, then it may not be possible to set Chunk as the iterable + // might form several chunks. Ref ChunkRef Chunk chunkenc.Chunk @@ -198,7 +202,7 @@ func ChunkFromSamplesGeneric(s Samples) (Meta, error) { }, nil } -// PopulatedChunk creates a chunk populated with samples every second starting at minTime +// PopulatedChunk creates a chunk populated with samples every second starting at minTime. func PopulatedChunk(numSamples int, minTime int64) (Meta, error) { samples := make([]Sample, numSamples) for i := 0; i < numSamples; i++ { @@ -284,7 +288,7 @@ func checkCRC32(data, sum []byte) error { // This combination of shifts is the inverse of digest.Sum() in go/src/hash/crc32. 
want := uint32(sum[0])<<24 + uint32(sum[1])<<16 + uint32(sum[2])<<8 + uint32(sum[3]) if got != want { - return errors.Errorf("checksum mismatch expected:%x, actual:%x", want, got) + return fmt.Errorf("checksum mismatch expected:%x, actual:%x", want, got) } return nil } @@ -397,12 +401,12 @@ func (w *Writer) cut() error { func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, allocSize int64) (headerSize int, newFile *os.File, seq int, returnErr error) { p, seq, err := nextSequenceFile(dirFile.Name()) if err != nil { - return 0, nil, 0, errors.Wrap(err, "next sequence file") + return 0, nil, 0, fmt.Errorf("next sequence file: %w", err) } ptmp := p + ".tmp" f, err := os.OpenFile(ptmp, os.O_WRONLY|os.O_CREATE, 0o666) if err != nil { - return 0, nil, 0, errors.Wrap(err, "open temp file") + return 0, nil, 0, fmt.Errorf("open temp file: %w", err) } defer func() { if returnErr != nil { @@ -417,11 +421,11 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all }() if allocSize > 0 { if err = fileutil.Preallocate(f, allocSize, true); err != nil { - return 0, nil, 0, errors.Wrap(err, "preallocate") + return 0, nil, 0, fmt.Errorf("preallocate: %w", err) } } if err = dirFile.Sync(); err != nil { - return 0, nil, 0, errors.Wrap(err, "sync directory") + return 0, nil, 0, fmt.Errorf("sync directory: %w", err) } // Write header metadata for new file. @@ -431,24 +435,24 @@ func cutSegmentFile(dirFile *os.File, magicNumber uint32, chunksFormat byte, all n, err := f.Write(metab) if err != nil { - return 0, nil, 0, errors.Wrap(err, "write header") + return 0, nil, 0, fmt.Errorf("write header: %w", err) } if err := f.Close(); err != nil { - return 0, nil, 0, errors.Wrap(err, "close temp file") + return 0, nil, 0, fmt.Errorf("close temp file: %w", err) } f = nil if err := fileutil.Rename(ptmp, p); err != nil { - return 0, nil, 0, errors.Wrap(err, "replace file") + return 0, nil, 0, fmt.Errorf("replace file: %w", err) } f, err = os.OpenFile(p, os.O_WRONLY, 0o666) if err != nil { - return 0, nil, 0, errors.Wrap(err, "open final file") + return 0, nil, 0, fmt.Errorf("open final file: %w", err) } // Skip header for further writes. if _, err := f.Seek(int64(n), 0); err != nil { - return 0, nil, 0, errors.Wrap(err, "seek in final file") + return 0, nil, 0, fmt.Errorf("seek in final file: %w", err) } return n, f, seq, nil } @@ -605,16 +609,16 @@ func newReader(bs []ByteSlice, cs []io.Closer, pool chunkenc.Pool) (*Reader, err cr := Reader{pool: pool, bs: bs, cs: cs} for i, b := range cr.bs { if b.Len() < SegmentHeaderSize { - return nil, errors.Wrapf(errInvalidSize, "invalid segment header in segment %d", i) + return nil, fmt.Errorf("invalid segment header in segment %d: %w", i, errInvalidSize) } // Verify magic number. if m := binary.BigEndian.Uint32(b.Range(0, MagicChunksSize)); m != MagicChunks { - return nil, errors.Errorf("invalid magic number %x", m) + return nil, fmt.Errorf("invalid magic number %x", m) } // Verify chunk format version. 
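The reader below (Reader.ChunkOrIterable) parses the segment's per-chunk framing: a uvarint data length, one encoding byte, the chunk data, then a big-endian CRC32 covering the encoding byte plus the data (Castagnoli polynomial, matching checkCRC32 above). A toy round-trip of that framing with only the standard library; the encoding value 1 mirrors EncXOR but is otherwise illustrative:

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	table := crc32.MakeTable(crc32.Castagnoli)
	data := []byte{0xde, 0xad, 0xbe, 0xef}

	// Write: <uvarint len><encoding byte><data><big-endian crc32>.
	buf := binary.AppendUvarint(nil, uint64(len(data)))
	encStart := len(buf)
	buf = append(buf, 1) // encoding byte, 1 = EncXOR upstream
	buf = append(buf, data...)
	buf = binary.BigEndian.AppendUint32(buf, crc32.Checksum(buf[encStart:], table))

	// Read it back the way the segment reader does.
	n, w := binary.Uvarint(buf)
	payload := buf[w+1 : w+1+int(n)]
	want := binary.BigEndian.Uint32(buf[w+1+int(n):])
	got := crc32.Checksum(buf[w:w+1+int(n)], table)
	fmt.Println(n, payload, got == want) // 4 [222 173 190 239] true
}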
if v := int(b.Range(MagicChunksSize, MagicChunksSize+ChunksFormatVersionSize)[0]); v != chunksFormatV1 { - return nil, errors.Errorf("invalid chunk format version %d", v) + return nil, fmt.Errorf("invalid chunk format version %d", v) } cr.size += int64(b.Len()) } @@ -640,7 +644,7 @@ func NewDirReader(dir string, pool chunkenc.Pool) (*Reader, error) { f, err := fileutil.OpenMmapFile(fn) if err != nil { return nil, tsdb_errors.NewMulti( - errors.Wrap(err, "mmap files"), + fmt.Errorf("mmap files: %w", err), tsdb_errors.CloseAll(cs), ).Err() } @@ -668,24 +672,24 @@ func (s *Reader) Size() int64 { } // Chunk returns a chunk from a given reference. -func (s *Reader) Chunk(meta Meta) (chunkenc.Chunk, error) { +func (s *Reader) ChunkOrIterable(meta Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { sgmIndex, chkStart := BlockChunkRef(meta.Ref).Unpack() if sgmIndex >= len(s.bs) { - return nil, errors.Errorf("segment index %d out of range", sgmIndex) + return nil, nil, fmt.Errorf("segment index %d out of range", sgmIndex) } sgmBytes := s.bs[sgmIndex] if chkStart+MaxChunkLengthFieldSize > sgmBytes.Len() { - return nil, errors.Errorf("segment doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, sgmBytes.Len()) + return nil, nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, sgmBytes.Len()) } // With the minimum chunk length this should never cause us reading // over the end of the slice. c := sgmBytes.Range(chkStart, chkStart+MaxChunkLengthFieldSize) chkDataLen, n := binary.Uvarint(c) if n <= 0 { - return nil, errors.Errorf("reading chunk length failed with %d", n) + return nil, nil, fmt.Errorf("reading chunk length failed with %d", n) } chkEncStart := chkStart + n @@ -694,17 +698,18 @@ func (s *Reader) Chunk(meta Meta) (chunkenc.Chunk, error) { chkDataEnd := chkEnd - crc32.Size if chkEnd > sgmBytes.Len() { - return nil, errors.Errorf("segment doesn't include enough bytes to read the chunk - required:%v, available:%v", chkEnd, sgmBytes.Len()) + return nil, nil, fmt.Errorf("segment doesn't include enough bytes to read the chunk - required:%v, available:%v", chkEnd, sgmBytes.Len()) } sum := sgmBytes.Range(chkDataEnd, chkEnd) if err := checkCRC32(sgmBytes.Range(chkEncStart, chkDataEnd), sum); err != nil { - return nil, err + return nil, nil, err } chkData := sgmBytes.Range(chkDataStart, chkDataEnd) chkEnc := sgmBytes.Range(chkEncStart, chkEncStart+ChunkEncodingSize)[0] - return s.pool.Get(chunkenc.Encoding(chkEnc), chkData) + chk, err := s.pool.Get(chunkenc.Encoding(chkEnc), chkData) + return chk, nil, err } func nextSequenceFile(dir string) (string, int, error) { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go index d73eb36f8..b495b6182 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go @@ -17,6 +17,8 @@ import ( "bufio" "bytes" "encoding/binary" + "errors" + "fmt" "hash" "io" "os" @@ -25,7 +27,6 @@ import ( "sync" "github.com/dennwc/varint" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" "golang.org/x/exp/slices" @@ -107,7 +108,7 @@ type CorruptionErr struct { } func (e *CorruptionErr) Error() string { - return errors.Wrapf(e.Err, "corruption in head chunk file %s", segmentFile(e.Dir, 
e.FileIndex)).Error() + return fmt.Errorf("corruption in head chunk file %s: %w", segmentFile(e.Dir, e.FileIndex), e.Err).Error() } // chunkPos keeps track of the position in the head chunk files. @@ -240,10 +241,10 @@ type mmappedChunkFile struct { func NewChunkDiskMapper(reg prometheus.Registerer, dir string, pool chunkenc.Pool, writeBufferSize, writeQueueSize int) (*ChunkDiskMapper, error) { // Validate write buffer size. if writeBufferSize < MinWriteBufferSize || writeBufferSize > MaxWriteBufferSize { - return nil, errors.Errorf("ChunkDiskMapper write buffer size should be between %d and %d (actual: %d)", MinWriteBufferSize, MaxWriteBufferSize, writeBufferSize) + return nil, fmt.Errorf("ChunkDiskMapper write buffer size should be between %d and %d (actual: %d)", MinWriteBufferSize, MaxWriteBufferSize, writeBufferSize) } if writeBufferSize%1024 != 0 { - return nil, errors.Errorf("ChunkDiskMapper write buffer size should be a multiple of 1024 (actual: %d)", writeBufferSize) + return nil, fmt.Errorf("ChunkDiskMapper write buffer size should be a multiple of 1024 (actual: %d)", writeBufferSize) } if err := os.MkdirAll(dir, 0o777); err != nil { @@ -320,7 +321,7 @@ func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) { for seq, fn := range files { f, err := fileutil.OpenMmapFile(fn) if err != nil { - return errors.Wrapf(err, "mmap files, file: %s", fn) + return fmt.Errorf("mmap files, file: %s: %w", fn, err) } cdm.closers[seq] = f cdm.mmappedChunkFiles[seq] = &mmappedChunkFile{byteSlice: realByteSlice(f.Bytes())} @@ -335,23 +336,23 @@ func (cdm *ChunkDiskMapper) openMMapFiles() (returnErr error) { lastSeq := chkFileIndices[0] for _, seq := range chkFileIndices[1:] { if seq != lastSeq+1 { - return errors.Errorf("found unsequential head chunk files %s (index: %d) and %s (index: %d)", files[lastSeq], lastSeq, files[seq], seq) + return fmt.Errorf("found unsequential head chunk files %s (index: %d) and %s (index: %d)", files[lastSeq], lastSeq, files[seq], seq) } lastSeq = seq } for i, b := range cdm.mmappedChunkFiles { if b.byteSlice.Len() < HeadChunkFileHeaderSize { - return errors.Wrapf(errInvalidSize, "%s: invalid head chunk file header", files[i]) + return fmt.Errorf("%s: invalid head chunk file header: %w", files[i], errInvalidSize) } // Verify magic number. if m := binary.BigEndian.Uint32(b.byteSlice.Range(0, MagicChunksSize)); m != MagicHeadChunks { - return errors.Errorf("%s: invalid magic number %x", files[i], m) + return fmt.Errorf("%s: invalid magic number %x", files[i], m) } // Verify chunk format version. 
if v := int(b.byteSlice.Range(MagicChunksSize, MagicChunksSize+ChunksFormatVersionSize)[0]); v != chunksFormatV1 { - return errors.Errorf("%s: invalid chunk format version %d", files[i], v) + return fmt.Errorf("%s: invalid chunk format version %d", files[i], v) } } @@ -394,16 +395,16 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro f, err := os.Open(files[lastFile]) if err != nil { - return files, errors.Wrap(err, "open file during last head chunk file repair") + return files, fmt.Errorf("open file during last head chunk file repair: %w", err) } buf := make([]byte, MagicChunksSize) size, err := f.Read(buf) if err != nil && err != io.EOF { - return files, errors.Wrap(err, "failed to read magic number during last head chunk file repair") + return files, fmt.Errorf("failed to read magic number during last head chunk file repair: %w", err) } if err := f.Close(); err != nil { - return files, errors.Wrap(err, "close file during last head chunk file repair") + return files, fmt.Errorf("close file during last head chunk file repair: %w", err) } // We either don't have enough bytes for the magic number or the magic number is 0. @@ -413,7 +414,7 @@ func repairLastChunkFile(files map[int]string) (_ map[int]string, returnErr erro if size < MagicChunksSize || binary.BigEndian.Uint32(buf) == 0 { // Corrupt file, hence remove it. if err := os.RemoveAll(files[lastFile]); err != nil { - return files, errors.Wrap(err, "delete corrupted, empty head chunk file during last file repair") + return files, fmt.Errorf("delete corrupted, empty head chunk file during last file repair: %w", err) } delete(files, lastFile) } @@ -559,7 +560,7 @@ func (cdm *ChunkDiskMapper) cutAndExpectRef(chkRef ChunkDiskMapperRef) (err erro } if expSeq, expOffset := chkRef.Unpack(); seq != expSeq || offset != expOffset { - return errors.Errorf("expected newly cut file to have sequence:offset %d:%d, got %d:%d", expSeq, expOffset, seq, offset) + return fmt.Errorf("expected newly cut file to have sequence:offset %d:%d, got %d:%d", expSeq, expOffset, seq, offset) } return nil @@ -701,13 +702,13 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error return nil, &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: -1, - Err: errors.Errorf("head chunk file index %d more than current open file", sgmIndex), + Err: fmt.Errorf("head chunk file index %d more than current open file", sgmIndex), } } return nil, &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: sgmIndex, - Err: errors.Errorf("head chunk file index %d does not exist on disk", sgmIndex), + Err: fmt.Errorf("head chunk file index %d does not exist on disk", sgmIndex), } } @@ -715,7 +716,7 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error return nil, &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: sgmIndex, - Err: errors.Errorf("head chunk file doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, mmapFile.byteSlice.Len()), + Err: fmt.Errorf("head chunk file doesn't include enough bytes to read the chunk size data field - required:%v, available:%v", chkStart+MaxChunkLengthFieldSize, mmapFile.byteSlice.Len()), } } @@ -734,7 +735,7 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error return nil, &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: sgmIndex, - Err: errors.Errorf("reading chunk length failed with %d", n), + Err: fmt.Errorf("reading chunk length failed with %d", n), } } @@ -744,7 +745,7 @@ 
func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error return nil, &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: sgmIndex, - Err: errors.Errorf("head chunk file doesn't include enough bytes to read the chunk - required:%v, available:%v", chkDataEnd, mmapFile.byteSlice.Len()), + Err: fmt.Errorf("head chunk file doesn't include enough bytes to read the chunk - required:%v, available:%v", chkDataEnd, mmapFile.byteSlice.Len()), } } @@ -761,7 +762,7 @@ func (cdm *ChunkDiskMapper) Chunk(ref ChunkDiskMapperRef) (chunkenc.Chunk, error return nil, &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: sgmIndex, - Err: errors.Errorf("checksum mismatch expected:%x, actual:%x", sum, act), + Err: fmt.Errorf("checksum mismatch expected:%x, actual:%x", sum, act), } } @@ -829,7 +830,7 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu return &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: segID, - Err: errors.Errorf("head chunk file has some unread data, but doesn't include enough bytes to read the chunk header"+ + Err: fmt.Errorf("head chunk file has some unread data, but doesn't include enough bytes to read the chunk header"+ " - required:%v, available:%v, file:%d", idx+MaxHeadChunkMetaSize, fileEnd, segID), } } @@ -866,7 +867,7 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu return &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: segID, - Err: errors.Errorf("head chunk file doesn't include enough bytes to read the chunk header - required:%v, available:%v, file:%d", idx+CRCSize, fileEnd, segID), + Err: fmt.Errorf("head chunk file doesn't include enough bytes to read the chunk header - required:%v, available:%v, file:%d", idx+CRCSize, fileEnd, segID), } } @@ -879,7 +880,7 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu return &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: segID, - Err: errors.Errorf("checksum mismatch expected:%x, actual:%x", sum, act), + Err: fmt.Errorf("checksum mismatch expected:%x, actual:%x", sum, act), } } idx += CRCSize @@ -905,7 +906,7 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu return &CorruptionErr{ Dir: cdm.dir.Name(), FileIndex: segID, - Err: errors.Errorf("head chunk file doesn't include enough bytes to read the last chunk data - required:%v, available:%v, file:%d", idx, fileEnd, segID), + Err: fmt.Errorf("head chunk file doesn't include enough bytes to read the last chunk data - required:%v, available:%v, file:%d", idx, fileEnd, segID), } } } @@ -998,10 +999,9 @@ func (cdm *ChunkDiskMapper) deleteFiles(removedFiles []int) ([]int, error) { // DeleteCorrupted deletes all the head chunk files after the one which had the corruption // (including the corrupt file). func (cdm *ChunkDiskMapper) DeleteCorrupted(originalErr error) error { - err := errors.Cause(originalErr) // So that we can pick up errors even if wrapped. - cerr, ok := err.(*CorruptionErr) - if !ok { - return errors.Wrap(originalErr, "cannot handle error") + var cerr *CorruptionErr + if !errors.As(originalErr, &cerr) { + return fmt.Errorf("cannot handle error: %w", originalErr) } // Delete all the head chunk files following the corrupt head chunk file. 
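An aside on the recurring change in the hunks above: github.com/pkg/errors is replaced with the standard library throughout, wrapping with fmt.Errorf("...: %w", err) and matching wrapped error types with errors.As instead of errors.Cause plus a type assertion. A minimal, self-contained sketch of the pattern (simplified names, not the vendored code):

package main

import (
	"errors"
	"fmt"
)

// corruptionErr stands in for chunks.CorruptionErr.
type corruptionErr struct {
	FileIndex int
	Err       error
}

func (e *corruptionErr) Error() string {
	return fmt.Errorf("corruption in head chunk file %d: %w", e.FileIndex, e.Err).Error()
}

func main() {
	inner := &corruptionErr{FileIndex: 3, Err: errors.New("checksum mismatch")}
	// %w preserves the error chain, so the concrete type stays discoverable.
	wrapped := fmt.Errorf("iterate chunks: %w", inner)

	var cerr *corruptionErr
	if errors.As(wrapped, &cerr) { // walks the whole chain, unlike a direct type assertion
		fmt.Println("corrupt segment:", cerr.FileIndex)
	}
}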
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index ca3a95096..32c88d2cc 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -151,7 +151,7 @@ func NewLeveledCompactor(ctx context.Context, r prometheus.Registerer, l log.Log func NewLeveledCompactorWithChunkSize(ctx context.Context, r prometheus.Registerer, l log.Logger, ranges []int64, pool chunkenc.Pool, maxBlockChunkSegmentSize int64, mergeFunc storage.VerticalChunkSeriesMergeFunc) (*LeveledCompactor, error) { if len(ranges) == 0 { - return nil, errors.Errorf("at least one range must be provided") + return nil, fmt.Errorf("at least one range must be provided") } if pool == nil { pool = chunkenc.NewPool() @@ -201,7 +201,14 @@ func (c *LeveledCompactor) Plan(dir string) ([]string, error) { func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) { slices.SortFunc(dms, func(a, b dirMeta) int { - return int(a.meta.MinTime - b.meta.MinTime) + switch { + case a.meta.MinTime < b.meta.MinTime: + return -1 + case a.meta.MinTime > b.meta.MinTime: + return 1 + default: + return 0 + } }) res := c.selectOverlappingDirs(dms) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index ce8ef1f9a..2e3801a9e 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -203,10 +203,14 @@ type DB struct { compactor Compactor blocksToDelete BlocksToDeleteFunc - // Mutex for that must be held when modifying the general block layout. + // Mutex for that must be held when modifying the general block layout or lastGarbageCollectedMmapRef. mtx sync.RWMutex blocks []*Block + // The last OOO chunk that was compacted and written to disk. New queriers must not read chunks less + // than or equal to this reference, as these chunks could be garbage collected at any time. 
+ lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef + head *Head compactc chan struct{} @@ -248,6 +252,7 @@ type dbMetrics struct { tombCleanTimer prometheus.Histogram blocksBytes prometheus.Gauge maxBytes prometheus.Gauge + retentionDuration prometheus.Gauge } func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { @@ -321,6 +326,10 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { Name: "prometheus_tsdb_retention_limit_bytes", Help: "Max number of bytes to be retained in the tsdb blocks, configured 0 means disabled", }) + m.retentionDuration = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "prometheus_tsdb_retention_limit_seconds", + Help: "How long to retain samples in storage.", + }) m.sizeRetentionCount = prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_size_retentions_total", Help: "The number of times that blocks were deleted because the maximum number of bytes was exceeded.", @@ -341,6 +350,7 @@ func newDBMetrics(db *DB, r prometheus.Registerer) *dbMetrics { m.tombCleanTimer, m.blocksBytes, m.maxBytes, + m.retentionDuration, ) } return m @@ -580,7 +590,14 @@ func (db *DBReadOnly) Blocks() ([]BlockReader, error) { } slices.SortFunc(loadable, func(a, b *Block) int { - return int(a.Meta().MinTime - b.Meta().MinTime) + switch { + case a.Meta().MinTime < b.Meta().MinTime: + return -1 + case a.Meta().MinTime > b.Meta().MinTime: + return 1 + default: + return 0 + } }) blockMetas := make([]BlockMeta, 0, len(loadable)) @@ -649,7 +666,7 @@ func (db *DBReadOnly) Block(blockID string) (BlockReader, error) { _, err := os.Stat(filepath.Join(db.dir, blockID)) if os.IsNotExist(err) { - return nil, errors.Errorf("invalid block ID %s", blockID) + return nil, fmt.Errorf("invalid block ID %s", blockID) } block, err := OpenBlock(db.logger, filepath.Join(db.dir, blockID), nil) @@ -870,6 +887,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs maxBytes = 0 } db.metrics.maxBytes.Set(float64(maxBytes)) + db.metrics.retentionDuration.Set((time.Duration(opts.RetentionDuration) * time.Millisecond).Seconds()) if err := db.reload(); err != nil { return nil, err @@ -1229,6 +1247,20 @@ func (db *DB) compactOOOHead(ctx context.Context) error { lastWBLFile, minOOOMmapRef := oooHead.LastWBLFile(), oooHead.LastMmapRef() if lastWBLFile != 0 || minOOOMmapRef != 0 { + if minOOOMmapRef != 0 { + // Ensure that no more queriers are created that will reference chunks we're about to garbage collect. + // truncateOOO waits for any existing queriers that reference chunks we're about to garbage collect to + // complete before running garbage collection, so we don't need to do that here. + // + // We take mtx to ensure that Querier() and ChunkQuerier() don't miss blocks: without this, they could + // capture the list of blocks before the call to reloadBlocks() above runs, but then capture + // lastGarbageCollectedMmapRef after we update it here, and therefore not query either the blocks we've just + // written or the head chunks those blocks were created from. 
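// Aside on the slices.SortFunc comparators rewritten in this file (see the
// hunks above and below): the old `return int(a.Meta().MinTime - b.Meta().MinTime)`
// is unsafe because the int64 subtraction can overflow, and the cast to int
// truncates on 32-bit platforms, either of which flips the sign and corrupts
// the sort order. A sketch of the safe three-way form the diff introduces
// (on Go 1.21+ cmp.Compare expresses the same thing):
func compareInt64(a, b int64) int {
	switch {
	case a < b:
		return -1
	case a > b:
		return 1
	default:
		return 0
	}
}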
+ db.mtx.Lock() + db.lastGarbageCollectedMmapRef = minOOOMmapRef + db.mtx.Unlock() + } + if err := db.head.truncateOOO(lastWBLFile, minOOOMmapRef); err != nil { return errors.Wrap(err, "truncate ooo wbl") } @@ -1448,7 +1480,14 @@ func (db *DB) reloadBlocks() (err error) { db.metrics.blocksBytes.Set(float64(blocksSize)) slices.SortFunc(toLoad, func(a, b *Block) int { - return int(a.Meta().MinTime - b.Meta().MinTime) + switch { + case a.Meta().MinTime < b.Meta().MinTime: + return -1 + case a.Meta().MinTime > b.Meta().MinTime: + return 1 + default: + return 0 + } }) // Swap new blocks first for subsequently created readers to be seen. @@ -1518,7 +1557,14 @@ func deletableBlocks(db *DB, blocks []*Block) map[ulid.ULID]struct{} { // Sort the blocks by time - newest to oldest (largest to smallest timestamp). // This ensures that the retentions will remove the oldest blocks. slices.SortFunc(blocks, func(a, b *Block) int { - return int(b.Meta().MaxTime - a.Meta().MaxTime) + switch { + case b.Meta().MaxTime < a.Meta().MaxTime: + return -1 + case b.Meta().MaxTime > a.Meta().MaxTime: + return 1 + default: + return 0 + } }) for _, block := range blocks { @@ -1806,10 +1852,10 @@ func (db *DB) ForceHeadMMap() { // will create a new block containing all data that's currently in the memory buffer/WAL. func (db *DB) Snapshot(dir string, withHead bool) error { if dir == db.dir { - return errors.Errorf("cannot snapshot into base directory") + return fmt.Errorf("cannot snapshot into base directory") } if _, err := ulid.ParseStrict(dir); err == nil { - return errors.Errorf("dir must not be a valid ULID") + return fmt.Errorf("dir must not be a valid ULID") } db.cmtx.Lock() @@ -1841,7 +1887,7 @@ func (db *DB) Snapshot(dir string, withHead bool) error { } // Querier returns a new querier over the data partition for the given time range. -func (db *DB) Querier(mint, maxt int64) (storage.Querier, error) { +func (db *DB) Querier(mint, maxt int64) (_ storage.Querier, err error) { var blocks []BlockReader db.mtx.RLock() @@ -1852,11 +1898,23 @@ func (db *DB) Querier(mint, maxt int64) (storage.Querier, error) { blocks = append(blocks, b) } } - var inOrderHeadQuerier storage.Querier + + blockQueriers := make([]storage.Querier, 0, len(blocks)+2) // +2 to allow for possible in-order and OOO head queriers + + defer func() { + if err != nil { + // If we fail, all previously opened queriers must be closed. + for _, q := range blockQueriers { + // TODO(bwplotka): Handle error. 
+ _ = q.Close() + } + } + }() + if maxt >= db.head.MinTime() { rh := NewRangeHead(db.head, mint, maxt) var err error - inOrderHeadQuerier, err = NewBlockQuerier(rh, mint, maxt) + inOrderHeadQuerier, err := NewBlockQuerier(rh, mint, maxt) if err != nil { return nil, errors.Wrapf(err, "open block querier for head %s", rh) } @@ -1878,44 +1936,40 @@ func (db *DB) Querier(mint, maxt int64) (storage.Querier, error) { return nil, errors.Wrapf(err, "open block querier for head while getting new querier %s", rh) } } + + if inOrderHeadQuerier != nil { + blockQueriers = append(blockQueriers, inOrderHeadQuerier) + } } - var outOfOrderHeadQuerier storage.Querier if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { - rh := NewOOORangeHead(db.head, mint, maxt) + rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) var err error - outOfOrderHeadQuerier, err = NewBlockQuerier(rh, mint, maxt) + outOfOrderHeadQuerier, err := NewBlockQuerier(rh, mint, maxt) if err != nil { + // If NewBlockQuerier() failed, make sure to clean up the pending read created by NewOOORangeHead. + rh.isoState.Close() + return nil, errors.Wrapf(err, "open block querier for ooo head %s", rh) } - } - blockQueriers := make([]storage.Querier, 0, len(blocks)) - for _, b := range blocks { - q, err := NewBlockQuerier(b, mint, maxt) - if err == nil { - blockQueriers = append(blockQueriers, q) - continue - } - // If we fail, all previously opened queriers must be closed. - for _, q := range blockQueriers { - // TODO(bwplotka): Handle error. - _ = q.Close() - } - return nil, errors.Wrapf(err, "open querier for block %s", b) - } - if inOrderHeadQuerier != nil { - blockQueriers = append(blockQueriers, inOrderHeadQuerier) - } - if outOfOrderHeadQuerier != nil { blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) } + + for _, b := range blocks { + q, err := NewBlockQuerier(b, mint, maxt) + if err != nil { + return nil, errors.Wrapf(err, "open querier for block %s", b) + } + blockQueriers = append(blockQueriers, q) + } + return storage.NewMergeQuerier(blockQueriers, nil, storage.ChainedSeriesMerge), nil } // blockChunkQuerierForRange returns individual block chunk queriers from the persistent blocks, in-order head block, and the // out-of-order head block, overlapping with the given time range. -func (db *DB) blockChunkQuerierForRange(mint, maxt int64) ([]storage.ChunkQuerier, error) { +func (db *DB) blockChunkQuerierForRange(mint, maxt int64) (_ []storage.ChunkQuerier, err error) { var blocks []BlockReader db.mtx.RLock() @@ -1926,11 +1980,22 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) ([]storage.ChunkQuerie blocks = append(blocks, b) } } - var inOrderHeadQuerier storage.ChunkQuerier + + blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)+2) // +2 to allow for possible in-order and OOO head queriers + + defer func() { + if err != nil { + // If we fail, all previously opened queriers must be closed. + for _, q := range blockQueriers { + // TODO(bwplotka): Handle error. 
+ _ = q.Close() + } + } + }() + if maxt >= db.head.MinTime() { rh := NewRangeHead(db.head, mint, maxt) - var err error - inOrderHeadQuerier, err = NewBlockChunkQuerier(rh, mint, maxt) + inOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt) if err != nil { return nil, errors.Wrapf(err, "open querier for head %s", rh) } @@ -1952,37 +2017,28 @@ func (db *DB) blockChunkQuerierForRange(mint, maxt int64) ([]storage.ChunkQuerie return nil, errors.Wrapf(err, "open querier for head while getting new querier %s", rh) } } + + if inOrderHeadQuerier != nil { + blockQueriers = append(blockQueriers, inOrderHeadQuerier) + } } - var outOfOrderHeadQuerier storage.ChunkQuerier if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { - rh := NewOOORangeHead(db.head, mint, maxt) - var err error - outOfOrderHeadQuerier, err = NewBlockChunkQuerier(rh, mint, maxt) + rh := NewOOORangeHead(db.head, mint, maxt, db.lastGarbageCollectedMmapRef) + outOfOrderHeadQuerier, err := NewBlockChunkQuerier(rh, mint, maxt) if err != nil { return nil, errors.Wrapf(err, "open block chunk querier for ooo head %s", rh) } + + blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) } - blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)) for _, b := range blocks { q, err := NewBlockChunkQuerier(b, mint, maxt) - if err == nil { - blockQueriers = append(blockQueriers, q) - continue + if err != nil { + return nil, errors.Wrapf(err, "open querier for block %s", b) } - // If we fail, all previously opened queriers must be closed. - for _, q := range blockQueriers { - // TODO(bwplotka): Handle error. - _ = q.Close() - } - return nil, errors.Wrapf(err, "open querier for block %s", b) - } - if inOrderHeadQuerier != nil { - blockQueriers = append(blockQueriers, inOrderHeadQuerier) - } - if outOfOrderHeadQuerier != nil { - blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) + blockQueriers = append(blockQueriers, q) } return blockQueriers, nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go b/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go index ab97876a3..cd98fbd82 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/encoding/encoding.go @@ -15,13 +15,14 @@ package encoding import ( "encoding/binary" + "errors" + "fmt" "hash" "hash/crc32" "math" "unsafe" "github.com/dennwc/varint" - "github.com/pkg/errors" ) var ( @@ -153,7 +154,7 @@ func NewDecbufUvarintAt(bs ByteSlice, off int, castagnoliTable *crc32.Table) Dec l, n := varint.Uvarint(b) if n <= 0 || n > binary.MaxVarintLen32 { - return Decbuf{E: errors.Errorf("invalid uvarint %d", n)} + return Decbuf{E: fmt.Errorf("invalid uvarint %d", n)} } if bs.Len() < off+n+int(l)+4 { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go b/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go index aa0a4b1b3..ff230c44b 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/errors/errors.go @@ -25,7 +25,7 @@ import ( type multiError []error // NewMulti returns multiError with provided errors added if not nil. -func NewMulti(errs ...error) multiError { // nolint:revive +func NewMulti(errs ...error) multiError { //nolint:revive // unexported-return. m := multiError{} m.Add(errs...) 
return m @@ -38,7 +38,8 @@ func (es *multiError) Add(errs ...error) { if err == nil { continue } - if merr, ok := err.(nonNilMultiError); ok { + var merr nonNilMultiError + if errors.As(err, &merr) { *es = append(*es, merr.errs...) continue } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go b/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go index 904fc7c2b..8eaf42653 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/exemplar.go @@ -245,11 +245,26 @@ func (ce *CircularExemplarStorage) validateExemplar(key []byte, e exemplar.Exemp // Check for duplicate vs last stored exemplar for this series. // NB these are expected, and appending them is a no-op. - if ce.exemplars[idx.newest].exemplar.Equals(e) { + // For floats and classic histograms, there is only 1 exemplar per series, + // so this is sufficient. For native histograms with multiple exemplars per series, + // we have another check below. + newestExemplar := ce.exemplars[idx.newest].exemplar + if newestExemplar.Equals(e) { return storage.ErrDuplicateExemplar } - if e.Ts <= ce.exemplars[idx.newest].exemplar.Ts { + // Since during the scrape the exemplars are sorted first by timestamp, then value, then labels, + // if any of these conditions are true, we know that the exemplar is either a duplicate + // of a previous one (but not the most recent one as that is checked above) or out of order. + // We now allow exemplars with duplicate timestamps as long as they have different values and/or labels + // since that can happen for different buckets of a native histogram. + // We do not distinguish between duplicates and out of order as iterating through the exemplars + // to check for that would be expensive (versus just comparing with the most recent one) especially + // since this is run under a lock, and not worth it as we just need to return an error so we do not + // append the exemplar. 
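// Aside: a standalone sketch of the comparison the comment above describes
// and the condition below implements. Exemplars are ordered as a
// (timestamp, value, label-hash) tuple, and a candidate is rejected when it
// sorts strictly before the newest stored exemplar (names here are
// hypothetical, not part of this diff):
func sortsBeforeNewest(ts, newestTs int64, v, newestV float64, labelsHash, newestHash uint64) bool {
	if ts != newestTs {
		return ts < newestTs
	}
	if v != newestV {
		return v < newestV
	}
	return labelsHash < newestHash
}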
+ if e.Ts < newestExemplar.Ts || + (e.Ts == newestExemplar.Ts && e.Value < newestExemplar.Value) || + (e.Ts == newestExemplar.Ts && e.Value == newestExemplar.Value && e.Labels.Hash() < newestExemplar.Labels.Hash()) { if appended { ce.metrics.outOfOrderExemplars.Inc() } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap.go index 4dbca4f97..782ff27ec 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/mmap.go @@ -14,9 +14,8 @@ package fileutil import ( + "fmt" "os" - - "github.com/pkg/errors" ) type MmapFile struct { @@ -31,7 +30,7 @@ func OpenMmapFile(path string) (*MmapFile, error) { func OpenMmapFileWithSize(path string, size int) (mf *MmapFile, retErr error) { f, err := os.Open(path) if err != nil { - return nil, errors.Wrap(err, "try lock file") + return nil, fmt.Errorf("try lock file: %w", err) } defer func() { if retErr != nil { @@ -41,14 +40,14 @@ func OpenMmapFileWithSize(path string, size int) (mf *MmapFile, retErr error) { if size <= 0 { info, err := f.Stat() if err != nil { - return nil, errors.Wrap(err, "stat") + return nil, fmt.Errorf("stat: %w", err) } size = int(info.Size()) } b, err := mmap(f, size) if err != nil { - return nil, errors.Wrapf(err, "mmap, size %d", size) + return nil, fmt.Errorf("mmap, size %d: %w", size, err) } return &MmapFile{f: f, b: b}, nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/preallocate_linux.go b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/preallocate_linux.go index ada046221..026c69b35 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/fileutil/preallocate_linux.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/fileutil/preallocate_linux.go @@ -15,6 +15,7 @@ package fileutil import ( + "errors" "os" "syscall" ) @@ -23,10 +24,10 @@ func preallocExtend(f *os.File, sizeInBytes int64) error { // use mode = 0 to change size err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes) if err != nil { - errno, ok := err.(syscall.Errno) + var errno syscall.Errno // not supported; fallback // fallocate EINTRs frequently in some environments; fallback - if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { + if errors.As(err, &errno) && (errno == syscall.ENOTSUP || errno == syscall.EINTR) { return preallocExtendTrunc(f, sizeInBytes) } } @@ -37,9 +38,9 @@ func preallocFixed(f *os.File, sizeInBytes int64) error { // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes) if err != nil { - errno, ok := err.(syscall.Errno) + var errno syscall.Errno // treat not supported as nil error - if ok && errno == syscall.ENOTSUP { + if errors.As(err, &errno) && errno == syscall.ENOTSUP { return nil } } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 0ea2b8385..3ff2bee71 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -20,6 +20,7 @@ import ( "math" "path/filepath" "runtime" + "strconv" "sync" "time" @@ -106,9 +107,12 @@ type Head struct { iso *isolation + oooIso *oooIsolation + cardinalityMutex sync.Mutex cardinalityCache *index.PostingsStats // Posting stats cache which will expire after 30sec. - lastPostingsStatsCall time.Duration // Last posting stats call (PostingsCardinalityStats()) time for caching. 
+ cardinalityCacheKey string + lastPostingsStatsCall time.Duration // Last posting stats call (PostingsCardinalityStats()) time for caching. // chunkDiskMapper is used to write and read Head chunks to/from disk. chunkDiskMapper *chunks.ChunkDiskMapper @@ -224,11 +228,11 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wlog.WL, opts *Hea // even if ooo is not enabled yet. capMax := opts.OutOfOrderCapMax.Load() if capMax <= 0 || capMax > 255 { - return nil, errors.Errorf("OOOCapMax of %d is invalid. must be > 0 and <= 255", capMax) + return nil, fmt.Errorf("OOOCapMax of %d is invalid. must be > 0 and <= 255", capMax) } if opts.ChunkRange < 1 { - return nil, errors.Errorf("invalid chunk range %d", opts.ChunkRange) + return nil, fmt.Errorf("invalid chunk range %d", opts.ChunkRange) } if opts.SeriesCallback == nil { opts.SeriesCallback = &noopSeriesLifecycleCallback{} @@ -300,6 +304,7 @@ func (h *Head) resetInMemoryState() error { } h.iso = newIsolation(h.opts.IsolationDisabled) + h.oooIso = newOOOIsolation() h.exemplarMetrics = em h.exemplars = es @@ -857,7 +862,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) slice := mmappedChunks[seriesRef] if len(slice) > 0 && slice[len(slice)-1].maxTime >= mint { h.metrics.mmapChunkCorruptionTotal.Inc() - return errors.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]", + return fmt.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]", seriesRef, slice[len(slice)-1].minTime, slice[len(slice)-1].maxTime, mint, maxt) } slice = append(slice, &mmappedChunk{ @@ -872,7 +877,7 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) if len(ms.mmappedChunks) > 0 && ms.mmappedChunks[len(ms.mmappedChunks)-1].maxTime >= mint { h.metrics.mmapChunkCorruptionTotal.Inc() - return errors.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]", + return fmt.Errorf("out of sequence m-mapped chunk for series ref %d, last chunk: [%d, %d], new: [%d, %d]", seriesRef, ms.mmappedChunks[len(ms.mmappedChunks)-1].minTime, ms.mmappedChunks[len(ms.mmappedChunks)-1].maxTime, mint, maxt) } @@ -985,16 +990,23 @@ func (h *Head) DisableNativeHistograms() { // PostingsCardinalityStats returns highest cardinality stats by label and value names. 
func (h *Head) PostingsCardinalityStats(statsByLabelName string, limit int) *index.PostingsStats { + cacheKey := statsByLabelName + ";" + strconv.Itoa(limit) + h.cardinalityMutex.Lock() defer h.cardinalityMutex.Unlock() - currentTime := time.Duration(time.Now().Unix()) * time.Second - seconds := currentTime - h.lastPostingsStatsCall - if seconds > cardinalityCacheExpirationTime { + if h.cardinalityCacheKey != cacheKey { h.cardinalityCache = nil + } else { + currentTime := time.Duration(time.Now().Unix()) * time.Second + seconds := currentTime - h.lastPostingsStatsCall + if seconds > cardinalityCacheExpirationTime { + h.cardinalityCache = nil + } } if h.cardinalityCache != nil { return h.cardinalityCache } + h.cardinalityCacheKey = cacheKey h.cardinalityCache = h.postings.Stats(statsByLabelName, limit) h.lastPostingsStatsCall = time.Duration(time.Now().Unix()) * time.Second @@ -1133,6 +1145,14 @@ func (h *Head) WaitForPendingReadersInTimeRange(mint, maxt int64) { } } +// WaitForPendingReadersForOOOChunksAtOrBefore is like WaitForPendingReadersInTimeRange, except it waits for +// queries touching OOO chunks less than or equal to chunk to finish querying. +func (h *Head) WaitForPendingReadersForOOOChunksAtOrBefore(chunk chunks.ChunkDiskMapperRef) { + for h.oooIso.HasOpenReadsAtOrBefore(chunk) { + time.Sleep(500 * time.Millisecond) + } +} + // WaitForAppendersOverlapping waits for appends overlapping maxt to finish. func (h *Head) WaitForAppendersOverlapping(maxt int64) { for maxt >= h.iso.lowestAppendTime() { @@ -1271,13 +1291,19 @@ func (h *Head) truncateWAL(mint int64) error { } // truncateOOO +// - waits for any pending reads that potentially touch chunks less than or equal to newMinOOOMmapRef // - truncates the OOO WBL files whose index is strictly less than lastWBLFile. -// - garbage collects all the m-map chunks from the memory that are less than or equal to minOOOMmapRef +// - garbage collects all the m-map chunks from the memory that are less than or equal to newMinOOOMmapRef // and then deletes the series that do not have any data anymore. -func (h *Head) truncateOOO(lastWBLFile int, minOOOMmapRef chunks.ChunkDiskMapperRef) error { +// +// The caller is responsible for ensuring that no further queriers will be created that reference chunks less +// than or equal to newMinOOOMmapRef before calling truncateOOO. +func (h *Head) truncateOOO(lastWBLFile int, newMinOOOMmapRef chunks.ChunkDiskMapperRef) error { curMinOOOMmapRef := chunks.ChunkDiskMapperRef(h.minOOOMmapRef.Load()) - if minOOOMmapRef.GreaterThan(curMinOOOMmapRef) { - h.minOOOMmapRef.Store(uint64(minOOOMmapRef)) + if newMinOOOMmapRef.GreaterThan(curMinOOOMmapRef) { + h.WaitForPendingReadersForOOOChunksAtOrBefore(newMinOOOMmapRef) + h.minOOOMmapRef.Store(uint64(newMinOOOMmapRef)) + if err := h.truncateSeriesAndChunkDiskMapper("truncateOOO"); err != nil { return err } @@ -1407,11 +1433,13 @@ func (h *RangeHead) NumSeries() uint64 { return h.head.NumSeries() } +var rangeHeadULID = ulid.MustParse("0000000000XXXXXXXRANGEHEAD") + func (h *RangeHead) Meta() BlockMeta { return BlockMeta{ MinTime: h.MinTime(), MaxTime: h.MaxTime(), - ULID: h.head.Meta().ULID, + ULID: rangeHeadULID, Stats: BlockStats{ NumSeries: h.NumSeries(), }, @@ -1527,7 +1555,7 @@ func (h *Head) gc() (actualInOrderMint, minOOOTime int64, minMmapFile int) { return actualInOrderMint, minOOOTime, minMmapFile } -// Tombstones returns a new reader over the head's tombstones +// Tombstones returns a new reader over the head's tombstones. 
func (h *Head) Tombstones() (tombstones.Reader, error) { return h.tombstones, nil } @@ -1537,15 +1565,15 @@ func (h *Head) NumSeries() uint64 { return h.numSeries.Load() } +var headULID = ulid.MustParse("0000000000XXXXXXXXXXXXHEAD") + // Meta returns meta information about the head. // The head is dynamic so will return dynamic results. func (h *Head) Meta() BlockMeta { - var id [16]byte - copy(id[:], "______head______") return BlockMeta{ MinTime: h.MinTime(), MaxTime: h.MaxTime(), - ULID: ulid.ULID(id), + ULID: headULID, Stats: BlockStats{ NumSeries: h.NumSeries(), }, @@ -1593,9 +1621,6 @@ func (h *Head) Close() error { h.mmapHeadChunks() errs := tsdb_errors.NewMulti(h.chunkDiskMapper.Close()) - if errs.Err() == nil && h.opts.EnableMemorySnapshotOnShutdown { - errs.Add(h.performChunkSnapshot()) - } if h.wal != nil { errs.Add(h.wal.Close()) } @@ -1667,26 +1692,34 @@ func (h *Head) mmapHeadChunks() { var count int for i := 0; i < h.series.size; i++ { h.series.locks[i].RLock() - for _, all := range h.series.hashes[i] { - for _, series := range all { - series.Lock() - count += series.mmapChunks(h.chunkDiskMapper) - series.Unlock() - } + for _, series := range h.series.series[i] { + series.Lock() + count += series.mmapChunks(h.chunkDiskMapper) + series.Unlock() } h.series.locks[i].RUnlock() } h.metrics.mmapChunksTotal.Add(float64(count)) } -// seriesHashmap is a simple hashmap for memSeries by their label set. It is built -// on top of a regular hashmap and holds a slice of series to resolve hash collisions. +// seriesHashmap lets TSDB find a memSeries by its label set, via a 64-bit hash. +// There is one map for the common case where the hash value is unique, and a +// second map for the case that two series have the same hash value. +// Each series is in only one of the maps. // Its methods require the hash to be submitted with it to avoid re-computations throughout // the code. 
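// Aside: a reduced sketch of the two-map layout introduced below, with
// string payloads standing in for *memSeries. The common case — a hash owned
// by exactly one series — stays a single map hit; only genuine hash
// collisions pay for the slice scan in the conflicts map:
type hashmapSketch struct {
	unique    map[uint64]string
	conflicts map[uint64][]string // allocated lazily; collisions are rare
}

func (m *hashmapSketch) get(hash uint64, want string) (string, bool) {
	if s, ok := m.unique[hash]; ok && s == want {
		return s, true
	}
	for _, s := range m.conflicts[hash] {
		if s == want {
			return s, true
		}
	}
	return "", false
}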
-type seriesHashmap map[uint64][]*memSeries +type seriesHashmap struct { + unique map[uint64]*memSeries + conflicts map[uint64][]*memSeries +} -func (m seriesHashmap) get(hash uint64, lset labels.Labels) *memSeries { - for _, s := range m[hash] { +func (m *seriesHashmap) get(hash uint64, lset labels.Labels) *memSeries { + if s, found := m.unique[hash]; found { + if labels.Equal(s.lset, lset) { + return s + } + } + for _, s := range m.conflicts[hash] { if labels.Equal(s.lset, lset) { return s } @@ -1694,28 +1727,50 @@ func (m seriesHashmap) get(hash uint64, lset labels.Labels) *memSeries { return nil } -func (m seriesHashmap) set(hash uint64, s *memSeries) { - l := m[hash] +func (m *seriesHashmap) set(hash uint64, s *memSeries) { + if existing, found := m.unique[hash]; !found || labels.Equal(existing.lset, s.lset) { + m.unique[hash] = s + return + } + if m.conflicts == nil { + m.conflicts = make(map[uint64][]*memSeries) + } + l := m.conflicts[hash] for i, prev := range l { if labels.Equal(prev.lset, s.lset) { l[i] = s return } } - m[hash] = append(l, s) + m.conflicts[hash] = append(l, s) } -func (m seriesHashmap) del(hash uint64, lset labels.Labels) { +func (m *seriesHashmap) del(hash uint64, lset labels.Labels) { var rem []*memSeries - for _, s := range m[hash] { - if !labels.Equal(s.lset, lset) { - rem = append(rem, s) + unique, found := m.unique[hash] + switch { + case !found: + return + case labels.Equal(unique.lset, lset): + conflicts := m.conflicts[hash] + if len(conflicts) == 0 { + delete(m.unique, hash) + return + } + rem = conflicts + default: + rem = append(rem, unique) + for _, s := range m.conflicts[hash] { + if !labels.Equal(s.lset, lset) { + rem = append(rem, s) + } } } - if len(rem) == 0 { - delete(m, hash) + m.unique[hash] = rem[0] + if len(rem) == 1 { + delete(m.conflicts, hash) } else { - m[hash] = rem + m.conflicts[hash] = rem[1:] } } @@ -1757,7 +1812,10 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st s.series[i] = map[chunks.HeadSeriesRef]*memSeries{} } for i := range s.hashes { - s.hashes[i] = seriesHashmap{} + s.hashes[i] = seriesHashmap{ + unique: map[uint64]*memSeries{}, + conflicts: nil, // Initialized on demand in set(). + } } return s } @@ -1777,72 +1835,76 @@ func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) ( deletedFromPrevStripe = 0 ) minMmapFile = math.MaxInt32 - // Run through all series and truncate old chunks. Mark those with no - // chunks left as deleted and store their ID. + + // For one series, truncate old chunks and check if any chunks left. If not, mark as deleted and collect the ID. 
+ check := func(hashShard int, hash uint64, series *memSeries, deletedForCallback map[chunks.HeadSeriesRef]labels.Labels) { + series.Lock() + defer series.Unlock() + + rmChunks += series.truncateChunksBefore(mint, minOOOMmapRef) + + if len(series.mmappedChunks) > 0 { + seq, _ := series.mmappedChunks[0].ref.Unpack() + if seq < minMmapFile { + minMmapFile = seq + } + } + if series.ooo != nil && len(series.ooo.oooMmappedChunks) > 0 { + seq, _ := series.ooo.oooMmappedChunks[0].ref.Unpack() + if seq < minMmapFile { + minMmapFile = seq + } + for _, ch := range series.ooo.oooMmappedChunks { + if ch.minTime < minOOOTime { + minOOOTime = ch.minTime + } + } + } + if series.ooo != nil && series.ooo.oooHeadChunk != nil { + if series.ooo.oooHeadChunk.minTime < minOOOTime { + minOOOTime = series.ooo.oooHeadChunk.minTime + } + } + if len(series.mmappedChunks) > 0 || series.headChunks != nil || series.pendingCommit || + (series.ooo != nil && (len(series.ooo.oooMmappedChunks) > 0 || series.ooo.oooHeadChunk != nil)) { + seriesMint := series.minTime() + if seriesMint < actualMint { + actualMint = seriesMint + } + return + } + // The series is gone entirely. We need to keep the series lock + // and make sure we have acquired the stripe locks for hash and ID of the + // series alike. + // If we don't hold them all, there's a very small chance that a series receives + // samples again while we are half-way into deleting it. + refShard := int(series.ref) & (s.size - 1) + if hashShard != refShard { + s.locks[refShard].Lock() + defer s.locks[refShard].Unlock() + } + + deleted[storage.SeriesRef(series.ref)] = struct{}{} + s.hashes[hashShard].del(hash, series.lset) + delete(s.series[refShard], series.ref) + deletedForCallback[series.ref] = series.lset + } + + // Run through all series shard by shard, checking which should be deleted. for i := 0; i < s.size; i++ { deletedForCallback := make(map[chunks.HeadSeriesRef]labels.Labels, deletedFromPrevStripe) s.locks[i].Lock() - for hash, all := range s.hashes[i] { + // Delete conflicts first so seriesHashmap.del doesn't move them to the `unique` field, + // after deleting `unique`. + for hash, all := range s.hashes[i].conflicts { for _, series := range all { - series.Lock() - rmChunks += series.truncateChunksBefore(mint, minOOOMmapRef) - - if len(series.mmappedChunks) > 0 { - seq, _ := series.mmappedChunks[0].ref.Unpack() - if seq < minMmapFile { - minMmapFile = seq - } - } - if series.ooo != nil && len(series.ooo.oooMmappedChunks) > 0 { - seq, _ := series.ooo.oooMmappedChunks[0].ref.Unpack() - if seq < minMmapFile { - minMmapFile = seq - } - for _, ch := range series.ooo.oooMmappedChunks { - if ch.minTime < minOOOTime { - minOOOTime = ch.minTime - } - } - } - if series.ooo != nil && series.ooo.oooHeadChunk != nil { - if series.ooo.oooHeadChunk.minTime < minOOOTime { - minOOOTime = series.ooo.oooHeadChunk.minTime - } - } - if len(series.mmappedChunks) > 0 || series.headChunks != nil || series.pendingCommit || - (series.ooo != nil && (len(series.ooo.oooMmappedChunks) > 0 || series.ooo.oooHeadChunk != nil)) { - seriesMint := series.minTime() - if seriesMint < actualMint { - actualMint = seriesMint - } - series.Unlock() - continue - } - - // The series is gone entirely. We need to keep the series lock - // and make sure we have acquired the stripe locks for hash and ID of the - // series alike. - // If we don't hold them all, there's a very small chance that a series receives - // samples again while we are half-way into deleting it. 
- j := int(series.ref) & (s.size - 1) - - if i != j { - s.locks[j].Lock() - } - - deleted[storage.SeriesRef(series.ref)] = struct{}{} - s.hashes[i].del(hash, series.lset) - delete(s.series[j], series.ref) - deletedForCallback[series.ref] = series.lset - - if i != j { - s.locks[j].Unlock() - } - - series.Unlock() + check(i, hash, series, deletedForCallback) } } + for hash, series := range s.hashes[i].unique { + check(i, hash, series, deletedForCallback) + } s.locks[i].Unlock() @@ -2171,7 +2233,7 @@ func overlapsClosedInterval(mint1, maxt1, mint2, maxt2 int64) bool { return mint1 <= maxt2 && mint2 <= maxt1 } -// mmappedChunk describes a head chunk on disk that has been mmapped +// mmappedChunk describes a head chunk on disk that has been mmapped. type mmappedChunk struct { ref chunks.ChunkDiskMapperRef numSamples uint16 diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index d1f4d3035..be53a4f3f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -521,13 +521,13 @@ func (a *headAppender) AppendHistogram(ref storage.SeriesRef, lset labels.Labels } if h != nil { - if err := ValidateHistogram(h); err != nil { + if err := h.Validate(); err != nil { return 0, err } } if fh != nil { - if err := ValidateFloatHistogram(fh); err != nil { + if err := fh.Validate(); err != nil { return 0, err } } @@ -642,103 +642,6 @@ func (a *headAppender) UpdateMetadata(ref storage.SeriesRef, lset labels.Labels, return ref, nil } -func ValidateHistogram(h *histogram.Histogram) error { - if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { - return errors.Wrap(err, "negative side") - } - if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { - return errors.Wrap(err, "positive side") - } - var nCount, pCount uint64 - err := checkHistogramBuckets(h.NegativeBuckets, &nCount, true) - if err != nil { - return errors.Wrap(err, "negative side") - } - err = checkHistogramBuckets(h.PositiveBuckets, &pCount, true) - if err != nil { - return errors.Wrap(err, "positive side") - } - - if c := nCount + pCount + h.ZeroCount; c > h.Count { - return errors.Wrap( - storage.ErrHistogramCountNotBigEnough, - fmt.Sprintf("%d observations found in buckets, but the Count field is %d", c, h.Count), - ) - } - - return nil -} - -func ValidateFloatHistogram(h *histogram.FloatHistogram) error { - if err := checkHistogramSpans(h.NegativeSpans, len(h.NegativeBuckets)); err != nil { - return errors.Wrap(err, "negative side") - } - if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { - return errors.Wrap(err, "positive side") - } - var nCount, pCount float64 - err := checkHistogramBuckets(h.NegativeBuckets, &nCount, false) - if err != nil { - return errors.Wrap(err, "negative side") - } - err = checkHistogramBuckets(h.PositiveBuckets, &pCount, false) - if err != nil { - return errors.Wrap(err, "positive side") - } - - // We do not check for h.Count being at least as large as the sum of the - // counts in the buckets because floating point precision issues can - // create false positives here. 
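// Aside on the validation helpers deleted in this hunk: histogram bucket
// counts are stored delta-encoded (each entry is the difference from the
// previous absolute count), which is why the removed checkHistogramBuckets
// walked the slice with a running value. A self-contained sketch of decoding
// and checking such deltas (hypothetical helper; assumes "fmt" is imported):
func absoluteBucketCounts(deltas []int64) ([]int64, error) {
	counts := make([]int64, len(deltas))
	var last int64
	for i, d := range deltas {
		c := last + d
		if c < 0 {
			return nil, fmt.Errorf("bucket number %d has observation count of %d", i+1, c)
		}
		counts[i] = c
		last = c
	}
	return counts, nil
}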
- - return nil -} - -func checkHistogramSpans(spans []histogram.Span, numBuckets int) error { - var spanBuckets int - for n, span := range spans { - if n > 0 && span.Offset < 0 { - return errors.Wrap( - storage.ErrHistogramSpanNegativeOffset, - fmt.Sprintf("span number %d with offset %d", n+1, span.Offset), - ) - } - spanBuckets += int(span.Length) - } - if spanBuckets != numBuckets { - return errors.Wrap( - storage.ErrHistogramSpansBucketsMismatch, - fmt.Sprintf("spans need %d buckets, have %d buckets", spanBuckets, numBuckets), - ) - } - return nil -} - -func checkHistogramBuckets[BC histogram.BucketCount, IBC histogram.InternalBucketCount](buckets []IBC, count *BC, deltas bool) error { - if len(buckets) == 0 { - return nil - } - - var last IBC - for i := 0; i < len(buckets); i++ { - var c IBC - if deltas { - c = last + buckets[i] - } else { - c = buckets[i] - } - if c < 0 { - return errors.Wrap( - storage.ErrHistogramNegativeBucketCount, - fmt.Sprintf("bucket number %d has observation count of %v", i+1, c), - ) - } - last = c - *count += BC(c) - } - - return nil -} - var _ storage.GetRef = &headAppender{} func (a *headAppender) GetRef(lset labels.Labels, hash uint64) (storage.SeriesRef, labels.Labels) { @@ -786,14 +689,6 @@ func (a *headAppender) log() error { return errors.Wrap(err, "log samples") } } - if len(a.exemplars) > 0 { - rec = enc.Exemplars(exemplarsForEncoding(a.exemplars), buf) - buf = rec[:0] - - if err := a.head.wal.Log(rec); err != nil { - return errors.Wrap(err, "log exemplars") - } - } if len(a.histograms) > 0 { rec = enc.HistogramSamples(a.histograms, buf) buf = rec[:0] @@ -808,6 +703,18 @@ func (a *headAppender) log() error { return errors.Wrap(err, "log float histograms") } } + // Exemplars should be logged after samples (float/native histogram/etc), + // otherwise it might happen that we send the exemplars in a remote write + // batch before the samples, which in turn means the exemplar is rejected + // for missing series, since series are created due to samples. + if len(a.exemplars) > 0 { + rec = enc.Exemplars(exemplarsForEncoding(a.exemplars), buf) + buf = rec[:0] + + if err := a.head.wal.Log(rec); err != nil { + return errors.Wrap(err, "log exemplars") + } + } return nil } @@ -844,6 +751,12 @@ func (a *headAppender) Commit() (err error) { // No errors logging to WAL, so pass the exemplars along to the in memory storage. for _, e := range a.exemplars { s := a.head.series.getByID(chunks.HeadSeriesRef(e.ref)) + if s == nil { + // This is very unlikely to happen, but we have seen it in the wild. + // It means that the series was truncated between AppendExemplar and Commit. + // See TestHeadCompactionWhileAppendAndCommitExemplar. + continue + } // We don't instrument exemplar appends here, all is instrumented by storage. if err := a.head.exemplars.AddExemplar(s.lset, e.exemplar); err != nil { if err == storage.ErrOutOfOrderExemplar { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go index 6964d5472..35ef26a58 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go @@ -202,7 +202,7 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchB // headChunkID returns the HeadChunkID referred to by the given position. 
// * 0 <= pos < len(s.mmappedChunks) refer to s.mmappedChunks[pos] -// * pos >= len(s.mmappedChunks) refers to s.headChunks linked list +// * pos >= len(s.mmappedChunks) refers to s.headChunks linked list. func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID { return chunks.HeadChunkID(pos) + s.firstChunkID } @@ -289,10 +289,10 @@ func (h *headChunkReader) Close() error { return nil } -// Chunk returns the chunk for the reference number. -func (h *headChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { +// ChunkOrIterable returns the chunk for the reference number. +func (h *headChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { chk, _, err := h.chunk(meta, false) - return chk, err + return chk, nil, err } // ChunkWithCopy returns the chunk for the reference number. @@ -416,13 +416,13 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, chunkDiskMapper *chunks.ChunkDi return elem, true, offset == 0, nil } -// oooMergedChunk returns the requested chunk based on the given chunks.Meta -// reference from memory or by m-mapping it from the disk. The returned chunk -// might be a merge of all the overlapping chunks, if any, amongst all the -// chunks in the OOOHead. +// oooMergedChunks returns an iterable over one or more OOO chunks for the given +// chunks.Meta reference from memory or by m-mapping it from the disk. The +// returned iterable will be a merge of all the overlapping chunks, if any, +// amongst all the chunks in the OOOHead. // This function is not thread safe unless the caller holds a lock. // The caller must ensure that s.ooo is not nil. -func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64) (chunk *mergedOOOChunks, err error) { +func (s *memSeries) oooMergedChunks(meta chunks.Meta, cdm *chunks.ChunkDiskMapper, mint, maxt int64) (*mergedOOOChunks, error) { _, cid := chunks.HeadChunkRef(meta.Ref).Unpack() // ix represents the index of chunk in the s.mmappedChunks slice. The chunk meta's are @@ -499,11 +499,13 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper mc := &mergedOOOChunks{} absoluteMax := int64(math.MinInt64) for _, c := range tmpChks { - if c.meta.Ref != meta.Ref && (len(mc.chunks) == 0 || c.meta.MinTime > absoluteMax) { + if c.meta.Ref != meta.Ref && (len(mc.chunkIterables) == 0 || c.meta.MinTime > absoluteMax) { continue } + var iterable chunkenc.Iterable if c.meta.Ref == oooHeadRef { var xor *chunkenc.XORChunk + var err error // If head chunk min and max time match the meta OOO markers // that means that the chunk has not expanded so we can append // it as it is. @@ -516,7 +518,7 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper if err != nil { return nil, errors.Wrap(err, "failed to convert ooo head chunk to xor chunk") } - c.meta.Chunk = xor + iterable = xor } else { chk, err := cdm.Chunk(c.ref) if err != nil { @@ -531,12 +533,12 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper // wrap the chunk within a chunk that doesn't allow us to iterate // through samples out of the OOOLastMinT and OOOLastMaxT // markers.
- c.meta.Chunk = boundedChunk{chk, meta.OOOLastMinTime, meta.OOOLastMaxTime} + iterable = boundedIterable{chk, meta.OOOLastMinTime, meta.OOOLastMaxTime} } else { - c.meta.Chunk = chk + iterable = chk } } - mc.chunks = append(mc.chunks, c.meta) + mc.chunkIterables = append(mc.chunkIterables, iterable) if c.meta.MaxTime > absoluteMax { absoluteMax = c.meta.MaxTime } @@ -545,77 +547,30 @@ func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm *chunks.ChunkDiskMapper return mc, nil } -var _ chunkenc.Chunk = &mergedOOOChunks{} +var _ chunkenc.Iterable = &mergedOOOChunks{} -// mergedOOOChunks holds the list of overlapping chunks. This struct satisfies -// chunkenc.Chunk. +// mergedOOOChunks holds the list of iterables for overlapping chunks. type mergedOOOChunks struct { - chunks []chunks.Meta -} - -// Bytes is a very expensive method because its calling the iterator of all the -// chunks in the mergedOOOChunk and building a new chunk with the samples. -func (o mergedOOOChunks) Bytes() []byte { - xc := chunkenc.NewXORChunk() - app, err := xc.Appender() - if err != nil { - panic(err) - } - it := o.Iterator(nil) - for it.Next() == chunkenc.ValFloat { - t, v := it.At() - app.Append(t, v) - } - - return xc.Bytes() -} - -func (o mergedOOOChunks) Encoding() chunkenc.Encoding { - return chunkenc.EncXOR -} - -func (o mergedOOOChunks) Appender() (chunkenc.Appender, error) { - return nil, errors.New("can't append to mergedOOOChunks") + chunkIterables []chunkenc.Iterable } func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator { - return storage.ChainSampleIteratorFromMetas(iterator, o.chunks) + return storage.ChainSampleIteratorFromIterables(iterator, o.chunkIterables) } -func (o mergedOOOChunks) NumSamples() int { - samples := 0 - for _, c := range o.chunks { - samples += c.Chunk.NumSamples() - } - return samples -} +var _ chunkenc.Iterable = &boundedIterable{} -func (o mergedOOOChunks) Compact() {} - -var _ chunkenc.Chunk = &boundedChunk{} - -// boundedChunk is an implementation of chunkenc.Chunk that uses a +// boundedIterable is an implementation of chunkenc.Iterable that uses a // boundedIterator that only iterates through samples which timestamps are -// >= minT and <= maxT -type boundedChunk struct { - chunkenc.Chunk - minT int64 - maxT int64 +// >= minT and <= maxT. +type boundedIterable struct { + chunk chunkenc.Chunk + minT int64 + maxT int64 } -func (b boundedChunk) Bytes() []byte { - xor := chunkenc.NewXORChunk() - a, _ := xor.Appender() - it := b.Iterator(nil) - for it.Next() == chunkenc.ValFloat { - t, v := it.At() - a.Append(t, v) - } - return xor.Bytes() -} - -func (b boundedChunk) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator { - it := b.Chunk.Iterator(iterator) +func (b boundedIterable) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator { + it := b.chunk.Iterator(iterator) if it == nil { panic("iterator shouldn't be nil") } @@ -625,7 +580,7 @@ func (b boundedChunk) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator { var _ chunkenc.Iterator = &boundedIterator{} // boundedIterator is an implementation of Iterator that only iterates through -// samples which timestamps are >= minT and <= maxT +// samples which timestamps are >= minT and <= maxT. 
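// Aside: the shift from chunkenc.Chunk to chunkenc.Iterable in the hunks
// above means a merged OOO view only has to yield an iterator; it no longer
// fakes Bytes()/Encoding() by re-encoding every sample into a scratch XOR
// chunk. A minimal sketch of the bounded-iteration idea, using hypothetical
// interfaces rather than the vendored chunkenc types:
type sampleIterator interface {
	Next() bool
	At() (t int64, v float64)
}

// boundedSketch skips samples before minT and stops at the first sample
// after maxT, relying on samples being time-ordered.
type boundedSketch struct {
	inner      sampleIterator
	minT, maxT int64
}

func (b *boundedSketch) Next() bool {
	for b.inner.Next() {
		t, _ := b.inner.At()
		if t > b.maxT {
			return false // time-ordered input: nothing later can be in range
		}
		if t >= b.minT {
			return true
		}
	}
	return false
}

func (b *boundedSketch) At() (int64, float64) { return b.inner.At() }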
type boundedIterator struct { chunkenc.Iterator minT int64 @@ -671,7 +626,7 @@ func (b boundedIterator) Seek(t int64) chunkenc.ValueType { return b.Iterator.Seek(t) } -// safeHeadChunk makes sure that the chunk can be accessed without a race condition +// safeHeadChunk makes sure that the chunk can be accessed without a race condition. type safeHeadChunk struct { chunkenc.Chunk s *memSeries diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index d6780c021..07fa8280c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// nolint:revive // Many legitimately empty blocks in this file. package tsdb import ( @@ -971,7 +970,7 @@ func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapsh dec := encoding.Decbuf{B: b} if flag := dec.Byte(); flag != chunkSnapshotRecordTypeSeries { - return csr, errors.Errorf("invalid record type %x", flag) + return csr, fmt.Errorf("invalid record type %x", flag) } csr.ref = chunks.HeadSeriesRef(dec.Be64()) @@ -1019,7 +1018,7 @@ func decodeSeriesFromChunkSnapshot(d *record.Decoder, b []byte) (csr chunkSnapsh err = dec.Err() if err != nil && len(dec.B) > 0 { - err = errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + err = fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return @@ -1042,7 +1041,7 @@ func decodeTombstonesSnapshotRecord(b []byte) (tombstones.Reader, error) { dec := encoding.Decbuf{B: b} if flag := dec.Byte(); flag != chunkSnapshotRecordTypeTombstones { - return nil, errors.Errorf("invalid record type %x", flag) + return nil, fmt.Errorf("invalid record type %x", flag) } tr, err := tombstones.Decode(dec.UvarintBytes()) @@ -1255,7 +1254,7 @@ func LastChunkSnapshot(dir string) (string, int, int, error) { continue } if !fi.IsDir() { - return "", 0, 0, errors.Errorf("chunk snapshot %s is not a directory", fi.Name()) + return "", 0, 0, fmt.Errorf("chunk snapshot %s is not a directory", fi.Name()) } splits := strings.Split(fi.Name()[len(chunkSnapshotPrefix):], ".") @@ -1493,7 +1492,7 @@ Outer: default: // This is a record type we don't understand. It is either an old format from earlier versions, // or a new format and the code was rolled back to an old version.
- loopErr = errors.Errorf("unsupported snapshot record type 0b%b", rec[0]) + loopErr = fmt.Errorf("unsupported snapshot record type 0b%b", rec[0]) break Outer } } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go index ccf06e8ea..44ee66386 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/index.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/index.go @@ -28,7 +28,6 @@ import ( "sort" "unsafe" - "github.com/pkg/errors" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" @@ -107,8 +106,8 @@ func newCRC32() hash.Hash32 { type symbolCacheEntry struct { index uint32 - lastValue string lastValueIndex uint32 + lastValue string } // Writer implements the IndexWriter interface for the standard @@ -172,7 +171,7 @@ func NewTOCFromByteSlice(bs ByteSlice) (*TOC, error) { d := encoding.Decbuf{B: b[:len(b)-4]} if d.Crc32(castagnoliTable) != expCRC { - return nil, errors.Wrap(encoding.ErrInvalidChecksum, "read TOC") + return nil, fmt.Errorf("read TOC: %w", encoding.ErrInvalidChecksum) } toc := &TOC{ @@ -197,7 +196,7 @@ func NewWriter(ctx context.Context, fn string) (*Writer, error) { defer df.Close() // Close for platform windows. if err := os.RemoveAll(fn); err != nil { - return nil, errors.Wrap(err, "remove any existing index at path") + return nil, fmt.Errorf("remove any existing index at path: %w", err) } // Main index file we are building. @@ -216,7 +215,7 @@ func NewWriter(ctx context.Context, fn string) (*Writer, error) { return nil, err } if err := df.Sync(); err != nil { - return nil, errors.Wrap(err, "sync dir") + return nil, fmt.Errorf("sync dir: %w", err) } iw := &Writer{ @@ -288,7 +287,7 @@ func (fw *FileWriter) Write(bufs ...[]byte) error { // Once we move to compressed/varint representations in those areas, this limitation // can be lifted. if fw.pos > 16*math.MaxUint32 { - return errors.Errorf("%q exceeding max size of 64GiB", fw.name) + return fmt.Errorf("%q exceeding max size of 64GiB", fw.name) } } return nil @@ -315,7 +314,7 @@ func (fw *FileWriter) AddPadding(size int) error { p = uint64(size) - p if err := fw.Write(make([]byte, p)); err != nil { - return errors.Wrap(err, "add padding") + return fmt.Errorf("add padding: %w", err) } return nil } @@ -353,7 +352,7 @@ func (w *Writer) ensureStage(s indexWriterStage) error { } } if w.stage > s { - return errors.Errorf("invalid stage %q, currently at %q", s, w.stage) + return fmt.Errorf("invalid stage %q, currently at %q", s, w.stage) } // Mark start of sections in table of contents. @@ -417,20 +416,20 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... return err } if labels.Compare(lset, w.lastSeries) <= 0 { - return errors.Errorf("out-of-order series added with label set %q", lset) + return fmt.Errorf("out-of-order series added with label set %q", lset) } if ref < w.lastRef && !w.lastSeries.IsEmpty() { - return errors.Errorf("series with reference greater than %d already added", ref) + return fmt.Errorf("series with reference greater than %d already added", ref) } // We add padding to 16 bytes to increase the addressable space we get through 4 byte // series references. 
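A side note on the 16-byte padding handled in the surrounding hunk: series references are stored as 4-byte values that address 16-byte-aligned positions, so the index can span 16 * 2^32 bytes, which is exactly the 64GiB limit enforced in FileWriter.Write. A minimal sketch of that ref-to-offset mapping (illustrative only, not part of the patch; function names are mine):

package main

import "fmt"

// Illustrative only: how 4-byte series references address a 64 GiB index
// file when entries are 16-byte aligned, mirroring the checks in the patch.
func refToOffset(ref uint32) uint64 { return uint64(ref) * 16 }

func offsetToRef(off uint64) (uint32, error) {
	if off%16 != 0 {
		return 0, fmt.Errorf("series write not 16-byte aligned at %d", off)
	}
	if off > 16*uint64(1<<32-1) {
		return 0, fmt.Errorf("offset %d exceeds max size of 64GiB", off)
	}
	return uint32(off / 16), nil
}

func main() {
	ref, _ := offsetToRef(4096)
	fmt.Println(ref, refToOffset(ref)) // 256 4096
}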
if err := w.addPadding(16); err != nil { - return errors.Errorf("failed to write padding bytes: %v", err) + return fmt.Errorf("failed to write padding bytes: %v", err) } if w.f.pos%16 != 0 { - return errors.Errorf("series write not 16-byte aligned at %d", w.f.pos) + return fmt.Errorf("series write not 16-byte aligned at %d", w.f.pos) } w.buf2.Reset() @@ -443,7 +442,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... if !ok { nameIndex, err = w.symbols.ReverseLookup(l.Name) if err != nil { - return errors.Errorf("symbol entry for %q does not exist, %v", l.Name, err) + return fmt.Errorf("symbol entry for %q does not exist, %v", l.Name, err) } } w.labelNames[l.Name]++ @@ -453,12 +452,12 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... if !ok || cacheEntry.lastValue != l.Value { valueIndex, err = w.symbols.ReverseLookup(l.Value) if err != nil { - return errors.Errorf("symbol entry for %q does not exist, %v", l.Value, err) + return fmt.Errorf("symbol entry for %q does not exist, %v", l.Value, err) } w.symbolCache[l.Name] = symbolCacheEntry{ index: nameIndex, - lastValue: l.Value, lastValueIndex: valueIndex, + lastValue: l.Value, } } w.buf2.PutUvarint32(valueIndex) @@ -493,7 +492,7 @@ func (w *Writer) AddSeries(ref storage.SeriesRef, lset labels.Labels, chunks ... w.buf2.PutHash(w.crc32) if err := w.write(w.buf1.Get(), w.buf2.Get()); err != nil { - return errors.Wrap(err, "write series data") + return fmt.Errorf("write series data: %w", err) } w.lastSeries.CopyFrom(lset) @@ -514,7 +513,7 @@ func (w *Writer) AddSymbol(sym string) error { return err } if w.numSymbols != 0 && sym <= w.lastSymbol { - return errors.Errorf("symbol %q out-of-order", sym) + return fmt.Errorf("symbol %q out-of-order", sym) } w.lastSymbol = sym w.numSymbols++ @@ -527,7 +526,7 @@ func (w *Writer) finishSymbols() error { symbolTableSize := w.f.pos - w.toc.Symbols - 4 // The symbol table's part is 4 bytes. So the total symbol table size must be less than or equal to 2^32-1 if symbolTableSize > math.MaxUint32 { - return errors.Errorf("symbol table size exceeds %d bytes: %d", uint32(math.MaxUint32), symbolTableSize) + return fmt.Errorf("symbol table size exceeds %d bytes: %d", uint32(math.MaxUint32), symbolTableSize) } // Write out the length and symbol count. @@ -563,7 +562,7 @@ func (w *Writer) finishSymbols() error { // Load in the symbol table efficiently for the rest of the index writing. 
w.symbols, err = NewSymbols(realByteSlice(w.symbolFile.Bytes()), FormatV2, int(w.toc.Symbols)) if err != nil { - return errors.Wrap(err, "read symbols") + return fmt.Errorf("read symbols: %w", err) } return nil } @@ -660,7 +659,7 @@ func (w *Writer) writeLabelIndex(name string, values []uint32) error { w.buf1.Reset() l := w.f.pos - startPos - 4 if l > math.MaxUint32 { - return errors.Errorf("label index size exceeds 4 bytes: %d", l) + return fmt.Errorf("label index size exceeds 4 bytes: %d", l) } w.buf1.PutBE32int(int(l)) if err := w.writeAt(w.buf1.Get(), startPos); err != nil { @@ -704,7 +703,7 @@ func (w *Writer) writeLabelIndexesOffsetTable() error { w.buf1.Reset() l := w.f.pos - startPos - 4 if l > math.MaxUint32 { - return errors.Errorf("label indexes offset table size exceeds 4 bytes: %d", l) + return fmt.Errorf("label indexes offset table size exceeds 4 bytes: %d", l) } w.buf1.PutBE32int(int(l)) if err := w.writeAt(w.buf1.Get(), startPos); err != nil { @@ -785,7 +784,7 @@ func (w *Writer) writePostingsOffsetTable() error { w.buf1.Reset() l := w.f.pos - startPos - 4 if l > math.MaxUint32 { - return errors.Errorf("postings offset table size exceeds 4 bytes: %d", l) + return fmt.Errorf("postings offset table size exceeds 4 bytes: %d", l) } w.buf1.PutBE32int(int(l)) if err := w.writeAt(w.buf1.Get(), startPos); err != nil { @@ -839,7 +838,7 @@ func (w *Writer) writePostingsToTmpFiles() error { d.ConsumePadding() startPos := w.toc.LabelIndices - uint64(d.Len()) if startPos%16 != 0 { - return errors.Errorf("series not 16-byte aligned at %d", startPos) + return fmt.Errorf("series not 16-byte aligned at %d", startPos) } offsets = append(offsets, uint32(startPos/16)) // Skip to next series. @@ -923,7 +922,7 @@ func (w *Writer) writePostingsToTmpFiles() error { // Symbol numbers are in order, so the strings will also be in order. slices.Sort(values) for _, v := range values { - value, err := w.symbols.Lookup(w.ctx, v) + value, err := w.symbols.Lookup(v) if err != nil { return err } @@ -964,7 +963,7 @@ func (w *Writer) writePosting(name, value string, offs []uint32) error { for _, off := range offs { if off > (1<<32)-1 { - return errors.Errorf("series offset %d exceeds 4 bytes", off) + return fmt.Errorf("series offset %d exceeds 4 bytes", off) } w.buf1.PutBE32(off) } @@ -973,7 +972,7 @@ func (w *Writer) writePosting(name, value string, offs []uint32) error { l := w.buf1.Len() // We convert to uint to make code compile on 32-bit systems, as math.MaxUint32 doesn't fit into int there. if uint(l) > math.MaxUint32 { - return errors.Errorf("posting size exceeds 4 bytes: %d", l) + return fmt.Errorf("posting size exceeds 4 bytes: %d", l) } w.buf2.PutBE32int(l) w.buf1.PutHash(w.crc32) @@ -1000,7 +999,7 @@ func (w *Writer) writePostings() error { return err } if uint64(n) != w.fP.pos { - return errors.Errorf("wrote %d bytes to posting temporary file, but only read back %d", w.fP.pos, n) + return fmt.Errorf("wrote %d bytes to posting temporary file, but only read back %d", w.fP.pos, n) } w.f.pos += uint64(n) @@ -1135,26 +1134,26 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { // Verify header. 
if r.b.Len() < HeaderLen { - return nil, errors.Wrap(encoding.ErrInvalidSize, "index header") + return nil, fmt.Errorf("index header: %w", encoding.ErrInvalidSize) } if m := binary.BigEndian.Uint32(r.b.Range(0, 4)); m != MagicIndex { - return nil, errors.Errorf("invalid magic number %x", m) + return nil, fmt.Errorf("invalid magic number %x", m) } r.version = int(r.b.Range(4, 5)[0]) if r.version != FormatV1 && r.version != FormatV2 { - return nil, errors.Errorf("unknown index file version %d", r.version) + return nil, fmt.Errorf("unknown index file version %d", r.version) } var err error r.toc, err = NewTOCFromByteSlice(b) if err != nil { - return nil, errors.Wrap(err, "read TOC") + return nil, fmt.Errorf("read TOC: %w", err) } r.symbols, err = NewSymbols(r.b, r.version, int(r.toc.Symbols)) if err != nil { - return nil, errors.Wrap(err, "read symbols") + return nil, fmt.Errorf("read symbols: %w", err) } if r.version == FormatV1 { @@ -1169,7 +1168,7 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { r.postingsV1[string(name)][string(value)] = off return nil }); err != nil { - return nil, errors.Wrap(err, "read postings table") + return nil, fmt.Errorf("read postings table: %w", err) } } else { var lastName, lastValue []byte @@ -1197,7 +1196,7 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { valueCount++ return nil }); err != nil { - return nil, errors.Wrap(err, "read postings table") + return nil, fmt.Errorf("read postings table: %w", err) } if lastName != nil { r.postings[string(lastName)] = append(r.postings[string(lastName)], postingOffset{value: string(lastValue), off: lastOff}) @@ -1217,7 +1216,7 @@ func newReader(b ByteSlice, c io.Closer) (*Reader, error) { } off, err := r.symbols.ReverseLookup(k) if err != nil { - return nil, errors.Wrap(err, "reverse symbol lookup") + return nil, fmt.Errorf("reverse symbol lookup: %w", err) } r.nameSymbols[off] = k } @@ -1252,7 +1251,7 @@ func (r *Reader) PostingsRanges() (map[labels.Label]Range, error) { } return nil }); err != nil { - return nil, errors.Wrap(err, "read postings table") + return nil, fmt.Errorf("read postings table: %w", err) } return m, nil } @@ -1295,21 +1294,18 @@ func NewSymbols(bs ByteSlice, version, off int) (*Symbols, error) { return s, nil } -func (s Symbols) Lookup(ctx context.Context, o uint32) (string, error) { +func (s Symbols) Lookup(o uint32) (string, error) { d := encoding.Decbuf{ B: s.bs.Range(0, s.bs.Len()), } if s.version == FormatV2 { if int(o) >= s.seen { - return "", errors.Errorf("unknown symbol offset %d", o) + return "", fmt.Errorf("unknown symbol offset %d", o) } d.Skip(s.offsets[int(o/symbolFactor)]) // Walk until we find the one we want. 
for i := o - (o / symbolFactor * symbolFactor); i > 0; i-- { - if ctx.Err() != nil { - return "", ctx.Err() - } d.UvarintBytes() } } else { @@ -1324,7 +1320,7 @@ func (s Symbols) Lookup(ctx context.Context, o uint32) (string, error) { func (s Symbols) ReverseLookup(sym string) (uint32, error) { if len(s.offsets) == 0 { - return 0, errors.Errorf("unknown symbol %q - no symbols", sym) + return 0, fmt.Errorf("unknown symbol %q - no symbols", sym) } i := sort.Search(len(s.offsets), func(i int) bool { // Any decoding errors here will be lost, however @@ -1357,7 +1353,7 @@ func (s Symbols) ReverseLookup(sym string) (uint32, error) { return 0, d.Err() } if lastSymbol != sym { - return 0, errors.Errorf("unknown symbol %q", sym) + return 0, fmt.Errorf("unknown symbol %q", sym) } if s.version == FormatV2 { return uint32(res), nil @@ -1416,7 +1412,7 @@ func ReadPostingsOffsetTable(bs ByteSlice, off uint64, f func(name, value []byte offsetPos := startLen - d.Len() if keyCount := d.Uvarint(); keyCount != 2 { - return errors.Errorf("unexpected number of keys for postings offset table %d", keyCount) + return fmt.Errorf("unexpected number of keys for postings offset table %d", keyCount) } name := d.UvarintBytes() value := d.UvarintBytes() @@ -1441,7 +1437,7 @@ func (r *Reader) lookupSymbol(ctx context.Context, o uint32) (string, error) { if s, ok := r.nameSymbols[o]; ok { return s, nil } - return r.symbols.Lookup(ctx, o) + return r.symbols.Lookup(o) } // Symbols returns an iterator over the symbols that exist within the index. @@ -1468,10 +1464,10 @@ func (r *Reader) SortedLabelValues(ctx context.Context, name string, matchers .. // LabelValues returns value tuples that exist for the given label name. // It is not safe to use the return value beyond the lifetime of the byte slice // passed into the Reader. -// TODO(replay): Support filtering by matchers +// TODO(replay): Support filtering by matchers. func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { if len(matchers) > 0 { - return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers) + return nil, fmt.Errorf("matchers parameter is not implemented: %+v", matchers) } if r.version == FormatV1 { @@ -1519,7 +1515,7 @@ func (r *Reader) LabelValues(ctx context.Context, name string, matchers ...*labe d.Uvarint64() // Offset. 
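The Symbols.Lookup change above drops the context parameter but keeps the sampled-offset design: only every symbolFactor-th symbol offset is held in memory, so a lookup jumps to the nearest sampled entry and walks forward the remainder. A simplified sketch using a string slice instead of varint decoding (symbolFactor = 32 mirrors the index code, but treat the constant and the helper names as assumptions of this sketch):

package main

import "fmt"

const symbolFactor = 32

// lookup jumps to the sampled offset, then walks forward o % symbolFactor
// entries, just like the loop in Symbols.Lookup (which skips varint-encoded
// byte ranges rather than slice positions).
func lookup(symbols []string, offsets []int, o int) (string, error) {
	if o >= len(symbols) {
		return "", fmt.Errorf("unknown symbol offset %d", o)
	}
	pos := offsets[o/symbolFactor] // jump to the nearest sampled entry
	// Walk until we find the one we want.
	for i := o - (o/symbolFactor)*symbolFactor; i > 0; i-- {
		pos++
	}
	return symbols[pos], nil
}

func main() {
	syms := make([]string, 100)
	offs := make([]int, 0, 4)
	for i := range syms {
		syms[i] = fmt.Sprintf("sym%03d", i)
		if i%symbolFactor == 0 {
			offs = append(offs, i) // keep only every symbolFactor-th offset
		}
	}
	fmt.Println(lookup(syms, offs, 70)) // sym070 <nil>
}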
} if d.Err() != nil { - return nil, errors.Wrap(d.Err(), "get postings offset entry") + return nil, fmt.Errorf("get postings offset entry: %w", d.Err()) } return values, ctx.Err() @@ -1545,12 +1541,12 @@ func (r *Reader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([ d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable) buf := d.Get() if d.Err() != nil { - return nil, errors.Wrap(d.Err(), "get buffer for series") + return nil, fmt.Errorf("get buffer for series: %w", d.Err()) } offsets, err := r.dec.LabelNamesOffsetsFor(buf) if err != nil { - return nil, errors.Wrap(err, "get label name offsets") + return nil, fmt.Errorf("get label name offsets: %w", err) } for _, off := range offsets { offsetsMap[off] = struct{}{} @@ -1562,7 +1558,7 @@ func (r *Reader) LabelNamesFor(ctx context.Context, ids ...storage.SeriesRef) ([ for off := range offsetsMap { name, err := r.lookupSymbol(ctx, off) if err != nil { - return nil, errors.Wrap(err, "lookup symbol in LabelNamesFor") + return nil, fmt.Errorf("lookup symbol in LabelNamesFor: %w", err) } names = append(names, name) } @@ -1583,7 +1579,7 @@ func (r *Reader) LabelValueFor(ctx context.Context, id storage.SeriesRef, label d := encoding.NewDecbufUvarintAt(r.b, int(offset), castagnoliTable) buf := d.Get() if d.Err() != nil { - return "", errors.Wrap(d.Err(), "label values for") + return "", fmt.Errorf("label values for: %w", d.Err()) } value, err := r.dec.LabelValueFor(ctx, buf, label) @@ -1610,7 +1606,11 @@ func (r *Reader) Series(id storage.SeriesRef, builder *labels.ScratchBuilder, ch if d.Err() != nil { return d.Err() } - return errors.Wrap(r.dec.Series(d.Get(), builder, chks), "read series") + err := r.dec.Series(d.Get(), builder, chks) + if err != nil { + return fmt.Errorf("read series: %w", err) + } + return nil } func (r *Reader) Postings(ctx context.Context, name string, values ...string) (Postings, error) { @@ -1629,7 +1629,7 @@ func (r *Reader) Postings(ctx context.Context, name string, values ...string) (P d := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable) _, p, err := r.dec.Postings(d.Get()) if err != nil { - return nil, errors.Wrap(err, "decode postings") + return nil, fmt.Errorf("decode postings: %w", err) } res = append(res, p) } @@ -1691,7 +1691,7 @@ func (r *Reader) Postings(ctx context.Context, name string, values ...string) (P d2 := encoding.NewDecbufAt(r.b, int(postingsOff), castagnoliTable) _, p, err := r.dec.Postings(d2.Get()) if err != nil { - return nil, errors.Wrap(err, "decode postings") + return nil, fmt.Errorf("decode postings: %w", err) } res = append(res, p) } @@ -1707,10 +1707,10 @@ func (r *Reader) Postings(ctx context.Context, name string, values ...string) (P } } if d.Err() != nil { - return nil, errors.Wrap(d.Err(), "get postings offset entry") + return nil, fmt.Errorf("get postings offset entry: %w", d.Err()) } if ctx.Err() != nil { - return nil, errors.Wrap(ctx.Err(), "get postings offset entry") + return nil, fmt.Errorf("get postings offset entry: %w", ctx.Err()) } } @@ -1729,10 +1729,10 @@ func (r *Reader) Size() int64 { } // LabelNames returns all the unique label names present in the index. -// TODO(twilkie) implement support for matchers +// TODO(twilkie) implement support for matchers. 
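A recurring change in this patch swaps github.com/pkg/errors for the standard library. The behavioral point is that fmt.Errorf's %w verb keeps wrapped sentinel errors reachable through errors.Is and errors.As, while the removed errors.Errorf produced a flat error with no unwrap chain. A minimal, self-contained sketch (the sentinel below stands in for encoding.ErrInvalidSize):

package main

import (
	"errors"
	"fmt"
)

// errInvalidSize stands in for encoding.ErrInvalidSize from the patch.
var errInvalidSize = errors.New("invalid size")

func readHeader() error {
	// fmt.Errorf with %w wraps the sentinel, like the converted call sites.
	return fmt.Errorf("index header: %w", errInvalidSize)
}

func main() {
	err := readHeader()
	fmt.Println(errors.Is(err, errInvalidSize)) // true: unwrapping still works
	fmt.Println(err)                            // index header: invalid size
}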
func (r *Reader) LabelNames(_ context.Context, matchers ...*labels.Matcher) ([]string, error) { if len(matchers) > 0 { - return nil, errors.Errorf("matchers parameter is not implemented: %+v", matchers) + return nil, fmt.Errorf("matchers parameter is not implemented: %+v", matchers) } labelNames := make([]string, 0, len(r.postings)) @@ -1803,7 +1803,7 @@ func (dec *Decoder) LabelNamesOffsetsFor(b []byte) ([]uint32, error) { _ = d.Uvarint() // skip the label value if d.Err() != nil { - return nil, errors.Wrap(d.Err(), "read series label offsets") + return nil, fmt.Errorf("read series label offsets: %w", d.Err()) } } @@ -1820,18 +1820,18 @@ func (dec *Decoder) LabelValueFor(ctx context.Context, b []byte, label string) ( lvo := uint32(d.Uvarint()) if d.Err() != nil { - return "", errors.Wrap(d.Err(), "read series label offsets") + return "", fmt.Errorf("read series label offsets: %w", d.Err()) } ln, err := dec.LookupSymbol(ctx, lno) if err != nil { - return "", errors.Wrap(err, "lookup label name") + return "", fmt.Errorf("lookup label name: %w", err) } if ln == label { lv, err := dec.LookupSymbol(ctx, lvo) if err != nil { - return "", errors.Wrap(err, "lookup label value") + return "", fmt.Errorf("lookup label value: %w", err) } return lv, nil @@ -1856,16 +1856,16 @@ func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, chks *[]chu lvo := uint32(d.Uvarint()) if d.Err() != nil { - return errors.Wrap(d.Err(), "read series label offsets") + return fmt.Errorf("read series label offsets: %w", d.Err()) } ln, err := dec.LookupSymbol(context.TODO(), lno) if err != nil { - return errors.Wrap(err, "lookup label name") + return fmt.Errorf("lookup label name: %w", err) } lv, err := dec.LookupSymbol(context.TODO(), lvo) if err != nil { - return errors.Wrap(err, "lookup label value") + return fmt.Errorf("lookup label value: %w", err) } builder.Add(ln, lv) @@ -1897,7 +1897,7 @@ func (dec *Decoder) Series(b []byte, builder *labels.ScratchBuilder, chks *[]chu t0 = maxt if d.Err() != nil { - return errors.Wrapf(d.Err(), "read meta for chunk %d", i) + return fmt.Errorf("read meta for chunk %d: %w", i, d.Err()) } *chks = append(*chks, chunks.Meta{ diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go index f79a8d4cf..c83957427 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go @@ -17,12 +17,12 @@ import ( "container/heap" "context" "encoding/binary" + "fmt" "runtime" "sort" "strings" "sync" - "github.com/pkg/errors" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" @@ -927,7 +927,7 @@ func (h *postingsWithIndexHeap) next() error { } if err := pi.p.Err(); err != nil { - return errors.Wrapf(err, "postings %d", pi.index) + return fmt.Errorf("postings %d: %w", pi.index, err) } h.popIndex() return nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postingsstats.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postingsstats.go index 70622b3d2..0ad4e0857 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postingsstats.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postingsstats.go @@ -63,6 +63,15 @@ func (m *maxHeap) push(item Stat) { } func (m *maxHeap) get() []Stat { - slices.SortFunc(m.Items, func(a, b Stat) int { return int(b.Count - a.Count) }) + slices.SortFunc(m.Items, func(a, b Stat) int { + switch { + case b.Count < a.Count: + return -1 + case 
b.Count > a.Count: + return 1 + default: + return 0 + } + }) return m.Items } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go index 45827889e..7f2110fa6 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go @@ -17,7 +17,10 @@ import ( "fmt" "sort" + "github.com/oklog/ulid" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/chunks" "github.com/prometheus/prometheus/tsdb/tombstones" ) @@ -111,22 +114,27 @@ type OOORangeHead struct { // the time range of the query and having preexisting pointers to the first // and last timestamp helps with that. mint, maxt int64 + + isoState *oooIsolationState } -func NewOOORangeHead(head *Head, mint, maxt int64) *OOORangeHead { +func NewOOORangeHead(head *Head, mint, maxt int64, minRef chunks.ChunkDiskMapperRef) *OOORangeHead { + isoState := head.oooIso.TrackReadAfter(minRef) + return &OOORangeHead{ - head: head, - mint: mint, - maxt: maxt, + head: head, + mint: mint, + maxt: maxt, + isoState: isoState, } } func (oh *OOORangeHead) Index() (IndexReader, error) { - return NewOOOHeadIndexReader(oh.head, oh.mint, oh.maxt), nil + return NewOOOHeadIndexReader(oh.head, oh.mint, oh.maxt, oh.isoState.minRef), nil } func (oh *OOORangeHead) Chunks() (ChunkReader, error) { - return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt), nil + return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt, oh.isoState), nil } func (oh *OOORangeHead) Tombstones() (tombstones.Reader, error) { @@ -135,13 +143,13 @@ func (oh *OOORangeHead) Tombstones() (tombstones.Reader, error) { return tombstones.NewMemTombstones(), nil } +var oooRangeHeadULID = ulid.MustParse("0000000000XXXX000RANGEHEAD") + func (oh *OOORangeHead) Meta() BlockMeta { - var id [16]byte - copy(id[:], "____ooo_head____") return BlockMeta{ MinTime: oh.mint, MaxTime: oh.maxt, - ULID: id, + ULID: oooRangeHeadULID, Stats: BlockStats{ NumSeries: oh.head.NumSeries(), }, diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go index 198bc4f2f..440130f7d 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// nolint:revive // Many unused function arguments in this file by design. package tsdb import ( @@ -19,6 +18,7 @@ import ( "errors", "math" + "github.com/oklog/ulid" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/model/labels" @@ -38,26 +38,29 @@ var _ IndexReader = &OOOHeadIndexReader{} // decided to do this to avoid code duplication. // The only methods that change are the ones about getting Series and Postings. type OOOHeadIndexReader struct { - *headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible. + *headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementations as possible. 
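The comparator rewrites in this patch (maxHeap.get above, and refLessByMinTimeAndMinRef / lessByMinTimeAndMinRef just below) replace subtraction such as int(b.Count - a.Count) with explicit three-way comparisons, because subtracting and truncating to int can wrap around and report the wrong order for large values. A small sketch of the failure mode (illustrative only, not part of the patch):

package main

import (
	"fmt"
	"math"
	"slices"
)

// Buggy on wrap-around: int(b - a) is not a valid comparator for uint64.
func cmpBySubtraction(a, b uint64) int { return int(b - a) }

// The pattern the patch adopts: explicit comparisons, no overflow.
func cmpSafe(a, b uint64) int {
	switch {
	case b < a:
		return -1
	case b > a:
		return 1
	default:
		return 0
	}
}

func main() {
	counts := []uint64{1, math.MaxUint64}
	bad := slices.Clone(counts)
	good := slices.Clone(counts)
	slices.SortFunc(bad, cmpBySubtraction)
	slices.SortFunc(good, cmpSafe)
	fmt.Println(bad)  // [1 18446744073709551615]: wrapped subtraction inverts the order
	fmt.Println(good) // [18446744073709551615 1]: descending by count, as intended
}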
+ lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef } -func NewOOOHeadIndexReader(head *Head, mint, maxt int64) *OOOHeadIndexReader { +func NewOOOHeadIndexReader(head *Head, mint, maxt int64, lastGarbageCollectedMmapRef chunks.ChunkDiskMapperRef) *OOOHeadIndexReader { hr := &headIndexReader{ head: head, mint: mint, maxt: maxt, } - return &OOOHeadIndexReader{hr} + return &OOOHeadIndexReader{hr, lastGarbageCollectedMmapRef} } func (oh *OOOHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { - return oh.series(ref, builder, chks, 0) + return oh.series(ref, builder, chks, oh.lastGarbageCollectedMmapRef, 0) } -// The passed lastMmapRef tells upto what max m-map chunk that we can consider. -// If it is 0, it means all chunks need to be considered. -// If it is non-0, then the oooHeadChunk must not be considered. -func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta, lastMmapRef chunks.ChunkDiskMapperRef) error { +// lastGarbageCollectedMmapRef gives the last mmap chunk that may be undergoing garbage collection, so +// any chunk at or before this ref will not be considered. 0 disables this check. +// +// maxMmapRef tells up to which m-map chunk we can consider. If it is non-0, then +// the oooHeadChunk will not be considered. +func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta, lastGarbageCollectedMmapRef, maxMmapRef chunks.ChunkDiskMapperRef) error { s := oh.head.series.getByID(chunks.HeadSeriesRef(ref)) if s == nil { @@ -112,14 +115,14 @@ func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, builder *labels.Scra // so we can set the correct markers. if s.ooo.oooHeadChunk != nil { c := s.ooo.oooHeadChunk - if c.OverlapsClosedInterval(oh.mint, oh.maxt) && lastMmapRef == 0 { + if c.OverlapsClosedInterval(oh.mint, oh.maxt) && maxMmapRef == 0 { ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.ooo.oooMmappedChunks)))) addChunk(c.minTime, c.maxTime, ref) } } for i := len(s.ooo.oooMmappedChunks) - 1; i >= 0; i-- { c := s.ooo.oooMmappedChunks[i] - if c.OverlapsClosedInterval(oh.mint, oh.maxt) && (lastMmapRef == 0 || lastMmapRef.GreaterThanOrEqualTo(c.ref)) { + if c.OverlapsClosedInterval(oh.mint, oh.maxt) && (maxMmapRef == 0 || maxMmapRef.GreaterThanOrEqualTo(c.ref)) && (lastGarbageCollectedMmapRef == 0 || c.ref.GreaterThan(lastGarbageCollectedMmapRef)) { ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i))) addChunk(c.minTime, c.maxTime, ref) } @@ -178,17 +181,39 @@ type chunkMetaAndChunkDiskMapperRef struct { } func refLessByMinTimeAndMinRef(a, b chunkMetaAndChunkDiskMapperRef) int { - if a.meta.MinTime == b.meta.MinTime { - return int(a.meta.Ref - b.meta.Ref) + switch { + case a.meta.MinTime < b.meta.MinTime: + return -1 + case a.meta.MinTime > b.meta.MinTime: + return 1 + } + + switch { + case a.meta.Ref < b.meta.Ref: + return -1 + case a.meta.Ref > b.meta.Ref: + return 1 + default: + return 0 } - return int(a.meta.MinTime - b.meta.MinTime) } func lessByMinTimeAndMinRef(a, b chunks.Meta) int { - if a.MinTime == b.MinTime { - return int(a.Ref - b.Ref) + switch { + case a.MinTime < b.MinTime: + return -1 + case a.MinTime > b.MinTime: + return 1 + } + + switch { + case a.Ref < b.Ref: + return -1 + case a.Ref > b.Ref: + return 1 + default: + return 0 } - return int(a.MinTime - b.MinTime) } func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values 
...string) (index.Postings, error) { @@ -210,46 +235,51 @@ func (oh *OOOHeadIndexReader) Postings(ctx context.Context, name string, values type OOOHeadChunkReader struct { head *Head mint, maxt int64 + isoState *oooIsolationState } -func NewOOOHeadChunkReader(head *Head, mint, maxt int64) *OOOHeadChunkReader { +func NewOOOHeadChunkReader(head *Head, mint, maxt int64, isoState *oooIsolationState) *OOOHeadChunkReader { return &OOOHeadChunkReader{ - head: head, - mint: mint, - maxt: maxt, + head: head, + mint: mint, + maxt: maxt, + isoState: isoState, } } -func (cr OOOHeadChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { +func (cr OOOHeadChunkReader) ChunkOrIterable(meta chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { sid, _ := chunks.HeadChunkRef(meta.Ref).Unpack() s := cr.head.series.getByID(sid) // This means that the series has been garbage collected. if s == nil { - return nil, storage.ErrNotFound + return nil, nil, storage.ErrNotFound } s.Lock() if s.ooo == nil { // There is no OOO data for this series. s.Unlock() - return nil, storage.ErrNotFound + return nil, nil, storage.ErrNotFound } - c, err := s.oooMergedChunk(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt) + mc, err := s.oooMergedChunks(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt) s.Unlock() if err != nil { - return nil, err + return nil, nil, err } // This means that the query range did not overlap with the requested chunk. - if len(c.chunks) == 0 { - return nil, storage.ErrNotFound + if len(mc.chunkIterables) == 0 { + return nil, nil, storage.ErrNotFound } - return c, nil + return nil, mc, nil } func (cr OOOHeadChunkReader) Close() error { + if cr.isoState != nil { + cr.isoState.Close() + } return nil } @@ -284,7 +314,7 @@ func NewOOOCompactionHead(ctx context.Context, head *Head) (*OOOCompactionHead, ch.lastWBLFile = lastWBLFile } - ch.oooIR = NewOOOHeadIndexReader(head, math.MinInt64, math.MaxInt64) + ch.oooIR = NewOOOHeadIndexReader(head, math.MinInt64, math.MaxInt64, 0) n, v := index.AllPostingsKey() // TODO: verify this gets only ooo samples. @@ -343,20 +373,20 @@ func (ch *OOOCompactionHead) Index() (IndexReader, error) { } func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) { - return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt), nil + return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt, nil), nil } func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) { return tombstones.NewMemTombstones(), nil } +var oooCompactionHeadULID = ulid.MustParse("0000000000XX000COMPACTHEAD") + func (ch *OOOCompactionHead) Meta() BlockMeta { - var id [16]byte - copy(id[:], "copy(id[:], \"ooo_compact_head\")") return BlockMeta{ MinTime: ch.mint, MaxTime: ch.maxt, - ULID: id, + ULID: oooCompactionHeadULID, Stats: BlockStats{ NumSeries: uint64(len(ch.postings)), }, @@ -369,7 +399,7 @@ func (ch *OOOCompactionHead) Meta() BlockMeta { // Only the methods of the BlockReader interface are valid for the cloned OOOCompactionHead. 
func (ch *OOOCompactionHead) CloneForTimeRange(mint, maxt int64) *OOOCompactionHead { return &OOOCompactionHead{ - oooIR: NewOOOHeadIndexReader(ch.oooIR.head, mint, maxt), + oooIR: NewOOOHeadIndexReader(ch.oooIR.head, mint, maxt, 0), lastMmapRef: ch.lastMmapRef, postings: ch.postings, chunkRange: ch.chunkRange, @@ -411,7 +441,7 @@ func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.P } func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, builder *labels.ScratchBuilder, chks *[]chunks.Meta) error { - return ir.ch.oooIR.series(ref, builder, chks, ir.ch.lastMmapRef) + return ir.ch.oooIR.series(ref, builder, chks, 0, ir.ch.lastMmapRef) } func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(_ context.Context, name string, matchers ...*labels.Matcher) ([]string, error) { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_isolation.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_isolation.go new file mode 100644 index 000000000..3e3e165a0 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_isolation.go @@ -0,0 +1,79 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tsdb + +import ( + "container/list" + "sync" + + "github.com/prometheus/prometheus/tsdb/chunks" +) + +type oooIsolation struct { + mtx sync.RWMutex + openReads *list.List +} + +type oooIsolationState struct { + i *oooIsolation + e *list.Element + + minRef chunks.ChunkDiskMapperRef +} + +func newOOOIsolation() *oooIsolation { + return &oooIsolation{ + openReads: list.New(), + } +} + +// HasOpenReadsAtOrBefore returns true if this oooIsolation is aware of any reads that use +// chunks with reference at or before ref. +func (i *oooIsolation) HasOpenReadsAtOrBefore(ref chunks.ChunkDiskMapperRef) bool { + i.mtx.RLock() + defer i.mtx.RUnlock() + + for e := i.openReads.Front(); e != nil; e = e.Next() { + s := e.Value.(*oooIsolationState) + + if ref.GreaterThan(s.minRef) { + return true + } + } + + return false +} + +// TrackReadAfter records a read that uses chunks with reference after minRef. +// +// The caller must ensure that the returned oooIsolationState is eventually closed when +// the read is complete. 
+func (i *oooIsolation) TrackReadAfter(minRef chunks.ChunkDiskMapperRef) *oooIsolationState { + s := &oooIsolationState{ + i: i, + minRef: minRef, + } + + i.mtx.Lock() + s.e = i.openReads.PushBack(s) + i.mtx.Unlock() + + return s +} + +func (s oooIsolationState) Close() { + s.i.mtx.Lock() + s.i.openReads.Remove(s.e) + s.i.mtx.Unlock() +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index a832c0d1b..6584d7da0 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -632,36 +632,42 @@ func (b *blockBaseSeriesSet) Warnings() annotations.Annotations { return nil } // populateWithDelGenericSeriesIterator assumes that chunks that would be fully // removed by intervals are filtered out in previous phase. // -// On each iteration currChkMeta is available. If currDelIter is not nil, it -// means that the chunk iterator in currChkMeta is invalid and a chunk rewrite -// is needed, for which currDelIter should be used. +// On each iteration currMeta is available. If currDelIter is not nil, it +// means that the chunk in currMeta is invalid and a chunk rewrite is needed, +// for which currDelIter should be used. type populateWithDelGenericSeriesIterator struct { blockID ulid.ULID - chunks ChunkReader - // chks are expected to be sorted by minTime and should be related to + cr ChunkReader + // metas are expected to be sorted by minTime and should be related to // the same, single series. - chks []chunks.Meta + // It's possible for a single chunks.Meta to refer to multiple chunks. + // cr.ChunkOrIterable() would return an iterable and a nil chunk in this + // case. + metas []chunks.Meta - i int // Index into chks; -1 if not started yet. + i int // Index into metas; -1 if not started yet. err error bufIter DeletedIterator // Retained for memory re-use. currDelIter may point here. intervals tombstones.Intervals currDelIter chunkenc.Iterator - currChkMeta chunks.Meta + // currMeta is the current chunks.Meta from metas. currMeta.Chunk is set to + // the chunk returned from cr.ChunkOrIterable(). As that can return a nil + // chunk, currMeta.Chunk is not always guaranteed to be set. + currMeta chunks.Meta } func (p *populateWithDelGenericSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) { p.blockID = blockID - p.chunks = cr - p.chks = chks + p.cr = cr + p.metas = chks p.i = -1 p.err = nil // Note we don't touch p.bufIter.Iter; it is holding on to an iterator we might reuse in next(). p.bufIter.Intervals = p.bufIter.Intervals[:0] p.intervals = intervals p.currDelIter = nil - p.currChkMeta = chunks.Meta{} + p.currMeta = chunks.Meta{} } // If copyHeadChunk is true, then the head chunk (i.e. the in-memory chunk of the TSDB) @@ -669,43 +675,54 @@ func (p *populateWithDelGenericSeriesIterator) reset(blockID ulid.ULID, cr Chunk // However, if the deletion intervals overlap with the head chunk, then the head chunk is // not copied irrespective of copyHeadChunk because it will be re-encoded later anyway. 
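To show how the new isolation primitives in ooo_isolation.go above fit together, here is a hypothetical helper (not part of the patch) that would compile against the unexported types inside package tsdb:

package tsdb

import "github.com/prometheus/prometheus/tsdb/chunks"

// Editorial sketch: the read path registers the lowest chunk ref it may
// touch, and the GC path checks for open reads before truncating. The
// function itself is hypothetical; the API calls come from the patch.
func exampleOOOIsolationLifecycle(minRef, truncateRef chunks.ChunkDiskMapperRef) bool {
	iso := newOOOIsolation()

	// Read path: track the read; the caller must Close the state once the
	// query finishes (cf. OOOHeadChunkReader.Close above).
	state := iso.TrackReadAfter(minRef)
	defer state.Close()

	// GC path: refuse to drop m-mapped chunks at or before truncateRef
	// while an open read may still reference them.
	return iso.HasOpenReadsAtOrBefore(truncateRef)
}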
func (p *populateWithDelGenericSeriesIterator) next(copyHeadChunk bool) bool { - if p.err != nil || p.i >= len(p.chks)-1 { + if p.err != nil || p.i >= len(p.metas)-1 { return false } p.i++ - p.currChkMeta = p.chks[p.i] + p.currMeta = p.metas[p.i] p.bufIter.Intervals = p.bufIter.Intervals[:0] for _, interval := range p.intervals { - if p.currChkMeta.OverlapsClosedInterval(interval.Mint, interval.Maxt) { + if p.currMeta.OverlapsClosedInterval(interval.Mint, interval.Maxt) { p.bufIter.Intervals = p.bufIter.Intervals.Add(interval) } } - hcr, ok := p.chunks.(*headChunkReader) + hcr, ok := p.cr.(*headChunkReader) + var iterable chunkenc.Iterable if ok && copyHeadChunk && len(p.bufIter.Intervals) == 0 { // ChunkWithCopy will copy the head chunk. var maxt int64 - p.currChkMeta.Chunk, maxt, p.err = hcr.ChunkWithCopy(p.currChkMeta) + p.currMeta.Chunk, maxt, p.err = hcr.ChunkWithCopy(p.currMeta) // For the in-memory head chunk the index reader sets maxt as MaxInt64. We fix it here. - p.currChkMeta.MaxTime = maxt + p.currMeta.MaxTime = maxt } else { - p.currChkMeta.Chunk, p.err = p.chunks.Chunk(p.currChkMeta) + p.currMeta.Chunk, iterable, p.err = p.cr.ChunkOrIterable(p.currMeta) } + if p.err != nil { - p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currChkMeta.Ref, p.blockID.String()) + p.err = errors.Wrapf(p.err, "cannot populate chunk %d from block %s", p.currMeta.Ref, p.blockID.String()) return false } - if len(p.bufIter.Intervals) == 0 { - // If there is no overlap with deletion intervals, we can take chunk as it is. - p.currDelIter = nil + // Use the single chunk if possible. + if p.currMeta.Chunk != nil { + if len(p.bufIter.Intervals) == 0 { + // If there is no overlap with deletion intervals and a single chunk is + // returned, we can take chunk as it is. + p.currDelIter = nil + return true + } + // Otherwise we need to iterate over the samples in the single chunk + // and create new chunks. + p.bufIter.Iter = p.currMeta.Chunk.Iterator(p.bufIter.Iter) + p.currDelIter = &p.bufIter return true } - // We don't want the full chunk, take just a part of it. - p.bufIter.Iter = p.currChkMeta.Chunk.Iterator(p.bufIter.Iter) + // Otherwise, use the iterable to create an iterator. + p.bufIter.Iter = iterable.Iterator(p.bufIter.Iter) p.currDelIter = &p.bufIter return true } @@ -765,7 +782,7 @@ func (p *populateWithDelSeriesIterator) Next() chunkenc.ValueType { if p.currDelIter != nil { p.curr = p.currDelIter } else { - p.curr = p.currChkMeta.Chunk.Iterator(p.curr) + p.curr = p.currMeta.Chunk.Iterator(p.curr) } if valueType := p.curr.Next(); valueType != chunkenc.ValNone { return valueType @@ -817,22 +834,69 @@ func (p *populateWithDelSeriesIterator) Err() error { type populateWithDelChunkSeriesIterator struct { populateWithDelGenericSeriesIterator - curr chunks.Meta + // currMetaWithChunk is current meta with its chunk field set. This meta + // is guaranteed to map to a single chunk. This differs from + // populateWithDelGenericSeriesIterator.currMeta as that + // could refer to multiple chunks. + currMetaWithChunk chunks.Meta + + // chunksFromIterable stores the chunks created from iterating through + // the iterable returned by cr.ChunkOrIterable() (with deleted samples + // removed). 
+ chunksFromIterable []chunks.Meta + chunksFromIterableIdx int } func (p *populateWithDelChunkSeriesIterator) reset(blockID ulid.ULID, cr ChunkReader, chks []chunks.Meta, intervals tombstones.Intervals) { p.populateWithDelGenericSeriesIterator.reset(blockID, cr, chks, intervals) - p.curr = chunks.Meta{} + p.currMetaWithChunk = chunks.Meta{} + p.chunksFromIterable = p.chunksFromIterable[:0] + p.chunksFromIterableIdx = -1 } func (p *populateWithDelChunkSeriesIterator) Next() bool { - if !p.next(true) { - return false + if p.currMeta.Chunk == nil { + // If we've been creating chunks from the iterable, check if there are + // any more chunks to iterate through. + if p.chunksFromIterableIdx < len(p.chunksFromIterable)-1 { + p.chunksFromIterableIdx++ + p.currMetaWithChunk = p.chunksFromIterable[p.chunksFromIterableIdx] + return true + } } - p.curr = p.currChkMeta - if p.currDelIter == nil { - return true + + // Move to the next chunk/deletion iterator. + // This is a for loop because if the current p.currDelIter returns no samples + // (which means a chunk won't be created), there still might be more + // samples/chunks from the rest of p.metas. + for p.next(true) { + if p.currDelIter == nil { + p.currMetaWithChunk = p.currMeta + return true + } + + if p.currMeta.Chunk != nil { + // If ChunkOrIterable() returned a non-nil chunk, the samples in + // p.currDelIter will only form one chunk, as the only change + // p.currDelIter might make is deleting some samples. + if p.populateCurrForSingleChunk() { + return true + } + } else { + // If ChunkOrIterable() returned an iterable, multiple chunks may be + // created from the samples in p.currDelIter. + if p.populateChunksFromIterable() { + return true + } + } + } + return false +} + +// populateCurrForSingleChunk sets the fields within p.currMetaWithChunk. This +// should be called if the samples in p.currDelIter only form one chunk. +func (p *populateWithDelChunkSeriesIterator) populateCurrForSingleChunk() bool { valueType := p.currDelIter.Next() if valueType == chunkenc.ValNone { if err := p.currDelIter.Err(); err != nil { @@ -840,9 +904,9 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { } return false } - p.curr.MinTime = p.currDelIter.AtT() + p.currMetaWithChunk.MinTime = p.currDelIter.AtT() - // Re-encode the chunk if iterator is provider. This means that it has + // Re-encode the chunk if iterator is provided. This means that it has // some samples to be deleted or chunk is opened. var ( newChunk chunkenc.Chunk @@ -900,7 +964,7 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { } } default: - err = fmt.Errorf("populateWithDelChunkSeriesIterator: value type %v unsupported", valueType) + err = fmt.Errorf("populateCurrForSingleChunk: value type %v unsupported", valueType) } if err != nil { @@ -912,12 +976,127 @@ func (p *populateWithDelChunkSeriesIterator) Next() bool { return false } - p.curr.Chunk = newChunk - p.curr.MaxTime = t + p.currMetaWithChunk.Chunk = newChunk + p.currMetaWithChunk.MaxTime = t return true } -func (p *populateWithDelChunkSeriesIterator) At() chunks.Meta { return p.curr } +// populateChunksFromIterable reads the samples from currDelIter to create +// chunks for chunksFromIterable. It also sets p.currMetaWithChunk to the first +// chunk. 
+func (p *populateWithDelChunkSeriesIterator) populateChunksFromIterable() bool { + p.chunksFromIterable = p.chunksFromIterable[:0] + p.chunksFromIterableIdx = -1 + + firstValueType := p.currDelIter.Next() + if firstValueType == chunkenc.ValNone { + if err := p.currDelIter.Err(); err != nil { + p.err = errors.Wrap(err, "populateChunksFromIterable: no samples could be read") + return false + } + return false + } + + var ( + // t is the timestamp for the current sample. + t int64 + cmint int64 + cmaxt int64 + + currentChunk chunkenc.Chunk + + app chunkenc.Appender + + newChunk chunkenc.Chunk + recoded bool + + err error + ) + + prevValueType := chunkenc.ValNone + + for currentValueType := firstValueType; currentValueType != chunkenc.ValNone; currentValueType = p.currDelIter.Next() { + // Check if the encoding has changed (i.e. we need to create a new + // chunk as chunks can't have multiple encoding types). + // For the first sample, the following condition will always be true as + // ValNone != ValFloat | ValHistogram | ValFloatHistogram. + if currentValueType != prevValueType { + if prevValueType != chunkenc.ValNone { + p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt}) + } + cmint = p.currDelIter.AtT() + if currentChunk, err = currentValueType.NewChunk(); err != nil { + break + } + if app, err = currentChunk.Appender(); err != nil { + break + } + } + + switch currentValueType { + case chunkenc.ValFloat: + { + var v float64 + t, v = p.currDelIter.At() + app.Append(t, v) + } + case chunkenc.ValHistogram: + { + var v *histogram.Histogram + t, v = p.currDelIter.AtHistogram() + // No need to set prevApp as AppendHistogram will set the + // counter reset header for the appender that's returned. + newChunk, recoded, app, err = app.AppendHistogram(nil, t, v, false) + } + case chunkenc.ValFloatHistogram: + { + var v *histogram.FloatHistogram + t, v = p.currDelIter.AtFloatHistogram() + // No need to set prevApp as AppendHistogram will set the + // counter reset header for the appender that's returned. + newChunk, recoded, app, err = app.AppendFloatHistogram(nil, t, v, false) + } + } + + if err != nil { + break + } + + if newChunk != nil { + if !recoded { + p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt}) + } + currentChunk = newChunk + cmint = t + } + + cmaxt = t + prevValueType = currentValueType + } + + if err != nil { + p.err = errors.Wrap(err, "populateChunksFromIterable: error when writing new chunks") + return false + } + if err = p.currDelIter.Err(); err != nil { + p.err = errors.Wrap(err, "populateChunksFromIterable: currDelIter error when writing new chunks") + return false + } + + if prevValueType != chunkenc.ValNone { + p.chunksFromIterable = append(p.chunksFromIterable, chunks.Meta{Chunk: currentChunk, MinTime: cmint, MaxTime: cmaxt}) + } + + if len(p.chunksFromIterable) == 0 { + return false + } + + p.currMetaWithChunk = p.chunksFromIterable[0] + p.chunksFromIterableIdx = 0 + return true +} + +func (p *populateWithDelChunkSeriesIterator) At() chunks.Meta { return p.currMetaWithChunk } // blockSeriesSet allows to iterate over sorted, populated series with applied tombstones. // Series with all deleted chunks are still present as Series with no samples. 
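The interface change threaded through this file replaces ChunkReader.Chunk with ChunkOrIterable, which on success returns exactly one of a single chunk or an iterable (merged out-of-order chunks), never both non-nil. A hypothetical consumer sketch of that contract, written against package tsdb (the helper function is mine; the interface and method names come from the patch):

package tsdb

import (
	"errors"

	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
)

// iteratorFor is an illustrative consumer of the ChunkOrIterable contract:
// exactly one of (chunk, iterable) is non-nil on success.
func iteratorFor(cr ChunkReader, meta chunks.Meta) (chunkenc.Iterator, error) {
	chk, iterable, err := cr.ChunkOrIterable(meta)
	switch {
	case err != nil:
		return nil, err
	case chk != nil:
		// The common case: a single chunk backs this meta.
		return chk.Iterator(nil), nil
	case iterable != nil:
		// OOO head reads may return several overlapping chunks merged
		// behind one iterable (see OOOHeadChunkReader.ChunkOrIterable).
		return iterable.Iterator(nil), nil
	default:
		return nil, errors.New("ChunkOrIterable returned neither a chunk nor an iterable")
	}
}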
@@ -1117,8 +1296,8 @@ func newNopChunkReader() ChunkReader { } } -func (cr nopChunkReader) Chunk(chunks.Meta) (chunkenc.Chunk, error) { - return cr.emptyChunk, nil +func (cr nopChunkReader) ChunkOrIterable(chunks.Meta) (chunkenc.Chunk, chunkenc.Iterable, error) { + return cr.emptyChunk, nil, nil } func (cr nopChunkReader) Close() error { return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go index 4cd51d46c..42a656dfe 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go @@ -16,10 +16,10 @@ package record import ( + "errors" + "fmt" "math" - "github.com/pkg/errors" - "github.com/prometheus/prometheus/model/histogram" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/textparse" @@ -229,7 +229,7 @@ func (d *Decoder) Series(rec []byte, series []RefSeries) ([]RefSeries, error) { return nil, dec.Err() } if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return series, nil } @@ -272,20 +272,19 @@ func (d *Decoder) Metadata(rec []byte, metadata []RefMetadata) ([]RefMetadata, e return nil, dec.Err() } if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return metadata, nil } // DecodeLabels decodes one set of labels from buf. func (d *Decoder) DecodeLabels(dec *encoding.Decbuf) labels.Labels { - // TODO: reconsider if this function could be pushed down into labels.Labels to be more efficient. d.builder.Reset() nLabels := dec.Uvarint() for i := 0; i < nLabels; i++ { - lName := dec.UvarintStr() - lValue := dec.UvarintStr() - d.builder.Add(lName, lValue) + lName := dec.UvarintBytes() + lValue := dec.UvarintBytes() + d.builder.UnsafeAddBytes(lName, lValue) } return d.builder.Labels() } @@ -304,6 +303,10 @@ func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) baseRef = dec.Be64() baseTime = dec.Be64int64() ) + // Allow 1 byte for each varint and 8 for the value; the output slice must be at least that big. 
+ if minSize := dec.Len() / (1 + 1 + 8); cap(samples) < minSize { + samples = make([]RefSample, 0, minSize) + } for len(dec.B) > 0 && dec.Err() == nil { dref := dec.Varint64() dtime := dec.Varint64() @@ -317,10 +320,10 @@ func (d *Decoder) Samples(rec []byte, samples []RefSample) ([]RefSample, error) } if dec.Err() != nil { - return nil, errors.Wrapf(dec.Err(), "decode error after %d samples", len(samples)) + return nil, fmt.Errorf("decode error after %d samples: %w", len(samples), dec.Err()) } if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return samples, nil } @@ -344,7 +347,7 @@ func (d *Decoder) Tombstones(rec []byte, tstones []tombstones.Stone) ([]tombston return nil, dec.Err() } if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return tstones, nil } @@ -382,10 +385,10 @@ func (d *Decoder) ExemplarsFromBuffer(dec *encoding.Decbuf, exemplars []RefExemp } if dec.Err() != nil { - return nil, errors.Wrapf(dec.Err(), "decode error after %d exemplars", len(exemplars)) + return nil, fmt.Errorf("decode error after %d exemplars: %w", len(exemplars), dec.Err()) } if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return exemplars, nil } @@ -410,10 +413,10 @@ func (d *Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMar } if dec.Err() != nil { - return nil, errors.Wrapf(dec.Err(), "decode error after %d mmap markers", len(markers)) + return nil, fmt.Errorf("decode error after %d mmap markers: %w", len(markers), dec.Err()) } if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return markers, nil } @@ -446,10 +449,10 @@ func (d *Decoder) HistogramSamples(rec []byte, histograms []RefHistogramSample) } if dec.Err() != nil { - return nil, errors.Wrapf(dec.Err(), "decode error after %d histograms", len(histograms)) + return nil, fmt.Errorf("decode error after %d histograms: %w", len(histograms), dec.Err()) } if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return histograms, nil } @@ -528,10 +531,10 @@ func (d *Decoder) FloatHistogramSamples(rec []byte, histograms []RefFloatHistogr } if dec.Err() != nil { - return nil, errors.Wrapf(dec.Err(), "decode error after %d histograms", len(histograms)) + return nil, fmt.Errorf("decode error after %d histograms: %w", len(histograms), dec.Err()) } if len(dec.B) > 0 { - return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return nil, fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return histograms, nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/repair.go b/vendor/github.com/prometheus/prometheus/tsdb/repair.go index 0c2e08791..081116454 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/repair.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/repair.go @@ -15,6 +15,7 @@ package tsdb import ( "encoding/json" + "fmt" "io" "os" "path/filepath" @@ -124,7 +125,7 @@ func readBogusMetaFile(dir string) (*BlockMeta, error) { return nil, err } if m.Version != metaVersion1 && 
m.Version != 2 { - return nil, errors.Errorf("unexpected meta file version %d", m.Version) + return nil, fmt.Errorf("unexpected meta file version %d", m.Version) } return &m, nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go b/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go index 94daf5195..4cea5005d 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tombstones/tombstones.go @@ -15,6 +15,7 @@ package tombstones import ( "encoding/binary" + "errors" "fmt" "hash" "hash/crc32" @@ -26,7 +27,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/encoding" @@ -109,17 +109,17 @@ func WriteFile(logger log.Logger, dir string, tr Reader) (int64, error) { bytes, err := Encode(tr) if err != nil { - return 0, errors.Wrap(err, "encoding tombstones") + return 0, fmt.Errorf("encoding tombstones: %w", err) } // Ignore first byte which is the format type. We do this for compatibility. if _, err := hash.Write(bytes[tombstoneFormatVersionSize:]); err != nil { - return 0, errors.Wrap(err, "calculating hash for tombstones") + return 0, fmt.Errorf("calculating hash for tombstones: %w", err) } n, err = f.Write(bytes) if err != nil { - return 0, errors.Wrap(err, "writing tombstones") + return 0, fmt.Errorf("writing tombstones: %w", err) } size += n @@ -161,7 +161,7 @@ func Encode(tr Reader) ([]byte, error) { func Decode(b []byte) (Reader, error) { d := &encoding.Decbuf{B: b} if flag := d.Byte(); flag != tombstoneFormatV1 { - return nil, errors.Errorf("invalid tombstone format %x", flag) + return nil, fmt.Errorf("invalid tombstone format %x", flag) } if d.Err() != nil { @@ -199,7 +199,7 @@ func ReadTombstones(dir string) (Reader, int64, error) { } if len(b) < tombstonesHeaderSize { - return nil, 0, errors.Wrap(encoding.ErrInvalidSize, "tombstones header") + return nil, 0, fmt.Errorf("tombstones header: %w", encoding.ErrInvalidSize) } d := &encoding.Decbuf{B: b[:len(b)-tombstonesCRCSize]} @@ -211,7 +211,7 @@ func ReadTombstones(dir string) (Reader, int64, error) { hash := newCRC32() // Ignore first byte which is the format type. 
if _, err := hash.Write(d.Get()[tombstoneFormatVersionSize:]); err != nil { - return nil, 0, errors.Wrap(err, "write to hash") + return nil, 0, fmt.Errorf("write to hash: %w", err) } if binary.BigEndian.Uint32(b[len(b)-tombstonesCRCSize:]) != hash.Sum32() { return nil, 0, errors.New("checksum did not match") diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker.go index 155f58641..fa939879c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/dir_locker.go @@ -14,13 +14,13 @@ package tsdbutil import ( + "errors" "fmt" "os" "path/filepath" "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" @@ -83,7 +83,7 @@ func (l *DirLocker) Lock() error { lockf, _, err := fileutil.Flock(l.path) if err != nil { - return errors.Wrap(err, "lock DB directory") + return fmt.Errorf("lock DB directory: %w", err) } l.releaser = lockf return nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go index 0327815c4..0847f81a8 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/tsdbutil/histogram.go @@ -116,7 +116,17 @@ func SetHistogramNotCounterReset(h *histogram.Histogram) *histogram.Histogram { return h } +func SetHistogramCounterReset(h *histogram.Histogram) *histogram.Histogram { + h.CounterResetHint = histogram.CounterReset + return h +} + func SetFloatHistogramNotCounterReset(h *histogram.FloatHistogram) *histogram.FloatHistogram { h.CounterResetHint = histogram.NotCounterReset return h } + +func SetFloatHistogramCounterReset(h *histogram.FloatHistogram) *histogram.FloatHistogram { + h.CounterResetHint = histogram.CounterReset + return h +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal.go index 3a410fb63..bc7db35bf 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal.go @@ -11,7 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// nolint:revive // Many unsued function arguments in this file by design. 
package tsdb import ( @@ -526,14 +525,14 @@ func (w *SegmentWAL) openSegmentFile(name string) (*os.File, error) { case err != nil: return nil, errors.Wrapf(err, "validate meta %q", f.Name()) case n != 8: - return nil, errors.Errorf("invalid header size %d in %q", n, f.Name()) + return nil, fmt.Errorf("invalid header size %d in %q", n, f.Name()) } if m := binary.BigEndian.Uint32(metab[:4]); m != WALMagic { - return nil, errors.Errorf("invalid magic header %x in %q", m, f.Name()) + return nil, fmt.Errorf("invalid magic header %x in %q", m, f.Name()) } if metab[4] != WALFormatDefault { - return nil, errors.Errorf("unknown WAL segment format %d in %q", metab[4], f.Name()) + return nil, fmt.Errorf("unknown WAL segment format %d in %q", metab[4], f.Name()) } hasError = false return f, nil @@ -1053,7 +1052,7 @@ func (e walCorruptionErr) Error() string { func (r *walReader) corruptionErr(s string, args ...interface{}) error { return walCorruptionErr{ - err: errors.Errorf(s, args...), + err: fmt.Errorf(s, args...), file: r.cur, lastOffset: r.lastOffset, } @@ -1125,7 +1124,7 @@ func (r *walReader) decodeSeries(flag byte, b []byte, res *[]record.RefSeries) e return dec.Err() } if len(dec.B) > 0 { - return errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return nil } @@ -1157,7 +1156,7 @@ func (r *walReader) decodeSamples(flag byte, b []byte, res *[]record.RefSample) return errors.Wrapf(dec.Err(), "decode error after %d samples", len(*res)) } if len(dec.B) > 0 { - return errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return nil } @@ -1177,7 +1176,7 @@ func (r *walReader) decodeDeletes(flag byte, b []byte, res *[]tombstones.Stone) return dec.Err() } if len(dec.B) > 0 { - return errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + return fmt.Errorf("unexpected %d bytes left in entry", len(dec.B)) } return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go index d64599c27..3d5b56da2 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/checkpoint.go @@ -15,6 +15,7 @@ package wlog import ( + "errors" "fmt" "io" "math" @@ -25,7 +26,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "golang.org/x/exp/slices" "github.com/prometheus/prometheus/tsdb/chunks" @@ -102,8 +102,8 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head { var sgmRange []SegmentRange dir, idx, err := LastCheckpoint(w.Dir()) - if err != nil && err != record.ErrNotFound { - return nil, errors.Wrap(err, "find last checkpoint") + if err != nil && !errors.Is(err, record.ErrNotFound) { + return nil, fmt.Errorf("find last checkpoint: %w", err) } last := idx + 1 if err == nil { @@ -119,7 +119,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head sgmRange = append(sgmRange, SegmentRange{Dir: w.Dir(), First: from, Last: to}) sgmReader, err = NewSegmentsRangeReader(sgmRange...) 
if err != nil { - return nil, errors.Wrap(err, "create segment reader") + return nil, fmt.Errorf("create segment reader: %w", err) } defer sgmReader.Close() } @@ -128,15 +128,15 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head cpdirtmp := cpdir + ".tmp" if err := os.RemoveAll(cpdirtmp); err != nil { - return nil, errors.Wrap(err, "remove previous temporary checkpoint dir") + return nil, fmt.Errorf("remove previous temporary checkpoint dir: %w", err) } if err := os.MkdirAll(cpdirtmp, 0o777); err != nil { - return nil, errors.Wrap(err, "create checkpoint dir") + return nil, fmt.Errorf("create checkpoint dir: %w", err) } cp, err := New(nil, nil, cpdirtmp, w.CompressionType()) if err != nil { - return nil, errors.Wrap(err, "open checkpoint") + return nil, fmt.Errorf("open checkpoint: %w", err) } // Ensures that an early return caused by an error doesn't leave any tmp files. @@ -174,7 +174,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head case record.Series: series, err = dec.Series(rec, series) if err != nil { - return nil, errors.Wrap(err, "decode series") + return nil, fmt.Errorf("decode series: %w", err) } // Drop irrelevant series in place. repl := series[:0] @@ -192,7 +192,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head case record.Samples: samples, err = dec.Samples(rec, samples) if err != nil { - return nil, errors.Wrap(err, "decode samples") + return nil, fmt.Errorf("decode samples: %w", err) } // Drop irrelevant samples in place. repl := samples[:0] @@ -210,7 +210,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head case record.HistogramSamples: histogramSamples, err = dec.HistogramSamples(rec, histogramSamples) if err != nil { - return nil, errors.Wrap(err, "decode histogram samples") + return nil, fmt.Errorf("decode histogram samples: %w", err) } // Drop irrelevant histogramSamples in place. repl := histogramSamples[:0] @@ -228,7 +228,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head case record.Tombstones: tstones, err = dec.Tombstones(rec, tstones) if err != nil { - return nil, errors.Wrap(err, "decode deletes") + return nil, fmt.Errorf("decode deletes: %w", err) } // Drop irrelevant tombstones in place. repl := tstones[:0] @@ -249,7 +249,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head case record.Exemplars: exemplars, err = dec.Exemplars(rec, exemplars) if err != nil { - return nil, errors.Wrap(err, "decode exemplars") + return nil, fmt.Errorf("decode exemplars: %w", err) } // Drop irrelevant exemplars in place. repl := exemplars[:0] @@ -266,7 +266,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head case record.Metadata: metadata, err := dec.Metadata(rec, metadata) if err != nil { - return nil, errors.Wrap(err, "decode metadata") + return nil, fmt.Errorf("decode metadata: %w", err) } // Only keep reference to the latest found metadata for each refID. repl := 0 @@ -292,7 +292,7 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head // Flush records in 1 MB increments. 
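Each record type in `Checkpoint` above drops irrelevant entries with the same `repl := xs[:0]` idiom: survivors are appended over the slice's own backing array, so filtering a batch costs no extra allocation. A standalone sketch of the idiom (the function and data are illustrative):

```go
package main

import "fmt"

// filterInPlace keeps only the elements satisfying keep, reusing the input
// slice's backing array exactly as the checkpoint code does. The input is
// clobbered, which is fine when the unfiltered batch is discarded anyway.
func filterInPlace(xs []int, keep func(int) bool) []int {
	repl := xs[:0]
	for _, x := range xs {
		if keep(x) {
			repl = append(repl, x)
		}
	}
	return repl
}

func main() {
	evens := filterInPlace([]int{1, 2, 3, 4, 5, 6}, func(x int) bool { return x%2 == 0 })
	fmt.Println(evens) // [2 4 6]
}
```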
if len(buf) > 1*1024*1024 { if err := cp.Log(recs...); err != nil { - return nil, errors.Wrap(err, "flush records") + return nil, fmt.Errorf("flush records: %w", err) } buf, recs = buf[:0], recs[:0] } @@ -300,12 +300,12 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head // If we hit any corruption during checkpointing, repairing is not an option. // The head won't know which series records are lost. if r.Err() != nil { - return nil, errors.Wrap(r.Err(), "read segments") + return nil, fmt.Errorf("read segments: %w", r.Err()) } // Flush remaining records. if err := cp.Log(recs...); err != nil { - return nil, errors.Wrap(err, "flush records") + return nil, fmt.Errorf("flush records: %w", err) } // Flush latest metadata records for each series. @@ -315,29 +315,29 @@ func Checkpoint(logger log.Logger, w *WL, from, to int, keep func(id chunks.Head latestMetadata = append(latestMetadata, m) } if err := cp.Log(enc.Metadata(latestMetadata, buf[:0])); err != nil { - return nil, errors.Wrap(err, "flush metadata records") + return nil, fmt.Errorf("flush metadata records: %w", err) } } if err := cp.Close(); err != nil { - return nil, errors.Wrap(err, "close checkpoint") + return nil, fmt.Errorf("close checkpoint: %w", err) } // Sync temporary directory before rename. df, err := fileutil.OpenDir(cpdirtmp) if err != nil { - return nil, errors.Wrap(err, "open temporary checkpoint directory") + return nil, fmt.Errorf("open temporary checkpoint directory: %w", err) } if err := df.Sync(); err != nil { df.Close() - return nil, errors.Wrap(err, "sync temporary checkpoint directory") + return nil, fmt.Errorf("sync temporary checkpoint directory: %w", err) } if err = df.Close(); err != nil { - return nil, errors.Wrap(err, "close temporary checkpoint directory") + return nil, fmt.Errorf("close temporary checkpoint directory: %w", err) } if err := fileutil.Replace(cpdirtmp, cpdir); err != nil { - return nil, errors.Wrap(err, "rename checkpoint directory") + return nil, fmt.Errorf("rename checkpoint directory: %w", err) } return stats, nil @@ -364,7 +364,7 @@ func listCheckpoints(dir string) (refs []checkpointRef, err error) { continue } if !fi.IsDir() { - return nil, errors.Errorf("checkpoint %s is not a directory", fi.Name()) + return nil, fmt.Errorf("checkpoint %s is not a directory", fi.Name()) } idx, err := strconv.Atoi(fi.Name()[len(checkpointPrefix):]) if err != nil { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go index c69017051..905bbf00d 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/live_reader.go @@ -16,6 +16,7 @@ package wlog import ( "encoding/binary" + "errors" "fmt" "hash/crc32" "io" @@ -24,7 +25,6 @@ import ( "github.com/go-kit/log/level" "github.com/golang/snappy" "github.com/klauspost/compress/zstd" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" ) @@ -135,7 +135,7 @@ func (r *LiveReader) Next() bool { switch ok, err := r.buildRecord(); { case ok: return true - case err != nil && err != io.EOF: + case err != nil && !errors.Is(err, io.EOF): r.err = err return false } @@ -157,7 +157,7 @@ func (r *LiveReader) Next() bool { if r.writeIndex != pageSize { n, err := r.fillBuffer() - if n == 0 || (err != nil && err != io.EOF) { + if n == 0 || (err != nil && !errors.Is(err, io.EOF)) { r.err = err return false } @@ -174,7 +174,7 @@ func (r *LiveReader) Record() 
[]byte { // Rebuild a full record from potentially partial records. Returns false // if there was an error or if we weren't able to read a record for any reason. // Returns true if we read a full record. Any record data is appended to -// LiveReader.rec +// LiveReader.rec. func (r *LiveReader) buildRecord() (bool, error) { for { // Check that we have data in the internal buffer to read. @@ -265,7 +265,7 @@ func validateRecord(typ recType, i int) error { } return nil default: - return errors.Errorf("unexpected record type %d", typ) + return fmt.Errorf("unexpected record type %d", typ) } } @@ -322,7 +322,7 @@ func (r *LiveReader) readRecord() ([]byte, int, error) { rec := r.buf[r.readIndex+recordHeaderSize : r.readIndex+recordHeaderSize+length] if c := crc32.Checksum(rec, castagnoliTable); c != crc { - return nil, 0, errors.Errorf("unexpected checksum %x, expected %x", c, crc) + return nil, 0, fmt.Errorf("unexpected checksum %x, expected %x", c, crc) } return rec, length + recordHeaderSize, nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/reader.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/reader.go index f77b03b8e..a744b0cc4 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/reader.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/reader.go @@ -16,12 +16,13 @@ package wlog import ( "encoding/binary" + "errors" + "fmt" "hash/crc32" "io" "github.com/golang/snappy" "github.com/klauspost/compress/zstd" - "github.com/pkg/errors" ) // Reader reads WAL records from an io.Reader. @@ -47,7 +48,7 @@ func NewReader(r io.Reader) *Reader { // It must not be called again after it returned false. func (r *Reader) Next() bool { err := r.next() - if errors.Is(err, io.EOF) { + if err != nil && errors.Is(err, io.EOF) { // The last WAL segment record shouldn't be torn(should be full or last). // The last record would be torn after a crash just before // the last record part could be persisted to disk. 
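The live reader and WAL reader hunks here replace direct sentinel comparisons such as `err != io.EOF` with `!errors.Is(err, io.EOF)`. Once errors are wrapped with `%w`, `==` no longer matches the sentinel, while `errors.Is` unwraps the whole chain. A two-line demonstration:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	wrapped := fmt.Errorf("read first header byte: %w", io.EOF)

	fmt.Println(wrapped == io.EOF)          // false: the wrapper is a distinct value
	fmt.Println(errors.Is(wrapped, io.EOF)) // true: Is walks the %w chain
}
```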
@@ -72,7 +73,7 @@ func (r *Reader) next() (err error) { i := 0 for { if _, err = io.ReadFull(r.rdr, hdr[:1]); err != nil { - return errors.Wrap(err, "read first header byte") + return fmt.Errorf("read first header byte: %w", err) } r.total++ r.curRecTyp = recTypeFromHeader(hdr[0]) @@ -95,7 +96,7 @@ func (r *Reader) next() (err error) { } n, err := io.ReadFull(r.rdr, buf[:k]) if err != nil { - return errors.Wrap(err, "read remaining zeros") + return fmt.Errorf("read remaining zeros: %w", err) } r.total += int64(n) @@ -108,7 +109,7 @@ func (r *Reader) next() (err error) { } n, err := io.ReadFull(r.rdr, hdr[1:]) if err != nil { - return errors.Wrap(err, "read remaining header") + return fmt.Errorf("read remaining header: %w", err) } r.total += int64(n) @@ -118,7 +119,7 @@ func (r *Reader) next() (err error) { ) if length > pageSize-recordHeaderSize { - return errors.Errorf("invalid record size %d", length) + return fmt.Errorf("invalid record size %d", length) } n, err = io.ReadFull(r.rdr, buf[:length]) if err != nil { @@ -127,10 +128,10 @@ func (r *Reader) next() (err error) { r.total += int64(n) if n != int(length) { - return errors.Errorf("invalid size: expected %d, got %d", length, n) + return fmt.Errorf("invalid size: expected %d, got %d", length, n) } if c := crc32.Checksum(buf[:length], castagnoliTable); c != crc { - return errors.Errorf("unexpected checksum %x, expected %x", c, crc) + return fmt.Errorf("unexpected checksum %x, expected %x", c, crc) } if isSnappyCompressed || isZstdCompressed { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go index 3315d1922..1c76e3887 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/watcher.go @@ -14,6 +14,7 @@ package wlog import ( + "errors" "fmt" "io" "math" @@ -25,7 +26,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "golang.org/x/exp/slices" @@ -264,7 +264,7 @@ func (w *Watcher) loop() { func (w *Watcher) Run() error { _, lastSegment, err := w.firstAndLast() if err != nil { - return errors.Wrap(err, "wal.Segments") + return fmt.Errorf("wal.Segments: %w", err) } // We want to ensure this is false across iterations since @@ -275,13 +275,13 @@ func (w *Watcher) Run() error { // Backfill from the checkpoint first if it exists. lastCheckpoint, checkpointIndex, err := LastCheckpoint(w.walDir) - if err != nil && err != record.ErrNotFound { - return errors.Wrap(err, "tsdb.LastCheckpoint") + if err != nil && !errors.Is(err, record.ErrNotFound) { + return fmt.Errorf("tsdb.LastCheckpoint: %w", err) } if err == nil { if err = w.readCheckpoint(lastCheckpoint, (*Watcher).readSegment); err != nil { - return errors.Wrap(err, "readCheckpoint") + return fmt.Errorf("readCheckpoint: %w", err) } } w.lastCheckpoint = lastCheckpoint @@ -371,7 +371,7 @@ func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, s // Ignore all errors reading to end of segment whilst replaying the WAL. 
if !tail { - if err != nil && errors.Cause(err) != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "segment", segmentNum, "err", err) } else if r.Offset() != size { level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", r.Offset(), "size", size) @@ -380,7 +380,7 @@ func (w *Watcher) readAndHandleError(r *LiveReader, segmentNum int, tail bool, s } // Otherwise, when we are tailing, non-EOFs are fatal. - if errors.Cause(err) != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return err } return nil @@ -403,7 +403,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { var err error size, err = getSegmentSize(w.walDir, segmentNum) if err != nil { - return errors.Wrap(err, "getSegmentSize") + return fmt.Errorf("getSegmentSize: %w", err) } return w.readAndHandleError(reader, segmentNum, tail, size) @@ -447,7 +447,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { case <-segmentTicker.C: _, last, err := w.firstAndLast() if err != nil { - return errors.Wrap(err, "segments") + return fmt.Errorf("segments: %w", err) } // Check if new segments exists. @@ -459,7 +459,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { // Ignore errors reading to end of segment whilst replaying the WAL. if !tail { switch { - case err != nil && errors.Cause(err) != io.EOF: + case err != nil && !errors.Is(err, io.EOF): level.Warn(w.logger).Log("msg", "Ignoring error reading to end of segment, may have dropped data", "err", err) case reader.Offset() != size: level.Warn(w.logger).Log("msg", "Expected to have read whole segment, may have dropped data", "segment", segmentNum, "read", reader.Offset(), "size", size) @@ -468,7 +468,7 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { } // Otherwise, when we are tailing, non-EOFs are fatal. - if errors.Cause(err) != io.EOF { + if err != nil && !errors.Is(err, io.EOF) { return err } @@ -497,8 +497,8 @@ func (w *Watcher) watch(segmentNum int, tail bool) error { func (w *Watcher) garbageCollectSeries(segmentNum int) error { dir, _, err := LastCheckpoint(w.walDir) - if err != nil && err != record.ErrNotFound { - return errors.Wrap(err, "tsdb.LastCheckpoint") + if err != nil && !errors.Is(err, record.ErrNotFound) { + return fmt.Errorf("tsdb.LastCheckpoint: %w", err) } if dir == "" || dir == w.lastCheckpoint { @@ -508,7 +508,7 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error { index, err := checkpointNum(dir) if err != nil { - return errors.Wrap(err, "error parsing checkpoint filename") + return fmt.Errorf("error parsing checkpoint filename: %w", err) } if index >= segmentNum { @@ -519,7 +519,7 @@ func (w *Watcher) garbageCollectSeries(segmentNum int) error { level.Debug(w.logger).Log("msg", "New checkpoint detected", "new", dir, "currentSegment", segmentNum) if err = w.readCheckpoint(dir, (*Watcher).readSegmentForGC); err != nil { - return errors.Wrap(err, "readCheckpoint") + return fmt.Errorf("readCheckpoint: %w", err) } // Clear series with a checkpoint or segment index # lower than the checkpoint we just read. 
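The `readSegment` hunks that follow replace `return errors.Wrapf(r.Err(), ...)` with an explicit nil guard. The guard is not cosmetic: `pkg/errors.Wrapf` returns nil when its argument is nil, whereas `fmt.Errorf` always returns a non-nil error, so a mechanical translation would turn a clean segment read into a failure. A sketch of the guarded pattern (the function name is illustrative):

```go
package main

import (
	"errors"
	"fmt"
)

// wrapSegmentErr only wraps when there is actually an error to wrap;
// fmt.Errorf("segment %d: %w", n, nil) would still yield a non-nil error,
// unlike the old errors.Wrapf(nil, ...), which returned nil.
func wrapSegmentErr(segment int, err error) error {
	if err != nil {
		return fmt.Errorf("segment %d: %w", segment, err)
	}
	return nil
}

func main() {
	fmt.Println(wrapSegmentErr(3, nil))                       // <nil>
	fmt.Println(wrapSegmentErr(3, errors.New("bad record"))) // segment 3: bad record
}
```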
@@ -658,7 +658,10 @@ func (w *Watcher) readSegment(r *LiveReader, segmentNum int, tail bool) error { w.recordDecodeFailsMetric.Inc() } } - return errors.Wrapf(r.Err(), "segment %d: %v", segmentNum, r.Err()) + if err := r.Err(); err != nil { + return fmt.Errorf("segment %d: %w", segmentNum, err) + } + return nil } // Go through all series in a segment updating the segmentNum, so we can delete older series. @@ -691,7 +694,10 @@ func (w *Watcher) readSegmentForGC(r *LiveReader, segmentNum int, _ bool) error w.recordDecodeFailsMetric.Inc() } } - return errors.Wrapf(r.Err(), "segment %d: %v", segmentNum, r.Err()) + if err := r.Err(); err != nil { + return fmt.Errorf("segment %d: %w", segmentNum, err) + } + return nil } func (w *Watcher) SetStartTime(t time.Time) { @@ -706,29 +712,29 @@ func (w *Watcher) readCheckpoint(checkpointDir string, readFn segmentReadFn) err level.Debug(w.logger).Log("msg", "Reading checkpoint", "dir", checkpointDir) index, err := checkpointNum(checkpointDir) if err != nil { - return errors.Wrap(err, "checkpointNum") + return fmt.Errorf("checkpointNum: %w", err) } // Ensure we read the whole contents of every segment in the checkpoint dir. segs, err := w.segments(checkpointDir) if err != nil { - return errors.Wrap(err, "Unable to get segments checkpoint dir") + return fmt.Errorf("Unable to get segments checkpoint dir: %w", err) } for _, seg := range segs { size, err := getSegmentSize(checkpointDir, seg) if err != nil { - return errors.Wrap(err, "getSegmentSize") + return fmt.Errorf("getSegmentSize: %w", err) } sr, err := OpenReadSegment(SegmentName(checkpointDir, seg)) if err != nil { - return errors.Wrap(err, "unable to open segment") + return fmt.Errorf("unable to open segment: %w", err) } defer sr.Close() r := NewLiveReader(w.logger, w.readerMetrics, sr) - if err := readFn(w, r, index, false); errors.Cause(err) != io.EOF && err != nil { - return errors.Wrap(err, "readSegment") + if err := readFn(w, r, index, false); err != nil && !errors.Is(err, io.EOF) { + return fmt.Errorf("readSegment: %w", err) } if r.Offset() != size { @@ -745,12 +751,12 @@ func checkpointNum(dir string) (int, error) { // dir may contain a hidden directory, so only check the base directory chunks := strings.Split(filepath.Base(dir), ".") if len(chunks) != 2 { - return 0, errors.Errorf("invalid checkpoint dir string: %s", dir) + return 0, fmt.Errorf("invalid checkpoint dir string: %s", dir) } result, err := strconv.Atoi(chunks[1]) if err != nil { - return 0, errors.Errorf("invalid checkpoint dir string: %s", dir) + return 0, fmt.Errorf("invalid checkpoint dir string: %s", dir) } return result, nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go index fd65fca07..c3ae001d9 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wlog/wlog.go @@ -17,6 +17,7 @@ package wlog import ( "bufio" "encoding/binary" + "errors" "fmt" "hash/crc32" "io" @@ -30,7 +31,6 @@ import ( "github.com/go-kit/log/level" "github.com/golang/snappy" "github.com/klauspost/compress/zstd" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "golang.org/x/exp/slices" @@ -137,7 +137,7 @@ func OpenWriteSegment(logger log.Logger, dir string, k int) (*Segment, error) { level.Warn(logger).Log("msg", "Last page of the wlog is torn, filling it with zeros", "segment", segName) if _, err := f.Write(make([]byte, pageSize-d)); err != nil { f.Close() - return nil, 
errors.Wrap(err, "zero-pad torn page") + return nil, fmt.Errorf("zero-pad torn page: %w", err) } } return &Segment{SegmentFile: f, i: k, dir: dir}, nil @@ -298,7 +298,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi return nil, errors.New("invalid segment size") } if err := os.MkdirAll(dir, 0o777); err != nil { - return nil, errors.Wrap(err, "create dir") + return nil, fmt.Errorf("create dir: %w", err) } if logger == nil { logger = log.NewNopLogger() @@ -331,7 +331,7 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi _, last, err := Segments(w.Dir()) if err != nil { - return nil, errors.Wrap(err, "get segment range") + return nil, fmt.Errorf("get segment range: %w", err) } // Index of the Segment we want to open and write to. @@ -414,11 +414,9 @@ func (w *WL) Repair(origErr error) error { // But that's not generally applicable if the records have any kind of causality. // Maybe as an extra mode in the future if mid-WAL corruptions become // a frequent concern. - err := errors.Cause(origErr) // So that we can pick up errors even if wrapped. - - cerr, ok := err.(*CorruptionErr) - if !ok { - return errors.Wrap(origErr, "cannot handle error") + var cerr *CorruptionErr + if !errors.As(origErr, &cerr) { + return fmt.Errorf("cannot handle error: %w", origErr) } if cerr.Segment < 0 { return errors.New("corruption error does not specify position") @@ -429,7 +427,7 @@ func (w *WL) Repair(origErr error) error { // All segments behind the corruption can no longer be used. segs, err := listSegments(w.Dir()) if err != nil { - return errors.Wrap(err, "list segments") + return fmt.Errorf("list segments: %w", err) } level.Warn(w.logger).Log("msg", "Deleting all segments newer than corrupted segment", "segment", cerr.Segment) @@ -440,14 +438,14 @@ func (w *WL) Repair(origErr error) error { // as we set the current segment to repaired file // below. if err := w.segment.Close(); err != nil { - return errors.Wrap(err, "close active segment") + return fmt.Errorf("close active segment: %w", err) } } if s.index <= cerr.Segment { continue } if err := os.Remove(filepath.Join(w.Dir(), s.name)); err != nil { - return errors.Wrapf(err, "delete segment:%v", s.index) + return fmt.Errorf("delete segment:%v: %w", s.index, err) } } // Regardless of the corruption offset, no record reaches into the previous segment. @@ -472,7 +470,7 @@ func (w *WL) Repair(origErr error) error { f, err := os.Open(tmpfn) if err != nil { - return errors.Wrap(err, "open segment") + return fmt.Errorf("open segment: %w", err) } defer f.Close() @@ -484,24 +482,24 @@ func (w *WL) Repair(origErr error) error { break } if err := w.Log(r.Record()); err != nil { - return errors.Wrap(err, "insert record") + return fmt.Errorf("insert record: %w", err) } } // We expect an error here from r.Err(), so nothing to handle. // We need to pad to the end of the last page in the repaired segment if err := w.flushPage(true); err != nil { - return errors.Wrap(err, "flush page in repair") + return fmt.Errorf("flush page in repair: %w", err) } // We explicitly close even when there is a defer for Windows to be // able to delete it. The defer is in place to close it in-case there // are errors above. 
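`Repair` above swaps `errors.Cause` plus a type assertion for `errors.As`, which searches the entire chain for a `*CorruptionErr`; the `util/annotations` hunk further down makes the same move for `annoErr`. A sketch with a stand-in type (only the pattern, not the real `CorruptionErr`, is shown):

```go
package main

import (
	"errors"
	"fmt"
)

// corruptionErr stands in for wlog.CorruptionErr in this sketch.
type corruptionErr struct{ Segment int }

func (e *corruptionErr) Error() string {
	return fmt.Sprintf("corruption in segment %d", e.Segment)
}

func main() {
	// Even through an extra layer of wrapping...
	err := fmt.Errorf("cannot handle error: %w", &corruptionErr{Segment: 7})

	var cerr *corruptionErr
	if errors.As(err, &cerr) { // ...As finds the typed error in the chain.
		fmt.Println("repair from segment", cerr.Segment) // repair from segment 7
	}
}
```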
if err := f.Close(); err != nil { - return errors.Wrap(err, "close corrupted file") + return fmt.Errorf("close corrupted file: %w", err) } if err := os.Remove(tmpfn); err != nil { - return errors.Wrap(err, "delete corrupted segment") + return fmt.Errorf("delete corrupted segment: %w", err) } // Explicitly close the segment we just repaired to avoid issues with Windows. @@ -553,7 +551,7 @@ func (w *WL) nextSegment(async bool) (int, error) { } next, err := CreateSegment(w.Dir(), w.segment.Index()+1) if err != nil { - return 0, errors.Wrap(err, "create new segment file") + return 0, fmt.Errorf("create new segment file: %w", err) } prev := w.segment if err := w.setSegment(next); err != nil { @@ -622,7 +620,8 @@ func (w *WL) flushPage(clear bool) error { } // First Byte of header format: -// [3 bits unallocated] [1 bit zstd compression flag] [1 bit snappy compression flag] [3 bit record type ] +// +// [3 bits unallocated] [1 bit zstd compression flag] [1 bit snappy compression flag] [3 bit record type ] const ( snappyMask = 1 << 3 zstdMask = 1 << 4 @@ -836,7 +835,7 @@ func (w *WL) fsync(f *Segment) error { // Sync forces a file sync on the current write log segment. This function is meant // to be used only on tests due to different behaviour on Operating Systems -// like windows and linux +// like windows and linux. func (w *WL) Sync() error { return w.fsync(w.segment) } @@ -939,7 +938,7 @@ func NewSegmentsRangeReader(sr ...SegmentRange) (io.ReadCloser, error) { for _, sgmRange := range sr { refs, err := listSegments(sgmRange.Dir) if err != nil { - return nil, errors.Wrapf(err, "list segment in dir:%v", sgmRange.Dir) + return nil, fmt.Errorf("list segment in dir:%v: %w", sgmRange.Dir, err) } for _, r := range refs { @@ -951,7 +950,7 @@ func NewSegmentsRangeReader(sr ...SegmentRange) (io.ReadCloser, error) { } s, err := OpenReadSegment(filepath.Join(sgmRange.Dir, r.name)) if err != nil { - return nil, errors.Wrapf(err, "open segment:%v in dir:%v", r.name, sgmRange.Dir) + return nil, fmt.Errorf("open segment:%v in dir:%v: %w", r.name, sgmRange.Dir, err) } segs = append(segs, s) } @@ -971,8 +970,7 @@ type segmentBufReader struct { off int // Offset of read data into current segment. } -// nolint:revive // TODO: Consider exporting segmentBufReader -func NewSegmentBufReader(segs ...*Segment) *segmentBufReader { +func NewSegmentBufReader(segs ...*Segment) io.ReadCloser { if len(segs) == 0 { return &segmentBufReader{} } @@ -983,16 +981,16 @@ func NewSegmentBufReader(segs ...*Segment) *segmentBufReader { } } -// nolint:revive -func NewSegmentBufReaderWithOffset(offset int, segs ...*Segment) (sbr *segmentBufReader, err error) { +func NewSegmentBufReaderWithOffset(offset int, segs ...*Segment) (io.ReadCloser, error) { if offset == 0 || len(segs) == 0 { return NewSegmentBufReader(segs...), nil } - sbr = &segmentBufReader{ + sbr := &segmentBufReader{ buf: bufio.NewReaderSize(segs[0], 16*pageSize), segs: segs, } + var err error if offset > 0 { _, err = sbr.buf.Discard(offset) } @@ -1018,7 +1016,7 @@ func (r *segmentBufReader) Read(b []byte) (n int, err error) { r.off += n // If we succeeded, or hit a non-EOF, we can stop. 
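`NewSegmentBufReader` and `NewSegmentBufReaderWithOffset` above now return `io.ReadCloser` instead of the unexported `*segmentBufReader`, which is what the deleted `nolint:revive` comments were suppressing (revive flags exported functions that return unexported types). A minimal sketch of the same shape (types and names here are invented for illustration):

```go
package example

import (
	"bufio"
	"io"
	"strings"
)

// lineReader is deliberately unexported, like segmentBufReader.
type lineReader struct{ r *bufio.Reader }

func (l *lineReader) Read(p []byte) (int, error) { return l.r.Read(p) }
func (l *lineReader) Close() error               { return nil }

// NewLineReader exposes only the io.ReadCloser behaviour, so callers cannot
// depend on the concrete type and the unexported-return lint is satisfied.
func NewLineReader(s string) io.ReadCloser {
	return &lineReader{r: bufio.NewReader(strings.NewReader(s))}
}
```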
- if err == nil || err != io.EOF { + if err == nil || !errors.Is(err, io.EOF) { return n, err } diff --git a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go index 1d339646e..180d40843 100644 --- a/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go +++ b/vendor/github.com/prometheus/prometheus/util/annotations/annotations.go @@ -81,8 +81,8 @@ func (a Annotations) AsStrings(query string, maxAnnos int) []string { if maxAnnos > 0 && len(arr) >= maxAnnos { break } - anErr, ok := err.(annoErr) - if ok { + var anErr annoErr + if errors.As(err, &anErr) { anErr.Query = query err = anErr } @@ -94,7 +94,7 @@ func (a Annotations) AsStrings(query string, maxAnnos int) []string { return arr } -//nolint:revive // Ignore ST1012 +//nolint:revive // error-naming. var ( // Currently there are only 2 types, warnings and info. // For now, info are visually identical with warnings as we have not updated @@ -109,7 +109,7 @@ var ( MixedClassicNativeHistogramsWarning = fmt.Errorf("%w: vector contains a mix of classic and native histograms for metric name", PromQLWarning) PossibleNonCounterInfo = fmt.Errorf("%w: metric might not be a counter, name does not end in _total/_sum/_count/_bucket:", PromQLInfo) - HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (and may give inaccurate results) for metric name", PromQLInfo) + HistogramQuantileForcedMonotonicityInfo = fmt.Errorf("%w: input to histogram_quantile needed to be fixed for monotonicity (see https://prometheus.io/docs/prometheus/latest/querying/functions/#histogram_quantile) for metric name", PromQLInfo) ) type annoErr struct { @@ -119,12 +119,19 @@ type annoErr struct { } func (e annoErr) Error() string { + if e.Query == "" { + return e.Err.Error() + } return fmt.Sprintf("%s (%s)", e.Err, e.PositionRange.StartPosInput(e.Query, 0)) } +func (e annoErr) Unwrap() error { + return e.Err +} + // NewInvalidQuantileWarning is used when the user specifies an invalid quantile // value, i.e. a float that is outside the range [0, 1] or NaN. -func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) annoErr { +func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w, got %g", InvalidQuantileWarning, q), @@ -133,7 +140,7 @@ func NewInvalidQuantileWarning(q float64, pos posrange.PositionRange) annoErr { // NewBadBucketLabelWarning is used when there is an error parsing the bucket label // of a classic histogram. -func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRange) annoErr { +func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w of %q for metric name %q", BadBucketLabelWarning, label, metricName), @@ -143,7 +150,7 @@ func NewBadBucketLabelWarning(metricName, label string, pos posrange.PositionRan // NewMixedFloatsHistogramsWarning is used when the queried series includes both // float samples and histogram samples for functions that do not support mixed // samples. 
-func NewMixedFloatsHistogramsWarning(metricName string, pos posrange.PositionRange) annoErr { +func NewMixedFloatsHistogramsWarning(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q", MixedFloatsHistogramsWarning, metricName), @@ -152,7 +159,7 @@ func NewMixedFloatsHistogramsWarning(metricName string, pos posrange.PositionRan // NewMixedClassicNativeHistogramsWarning is used when the queried series includes // both classic and native histograms. -func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.PositionRange) annoErr { +func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q", MixedClassicNativeHistogramsWarning, metricName), @@ -161,7 +168,7 @@ func NewMixedClassicNativeHistogramsWarning(metricName string, pos posrange.Posi // NewPossibleNonCounterInfo is used when a named counter metric with only float samples does not // have the suffixes _total, _sum, _count, or _bucket. -func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) annoErr { +func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q", PossibleNonCounterInfo, metricName), @@ -170,7 +177,7 @@ func NewPossibleNonCounterInfo(metricName string, pos posrange.PositionRange) an // NewHistogramQuantileForcedMonotonicityInfo is used when the input (classic histograms) to // histogram_quantile needs to be forced to be monotonic. -func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange.PositionRange) annoErr { +func NewHistogramQuantileForcedMonotonicityInfo(metricName string, pos posrange.PositionRange) error { return annoErr{ PositionRange: pos, Err: fmt.Errorf("%w %q", HistogramQuantileForcedMonotonicityInfo, metricName), diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/context.go b/vendor/github.com/prometheus/prometheus/util/testutil/context.go index c1f4a831c..3f63b030d 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/context.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/context.go @@ -15,18 +15,18 @@ package testutil import "time" -// A MockContext provides a simple stub implementation of a Context +// A MockContext provides a simple stub implementation of a Context. type MockContext struct { Error error DoneCh chan struct{} } -// Deadline always will return not set +// Deadline always will return not set. func (c *MockContext) Deadline() (deadline time.Time, ok bool) { return time.Time{}, false } -// Done returns a read channel for listening to the Done event +// Done returns a read channel for listening to the Done event. func (c *MockContext) Done() <-chan struct{} { return c.DoneCh } @@ -36,7 +36,7 @@ func (c *MockContext) Err() error { return c.Error } -// Value ignores the Value and always returns nil +// Value ignores the Value and always returns nil. func (c *MockContext) Value(interface{}) interface{} { return nil } diff --git a/vendor/github.com/prometheus/prometheus/util/testutil/directory.go b/vendor/github.com/prometheus/prometheus/util/testutil/directory.go index 71ed74ba5..8aa17702d 100644 --- a/vendor/github.com/prometheus/prometheus/util/testutil/directory.go +++ b/vendor/github.com/prometheus/prometheus/util/testutil/directory.go @@ -33,7 +33,7 @@ const ( // NilCloser is a no-op Closer. 
NilCloser = nilCloser(true) - // The number of times that a TemporaryDirectory will retry its removal + // The number of times that a TemporaryDirectory will retry its removal. temporaryDirectoryRemoveRetries = 2 ) diff --git a/vendor/github.com/rivo/uniseg/README.md b/vendor/github.com/rivo/uniseg/README.md index 25e934687..a8191b815 100644 --- a/vendor/github.com/rivo/uniseg/README.md +++ b/vendor/github.com/rivo/uniseg/README.md @@ -3,7 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/rivo/uniseg.svg)](https://pkg.go.dev/github.com/rivo/uniseg) [![Go Report](https://img.shields.io/badge/go%20report-A%2B-brightgreen.svg)](https://goreportcard.com/report/github.com/rivo/uniseg) -This Go package implements Unicode Text Segmentation according to [Unicode Standard Annex #29](https://unicode.org/reports/tr29/), Unicode Line Breaking according to [Unicode Standard Annex #14](https://unicode.org/reports/tr14/) (Unicode version 14.0.0), and monospace font string width calculation similar to [wcwidth](https://man7.org/linux/man-pages/man3/wcwidth.3.html). +This Go package implements Unicode Text Segmentation according to [Unicode Standard Annex #29](https://unicode.org/reports/tr29/), Unicode Line Breaking according to [Unicode Standard Annex #14](https://unicode.org/reports/tr14/) (Unicode version 15.0.0), and monospace font string width calculation similar to [wcwidth](https://man7.org/linux/man-pages/man3/wcwidth.3.html). ## Background @@ -73,7 +73,7 @@ for gr.Next() { ### Using the [`Step`](https://pkg.go.dev/github.com/rivo/uniseg#Step) or [`StepString`](https://pkg.go.dev/github.com/rivo/uniseg#StepString) Function -This is orders of magnitude faster than the `Graphemes` class, but it requires the handling of states and boundaries: +This avoids allocating a new `Graphemes` object but it requires the handling of states and boundaries: ```go str := "🇩🇪🏳️‍🌈" @@ -88,29 +88,7 @@ for len(str) > 0 { ### Advanced Examples -Breaking into grapheme clusters and evaluating line breaks: - -```go -str := "First line.\nSecond line." -state := -1 -var ( - c string - boundaries int -) -for len(str) > 0 { - c, str, boundaries, state = uniseg.StepString(str, state) - fmt.Print(c) - if boundaries&uniseg.MaskLine == uniseg.LineCanBreak { - fmt.Print("|") - } else if boundaries&uniseg.MaskLine == uniseg.LineMustBreak { - fmt.Print("‖") - } -} -// First |line. -// ‖Second |line.‖ -``` - -If you're only interested in word segmentation, use [`FirstWord`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWord) or [`FirstWordInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWordInString): +The [`Graphemes`](https://pkg.go.dev/github.com/rivo/uniseg#Graphemes) class offers the most convenient way to access all functionality of this package. But in some cases, it may be better to use the specialized functions directly. For example, if you're only interested in word segmentation, use [`FirstWord`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWord) or [`FirstWordInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstWordInString): ```go str := "Hello, world!" 
@@ -133,6 +111,8 @@ Similarly, use - [`FirstSentence`](https://pkg.go.dev/github.com/rivo/uniseg#FirstSentence) or [`FirstSentenceInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstSentenceInString) for sentence segmentation only, and - [`FirstLineSegment`](https://pkg.go.dev/github.com/rivo/uniseg#FirstLineSegment) or [`FirstLineSegmentInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstLineSegmentInString) for line breaking / word wrapping (although using [`Step`](https://pkg.go.dev/github.com/rivo/uniseg#Step) or [`StepString`](https://pkg.go.dev/github.com/rivo/uniseg#StepString) is preferred as it will observe grapheme cluster boundaries). +If you're only interested in the width of characters, use [`FirstGraphemeCluster`](https://pkg.go.dev/github.com/rivo/uniseg#FirstGraphemeCluster) or [`FirstGraphemeClusterInString`](https://pkg.go.dev/github.com/rivo/uniseg#FirstGraphemeClusterInString). It is much faster than using [`Step`](https://pkg.go.dev/github.com/rivo/uniseg#Step), [`StepString`](https://pkg.go.dev/github.com/rivo/uniseg#StepString), or the [`Graphemes`](https://pkg.go.dev/github.com/rivo/uniseg#Graphemes) class because it does not include the logic for word / sentence / line boundaries. + Finally, if you need to reverse a string while preserving grapheme clusters, use [`ReverseString`](https://pkg.go.dev/github.com/rivo/uniseg#ReverseString): ```go diff --git a/vendor/github.com/rivo/uniseg/eastasianwidth.go b/vendor/github.com/rivo/uniseg/eastasianwidth.go index 661934ac2..5fc54d991 100644 --- a/vendor/github.com/rivo/uniseg/eastasianwidth.go +++ b/vendor/github.com/rivo/uniseg/eastasianwidth.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. +package uniseg + // eastAsianWidth are taken from -// https://www.unicode.org/Public/14.0.0/ucd/EastAsianWidth.txt +// https://www.unicode.org/Public/15.0.0/ucd/EastAsianWidth.txt // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var eastAsianWidth = [][3]int{ {0x0000, 0x001F, prN}, // Cc [32] .. 
@@ -504,6 +504,7 @@ var eastAsianWidth = [][3]int{ {0x0CE2, 0x0CE3, prN}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL {0x0CE6, 0x0CEF, prN}, // Nd [10] KANNADA DIGIT ZERO..KANNADA DIGIT NINE {0x0CF1, 0x0CF2, prN}, // Lo [2] KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADHMANIYA + {0x0CF3, 0x0CF3, prN}, // Mc KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT {0x0D00, 0x0D01, prN}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prN}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D04, 0x0D0C, prN}, // Lo [9] MALAYALAM LETTER VEDIC ANUSVARA..MALAYALAM LETTER VOCALIC L @@ -565,7 +566,7 @@ var eastAsianWidth = [][3]int{ {0x0EBD, 0x0EBD, prN}, // Lo LAO SEMIVOWEL SIGN NYO {0x0EC0, 0x0EC4, prN}, // Lo [5] LAO VOWEL SIGN E..LAO VOWEL SIGN AI {0x0EC6, 0x0EC6, prN}, // Lm LAO KO LA - {0x0EC8, 0x0ECD, prN}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0EC8, 0x0ECE, prN}, // Mn [7] LAO TONE MAI EK..LAO YAMAKKAN {0x0ED0, 0x0ED9, prN}, // Nd [10] LAO DIGIT ZERO..LAO DIGIT NINE {0x0EDC, 0x0EDF, prN}, // Lo [4] LAO HO NO..LAO LETTER KHMU NYO {0x0F00, 0x0F00, prN}, // Lo TIBETAN SYLLABLE OM @@ -1916,6 +1917,7 @@ var eastAsianWidth = [][3]int{ {0x10EAB, 0x10EAC, prN}, // Mn [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK {0x10EAD, 0x10EAD, prN}, // Pd YEZIDI HYPHENATION MARK {0x10EB0, 0x10EB1, prN}, // Lo [2] YEZIDI LETTER LAM WITH DOT ABOVE..YEZIDI LETTER YOT WITH CIRCUMFLEX ABOVE + {0x10EFD, 0x10EFF, prN}, // Mn [3] ARABIC SMALL LOW WORD SAKTA..ARABIC SMALL LOW WORD MADDA {0x10F00, 0x10F1C, prN}, // Lo [29] OLD SOGDIAN LETTER ALEPH..OLD SOGDIAN LETTER FINAL TAW WITH VERTICAL TAIL {0x10F1D, 0x10F26, prN}, // No [10] OLD SOGDIAN NUMBER ONE..OLD SOGDIAN FRACTION ONE HALF {0x10F27, 0x10F27, prN}, // Lo OLD SOGDIAN LIGATURE AYIN-DALETH @@ -1998,6 +2000,8 @@ var eastAsianWidth = [][3]int{ {0x11236, 0x11237, prN}, // Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN SHADDA {0x11238, 0x1123D, prN}, // Po [6] KHOJKI DANDA..KHOJKI ABBREVIATION SIGN {0x1123E, 0x1123E, prN}, // Mn KHOJKI SIGN SUKUN + {0x1123F, 0x11240, prN}, // Lo [2] KHOJKI LETTER QA..KHOJKI LETTER SHORT I + {0x11241, 0x11241, prN}, // Mn KHOJKI VOWEL SIGN VOCALIC R {0x11280, 0x11286, prN}, // Lo [7] MULTANI LETTER A..MULTANI LETTER GA {0x11288, 0x11288, prN}, // Lo MULTANI LETTER GHA {0x1128A, 0x1128D, prN}, // Lo [4] MULTANI LETTER CA..MULTANI LETTER JJA @@ -2160,6 +2164,7 @@ var eastAsianWidth = [][3]int{ {0x11A9E, 0x11AA2, prN}, // Po [5] SOYOMBO HEAD MARK WITH MOON AND SUN AND TRIPLE FLAME..SOYOMBO TERMINAL MARK-2 {0x11AB0, 0x11ABF, prN}, // Lo [16] CANADIAN SYLLABICS NATTILIK HI..CANADIAN SYLLABICS SPA {0x11AC0, 0x11AF8, prN}, // Lo [57] PAU CIN HAU LETTER PA..PAU CIN HAU GLOTTAL STOP FINAL + {0x11B00, 0x11B09, prN}, // Po [10] DEVANAGARI HEAD MARK..DEVANAGARI SIGN MINDU {0x11C00, 0x11C08, prN}, // Lo [9] BHAIKSUKI LETTER A..BHAIKSUKI LETTER VOCALIC L {0x11C0A, 0x11C2E, prN}, // Lo [37] BHAIKSUKI LETTER E..BHAIKSUKI LETTER HA {0x11C2F, 0x11C2F, prN}, // Mc BHAIKSUKI VOWEL SIGN AA @@ -2205,6 +2210,19 @@ var eastAsianWidth = [][3]int{ {0x11EF3, 0x11EF4, prN}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prN}, // Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O {0x11EF7, 0x11EF8, prN}, // Po [2] MAKASAR PASSIMBANG..MAKASAR END OF SECTION + {0x11F00, 0x11F01, prN}, // Mn [2] KAWI SIGN CANDRABINDU..KAWI SIGN ANUSVARA + {0x11F02, 0x11F02, prN}, // Lo KAWI SIGN REPHA + {0x11F03, 0x11F03, prN}, // Mc KAWI SIGN VISARGA + {0x11F04, 0x11F10, prN}, 
// Lo [13] KAWI LETTER A..KAWI LETTER O + {0x11F12, 0x11F33, prN}, // Lo [34] KAWI LETTER KA..KAWI LETTER JNYA + {0x11F34, 0x11F35, prN}, // Mc [2] KAWI VOWEL SIGN AA..KAWI VOWEL SIGN ALTERNATE AA + {0x11F36, 0x11F3A, prN}, // Mn [5] KAWI VOWEL SIGN I..KAWI VOWEL SIGN VOCALIC R + {0x11F3E, 0x11F3F, prN}, // Mc [2] KAWI VOWEL SIGN E..KAWI VOWEL SIGN AI + {0x11F40, 0x11F40, prN}, // Mn KAWI VOWEL SIGN EU + {0x11F41, 0x11F41, prN}, // Mc KAWI SIGN KILLER + {0x11F42, 0x11F42, prN}, // Mn KAWI CONJOINER + {0x11F43, 0x11F4F, prN}, // Po [13] KAWI DANDA..KAWI PUNCTUATION CLOSING SPIRAL + {0x11F50, 0x11F59, prN}, // Nd [10] KAWI DIGIT ZERO..KAWI DIGIT NINE {0x11FB0, 0x11FB0, prN}, // Lo LISU LETTER YHA {0x11FC0, 0x11FD4, prN}, // No [21] TAMIL FRACTION ONE THREE-HUNDRED-AND-TWENTIETH..TAMIL FRACTION DOWNSCALING FACTOR KIIZH {0x11FD5, 0x11FDC, prN}, // So [8] TAMIL SIGN NEL..TAMIL SIGN MUKKURUNI @@ -2217,8 +2235,11 @@ var eastAsianWidth = [][3]int{ {0x12480, 0x12543, prN}, // Lo [196] CUNEIFORM SIGN AB TIMES NUN TENU..CUNEIFORM SIGN ZU5 TIMES THREE DISH TENU {0x12F90, 0x12FF0, prN}, // Lo [97] CYPRO-MINOAN SIGN CM001..CYPRO-MINOAN SIGN CM114 {0x12FF1, 0x12FF2, prN}, // Po [2] CYPRO-MINOAN SIGN CM301..CYPRO-MINOAN SIGN CM302 - {0x13000, 0x1342E, prN}, // Lo [1071] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH AA032 - {0x13430, 0x13438, prN}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT + {0x13000, 0x1342F, prN}, // Lo [1072] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH V011D + {0x13430, 0x1343F, prN}, // Cf [16] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END WALLED ENCLOSURE + {0x13440, 0x13440, prN}, // Mn EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY + {0x13441, 0x13446, prN}, // Lo [6] EGYPTIAN HIEROGLYPH FULL BLANK..EGYPTIAN HIEROGLYPH WIDE LOST SIGN + {0x13447, 0x13455, prN}, // Mn [15] EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START..EGYPTIAN HIEROGLYPH MODIFIER DAMAGED {0x14400, 0x14646, prN}, // Lo [583] ANATOLIAN HIEROGLYPH A001..ANATOLIAN HIEROGLYPH A530 {0x16800, 0x16A38, prN}, // Lo [569] BAMUM LETTER PHASE-A NGKUE MFON..BAMUM LETTER PHASE-F VUEQ {0x16A40, 0x16A5E, prN}, // Lo [31] MRO LETTER TA..MRO LETTER TEK @@ -2263,7 +2284,9 @@ var eastAsianWidth = [][3]int{ {0x1AFFD, 0x1AFFE, prW}, // Lm [2] KATAKANA LETTER MINNAN NASALIZED TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-8 {0x1B000, 0x1B0FF, prW}, // Lo [256] KATAKANA LETTER ARCHAIC E..HENTAIGANA LETTER RE-2 {0x1B100, 0x1B122, prW}, // Lo [35] HENTAIGANA LETTER RE-3..KATAKANA LETTER ARCHAIC WU + {0x1B132, 0x1B132, prW}, // Lo HIRAGANA LETTER SMALL KO {0x1B150, 0x1B152, prW}, // Lo [3] HIRAGANA LETTER SMALL WI..HIRAGANA LETTER SMALL WO + {0x1B155, 0x1B155, prW}, // Lo KATAKANA LETTER SMALL KO {0x1B164, 0x1B167, prW}, // Lo [4] KATAKANA LETTER SMALL WI..KATAKANA LETTER SMALL N {0x1B170, 0x1B2FB, prW}, // Lo [396] NUSHU CHARACTER-1B170..NUSHU CHARACTER-1B2FB {0x1BC00, 0x1BC6A, prN}, // Lo [107] DUPLOYAN LETTER H..DUPLOYAN LETTER VOCALIC M @@ -2294,6 +2317,7 @@ var eastAsianWidth = [][3]int{ {0x1D200, 0x1D241, prN}, // So [66] GREEK VOCAL NOTATION SYMBOL-1..GREEK INSTRUMENTAL NOTATION SYMBOL-54 {0x1D242, 0x1D244, prN}, // Mn [3] COMBINING GREEK MUSICAL TRISEME..COMBINING GREEK MUSICAL PENTASEME {0x1D245, 0x1D245, prN}, // So GREEK MUSICAL LEIMMA + {0x1D2C0, 0x1D2D3, prN}, // No [20] KAKTOVIK NUMERAL ZERO..KAKTOVIK NUMERAL NINETEEN {0x1D2E0, 0x1D2F3, prN}, // No [20] MAYAN NUMERAL ZERO..MAYAN NUMERAL NINETEEN {0x1D300, 0x1D356, prN}, // So [87] MONOGRAM FOR EARTH..TETRAGRAM FOR FOSTERING 
{0x1D360, 0x1D378, prN}, // No [25] COUNTING ROD UNIT DIGIT ONE..TALLY MARK FIVE @@ -2353,11 +2377,14 @@ var eastAsianWidth = [][3]int{ {0x1DF00, 0x1DF09, prN}, // Ll [10] LATIN SMALL LETTER FENG DIGRAPH WITH TRILL..LATIN SMALL LETTER T WITH HOOK AND RETROFLEX HOOK {0x1DF0A, 0x1DF0A, prN}, // Lo LATIN LETTER RETROFLEX CLICK WITH RETROFLEX HOOK {0x1DF0B, 0x1DF1E, prN}, // Ll [20] LATIN SMALL LETTER ESH WITH DOUBLE BAR..LATIN SMALL LETTER S WITH CURL + {0x1DF25, 0x1DF2A, prN}, // Ll [6] LATIN SMALL LETTER D WITH MID-HEIGHT LEFT HOOK..LATIN SMALL LETTER T WITH MID-HEIGHT LEFT HOOK {0x1E000, 0x1E006, prN}, // Mn [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE {0x1E008, 0x1E018, prN}, // Mn [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU {0x1E01B, 0x1E021, prN}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI {0x1E023, 0x1E024, prN}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prN}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E030, 0x1E06D, prN}, // Lm [62] MODIFIER LETTER CYRILLIC SMALL A..MODIFIER LETTER CYRILLIC SMALL STRAIGHT U WITH STROKE + {0x1E08F, 0x1E08F, prN}, // Mn COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I {0x1E100, 0x1E12C, prN}, // Lo [45] NYIAKENG PUACHUE HMONG LETTER MA..NYIAKENG PUACHUE HMONG LETTER W {0x1E130, 0x1E136, prN}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E137, 0x1E13D, prN}, // Lm [7] NYIAKENG PUACHUE HMONG SIGN FOR PERSON..NYIAKENG PUACHUE HMONG SYLLABLE LENGTHENER @@ -2370,6 +2397,10 @@ var eastAsianWidth = [][3]int{ {0x1E2EC, 0x1E2EF, prN}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI {0x1E2F0, 0x1E2F9, prN}, // Nd [10] WANCHO DIGIT ZERO..WANCHO DIGIT NINE {0x1E2FF, 0x1E2FF, prN}, // Sc WANCHO NGUN SIGN + {0x1E4D0, 0x1E4EA, prN}, // Lo [27] NAG MUNDARI LETTER O..NAG MUNDARI LETTER ELL + {0x1E4EB, 0x1E4EB, prN}, // Lm NAG MUNDARI SIGN OJOD + {0x1E4EC, 0x1E4EF, prN}, // Mn [4] NAG MUNDARI SIGN MUHOR..NAG MUNDARI SIGN SUTUH + {0x1E4F0, 0x1E4F9, prN}, // Nd [10] NAG MUNDARI DIGIT ZERO..NAG MUNDARI DIGIT NINE {0x1E7E0, 0x1E7E6, prN}, // Lo [7] ETHIOPIC SYLLABLE HHYA..ETHIOPIC SYLLABLE HHYO {0x1E7E8, 0x1E7EB, prN}, // Lo [4] ETHIOPIC SYLLABLE GURAGE HHWA..ETHIOPIC SYLLABLE HHWE {0x1E7ED, 0x1E7EE, prN}, // Lo [2] ETHIOPIC SYLLABLE GURAGE MWI..ETHIOPIC SYLLABLE GURAGE MWEE @@ -2498,13 +2529,14 @@ var eastAsianWidth = [][3]int{ {0x1F6D0, 0x1F6D2, prW}, // So [3] PLACE OF WORSHIP..SHOPPING TROLLEY {0x1F6D3, 0x1F6D4, prN}, // So [2] STUPA..PAGODA {0x1F6D5, 0x1F6D7, prW}, // So [3] HINDU TEMPLE..ELEVATOR - {0x1F6DD, 0x1F6DF, prW}, // So [3] PLAYGROUND SLIDE..RING BUOY + {0x1F6DC, 0x1F6DF, prW}, // So [4] WIRELESS..RING BUOY {0x1F6E0, 0x1F6EA, prN}, // So [11] HAMMER AND WRENCH..NORTHEAST-POINTING AIRPLANE {0x1F6EB, 0x1F6EC, prW}, // So [2] AIRPLANE DEPARTURE..AIRPLANE ARRIVING {0x1F6F0, 0x1F6F3, prN}, // So [4] SATELLITE..PASSENGER SHIP {0x1F6F4, 0x1F6FC, prW}, // So [9] SCOOTER..ROLLER SKATE - {0x1F700, 0x1F773, prN}, // So [116] ALCHEMICAL SYMBOL FOR QUINTESSENCE..ALCHEMICAL SYMBOL FOR HALF OUNCE - {0x1F780, 0x1F7D8, prN}, // So [89] BLACK LEFT-POINTING ISOSCELES RIGHT TRIANGLE..NEGATIVE CIRCLED SQUARE + {0x1F700, 0x1F776, prN}, // So [119] ALCHEMICAL SYMBOL FOR QUINTESSENCE..LUNAR ECLIPSE + {0x1F77B, 0x1F77F, prN}, // So [5] HAUMEA..ORCUS + {0x1F780, 0x1F7D9, prN}, // So [90] BLACK LEFT-POINTING ISOSCELES RIGHT TRIANGLE..NINE POINTED WHITE STAR {0x1F7E0, 
0x1F7EB, prW}, // So [12] LARGE ORANGE CIRCLE..LARGE BROWN SQUARE {0x1F7F0, 0x1F7F0, prW}, // So HEAVY EQUALS SIGN {0x1F800, 0x1F80B, prN}, // So [12] LEFTWARDS ARROW WITH SMALL TRIANGLE ARROWHEAD..DOWNWARDS ARROW WITH LARGE TRIANGLE ARROWHEAD @@ -2521,22 +2553,20 @@ var eastAsianWidth = [][3]int{ {0x1F947, 0x1F9FF, prW}, // So [185] FIRST PLACE MEDAL..NAZAR AMULET {0x1FA00, 0x1FA53, prN}, // So [84] NEUTRAL CHESS KING..BLACK CHESS KNIGHT-BISHOP {0x1FA60, 0x1FA6D, prN}, // So [14] XIANGQI RED GENERAL..XIANGQI BLACK SOLDIER - {0x1FA70, 0x1FA74, prW}, // So [5] BALLET SHOES..THONG SANDAL - {0x1FA78, 0x1FA7C, prW}, // So [5] DROP OF BLOOD..CRUTCH - {0x1FA80, 0x1FA86, prW}, // So [7] YO-YO..NESTING DOLLS - {0x1FA90, 0x1FAAC, prW}, // So [29] RINGED PLANET..HAMSA - {0x1FAB0, 0x1FABA, prW}, // So [11] FLY..NEST WITH EGGS - {0x1FAC0, 0x1FAC5, prW}, // So [6] ANATOMICAL HEART..PERSON WITH CROWN - {0x1FAD0, 0x1FAD9, prW}, // So [10] BLUEBERRIES..JAR - {0x1FAE0, 0x1FAE7, prW}, // So [8] MELTING FACE..BUBBLES - {0x1FAF0, 0x1FAF6, prW}, // So [7] HAND WITH INDEX FINGER AND THUMB CROSSED..HEART HANDS + {0x1FA70, 0x1FA7C, prW}, // So [13] BALLET SHOES..CRUTCH + {0x1FA80, 0x1FA88, prW}, // So [9] YO-YO..FLUTE + {0x1FA90, 0x1FABD, prW}, // So [46] RINGED PLANET..WING + {0x1FABF, 0x1FAC5, prW}, // So [7] GOOSE..PERSON WITH CROWN + {0x1FACE, 0x1FADB, prW}, // So [14] MOOSE..PEA POD + {0x1FAE0, 0x1FAE8, prW}, // So [9] MELTING FACE..SHAKING FACE + {0x1FAF0, 0x1FAF8, prW}, // So [9] HAND WITH INDEX FINGER AND THUMB CROSSED..RIGHTWARDS PUSHING HAND {0x1FB00, 0x1FB92, prN}, // So [147] BLOCK SEXTANT-1..UPPER HALF INVERSE MEDIUM SHADE AND LOWER HALF BLOCK {0x1FB94, 0x1FBCA, prN}, // So [55] LEFT HALF INVERSE MEDIUM SHADE AND RIGHT HALF BLOCK..WHITE UP-POINTING CHEVRON {0x1FBF0, 0x1FBF9, prN}, // Nd [10] SEGMENTED DIGIT ZERO..SEGMENTED DIGIT NINE {0x20000, 0x2A6DF, prW}, // Lo [42720] CJK UNIFIED IDEOGRAPH-20000..CJK UNIFIED IDEOGRAPH-2A6DF {0x2A6E0, 0x2A6FF, prW}, // Cn [32] .. - {0x2A700, 0x2B738, prW}, // Lo [4153] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B738 - {0x2B739, 0x2B73F, prW}, // Cn [7] .. + {0x2A700, 0x2B739, prW}, // Lo [4154] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B739 + {0x2B73A, 0x2B73F, prW}, // Cn [6] .. {0x2B740, 0x2B81D, prW}, // Lo [222] CJK UNIFIED IDEOGRAPH-2B740..CJK UNIFIED IDEOGRAPH-2B81D {0x2B81E, 0x2B81F, prW}, // Cn [2] .. {0x2B820, 0x2CEA1, prW}, // Lo [5762] CJK UNIFIED IDEOGRAPH-2B820..CJK UNIFIED IDEOGRAPH-2CEA1 @@ -2547,7 +2577,9 @@ var eastAsianWidth = [][3]int{ {0x2FA1E, 0x2FA1F, prW}, // Cn [2] .. {0x2FA20, 0x2FFFD, prW}, // Cn [1502] .. {0x30000, 0x3134A, prW}, // Lo [4939] CJK UNIFIED IDEOGRAPH-30000..CJK UNIFIED IDEOGRAPH-3134A - {0x3134B, 0x3FFFD, prW}, // Cn [60595] .. + {0x3134B, 0x3134F, prW}, // Cn [5] .. + {0x31350, 0x323AF, prW}, // Lo [4192] CJK UNIFIED IDEOGRAPH-31350..CJK UNIFIED IDEOGRAPH-323AF + {0x323B0, 0x3FFFD, prW}, // Cn [56398] .. {0xE0001, 0xE0001, prN}, // Cf LANGUAGE TAG {0xE0020, 0xE007F, prN}, // Cf [96] TAG SPACE..CANCEL TAG {0xE0100, 0xE01EF, prA}, // Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 diff --git a/vendor/github.com/rivo/uniseg/emojipresentation.go b/vendor/github.com/rivo/uniseg/emojipresentation.go index fd0f7451a..9b5f499c4 100644 --- a/vendor/github.com/rivo/uniseg/emojipresentation.go +++ b/vendor/github.com/rivo/uniseg/emojipresentation.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. 
+package uniseg + // emojiPresentation are taken from // // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var emojiPresentation = [][3]int{ {0x231A, 0x231B, prEmojiPresentation}, // E0.6 [2] (⌚..⌛) watch..hourglass done @@ -211,6 +211,7 @@ var emojiPresentation = [][3]int{ {0x1F6D1, 0x1F6D2, prEmojiPresentation}, // E3.0 [2] (🛑..🛒) stop sign..shopping cart {0x1F6D5, 0x1F6D5, prEmojiPresentation}, // E12.0 [1] (🛕) hindu temple {0x1F6D6, 0x1F6D7, prEmojiPresentation}, // E13.0 [2] (🛖..🛗) hut..elevator + {0x1F6DC, 0x1F6DC, prEmojiPresentation}, // E15.0 [1] (🛜) wireless {0x1F6DD, 0x1F6DF, prEmojiPresentation}, // E14.0 [3] (🛝..🛟) playground slide..ring buoy {0x1F6EB, 0x1F6EC, prEmojiPresentation}, // E1.0 [2] (🛫..🛬) airplane departure..airplane arrival {0x1F6F4, 0x1F6F6, prEmojiPresentation}, // E3.0 [3] (🛴..🛶) kick scooter..canoe @@ -267,19 +268,28 @@ var emojiPresentation = [][3]int{ {0x1F9E7, 0x1F9FF, prEmojiPresentation}, // E11.0 [25] (🧧..🧿) red envelope..nazar amulet {0x1FA70, 0x1FA73, prEmojiPresentation}, // E12.0 [4] (🩰..🩳) ballet shoes..shorts {0x1FA74, 0x1FA74, prEmojiPresentation}, // E13.0 [1] (🩴) thong sandal + {0x1FA75, 0x1FA77, prEmojiPresentation}, // E15.0 [3] (🩵..🩷) light blue heart..pink heart {0x1FA78, 0x1FA7A, prEmojiPresentation}, // E12.0 [3] (🩸..🩺) drop of blood..stethoscope {0x1FA7B, 0x1FA7C, prEmojiPresentation}, // E14.0 [2] (🩻..🩼) x-ray..crutch {0x1FA80, 0x1FA82, prEmojiPresentation}, // E12.0 [3] (🪀..🪂) yo-yo..parachute {0x1FA83, 0x1FA86, prEmojiPresentation}, // E13.0 [4] (🪃..🪆) boomerang..nesting dolls + {0x1FA87, 0x1FA88, prEmojiPresentation}, // E15.0 [2] (🪇..🪈) maracas..flute {0x1FA90, 0x1FA95, prEmojiPresentation}, // E12.0 [6] (🪐..🪕) ringed planet..banjo {0x1FA96, 0x1FAA8, prEmojiPresentation}, // E13.0 [19] (🪖..🪨) military helmet..rock {0x1FAA9, 0x1FAAC, prEmojiPresentation}, // E14.0 [4] (🪩..🪬) mirror ball..hamsa + {0x1FAAD, 0x1FAAF, prEmojiPresentation}, // E15.0 [3] (🪭..🪯) folding hand fan..khanda {0x1FAB0, 0x1FAB6, prEmojiPresentation}, // E13.0 [7] (🪰..🪶) fly..feather {0x1FAB7, 0x1FABA, prEmojiPresentation}, // E14.0 [4] (🪷..🪺) lotus..nest with eggs + {0x1FABB, 0x1FABD, prEmojiPresentation}, // E15.0 [3] (🪻..🪽) hyacinth..wing + {0x1FABF, 0x1FABF, prEmojiPresentation}, // E15.0 [1] (🪿) goose {0x1FAC0, 0x1FAC2, prEmojiPresentation}, // E13.0 [3] (🫀..🫂) anatomical heart..people hugging {0x1FAC3, 0x1FAC5, prEmojiPresentation}, // E14.0 [3] (🫃..🫅) pregnant man..person with crown + {0x1FACE, 0x1FACF, prEmojiPresentation}, // E15.0 [2] (🫎..🫏) moose..donkey {0x1FAD0, 0x1FAD6, prEmojiPresentation}, // E13.0 [7] (🫐..🫖) blueberries..teapot {0x1FAD7, 0x1FAD9, prEmojiPresentation}, // E14.0 [3] (🫗..🫙) pouring liquid..jar + {0x1FADA, 0x1FADB, prEmojiPresentation}, // E15.0 [2] (🫚..🫛) ginger root..pea pod {0x1FAE0, 0x1FAE7, prEmojiPresentation}, // E14.0 [8] (🫠..🫧) melting face..bubbles + {0x1FAE8, 0x1FAE8, prEmojiPresentation}, // E15.0 [1] (🫨) shaking face {0x1FAF0, 0x1FAF6, prEmojiPresentation}, // E14.0 [7] (🫰..🫶) hand with index finger and thumb crossed..heart hands + {0x1FAF7, 0x1FAF8, prEmojiPresentation}, // E15.0 [2] (🫷..🫸) leftwards pushing hand..rightwards pushing hand } diff --git a/vendor/github.com/rivo/uniseg/gen_breaktest.go 
b/vendor/github.com/rivo/uniseg/gen_breaktest.go index e613c4cd0..6bfbeb5e7 100644 --- a/vendor/github.com/rivo/uniseg/gen_breaktest.go +++ b/vendor/github.com/rivo/uniseg/gen_breaktest.go @@ -32,7 +32,7 @@ import ( // We want to test against a specific version rather than the latest. When the // package is upgraded to a new version, change these to generate new tests. const ( - testCaseURL = `https://www.unicode.org/Public/14.0.0/ucd/auxiliary/%s.txt` + testCaseURL = `https://www.unicode.org/Public/15.0.0/ucd/auxiliary/%s.txt` ) func main() { @@ -76,9 +76,9 @@ func parse(url string) ([]byte, error) { buf := new(bytes.Buffer) buf.Grow(120 << 10) - buf.WriteString(`package uniseg + buf.WriteString(`// Code generated via go generate from gen_breaktest.go. DO NOT EDIT. -// Code generated via go generate from gen_breaktest.go. DO NOT EDIT. +package uniseg // ` + os.Args[3] + ` are Grapheme testcases taken from // ` + url + ` @@ -136,7 +136,9 @@ var ( // // E.g. for the input b="÷ 0020 × 0308 ÷ 1F1E6 ÷" // it will append -// "\u0020\u0308\U0001F1E6" +// +// "\u0020\u0308\U0001F1E6" +// // and "[][]rune{{0x0020,0x0308},{0x1F1E6},}" // to orig and exp respectively. // diff --git a/vendor/github.com/rivo/uniseg/gen_properties.go b/vendor/github.com/rivo/uniseg/gen_properties.go index 999d5efdd..8992d2c5f 100644 --- a/vendor/github.com/rivo/uniseg/gen_properties.go +++ b/vendor/github.com/rivo/uniseg/gen_properties.go @@ -41,8 +41,8 @@ import ( // We want to test against a specific version rather than the latest. When the // package is upgraded to a new version, change these to generate new tests. const ( - propertyURL = `https://www.unicode.org/Public/14.0.0/ucd/%s.txt` - emojiURL = `https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt` + propertyURL = `https://www.unicode.org/Public/15.0.0/ucd/%s.txt` + emojiURL = `https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt` ) // The regular expression for a line containing a code point range property. @@ -178,6 +178,11 @@ func parse(propertyURL, emojiProperty string, includeGeneralCategory bool) (stri } } + // Avoid overflow during binary search. + if len(properties) >= 1<<31 { + return "", errors.New("too many properties") + } + // Sort properties. sort.Slice(properties, func(i, j int) bool { left, _ := strconv.ParseUint(properties[i][0], 16, 64) @@ -200,9 +205,9 @@ func parse(propertyURL, emojiProperty string, includeGeneralCategory bool) (stri // ` + emojiURL + ` // ("Extended_Pictographic" only)` } - buf.WriteString(`package uniseg + buf.WriteString(`// Code generated via go generate from gen_properties.go. DO NOT EDIT. -// Code generated via go generate from gen_properties.go. DO NOT EDIT. +package uniseg // ` + os.Args[3] + ` are taken from // ` + propertyURL + emojiComment + ` diff --git a/vendor/github.com/rivo/uniseg/grapheme.go b/vendor/github.com/rivo/uniseg/grapheme.go index 0086fc1b2..a0bcc554b 100644 --- a/vendor/github.com/rivo/uniseg/grapheme.go +++ b/vendor/github.com/rivo/uniseg/grapheme.go @@ -222,7 +222,7 @@ func FirstGraphemeCluster(b []byte, state int) (cluster, rest []byte, width, new if len(b) <= length { // If we're already past the end, there is nothing else to parse. var prop int if state < 0 { - prop = property(graphemeCodePoints, r) + prop = propertyGraphemes(r) } else { prop = state >> shiftGraphemePropState } @@ -284,7 +284,7 @@ func FirstGraphemeClusterInString(str string, state int) (cluster, rest string, if len(str) <= length { // If we're already past the end, there is nothing else to parse. 
var prop int if state < 0 { - prop = property(graphemeCodePoints, r) + prop = propertyGraphemes(r) } else { prop = state >> shiftGraphemePropState } diff --git a/vendor/github.com/rivo/uniseg/graphemeproperties.go b/vendor/github.com/rivo/uniseg/graphemeproperties.go index a87d140bf..0aff4a619 100644 --- a/vendor/github.com/rivo/uniseg/graphemeproperties.go +++ b/vendor/github.com/rivo/uniseg/graphemeproperties.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. +package uniseg + // graphemeCodePoints are taken from -// https://www.unicode.org/Public/14.0.0/ucd/auxiliary/GraphemeBreakProperty.txt +// https://www.unicode.org/Public/15.0.0/ucd/auxiliary/GraphemeBreakProperty.txt // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var graphemeCodePoints = [][3]int{ {0x0000, 0x0009, prControl}, // Cc [10] .. @@ -143,6 +143,7 @@ var graphemeCodePoints = [][3]int{ {0x0CCC, 0x0CCD, prExtend}, // Mn [2] KANNADA VOWEL SIGN AU..KANNADA SIGN VIRAMA {0x0CD5, 0x0CD6, prExtend}, // Mc [2] KANNADA LENGTH MARK..KANNADA AI LENGTH MARK {0x0CE2, 0x0CE3, prExtend}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL + {0x0CF3, 0x0CF3, prSpacingMark}, // Mc KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT {0x0D00, 0x0D01, prExtend}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prSpacingMark}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D3B, 0x0D3C, prExtend}, // Mn [2] MALAYALAM SIGN VERTICAL BAR VIRAMA..MALAYALAM SIGN CIRCULAR VIRAMA @@ -172,7 +173,7 @@ var graphemeCodePoints = [][3]int{ {0x0EB1, 0x0EB1, prExtend}, // Mn LAO VOWEL SIGN MAI KAN {0x0EB3, 0x0EB3, prSpacingMark}, // Lo LAO VOWEL SIGN AM {0x0EB4, 0x0EBC, prExtend}, // Mn [9] LAO VOWEL SIGN I..LAO SEMIVOWEL SIGN LO - {0x0EC8, 0x0ECD, prExtend}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0EC8, 0x0ECE, prExtend}, // Mn [7] LAO TONE MAI EK..LAO YAMAKKAN {0x0F18, 0x0F19, prExtend}, // Mn [2] TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS {0x0F35, 0x0F35, prExtend}, // Mn TIBETAN MARK NGAS BZUNG NYI ZLA {0x0F37, 0x0F37, prExtend}, // Mn TIBETAN MARK NGAS BZUNG SGOR RTAGS @@ -1336,6 +1337,7 @@ var graphemeCodePoints = [][3]int{ {0x10AE5, 0x10AE6, prExtend}, // Mn [2] MANICHAEAN ABBREVIATION MARK ABOVE..MANICHAEAN ABBREVIATION MARK BELOW {0x10D24, 0x10D27, prExtend}, // Mn [4] HANIFI ROHINGYA SIGN HARBAHAY..HANIFI ROHINGYA SIGN TASSI {0x10EAB, 0x10EAC, prExtend}, // Mn [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK + {0x10EFD, 0x10EFF, prExtend}, // Mn [3] ARABIC SMALL LOW WORD SAKTA..ARABIC SMALL LOW WORD MADDA {0x10F46, 0x10F50, prExtend}, // Mn [11] SOGDIAN COMBINING DOT BELOW..SOGDIAN COMBINING STROKE BELOW {0x10F82, 0x10F85, prExtend}, // Mn [4] OLD UYGHUR COMBINING DOT ABOVE..OLD UYGHUR COMBINING TWO DOTS BELOW {0x11000, 0x11000, prSpacingMark}, // Mc BRAHMI SIGN CANDRABINDU @@ -1375,6 +1377,7 @@ var graphemeCodePoints = [][3]int{ {0x11235, 0x11235, prSpacingMark}, // Mc KHOJKI SIGN VIRAMA {0x11236, 0x11237, prExtend}, // Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN SHADDA {0x1123E, 0x1123E, prExtend}, // Mn KHOJKI SIGN SUKUN + {0x11241, 0x11241, prExtend}, // Mn KHOJKI VOWEL 
SIGN VOCALIC R {0x112DF, 0x112DF, prExtend}, // Mn KHUDAWADI SIGN ANUSVARA {0x112E0, 0x112E2, prSpacingMark}, // Mc [3] KHUDAWADI VOWEL SIGN AA..KHUDAWADI VOWEL SIGN II {0x112E3, 0x112EA, prExtend}, // Mn [8] KHUDAWADI VOWEL SIGN U..KHUDAWADI SIGN VIRAMA @@ -1494,7 +1497,18 @@ var graphemeCodePoints = [][3]int{ {0x11D97, 0x11D97, prExtend}, // Mn GUNJALA GONDI VIRAMA {0x11EF3, 0x11EF4, prExtend}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prSpacingMark}, // Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O - {0x13430, 0x13438, prControl}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT + {0x11F00, 0x11F01, prExtend}, // Mn [2] KAWI SIGN CANDRABINDU..KAWI SIGN ANUSVARA + {0x11F02, 0x11F02, prPrepend}, // Lo KAWI SIGN REPHA + {0x11F03, 0x11F03, prSpacingMark}, // Mc KAWI SIGN VISARGA + {0x11F34, 0x11F35, prSpacingMark}, // Mc [2] KAWI VOWEL SIGN AA..KAWI VOWEL SIGN ALTERNATE AA + {0x11F36, 0x11F3A, prExtend}, // Mn [5] KAWI VOWEL SIGN I..KAWI VOWEL SIGN VOCALIC R + {0x11F3E, 0x11F3F, prSpacingMark}, // Mc [2] KAWI VOWEL SIGN E..KAWI VOWEL SIGN AI + {0x11F40, 0x11F40, prExtend}, // Mn KAWI VOWEL SIGN EU + {0x11F41, 0x11F41, prSpacingMark}, // Mc KAWI SIGN KILLER + {0x11F42, 0x11F42, prExtend}, // Mn KAWI CONJOINER + {0x13430, 0x1343F, prControl}, // Cf [16] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END WALLED ENCLOSURE + {0x13440, 0x13440, prExtend}, // Mn EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY + {0x13447, 0x13455, prExtend}, // Mn [15] EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START..EGYPTIAN HIEROGLYPH MODIFIER DAMAGED {0x16AF0, 0x16AF4, prExtend}, // Mn [5] BASSA VAH COMBINING HIGH TONE..BASSA VAH COMBINING HIGH-LOW TONE {0x16B30, 0x16B36, prExtend}, // Mn [7] PAHAWH HMONG MARK CIM TUB..PAHAWH HMONG MARK CIM TAUM {0x16F4F, 0x16F4F, prExtend}, // Mn MIAO SIGN CONSONANT MODIFIER BAR @@ -1527,9 +1541,11 @@ var graphemeCodePoints = [][3]int{ {0x1E01B, 0x1E021, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI {0x1E023, 0x1E024, prExtend}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prExtend}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E08F, 0x1E08F, prExtend}, // Mn COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I {0x1E130, 0x1E136, prExtend}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E2AE, 0x1E2AE, prExtend}, // Mn TOTO SIGN RISING TONE {0x1E2EC, 0x1E2EF, prExtend}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI + {0x1E4EC, 0x1E4EF, prExtend}, // Mn [4] NAG MUNDARI SIGN MUHOR..NAG MUNDARI SIGN SUTUH {0x1E8D0, 0x1E8D6, prExtend}, // Mn [7] MENDE KIKAKUI COMBINING NUMBER TEENS..MENDE KIKAKUI COMBINING NUMBER MILLIONS {0x1E944, 0x1E94A, prExtend}, // Mn [7] ADLAM ALIF LENGTHENER..ADLAM NUKTA {0x1F000, 0x1F003, prExtendedPictographic}, // E0.0 [4] (🀀..🀃) MAHJONG TILE EAST WIND..MAHJONG TILE NORTH WIND @@ -1780,7 +1796,8 @@ var graphemeCodePoints = [][3]int{ {0x1F6D3, 0x1F6D4, prExtendedPictographic}, // E0.0 [2] (🛓..🛔) STUPA..PAGODA {0x1F6D5, 0x1F6D5, prExtendedPictographic}, // E12.0 [1] (🛕) hindu temple {0x1F6D6, 0x1F6D7, prExtendedPictographic}, // E13.0 [2] (🛖..🛗) hut..elevator - {0x1F6D8, 0x1F6DC, prExtendedPictographic}, // E0.0 [5] (🛘..🛜) .. + {0x1F6D8, 0x1F6DB, prExtendedPictographic}, // E0.0 [4] (🛘..🛛) .. 
+ {0x1F6DC, 0x1F6DC, prExtendedPictographic}, // E15.0 [1] (🛜) wireless {0x1F6DD, 0x1F6DF, prExtendedPictographic}, // E14.0 [3] (🛝..🛟) playground slide..ring buoy {0x1F6E0, 0x1F6E5, prExtendedPictographic}, // E0.7 [6] (🛠️..🛥️) hammer and wrench..motor boat {0x1F6E6, 0x1F6E8, prExtendedPictographic}, // E0.0 [3] (🛦..🛨) UP-POINTING MILITARY AIRPLANE..UP-POINTING SMALL AIRPLANE @@ -1797,7 +1814,7 @@ var graphemeCodePoints = [][3]int{ {0x1F6FA, 0x1F6FA, prExtendedPictographic}, // E12.0 [1] (🛺) auto rickshaw {0x1F6FB, 0x1F6FC, prExtendedPictographic}, // E13.0 [2] (🛻..🛼) pickup truck..roller skate {0x1F6FD, 0x1F6FF, prExtendedPictographic}, // E0.0 [3] (🛽..🛿) .. - {0x1F774, 0x1F77F, prExtendedPictographic}, // E0.0 [12] (🝴..🝿) .. + {0x1F774, 0x1F77F, prExtendedPictographic}, // E0.0 [12] (🝴..🝿) LOT OF FORTUNE..ORCUS {0x1F7D5, 0x1F7DF, prExtendedPictographic}, // E0.0 [11] (🟕..🟟) CIRCLED TRIANGLE.. {0x1F7E0, 0x1F7EB, prExtendedPictographic}, // E12.0 [12] (🟠..🟫) orange circle..brown square {0x1F7EC, 0x1F7EF, prExtendedPictographic}, // E0.0 [4] (🟬..🟯) .. @@ -1856,30 +1873,37 @@ var graphemeCodePoints = [][3]int{ {0x1FA00, 0x1FA6F, prExtendedPictographic}, // E0.0 [112] (🨀..🩯) NEUTRAL CHESS KING.. {0x1FA70, 0x1FA73, prExtendedPictographic}, // E12.0 [4] (🩰..🩳) ballet shoes..shorts {0x1FA74, 0x1FA74, prExtendedPictographic}, // E13.0 [1] (🩴) thong sandal - {0x1FA75, 0x1FA77, prExtendedPictographic}, // E0.0 [3] (🩵..🩷) .. + {0x1FA75, 0x1FA77, prExtendedPictographic}, // E15.0 [3] (🩵..🩷) light blue heart..pink heart {0x1FA78, 0x1FA7A, prExtendedPictographic}, // E12.0 [3] (🩸..🩺) drop of blood..stethoscope {0x1FA7B, 0x1FA7C, prExtendedPictographic}, // E14.0 [2] (🩻..🩼) x-ray..crutch {0x1FA7D, 0x1FA7F, prExtendedPictographic}, // E0.0 [3] (🩽..🩿) .. {0x1FA80, 0x1FA82, prExtendedPictographic}, // E12.0 [3] (🪀..🪂) yo-yo..parachute {0x1FA83, 0x1FA86, prExtendedPictographic}, // E13.0 [4] (🪃..🪆) boomerang..nesting dolls - {0x1FA87, 0x1FA8F, prExtendedPictographic}, // E0.0 [9] (🪇..🪏) .. + {0x1FA87, 0x1FA88, prExtendedPictographic}, // E15.0 [2] (🪇..🪈) maracas..flute + {0x1FA89, 0x1FA8F, prExtendedPictographic}, // E0.0 [7] (🪉..🪏) .. {0x1FA90, 0x1FA95, prExtendedPictographic}, // E12.0 [6] (🪐..🪕) ringed planet..banjo {0x1FA96, 0x1FAA8, prExtendedPictographic}, // E13.0 [19] (🪖..🪨) military helmet..rock {0x1FAA9, 0x1FAAC, prExtendedPictographic}, // E14.0 [4] (🪩..🪬) mirror ball..hamsa - {0x1FAAD, 0x1FAAF, prExtendedPictographic}, // E0.0 [3] (🪭..🪯) .. + {0x1FAAD, 0x1FAAF, prExtendedPictographic}, // E15.0 [3] (🪭..🪯) folding hand fan..khanda {0x1FAB0, 0x1FAB6, prExtendedPictographic}, // E13.0 [7] (🪰..🪶) fly..feather {0x1FAB7, 0x1FABA, prExtendedPictographic}, // E14.0 [4] (🪷..🪺) lotus..nest with eggs - {0x1FABB, 0x1FABF, prExtendedPictographic}, // E0.0 [5] (🪻..🪿) .. + {0x1FABB, 0x1FABD, prExtendedPictographic}, // E15.0 [3] (🪻..🪽) hyacinth..wing + {0x1FABE, 0x1FABE, prExtendedPictographic}, // E0.0 [1] (🪾) + {0x1FABF, 0x1FABF, prExtendedPictographic}, // E15.0 [1] (🪿) goose {0x1FAC0, 0x1FAC2, prExtendedPictographic}, // E13.0 [3] (🫀..🫂) anatomical heart..people hugging {0x1FAC3, 0x1FAC5, prExtendedPictographic}, // E14.0 [3] (🫃..🫅) pregnant man..person with crown - {0x1FAC6, 0x1FACF, prExtendedPictographic}, // E0.0 [10] (🫆..🫏) .. + {0x1FAC6, 0x1FACD, prExtendedPictographic}, // E0.0 [8] (🫆..🫍) .. 
+ {0x1FACE, 0x1FACF, prExtendedPictographic}, // E15.0 [2] (🫎..🫏) moose..donkey {0x1FAD0, 0x1FAD6, prExtendedPictographic}, // E13.0 [7] (🫐..🫖) blueberries..teapot {0x1FAD7, 0x1FAD9, prExtendedPictographic}, // E14.0 [3] (🫗..🫙) pouring liquid..jar - {0x1FADA, 0x1FADF, prExtendedPictographic}, // E0.0 [6] (🫚..🫟) .. + {0x1FADA, 0x1FADB, prExtendedPictographic}, // E15.0 [2] (🫚..🫛) ginger root..pea pod + {0x1FADC, 0x1FADF, prExtendedPictographic}, // E0.0 [4] (🫜..🫟) .. {0x1FAE0, 0x1FAE7, prExtendedPictographic}, // E14.0 [8] (🫠..🫧) melting face..bubbles - {0x1FAE8, 0x1FAEF, prExtendedPictographic}, // E0.0 [8] (🫨..🫯) .. + {0x1FAE8, 0x1FAE8, prExtendedPictographic}, // E15.0 [1] (🫨) shaking face + {0x1FAE9, 0x1FAEF, prExtendedPictographic}, // E0.0 [7] (🫩..🫯) .. {0x1FAF0, 0x1FAF6, prExtendedPictographic}, // E14.0 [7] (🫰..🫶) hand with index finger and thumb crossed..heart hands - {0x1FAF7, 0x1FAFF, prExtendedPictographic}, // E0.0 [9] (🫷..🫿) .. + {0x1FAF7, 0x1FAF8, prExtendedPictographic}, // E15.0 [2] (🫷..🫸) leftwards pushing hand..rightwards pushing hand + {0x1FAF9, 0x1FAFF, prExtendedPictographic}, // E0.0 [7] (🫹..🫿) .. {0x1FC00, 0x1FFFD, prExtendedPictographic}, // E0.0[1022] (🰀..🿽) .. {0xE0000, 0xE0000, prControl}, // Cn {0xE0001, 0xE0001, prControl}, // Cf LANGUAGE TAG diff --git a/vendor/github.com/rivo/uniseg/graphemerules.go b/vendor/github.com/rivo/uniseg/graphemerules.go index 9f46b575b..5d399d29c 100644 --- a/vendor/github.com/rivo/uniseg/graphemerules.go +++ b/vendor/github.com/rivo/uniseg/graphemerules.go @@ -21,11 +21,12 @@ const ( grBoundary ) -// The grapheme cluster parser's state transitions. Maps (state, property) to -// (new state, breaking instruction, rule number). The breaking instruction -// always refers to the boundary between the last and next code point. +// grTransitions implements the grapheme cluster parser's state transitions. +// Maps state and property to a new state, a breaking instruction, and rule +// number. The breaking instruction always refers to the boundary between the +// last and next code point. Returns negative values if no transition is found. // -// This map is queried as follows: +// This function is used as follows: // // 1. Find specific state + specific property. Stop if found. // 2. Find specific state + any property. @@ -36,59 +37,96 @@ const ( // are equal. Stop. // 6. Assume grAny and grBoundary. // -// Unicode version 14.0.0. -var grTransitions = map[[2]int][3]int{ +// Unicode version 15.0.0. +func grTransitions(state, prop int) (newState int, newProp int, boundary int) { + // It turns out that using a big switch statement is much faster than using + // a map. + + switch uint64(state) | uint64(prop)<<32 { // GB5 - {grAny, prCR}: {grCR, grBoundary, 50}, - {grAny, prLF}: {grControlLF, grBoundary, 50}, - {grAny, prControl}: {grControlLF, grBoundary, 50}, + case grAny | prCR<<32: + return grCR, grBoundary, 50 + case grAny | prLF<<32: + return grControlLF, grBoundary, 50 + case grAny | prControl<<32: + return grControlLF, grBoundary, 50 // GB4 - {grCR, prAny}: {grAny, grBoundary, 40}, - {grControlLF, prAny}: {grAny, grBoundary, 40}, + case grCR | prAny<<32: + return grAny, grBoundary, 40 + case grControlLF | prAny<<32: + return grAny, grBoundary, 40 - // GB3. - {grCR, prLF}: {grControlLF, grNoBoundary, 30}, + // GB3 + case grCR | prLF<<32: + return grControlLF, grNoBoundary, 30 - // GB6. 
- {grAny, prL}: {grL, grBoundary, 9990}, - {grL, prL}: {grL, grNoBoundary, 60}, - {grL, prV}: {grLVV, grNoBoundary, 60}, - {grL, prLV}: {grLVV, grNoBoundary, 60}, - {grL, prLVT}: {grLVTT, grNoBoundary, 60}, + // GB6 + case grAny | prL<<32: + return grL, grBoundary, 9990 + case grL | prL<<32: + return grL, grNoBoundary, 60 + case grL | prV<<32: + return grLVV, grNoBoundary, 60 + case grL | prLV<<32: + return grLVV, grNoBoundary, 60 + case grL | prLVT<<32: + return grLVTT, grNoBoundary, 60 - // GB7. - {grAny, prLV}: {grLVV, grBoundary, 9990}, - {grAny, prV}: {grLVV, grBoundary, 9990}, - {grLVV, prV}: {grLVV, grNoBoundary, 70}, - {grLVV, prT}: {grLVTT, grNoBoundary, 70}, + // GB7 + case grAny | prLV<<32: + return grLVV, grBoundary, 9990 + case grAny | prV<<32: + return grLVV, grBoundary, 9990 + case grLVV | prV<<32: + return grLVV, grNoBoundary, 70 + case grLVV | prT<<32: + return grLVTT, grNoBoundary, 70 - // GB8. - {grAny, prLVT}: {grLVTT, grBoundary, 9990}, - {grAny, prT}: {grLVTT, grBoundary, 9990}, - {grLVTT, prT}: {grLVTT, grNoBoundary, 80}, + // GB8 + case grAny | prLVT<<32: + return grLVTT, grBoundary, 9990 + case grAny | prT<<32: + return grLVTT, grBoundary, 9990 + case grLVTT | prT<<32: + return grLVTT, grNoBoundary, 80 - // GB9. - {grAny, prExtend}: {grAny, grNoBoundary, 90}, - {grAny, prZWJ}: {grAny, grNoBoundary, 90}, + // GB9 + case grAny | prExtend<<32: + return grAny, grNoBoundary, 90 + case grAny | prZWJ<<32: + return grAny, grNoBoundary, 90 - // GB9a. - {grAny, prSpacingMark}: {grAny, grNoBoundary, 91}, + // GB9a + case grAny | prSpacingMark<<32: + return grAny, grNoBoundary, 91 - // GB9b. - {grAny, prPrepend}: {grPrepend, grBoundary, 9990}, - {grPrepend, prAny}: {grAny, grNoBoundary, 92}, + // GB9b + case grAny | prPrepend<<32: + return grPrepend, grBoundary, 9990 + case grPrepend | prAny<<32: + return grAny, grNoBoundary, 92 - // GB11. - {grAny, prExtendedPictographic}: {grExtendedPictographic, grBoundary, 9990}, - {grExtendedPictographic, prExtend}: {grExtendedPictographic, grNoBoundary, 110}, - {grExtendedPictographic, prZWJ}: {grExtendedPictographicZWJ, grNoBoundary, 110}, - {grExtendedPictographicZWJ, prExtendedPictographic}: {grExtendedPictographic, grNoBoundary, 110}, + // GB11 + case grAny | prExtendedPictographic<<32: + return grExtendedPictographic, grBoundary, 9990 + case grExtendedPictographic | prExtend<<32: + return grExtendedPictographic, grNoBoundary, 110 + case grExtendedPictographic | prZWJ<<32: + return grExtendedPictographicZWJ, grNoBoundary, 110 + case grExtendedPictographicZWJ | prExtendedPictographic<<32: + return grExtendedPictographic, grNoBoundary, 110 - // GB12 / GB13. - {grAny, prRegionalIndicator}: {grRIOdd, grBoundary, 9990}, - {grRIOdd, prRegionalIndicator}: {grRIEven, grNoBoundary, 120}, - {grRIEven, prRegionalIndicator}: {grRIOdd, grBoundary, 120}, + // GB12 / GB13 + case grAny | prRegionalIndicator<<32: + return grRIOdd, grBoundary, 9990 + case grRIOdd | prRegionalIndicator<<32: + return grRIEven, grNoBoundary, 120 + case grRIEven | prRegionalIndicator<<32: + return grRIOdd, grBoundary, 120 + default: + return -1, -1, -1 + } } // transitionGraphemeState determines the new state of the grapheme cluster @@ -97,40 +135,40 @@ var grTransitions = map[[2]int][3]int{ // table) and whether a cluster boundary was detected. func transitionGraphemeState(state int, r rune) (newState, prop int, boundary bool) { // Determine the property of the next character. 
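Both grTransitions above and lbTransitions later in this patch use the same packed-key trick: the (state, property) pair is folded into one uint64, with the state in the low 32 bits and the property in the high 32 bits, so the dispatch becomes a switch over constant cases rather than a map lookup that must hash and probe. A minimal, self-contained sketch of the technique follows; stateAny, stateCR, propAny, propLF, and transition are illustrative names only, not identifiers from this package.

package main

import "fmt"

// Illustrative state and property constants; uniseg's real code uses its own
// gr*/lb* states and pr* properties.
const (
	stateAny = iota
	stateCR
)

const (
	propAny = iota
	propLF
)

// transition packs (state, prop) into a single uint64 (state in the low 32
// bits, property in the high 32 bits) and resolves it with a switch. Every
// case is a constant expression, so the compiler can lower the dispatch to a
// jump table or a binary search over sorted constants instead of hashing a
// map key.
func transition(state, prop int) (newState int, boundary bool) {
	switch uint64(state) | uint64(prop)<<32 {
	case stateCR | propLF<<32:
		return stateAny, false // CR followed by LF: no boundary in between.
	default:
		return stateAny, true // Fallback: boundary everywhere else.
	}
}

func main() {
	newState, boundary := transition(stateCR, propLF)
	fmt.Println(newState, boundary) // Output: 0 false
}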
- prop = property(graphemeCodePoints, r) + prop = propertyGraphemes(r) // Find the applicable transition. - transition, ok := grTransitions[[2]int{state, prop}] - if ok { + nextState, nextProp, _ := grTransitions(state, prop) + if nextState >= 0 { // We have a specific transition. We'll use it. - return transition[0], prop, transition[1] == grBoundary + return nextState, prop, nextProp == grBoundary } // No specific transition found. Try the less specific ones. - transAnyProp, okAnyProp := grTransitions[[2]int{state, prAny}] - transAnyState, okAnyState := grTransitions[[2]int{grAny, prop}] - if okAnyProp && okAnyState { + anyPropState, anyPropProp, anyPropRule := grTransitions(state, prAny) + anyStateState, anyStateProp, anyStateRule := grTransitions(grAny, prop) + if anyPropState >= 0 && anyStateState >= 0 { // Both apply. We'll use a mix (see comments for grTransitions). - newState = transAnyState[0] - boundary = transAnyState[1] == grBoundary - if transAnyProp[2] < transAnyState[2] { - boundary = transAnyProp[1] == grBoundary + newState = anyStateState + boundary = anyStateProp == grBoundary + if anyPropRule < anyStateRule { + boundary = anyPropProp == grBoundary } return } - if okAnyProp { + if anyPropState >= 0 { // We only have a specific state. - return transAnyProp[0], prop, transAnyProp[1] == grBoundary + return anyPropState, prop, anyPropProp == grBoundary // This branch will probably never be reached because okAnyState will // always be true given the current transition map. But we keep it here // for future modifications to the transition map where this may not be // true anymore. } - if okAnyState { + if anyStateState >= 0 { // We only have a specific property. - return transAnyState[0], prop, transAnyState[1] == grBoundary + return anyStateState, prop, anyStateProp == grBoundary } // No known transition. GB999: Any ÷ Any. diff --git a/vendor/github.com/rivo/uniseg/line.go b/vendor/github.com/rivo/uniseg/line.go index 87f28503f..7a46318d9 100644 --- a/vendor/github.com/rivo/uniseg/line.go +++ b/vendor/github.com/rivo/uniseg/line.go @@ -80,7 +80,7 @@ func FirstLineSegment(b []byte, state int) (segment, rest []byte, mustBreak bool } } -// FirstLineSegmentInString is like FirstLineSegment() but its input and outputs +// FirstLineSegmentInString is like [FirstLineSegment] but its input and outputs // are strings. func FirstLineSegmentInString(str string, state int) (segment, rest string, mustBreak bool, newState int) { // An empty byte slice returns nothing. @@ -122,13 +122,13 @@ func FirstLineSegmentInString(str string, state int) (segment, rest string, must // [UAX #14]: https://www.unicode.org/reports/tr14/#Algorithm func HasTrailingLineBreak(b []byte) bool { r, _ := utf8.DecodeLastRune(b) - property, _ := propertyWithGenCat(lineBreakCodePoints, r) - return property == lbBK || property == lbCR || property == lbLF || property == lbNL + property, _ := propertyLineBreak(r) + return property == prBK || property == prCR || property == prLF || property == prNL } // HasTrailingLineBreakInString is like [HasTrailingLineBreak] but for a string. 
func HasTrailingLineBreakInString(str string) bool { r, _ := utf8.DecodeLastRuneInString(str) - property, _ := propertyWithGenCat(lineBreakCodePoints, r) - return property == lbBK || property == lbCR || property == lbLF || property == lbNL + property, _ := propertyLineBreak(r) + return property == prBK || property == prCR || property == prLF || property == prNL } diff --git a/vendor/github.com/rivo/uniseg/lineproperties.go b/vendor/github.com/rivo/uniseg/lineproperties.go index 32169306e..ac7fac4c0 100644 --- a/vendor/github.com/rivo/uniseg/lineproperties.go +++ b/vendor/github.com/rivo/uniseg/lineproperties.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. +package uniseg + // lineBreakCodePoints are taken from -// https://www.unicode.org/Public/14.0.0/ucd/LineBreak.txt +// https://www.unicode.org/Public/15.0.0/ucd/LineBreak.txt // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. var lineBreakCodePoints = [][4]int{ {0x0000, 0x0008, prCM, gcCc}, // [9] .. @@ -439,6 +439,7 @@ var lineBreakCodePoints = [][4]int{ {0x0CE2, 0x0CE3, prCM, gcMn}, // [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL {0x0CE6, 0x0CEF, prNU, gcNd}, // [10] KANNADA DIGIT ZERO..KANNADA DIGIT NINE {0x0CF1, 0x0CF2, prAL, gcLo}, // [2] KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADHMANIYA + {0x0CF3, 0x0CF3, prCM, gcMc}, // KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT {0x0D00, 0x0D01, prCM, gcMn}, // [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prCM, gcMc}, // [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D04, 0x0D0C, prAL, gcLo}, // [9] MALAYALAM LETTER VEDIC ANUSVARA..MALAYALAM LETTER VOCALIC L @@ -500,7 +501,7 @@ var lineBreakCodePoints = [][4]int{ {0x0EBD, 0x0EBD, prSA, gcLo}, // LAO SEMIVOWEL SIGN NYO {0x0EC0, 0x0EC4, prSA, gcLo}, // [5] LAO VOWEL SIGN E..LAO VOWEL SIGN AI {0x0EC6, 0x0EC6, prSA, gcLm}, // LAO KO LA - {0x0EC8, 0x0ECD, prSA, gcMn}, // [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0EC8, 0x0ECE, prSA, gcMn}, // [7] LAO TONE MAI EK..LAO YAMAKKAN {0x0ED0, 0x0ED9, prNU, gcNd}, // [10] LAO DIGIT ZERO..LAO DIGIT NINE {0x0EDC, 0x0EDF, prSA, gcLo}, // [4] LAO HO NO..LAO LETTER KHMU NYO {0x0F00, 0x0F00, prAL, gcLo}, // TIBETAN SYLLABLE OM @@ -813,7 +814,11 @@ var lineBreakCodePoints = [][4]int{ {0x1D79, 0x1D7F, prAL, gcLl}, // [7] LATIN SMALL LETTER INSULAR G..LATIN SMALL LETTER UPSILON WITH STROKE {0x1D80, 0x1D9A, prAL, gcLl}, // [27] LATIN SMALL LETTER B WITH PALATAL HOOK..LATIN SMALL LETTER EZH WITH RETROFLEX HOOK {0x1D9B, 0x1DBF, prAL, gcLm}, // [37] MODIFIER LETTER SMALL TURNED ALPHA..MODIFIER LETTER SMALL THETA - {0x1DC0, 0x1DFF, prCM, gcMn}, // [64] COMBINING DOTTED GRAVE ACCENT..COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW + {0x1DC0, 0x1DCC, prCM, gcMn}, // [13] COMBINING DOTTED GRAVE ACCENT..COMBINING MACRON-BREVE + {0x1DCD, 0x1DCD, prGL, gcMn}, // COMBINING DOUBLE CIRCUMFLEX ABOVE + {0x1DCE, 0x1DFB, prCM, gcMn}, // [46] COMBINING OGONEK ABOVE..COMBINING DELETION MARK + {0x1DFC, 0x1DFC, prGL, gcMn}, // COMBINING DOUBLE INVERTED BREVE BELOW + {0x1DFD, 0x1DFF, prCM, gcMn}, // [3] COMBINING ALMOST EQUAL TO BELOW..COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW {0x1E00, 0x1EFF, prAL, 
gcLC}, // [256] LATIN CAPITAL LETTER A WITH RING BELOW..LATIN SMALL LETTER Y WITH LOOP {0x1F00, 0x1F15, prAL, gcLC}, // [22] GREEK SMALL LETTER ALPHA WITH PSILI..GREEK SMALL LETTER EPSILON WITH DASIA AND OXIA {0x1F18, 0x1F1D, prAL, gcLu}, // [6] GREEK CAPITAL LETTER EPSILON WITH PSILI..GREEK CAPITAL LETTER EPSILON WITH DASIA AND OXIA @@ -889,7 +894,7 @@ var lineBreakCodePoints = [][4]int{ {0x2054, 0x2054, prAL, gcPc}, // INVERTED UNDERTIE {0x2055, 0x2055, prAL, gcPo}, // FLOWER PUNCTUATION MARK {0x2056, 0x2056, prBA, gcPo}, // THREE DOT PUNCTUATION - {0x2057, 0x2057, prAL, gcPo}, // QUADRUPLE PRIME + {0x2057, 0x2057, prPO, gcPo}, // QUADRUPLE PRIME {0x2058, 0x205B, prBA, gcPo}, // [4] FOUR DOT PUNCTUATION..FOUR DOT MARK {0x205C, 0x205C, prAL, gcPo}, // DOTTED CROSS {0x205D, 0x205E, prBA, gcPo}, // [2] TRICOLON..VERTICAL FOUR DOTS @@ -2751,6 +2756,7 @@ var lineBreakCodePoints = [][4]int{ {0x10EAB, 0x10EAC, prCM, gcMn}, // [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK {0x10EAD, 0x10EAD, prBA, gcPd}, // YEZIDI HYPHENATION MARK {0x10EB0, 0x10EB1, prAL, gcLo}, // [2] YEZIDI LETTER LAM WITH DOT ABOVE..YEZIDI LETTER YOT WITH CIRCUMFLEX ABOVE + {0x10EFD, 0x10EFF, prCM, gcMn}, // [3] ARABIC SMALL LOW WORD SAKTA..ARABIC SMALL LOW WORD MADDA {0x10F00, 0x10F1C, prAL, gcLo}, // [29] OLD SOGDIAN LETTER ALEPH..OLD SOGDIAN LETTER FINAL TAW WITH VERTICAL TAIL {0x10F1D, 0x10F26, prAL, gcNo}, // [10] OLD SOGDIAN NUMBER ONE..OLD SOGDIAN FRACTION ONE HALF {0x10F27, 0x10F27, prAL, gcLo}, // OLD SOGDIAN LIGATURE AYIN-DALETH @@ -2840,6 +2846,8 @@ var lineBreakCodePoints = [][4]int{ {0x1123B, 0x1123C, prBA, gcPo}, // [2] KHOJKI SECTION MARK..KHOJKI DOUBLE SECTION MARK {0x1123D, 0x1123D, prAL, gcPo}, // KHOJKI ABBREVIATION SIGN {0x1123E, 0x1123E, prCM, gcMn}, // KHOJKI SIGN SUKUN + {0x1123F, 0x11240, prAL, gcLo}, // [2] KHOJKI LETTER QA..KHOJKI LETTER SHORT I + {0x11241, 0x11241, prCM, gcMn}, // KHOJKI VOWEL SIGN VOCALIC R {0x11280, 0x11286, prAL, gcLo}, // [7] MULTANI LETTER A..MULTANI LETTER GA {0x11288, 0x11288, prAL, gcLo}, // MULTANI LETTER GHA {0x1128A, 0x1128D, prAL, gcLo}, // [4] MULTANI LETTER CA..MULTANI LETTER JJA @@ -3013,6 +3021,7 @@ var lineBreakCodePoints = [][4]int{ {0x11AA1, 0x11AA2, prBA, gcPo}, // [2] SOYOMBO TERMINAL MARK-1..SOYOMBO TERMINAL MARK-2 {0x11AB0, 0x11ABF, prAL, gcLo}, // [16] CANADIAN SYLLABICS NATTILIK HI..CANADIAN SYLLABICS SPA {0x11AC0, 0x11AF8, prAL, gcLo}, // [57] PAU CIN HAU LETTER PA..PAU CIN HAU GLOTTAL STOP FINAL + {0x11B00, 0x11B09, prBB, gcPo}, // [10] DEVANAGARI HEAD MARK..DEVANAGARI SIGN MINDU {0x11C00, 0x11C08, prAL, gcLo}, // [9] BHAIKSUKI LETTER A..BHAIKSUKI LETTER VOCALIC L {0x11C0A, 0x11C2E, prAL, gcLo}, // [37] BHAIKSUKI LETTER E..BHAIKSUKI LETTER HA {0x11C2F, 0x11C2F, prCM, gcMc}, // BHAIKSUKI VOWEL SIGN AA @@ -3059,6 +3068,20 @@ var lineBreakCodePoints = [][4]int{ {0x11EF3, 0x11EF4, prCM, gcMn}, // [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prCM, gcMc}, // [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O {0x11EF7, 0x11EF8, prAL, gcPo}, // [2] MAKASAR PASSIMBANG..MAKASAR END OF SECTION + {0x11F00, 0x11F01, prCM, gcMn}, // [2] KAWI SIGN CANDRABINDU..KAWI SIGN ANUSVARA + {0x11F02, 0x11F02, prAL, gcLo}, // KAWI SIGN REPHA + {0x11F03, 0x11F03, prCM, gcMc}, // KAWI SIGN VISARGA + {0x11F04, 0x11F10, prAL, gcLo}, // [13] KAWI LETTER A..KAWI LETTER O + {0x11F12, 0x11F33, prAL, gcLo}, // [34] KAWI LETTER KA..KAWI LETTER JNYA + {0x11F34, 0x11F35, prCM, gcMc}, // [2] KAWI VOWEL SIGN AA..KAWI VOWEL SIGN ALTERNATE AA + {0x11F36, 0x11F3A, 
prCM, gcMn}, // [5] KAWI VOWEL SIGN I..KAWI VOWEL SIGN VOCALIC R + {0x11F3E, 0x11F3F, prCM, gcMc}, // [2] KAWI VOWEL SIGN E..KAWI VOWEL SIGN AI + {0x11F40, 0x11F40, prCM, gcMn}, // KAWI VOWEL SIGN EU + {0x11F41, 0x11F41, prCM, gcMc}, // KAWI SIGN KILLER + {0x11F42, 0x11F42, prCM, gcMn}, // KAWI CONJOINER + {0x11F43, 0x11F44, prBA, gcPo}, // [2] KAWI DANDA..KAWI DOUBLE DANDA + {0x11F45, 0x11F4F, prID, gcPo}, // [11] KAWI PUNCTUATION SECTION MARKER..KAWI PUNCTUATION CLOSING SPIRAL + {0x11F50, 0x11F59, prNU, gcNd}, // [10] KAWI DIGIT ZERO..KAWI DIGIT NINE {0x11FB0, 0x11FB0, prAL, gcLo}, // LISU LETTER YHA {0x11FC0, 0x11FD4, prAL, gcNo}, // [21] TAMIL FRACTION ONE THREE-HUNDRED-AND-TWENTIETH..TAMIL FRACTION DOWNSCALING FACTOR KIIZH {0x11FD5, 0x11FDC, prAL, gcSo}, // [8] TAMIL SIGN NEL..TAMIL SIGN MUKKURUNI @@ -3084,10 +3107,18 @@ var lineBreakCodePoints = [][4]int{ {0x1328A, 0x13378, prAL, gcLo}, // [239] EGYPTIAN HIEROGLYPH O037..EGYPTIAN HIEROGLYPH V011 {0x13379, 0x13379, prOP, gcLo}, // EGYPTIAN HIEROGLYPH V011A {0x1337A, 0x1337B, prCL, gcLo}, // [2] EGYPTIAN HIEROGLYPH V011B..EGYPTIAN HIEROGLYPH V011C - {0x1337C, 0x1342E, prAL, gcLo}, // [179] EGYPTIAN HIEROGLYPH V012..EGYPTIAN HIEROGLYPH AA032 + {0x1337C, 0x1342F, prAL, gcLo}, // [180] EGYPTIAN HIEROGLYPH V012..EGYPTIAN HIEROGLYPH V011D {0x13430, 0x13436, prGL, gcCf}, // [7] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH OVERLAY MIDDLE {0x13437, 0x13437, prOP, gcCf}, // EGYPTIAN HIEROGLYPH BEGIN SEGMENT {0x13438, 0x13438, prCL, gcCf}, // EGYPTIAN HIEROGLYPH END SEGMENT + {0x13439, 0x1343B, prGL, gcCf}, // [3] EGYPTIAN HIEROGLYPH INSERT AT MIDDLE..EGYPTIAN HIEROGLYPH INSERT AT BOTTOM + {0x1343C, 0x1343C, prOP, gcCf}, // EGYPTIAN HIEROGLYPH BEGIN ENCLOSURE + {0x1343D, 0x1343D, prCL, gcCf}, // EGYPTIAN HIEROGLYPH END ENCLOSURE + {0x1343E, 0x1343E, prOP, gcCf}, // EGYPTIAN HIEROGLYPH BEGIN WALLED ENCLOSURE + {0x1343F, 0x1343F, prCL, gcCf}, // EGYPTIAN HIEROGLYPH END WALLED ENCLOSURE + {0x13440, 0x13440, prCM, gcMn}, // EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY + {0x13441, 0x13446, prAL, gcLo}, // [6] EGYPTIAN HIEROGLYPH FULL BLANK..EGYPTIAN HIEROGLYPH WIDE LOST SIGN + {0x13447, 0x13455, prCM, gcMn}, // [15] EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START..EGYPTIAN HIEROGLYPH MODIFIER DAMAGED {0x14400, 0x145CD, prAL, gcLo}, // [462] ANATOLIAN HIEROGLYPH A001..ANATOLIAN HIEROGLYPH A409 {0x145CE, 0x145CE, prOP, gcLo}, // ANATOLIAN HIEROGLYPH A410 BEGIN LOGOGRAM MARK {0x145CF, 0x145CF, prCL, gcLo}, // ANATOLIAN HIEROGLYPH A410A END LOGOGRAM MARK @@ -3137,7 +3168,9 @@ var lineBreakCodePoints = [][4]int{ {0x1AFFD, 0x1AFFE, prAL, gcLm}, // [2] KATAKANA LETTER MINNAN NASALIZED TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-8 {0x1B000, 0x1B0FF, prID, gcLo}, // [256] KATAKANA LETTER ARCHAIC E..HENTAIGANA LETTER RE-2 {0x1B100, 0x1B122, prID, gcLo}, // [35] HENTAIGANA LETTER RE-3..KATAKANA LETTER ARCHAIC WU + {0x1B132, 0x1B132, prCJ, gcLo}, // HIRAGANA LETTER SMALL KO {0x1B150, 0x1B152, prCJ, gcLo}, // [3] HIRAGANA LETTER SMALL WI..HIRAGANA LETTER SMALL WO + {0x1B155, 0x1B155, prCJ, gcLo}, // KATAKANA LETTER SMALL KO {0x1B164, 0x1B167, prCJ, gcLo}, // [4] KATAKANA LETTER SMALL WI..KATAKANA LETTER SMALL N {0x1B170, 0x1B2FB, prID, gcLo}, // [396] NUSHU CHARACTER-1B170..NUSHU CHARACTER-1B2FB {0x1BC00, 0x1BC6A, prAL, gcLo}, // [107] DUPLOYAN LETTER H..DUPLOYAN LETTER VOCALIC M @@ -3168,6 +3201,7 @@ var lineBreakCodePoints = [][4]int{ {0x1D200, 0x1D241, prAL, gcSo}, // [66] GREEK VOCAL NOTATION SYMBOL-1..GREEK INSTRUMENTAL NOTATION SYMBOL-54 
{0x1D242, 0x1D244, prCM, gcMn}, // [3] COMBINING GREEK MUSICAL TRISEME..COMBINING GREEK MUSICAL PENTASEME {0x1D245, 0x1D245, prAL, gcSo}, // GREEK MUSICAL LEIMMA + {0x1D2C0, 0x1D2D3, prAL, gcNo}, // [20] KAKTOVIK NUMERAL ZERO..KAKTOVIK NUMERAL NINETEEN {0x1D2E0, 0x1D2F3, prAL, gcNo}, // [20] MAYAN NUMERAL ZERO..MAYAN NUMERAL NINETEEN {0x1D300, 0x1D356, prAL, gcSo}, // [87] MONOGRAM FOR EARTH..TETRAGRAM FOR FOSTERING {0x1D360, 0x1D378, prAL, gcNo}, // [25] COUNTING ROD UNIT DIGIT ONE..TALLY MARK FIVE @@ -3228,11 +3262,14 @@ var lineBreakCodePoints = [][4]int{ {0x1DF00, 0x1DF09, prAL, gcLl}, // [10] LATIN SMALL LETTER FENG DIGRAPH WITH TRILL..LATIN SMALL LETTER T WITH HOOK AND RETROFLEX HOOK {0x1DF0A, 0x1DF0A, prAL, gcLo}, // LATIN LETTER RETROFLEX CLICK WITH RETROFLEX HOOK {0x1DF0B, 0x1DF1E, prAL, gcLl}, // [20] LATIN SMALL LETTER ESH WITH DOUBLE BAR..LATIN SMALL LETTER S WITH CURL + {0x1DF25, 0x1DF2A, prAL, gcLl}, // [6] LATIN SMALL LETTER D WITH MID-HEIGHT LEFT HOOK..LATIN SMALL LETTER T WITH MID-HEIGHT LEFT HOOK {0x1E000, 0x1E006, prCM, gcMn}, // [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE {0x1E008, 0x1E018, prCM, gcMn}, // [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU {0x1E01B, 0x1E021, prCM, gcMn}, // [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI {0x1E023, 0x1E024, prCM, gcMn}, // [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prCM, gcMn}, // [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E030, 0x1E06D, prAL, gcLm}, // [62] MODIFIER LETTER CYRILLIC SMALL A..MODIFIER LETTER CYRILLIC SMALL STRAIGHT U WITH STROKE + {0x1E08F, 0x1E08F, prCM, gcMn}, // COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I {0x1E100, 0x1E12C, prAL, gcLo}, // [45] NYIAKENG PUACHUE HMONG LETTER MA..NYIAKENG PUACHUE HMONG LETTER W {0x1E130, 0x1E136, prCM, gcMn}, // [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E137, 0x1E13D, prAL, gcLm}, // [7] NYIAKENG PUACHUE HMONG SIGN FOR PERSON..NYIAKENG PUACHUE HMONG SYLLABLE LENGTHENER @@ -3245,6 +3282,10 @@ var lineBreakCodePoints = [][4]int{ {0x1E2EC, 0x1E2EF, prCM, gcMn}, // [4] WANCHO TONE TUP..WANCHO TONE KOINI {0x1E2F0, 0x1E2F9, prNU, gcNd}, // [10] WANCHO DIGIT ZERO..WANCHO DIGIT NINE {0x1E2FF, 0x1E2FF, prPR, gcSc}, // WANCHO NGUN SIGN + {0x1E4D0, 0x1E4EA, prAL, gcLo}, // [27] NAG MUNDARI LETTER O..NAG MUNDARI LETTER ELL + {0x1E4EB, 0x1E4EB, prAL, gcLm}, // NAG MUNDARI SIGN OJOD + {0x1E4EC, 0x1E4EF, prCM, gcMn}, // [4] NAG MUNDARI SIGN MUHOR..NAG MUNDARI SIGN SUTUH + {0x1E4F0, 0x1E4F9, prNU, gcNd}, // [10] NAG MUNDARI DIGIT ZERO..NAG MUNDARI DIGIT NINE {0x1E7E0, 0x1E7E6, prAL, gcLo}, // [7] ETHIOPIC SYLLABLE HHYA..ETHIOPIC SYLLABLE HHYO {0x1E7E8, 0x1E7EB, prAL, gcLo}, // [4] ETHIOPIC SYLLABLE GURAGE HHWA..ETHIOPIC SYLLABLE HHWE {0x1E7ED, 0x1E7EE, prAL, gcLo}, // [2] ETHIOPIC SYLLABLE GURAGE MWI..ETHIOPIC SYLLABLE GURAGE MWEE @@ -3412,16 +3453,18 @@ var lineBreakCodePoints = [][4]int{ {0x1F6C1, 0x1F6CB, prID, gcSo}, // [11] BATHTUB..COUCH AND LAMP {0x1F6CC, 0x1F6CC, prEB, gcSo}, // SLEEPING ACCOMMODATION {0x1F6CD, 0x1F6D7, prID, gcSo}, // [11] SHOPPING BAGS..ELEVATOR - {0x1F6D8, 0x1F6DC, prID, gcCn}, // [5] .. - {0x1F6DD, 0x1F6EC, prID, gcSo}, // [16] PLAYGROUND SLIDE..AIRPLANE ARRIVING + {0x1F6D8, 0x1F6DB, prID, gcCn}, // [4] .. + {0x1F6DC, 0x1F6EC, prID, gcSo}, // [17] WIRELESS..AIRPLANE ARRIVING {0x1F6ED, 0x1F6EF, prID, gcCn}, // [3] .. 
{0x1F6F0, 0x1F6FC, prID, gcSo}, // [13] SATELLITE..ROLLER SKATE {0x1F6FD, 0x1F6FF, prID, gcCn}, // [3] .. {0x1F700, 0x1F773, prAL, gcSo}, // [116] ALCHEMICAL SYMBOL FOR QUINTESSENCE..ALCHEMICAL SYMBOL FOR HALF OUNCE - {0x1F774, 0x1F77F, prID, gcCn}, // [12] .. + {0x1F774, 0x1F776, prID, gcSo}, // [3] LOT OF FORTUNE..LUNAR ECLIPSE + {0x1F777, 0x1F77A, prID, gcCn}, // [4] .. + {0x1F77B, 0x1F77F, prID, gcSo}, // [5] HAUMEA..ORCUS {0x1F780, 0x1F7D4, prAL, gcSo}, // [85] BLACK LEFT-POINTING ISOSCELES RIGHT TRIANGLE..HEAVY TWELVE POINTED PINWHEEL STAR - {0x1F7D5, 0x1F7D8, prID, gcSo}, // [4] CIRCLED TRIANGLE..NEGATIVE CIRCLED SQUARE - {0x1F7D9, 0x1F7DF, prID, gcCn}, // [7] .. + {0x1F7D5, 0x1F7D9, prID, gcSo}, // [5] CIRCLED TRIANGLE..NINE POINTED WHITE STAR + {0x1F7DA, 0x1F7DF, prID, gcCn}, // [6] .. {0x1F7E0, 0x1F7EB, prID, gcSo}, // [12] LARGE ORANGE CIRCLE..LARGE BROWN SQUARE {0x1F7EC, 0x1F7EF, prID, gcCn}, // [4] .. {0x1F7F0, 0x1F7F0, prID, gcSo}, // HEAVY EQUALS SIGN @@ -3467,33 +3510,29 @@ var lineBreakCodePoints = [][4]int{ {0x1FA54, 0x1FA5F, prID, gcCn}, // [12] .. {0x1FA60, 0x1FA6D, prID, gcSo}, // [14] XIANGQI RED GENERAL..XIANGQI BLACK SOLDIER {0x1FA6E, 0x1FA6F, prID, gcCn}, // [2] .. - {0x1FA70, 0x1FA74, prID, gcSo}, // [5] BALLET SHOES..THONG SANDAL - {0x1FA75, 0x1FA77, prID, gcCn}, // [3] .. - {0x1FA78, 0x1FA7C, prID, gcSo}, // [5] DROP OF BLOOD..CRUTCH + {0x1FA70, 0x1FA7C, prID, gcSo}, // [13] BALLET SHOES..CRUTCH {0x1FA7D, 0x1FA7F, prID, gcCn}, // [3] .. - {0x1FA80, 0x1FA86, prID, gcSo}, // [7] YO-YO..NESTING DOLLS - {0x1FA87, 0x1FA8F, prID, gcCn}, // [9] .. - {0x1FA90, 0x1FAAC, prID, gcSo}, // [29] RINGED PLANET..HAMSA - {0x1FAAD, 0x1FAAF, prID, gcCn}, // [3] .. - {0x1FAB0, 0x1FABA, prID, gcSo}, // [11] FLY..NEST WITH EGGS - {0x1FABB, 0x1FABF, prID, gcCn}, // [5] .. - {0x1FAC0, 0x1FAC2, prID, gcSo}, // [3] ANATOMICAL HEART..PEOPLE HUGGING + {0x1FA80, 0x1FA88, prID, gcSo}, // [9] YO-YO..FLUTE + {0x1FA89, 0x1FA8F, prID, gcCn}, // [7] .. + {0x1FA90, 0x1FABD, prID, gcSo}, // [46] RINGED PLANET..WING + {0x1FABE, 0x1FABE, prID, gcCn}, // + {0x1FABF, 0x1FAC2, prID, gcSo}, // [4] GOOSE..PEOPLE HUGGING {0x1FAC3, 0x1FAC5, prEB, gcSo}, // [3] PREGNANT MAN..PERSON WITH CROWN - {0x1FAC6, 0x1FACF, prID, gcCn}, // [10] .. - {0x1FAD0, 0x1FAD9, prID, gcSo}, // [10] BLUEBERRIES..JAR - {0x1FADA, 0x1FADF, prID, gcCn}, // [6] .. - {0x1FAE0, 0x1FAE7, prID, gcSo}, // [8] MELTING FACE..BUBBLES - {0x1FAE8, 0x1FAEF, prID, gcCn}, // [8] .. - {0x1FAF0, 0x1FAF6, prEB, gcSo}, // [7] HAND WITH INDEX FINGER AND THUMB CROSSED..HEART HANDS - {0x1FAF7, 0x1FAFF, prID, gcCn}, // [9] .. + {0x1FAC6, 0x1FACD, prID, gcCn}, // [8] .. + {0x1FACE, 0x1FADB, prID, gcSo}, // [14] MOOSE..PEA POD + {0x1FADC, 0x1FADF, prID, gcCn}, // [4] .. + {0x1FAE0, 0x1FAE8, prID, gcSo}, // [9] MELTING FACE..SHAKING FACE + {0x1FAE9, 0x1FAEF, prID, gcCn}, // [7] .. + {0x1FAF0, 0x1FAF8, prEB, gcSo}, // [9] HAND WITH INDEX FINGER AND THUMB CROSSED..RIGHTWARDS PUSHING HAND + {0x1FAF9, 0x1FAFF, prID, gcCn}, // [7] .. {0x1FB00, 0x1FB92, prAL, gcSo}, // [147] BLOCK SEXTANT-1..UPPER HALF INVERSE MEDIUM SHADE AND LOWER HALF BLOCK {0x1FB94, 0x1FBCA, prAL, gcSo}, // [55] LEFT HALF INVERSE MEDIUM SHADE AND RIGHT HALF BLOCK..WHITE UP-POINTING CHEVRON {0x1FBF0, 0x1FBF9, prNU, gcNd}, // [10] SEGMENTED DIGIT ZERO..SEGMENTED DIGIT NINE {0x1FC00, 0x1FFFD, prID, gcCn}, // [1022] .. {0x20000, 0x2A6DF, prID, gcLo}, // [42720] CJK UNIFIED IDEOGRAPH-20000..CJK UNIFIED IDEOGRAPH-2A6DF {0x2A6E0, 0x2A6FF, prID, gcCn}, // [32] .. 
- {0x2A700, 0x2B738, prID, gcLo}, // [4153] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B738 - {0x2B739, 0x2B73F, prID, gcCn}, // [7] .. + {0x2A700, 0x2B739, prID, gcLo}, // [4154] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B739 + {0x2B73A, 0x2B73F, prID, gcCn}, // [6] .. {0x2B740, 0x2B81D, prID, gcLo}, // [222] CJK UNIFIED IDEOGRAPH-2B740..CJK UNIFIED IDEOGRAPH-2B81D {0x2B81E, 0x2B81F, prID, gcCn}, // [2] .. {0x2B820, 0x2CEA1, prID, gcLo}, // [5762] CJK UNIFIED IDEOGRAPH-2B820..CJK UNIFIED IDEOGRAPH-2CEA1 @@ -3504,7 +3543,9 @@ var lineBreakCodePoints = [][4]int{ {0x2FA1E, 0x2FA1F, prID, gcCn}, // [2] .. {0x2FA20, 0x2FFFD, prID, gcCn}, // [1502] .. {0x30000, 0x3134A, prID, gcLo}, // [4939] CJK UNIFIED IDEOGRAPH-30000..CJK UNIFIED IDEOGRAPH-3134A - {0x3134B, 0x3FFFD, prID, gcCn}, // [60595] .. + {0x3134B, 0x3134F, prID, gcCn}, // [5] .. + {0x31350, 0x323AF, prID, gcLo}, // [4192] CJK UNIFIED IDEOGRAPH-31350..CJK UNIFIED IDEOGRAPH-323AF + {0x323B0, 0x3FFFD, prID, gcCn}, // [56398] .. {0xE0001, 0xE0001, prCM, gcCf}, // LANGUAGE TAG {0xE0020, 0xE007F, prCM, gcCf}, // [96] TAG SPACE..CANCEL TAG {0xE0100, 0xE01EF, prCM, gcMn}, // [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 diff --git a/vendor/github.com/rivo/uniseg/linerules.go b/vendor/github.com/rivo/uniseg/linerules.go index d2ad51680..7708ae0fb 100644 --- a/vendor/github.com/rivo/uniseg/linerules.go +++ b/vendor/github.com/rivo/uniseg/linerules.go @@ -64,222 +64,381 @@ const ( LineMustBreak // You must break the line here. ) -// The line break parser's state transitions. It's analogous to grTransitions, -// see comments there for details. Unicode version 14.0.0. -var lbTransitions = map[[2]int][3]int{ +// lbTransitions implements the line break parser's state transitions. It's +// analogous to [grTransitions]; see comments there for details. +// +// Unicode version 15.0.0. +func lbTransitions(state, prop int) (newState, lineBreak, rule int) { + switch uint64(state) | uint64(prop)<<32 { // LB4. - {lbAny, prBK}: {lbBK, LineCanBreak, 310}, - {lbBK, prAny}: {lbAny, LineMustBreak, 40}, + case lbBK | prAny<<32: + return lbAny, LineMustBreak, 40 // LB5. - {lbAny, prCR}: {lbCR, LineCanBreak, 310}, - {lbAny, prLF}: {lbLF, LineCanBreak, 310}, - {lbAny, prNL}: {lbNL, LineCanBreak, 310}, - {lbCR, prLF}: {lbLF, LineDontBreak, 50}, - {lbCR, prAny}: {lbAny, LineMustBreak, 50}, - {lbLF, prAny}: {lbAny, LineMustBreak, 50}, - {lbNL, prAny}: {lbAny, LineMustBreak, 50}, + case lbCR | prLF<<32: + return lbLF, LineDontBreak, 50 + case lbCR | prAny<<32: + return lbAny, LineMustBreak, 50 + case lbLF | prAny<<32: + return lbAny, LineMustBreak, 50 + case lbNL | prAny<<32: + return lbAny, LineMustBreak, 50 // LB6. - {lbAny, prBK}: {lbBK, LineDontBreak, 60}, - {lbAny, prCR}: {lbCR, LineDontBreak, 60}, - {lbAny, prLF}: {lbLF, LineDontBreak, 60}, - {lbAny, prNL}: {lbNL, LineDontBreak, 60}, + case lbAny | prBK<<32: + return lbBK, LineDontBreak, 60 + case lbAny | prCR<<32: + return lbCR, LineDontBreak, 60 + case lbAny | prLF<<32: + return lbLF, LineDontBreak, 60 + case lbAny | prNL<<32: + return lbNL, LineDontBreak, 60 // LB7. - {lbAny, prSP}: {lbSP, LineDontBreak, 70}, - {lbAny, prZW}: {lbZW, LineDontBreak, 70}, + case lbAny | prSP<<32: + return lbSP, LineDontBreak, 70 + case lbAny | prZW<<32: + return lbZW, LineDontBreak, 70 // LB8. - {lbZW, prSP}: {lbZW, LineDontBreak, 70}, - {lbZW, prAny}: {lbAny, LineCanBreak, 80}, + case lbZW | prSP<<32: + return lbZW, LineDontBreak, 70 + case lbZW | prAny<<32: + return lbAny, LineCanBreak, 80 // LB11.
- {lbAny, prWJ}: {lbWJ, LineDontBreak, 110}, - {lbWJ, prAny}: {lbAny, LineDontBreak, 110}, + case lbAny | prWJ<<32: + return lbWJ, LineDontBreak, 110 + case lbWJ | prAny<<32: + return lbAny, LineDontBreak, 110 // LB12. - {lbAny, prGL}: {lbGL, LineCanBreak, 310}, - {lbGL, prAny}: {lbAny, LineDontBreak, 120}, + case lbAny | prGL<<32: + return lbGL, LineCanBreak, 310 + case lbGL | prAny<<32: + return lbAny, LineDontBreak, 120 // LB13 (simple transitions). - {lbAny, prCL}: {lbCL, LineCanBreak, 310}, - {lbAny, prCP}: {lbCP, LineCanBreak, 310}, - {lbAny, prEX}: {lbEX, LineDontBreak, 130}, - {lbAny, prIS}: {lbIS, LineCanBreak, 310}, - {lbAny, prSY}: {lbSY, LineCanBreak, 310}, + case lbAny | prCL<<32: + return lbCL, LineCanBreak, 310 + case lbAny | prCP<<32: + return lbCP, LineCanBreak, 310 + case lbAny | prEX<<32: + return lbEX, LineDontBreak, 130 + case lbAny | prIS<<32: + return lbIS, LineCanBreak, 310 + case lbAny | prSY<<32: + return lbSY, LineCanBreak, 310 // LB14. - {lbAny, prOP}: {lbOP, LineCanBreak, 310}, - {lbOP, prSP}: {lbOP, LineDontBreak, 70}, - {lbOP, prAny}: {lbAny, LineDontBreak, 140}, + case lbAny | prOP<<32: + return lbOP, LineCanBreak, 310 + case lbOP | prSP<<32: + return lbOP, LineDontBreak, 70 + case lbOP | prAny<<32: + return lbAny, LineDontBreak, 140 // LB15. - {lbQU, prSP}: {lbQUSP, LineDontBreak, 70}, - {lbQU, prOP}: {lbOP, LineDontBreak, 150}, - {lbQUSP, prOP}: {lbOP, LineDontBreak, 150}, + case lbQU | prSP<<32: + return lbQUSP, LineDontBreak, 70 + case lbQU | prOP<<32: + return lbOP, LineDontBreak, 150 + case lbQUSP | prOP<<32: + return lbOP, LineDontBreak, 150 // LB16. - {lbCL, prSP}: {lbCLCPSP, LineDontBreak, 70}, - {lbNUCL, prSP}: {lbCLCPSP, LineDontBreak, 70}, - {lbCP, prSP}: {lbCLCPSP, LineDontBreak, 70}, - {lbNUCP, prSP}: {lbCLCPSP, LineDontBreak, 70}, - {lbCL, prNS}: {lbNS, LineDontBreak, 160}, - {lbNUCL, prNS}: {lbNS, LineDontBreak, 160}, - {lbCP, prNS}: {lbNS, LineDontBreak, 160}, - {lbNUCP, prNS}: {lbNS, LineDontBreak, 160}, - {lbCLCPSP, prNS}: {lbNS, LineDontBreak, 160}, + case lbCL | prSP<<32: + return lbCLCPSP, LineDontBreak, 70 + case lbNUCL | prSP<<32: + return lbCLCPSP, LineDontBreak, 70 + case lbCP | prSP<<32: + return lbCLCPSP, LineDontBreak, 70 + case lbNUCP | prSP<<32: + return lbCLCPSP, LineDontBreak, 70 + case lbCL | prNS<<32: + return lbNS, LineDontBreak, 160 + case lbNUCL | prNS<<32: + return lbNS, LineDontBreak, 160 + case lbCP | prNS<<32: + return lbNS, LineDontBreak, 160 + case lbNUCP | prNS<<32: + return lbNS, LineDontBreak, 160 + case lbCLCPSP | prNS<<32: + return lbNS, LineDontBreak, 160 // LB17. - {lbAny, prB2}: {lbB2, LineCanBreak, 310}, - {lbB2, prSP}: {lbB2SP, LineDontBreak, 70}, - {lbB2, prB2}: {lbB2, LineDontBreak, 170}, - {lbB2SP, prB2}: {lbB2, LineDontBreak, 170}, + case lbAny | prB2<<32: + return lbB2, LineCanBreak, 310 + case lbB2 | prSP<<32: + return lbB2SP, LineDontBreak, 70 + case lbB2 | prB2<<32: + return lbB2, LineDontBreak, 170 + case lbB2SP | prB2<<32: + return lbB2, LineDontBreak, 170 // LB18. - {lbSP, prAny}: {lbAny, LineCanBreak, 180}, - {lbQUSP, prAny}: {lbAny, LineCanBreak, 180}, - {lbCLCPSP, prAny}: {lbAny, LineCanBreak, 180}, - {lbB2SP, prAny}: {lbAny, LineCanBreak, 180}, + case lbSP | prAny<<32: + return lbAny, LineCanBreak, 180 + case lbQUSP | prAny<<32: + return lbAny, LineCanBreak, 180 + case lbCLCPSP | prAny<<32: + return lbAny, LineCanBreak, 180 + case lbB2SP | prAny<<32: + return lbAny, LineCanBreak, 180 // LB19. 
- {lbAny, prQU}: {lbQU, LineDontBreak, 190}, - {lbQU, prAny}: {lbAny, LineDontBreak, 190}, + case lbAny | prQU<<32: + return lbQU, LineDontBreak, 190 + case lbQU | prAny<<32: + return lbAny, LineDontBreak, 190 // LB20. - {lbAny, prCB}: {lbCB, LineCanBreak, 200}, - {lbCB, prAny}: {lbAny, LineCanBreak, 200}, + case lbAny | prCB<<32: + return lbCB, LineCanBreak, 200 + case lbCB | prAny<<32: + return lbAny, LineCanBreak, 200 // LB21. - {lbAny, prBA}: {lbBA, LineDontBreak, 210}, - {lbAny, prHY}: {lbHY, LineDontBreak, 210}, - {lbAny, prNS}: {lbNS, LineDontBreak, 210}, - {lbAny, prBB}: {lbBB, LineCanBreak, 310}, - {lbBB, prAny}: {lbAny, LineDontBreak, 210}, + case lbAny | prBA<<32: + return lbBA, LineDontBreak, 210 + case lbAny | prHY<<32: + return lbHY, LineDontBreak, 210 + case lbAny | prNS<<32: + return lbNS, LineDontBreak, 210 + case lbAny | prBB<<32: + return lbBB, LineCanBreak, 310 + case lbBB | prAny<<32: + return lbAny, LineDontBreak, 210 // LB21a. - {lbAny, prHL}: {lbHL, LineCanBreak, 310}, - {lbHL, prHY}: {lbLB21a, LineDontBreak, 210}, - {lbHL, prBA}: {lbLB21a, LineDontBreak, 210}, - {lbLB21a, prAny}: {lbAny, LineDontBreak, 211}, + case lbAny | prHL<<32: + return lbHL, LineCanBreak, 310 + case lbHL | prHY<<32: + return lbLB21a, LineDontBreak, 210 + case lbHL | prBA<<32: + return lbLB21a, LineDontBreak, 210 + case lbLB21a | prAny<<32: + return lbAny, LineDontBreak, 211 // LB21b. - {lbSY, prHL}: {lbHL, LineDontBreak, 212}, - {lbNUSY, prHL}: {lbHL, LineDontBreak, 212}, + case lbSY | prHL<<32: + return lbHL, LineDontBreak, 212 + case lbNUSY | prHL<<32: + return lbHL, LineDontBreak, 212 // LB22. - {lbAny, prIN}: {lbAny, LineDontBreak, 220}, + case lbAny | prIN<<32: + return lbAny, LineDontBreak, 220 // LB23. - {lbAny, prAL}: {lbAL, LineCanBreak, 310}, - {lbAny, prNU}: {lbNU, LineCanBreak, 310}, - {lbAL, prNU}: {lbNU, LineDontBreak, 230}, - {lbHL, prNU}: {lbNU, LineDontBreak, 230}, - {lbNU, prAL}: {lbAL, LineDontBreak, 230}, - {lbNU, prHL}: {lbHL, LineDontBreak, 230}, - {lbNUNU, prAL}: {lbAL, LineDontBreak, 230}, - {lbNUNU, prHL}: {lbHL, LineDontBreak, 230}, + case lbAny | prAL<<32: + return lbAL, LineCanBreak, 310 + case lbAny | prNU<<32: + return lbNU, LineCanBreak, 310 + case lbAL | prNU<<32: + return lbNU, LineDontBreak, 230 + case lbHL | prNU<<32: + return lbNU, LineDontBreak, 230 + case lbNU | prAL<<32: + return lbAL, LineDontBreak, 230 + case lbNU | prHL<<32: + return lbHL, LineDontBreak, 230 + case lbNUNU | prAL<<32: + return lbAL, LineDontBreak, 230 + case lbNUNU | prHL<<32: + return lbHL, LineDontBreak, 230 // LB23a. - {lbAny, prPR}: {lbPR, LineCanBreak, 310}, - {lbAny, prID}: {lbIDEM, LineCanBreak, 310}, - {lbAny, prEB}: {lbEB, LineCanBreak, 310}, - {lbAny, prEM}: {lbIDEM, LineCanBreak, 310}, - {lbPR, prID}: {lbIDEM, LineDontBreak, 231}, - {lbPR, prEB}: {lbEB, LineDontBreak, 231}, - {lbPR, prEM}: {lbIDEM, LineDontBreak, 231}, - {lbIDEM, prPO}: {lbPO, LineDontBreak, 231}, - {lbEB, prPO}: {lbPO, LineDontBreak, 231}, + case lbAny | prPR<<32: + return lbPR, LineCanBreak, 310 + case lbAny | prID<<32: + return lbIDEM, LineCanBreak, 310 + case lbAny | prEB<<32: + return lbEB, LineCanBreak, 310 + case lbAny | prEM<<32: + return lbIDEM, LineCanBreak, 310 + case lbPR | prID<<32: + return lbIDEM, LineDontBreak, 231 + case lbPR | prEB<<32: + return lbEB, LineDontBreak, 231 + case lbPR | prEM<<32: + return lbIDEM, LineDontBreak, 231 + case lbIDEM | prPO<<32: + return lbPO, LineDontBreak, 231 + case lbEB | prPO<<32: + return lbPO, LineDontBreak, 231 // LB24. 
- {lbAny, prPO}: {lbPO, LineCanBreak, 310}, - {lbPR, prAL}: {lbAL, LineDontBreak, 240}, - {lbPR, prHL}: {lbHL, LineDontBreak, 240}, - {lbPO, prAL}: {lbAL, LineDontBreak, 240}, - {lbPO, prHL}: {lbHL, LineDontBreak, 240}, - {lbAL, prPR}: {lbPR, LineDontBreak, 240}, - {lbAL, prPO}: {lbPO, LineDontBreak, 240}, - {lbHL, prPR}: {lbPR, LineDontBreak, 240}, - {lbHL, prPO}: {lbPO, LineDontBreak, 240}, + case lbAny | prPO<<32: + return lbPO, LineCanBreak, 310 + case lbPR | prAL<<32: + return lbAL, LineDontBreak, 240 + case lbPR | prHL<<32: + return lbHL, LineDontBreak, 240 + case lbPO | prAL<<32: + return lbAL, LineDontBreak, 240 + case lbPO | prHL<<32: + return lbHL, LineDontBreak, 240 + case lbAL | prPR<<32: + return lbPR, LineDontBreak, 240 + case lbAL | prPO<<32: + return lbPO, LineDontBreak, 240 + case lbHL | prPR<<32: + return lbPR, LineDontBreak, 240 + case lbHL | prPO<<32: + return lbPO, LineDontBreak, 240 // LB25 (simple transitions). - {lbPR, prNU}: {lbNU, LineDontBreak, 250}, - {lbPO, prNU}: {lbNU, LineDontBreak, 250}, - {lbOP, prNU}: {lbNU, LineDontBreak, 250}, - {lbHY, prNU}: {lbNU, LineDontBreak, 250}, - {lbNU, prNU}: {lbNUNU, LineDontBreak, 250}, - {lbNU, prSY}: {lbNUSY, LineDontBreak, 250}, - {lbNU, prIS}: {lbNUIS, LineDontBreak, 250}, - {lbNUNU, prNU}: {lbNUNU, LineDontBreak, 250}, - {lbNUNU, prSY}: {lbNUSY, LineDontBreak, 250}, - {lbNUNU, prIS}: {lbNUIS, LineDontBreak, 250}, - {lbNUSY, prNU}: {lbNUNU, LineDontBreak, 250}, - {lbNUSY, prSY}: {lbNUSY, LineDontBreak, 250}, - {lbNUSY, prIS}: {lbNUIS, LineDontBreak, 250}, - {lbNUIS, prNU}: {lbNUNU, LineDontBreak, 250}, - {lbNUIS, prSY}: {lbNUSY, LineDontBreak, 250}, - {lbNUIS, prIS}: {lbNUIS, LineDontBreak, 250}, - {lbNU, prCL}: {lbNUCL, LineDontBreak, 250}, - {lbNU, prCP}: {lbNUCP, LineDontBreak, 250}, - {lbNUNU, prCL}: {lbNUCL, LineDontBreak, 250}, - {lbNUNU, prCP}: {lbNUCP, LineDontBreak, 250}, - {lbNUSY, prCL}: {lbNUCL, LineDontBreak, 250}, - {lbNUSY, prCP}: {lbNUCP, LineDontBreak, 250}, - {lbNUIS, prCL}: {lbNUCL, LineDontBreak, 250}, - {lbNUIS, prCP}: {lbNUCP, LineDontBreak, 250}, - {lbNU, prPO}: {lbPO, LineDontBreak, 250}, - {lbNUNU, prPO}: {lbPO, LineDontBreak, 250}, - {lbNUSY, prPO}: {lbPO, LineDontBreak, 250}, - {lbNUIS, prPO}: {lbPO, LineDontBreak, 250}, - {lbNUCL, prPO}: {lbPO, LineDontBreak, 250}, - {lbNUCP, prPO}: {lbPO, LineDontBreak, 250}, - {lbNU, prPR}: {lbPR, LineDontBreak, 250}, - {lbNUNU, prPR}: {lbPR, LineDontBreak, 250}, - {lbNUSY, prPR}: {lbPR, LineDontBreak, 250}, - {lbNUIS, prPR}: {lbPR, LineDontBreak, 250}, - {lbNUCL, prPR}: {lbPR, LineDontBreak, 250}, - {lbNUCP, prPR}: {lbPR, LineDontBreak, 250}, + case lbPR | prNU<<32: + return lbNU, LineDontBreak, 250 + case lbPO | prNU<<32: + return lbNU, LineDontBreak, 250 + case lbOP | prNU<<32: + return lbNU, LineDontBreak, 250 + case lbHY | prNU<<32: + return lbNU, LineDontBreak, 250 + case lbNU | prNU<<32: + return lbNUNU, LineDontBreak, 250 + case lbNU | prSY<<32: + return lbNUSY, LineDontBreak, 250 + case lbNU | prIS<<32: + return lbNUIS, LineDontBreak, 250 + case lbNUNU | prNU<<32: + return lbNUNU, LineDontBreak, 250 + case lbNUNU | prSY<<32: + return lbNUSY, LineDontBreak, 250 + case lbNUNU | prIS<<32: + return lbNUIS, LineDontBreak, 250 + case lbNUSY | prNU<<32: + return lbNUNU, LineDontBreak, 250 + case lbNUSY | prSY<<32: + return lbNUSY, LineDontBreak, 250 + case lbNUSY | prIS<<32: + return lbNUIS, LineDontBreak, 250 + case lbNUIS | prNU<<32: + return lbNUNU, LineDontBreak, 250 + case lbNUIS | prSY<<32: + return lbNUSY, LineDontBreak, 250 + case lbNUIS | 
prIS<<32: + return lbNUIS, LineDontBreak, 250 + case lbNU | prCL<<32: + return lbNUCL, LineDontBreak, 250 + case lbNU | prCP<<32: + return lbNUCP, LineDontBreak, 250 + case lbNUNU | prCL<<32: + return lbNUCL, LineDontBreak, 250 + case lbNUNU | prCP<<32: + return lbNUCP, LineDontBreak, 250 + case lbNUSY | prCL<<32: + return lbNUCL, LineDontBreak, 250 + case lbNUSY | prCP<<32: + return lbNUCP, LineDontBreak, 250 + case lbNUIS | prCL<<32: + return lbNUCL, LineDontBreak, 250 + case lbNUIS | prCP<<32: + return lbNUCP, LineDontBreak, 250 + case lbNU | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNUNU | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNUSY | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNUIS | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNUCL | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNUCP | prPO<<32: + return lbPO, LineDontBreak, 250 + case lbNU | prPR<<32: + return lbPR, LineDontBreak, 250 + case lbNUNU | prPR<<32: + return lbPR, LineDontBreak, 250 + case lbNUSY | prPR<<32: + return lbPR, LineDontBreak, 250 + case lbNUIS | prPR<<32: + return lbPR, LineDontBreak, 250 + case lbNUCL | prPR<<32: + return lbPR, LineDontBreak, 250 + case lbNUCP | prPR<<32: + return lbPR, LineDontBreak, 250 // LB26. - {lbAny, prJL}: {lbJL, LineCanBreak, 310}, - {lbAny, prJV}: {lbJV, LineCanBreak, 310}, - {lbAny, prJT}: {lbJT, LineCanBreak, 310}, - {lbAny, prH2}: {lbH2, LineCanBreak, 310}, - {lbAny, prH3}: {lbH3, LineCanBreak, 310}, - {lbJL, prJL}: {lbJL, LineDontBreak, 260}, - {lbJL, prJV}: {lbJV, LineDontBreak, 260}, - {lbJL, prH2}: {lbH2, LineDontBreak, 260}, - {lbJL, prH3}: {lbH3, LineDontBreak, 260}, - {lbJV, prJV}: {lbJV, LineDontBreak, 260}, - {lbJV, prJT}: {lbJT, LineDontBreak, 260}, - {lbH2, prJV}: {lbJV, LineDontBreak, 260}, - {lbH2, prJT}: {lbJT, LineDontBreak, 260}, - {lbJT, prJT}: {lbJT, LineDontBreak, 260}, - {lbH3, prJT}: {lbJT, LineDontBreak, 260}, + case lbAny | prJL<<32: + return lbJL, LineCanBreak, 310 + case lbAny | prJV<<32: + return lbJV, LineCanBreak, 310 + case lbAny | prJT<<32: + return lbJT, LineCanBreak, 310 + case lbAny | prH2<<32: + return lbH2, LineCanBreak, 310 + case lbAny | prH3<<32: + return lbH3, LineCanBreak, 310 + case lbJL | prJL<<32: + return lbJL, LineDontBreak, 260 + case lbJL | prJV<<32: + return lbJV, LineDontBreak, 260 + case lbJL | prH2<<32: + return lbH2, LineDontBreak, 260 + case lbJL | prH3<<32: + return lbH3, LineDontBreak, 260 + case lbJV | prJV<<32: + return lbJV, LineDontBreak, 260 + case lbJV | prJT<<32: + return lbJT, LineDontBreak, 260 + case lbH2 | prJV<<32: + return lbJV, LineDontBreak, 260 + case lbH2 | prJT<<32: + return lbJT, LineDontBreak, 260 + case lbJT | prJT<<32: + return lbJT, LineDontBreak, 260 + case lbH3 | prJT<<32: + return lbJT, LineDontBreak, 260 // LB27. 
- {lbJL, prPO}: {lbPO, LineDontBreak, 270}, - {lbJV, prPO}: {lbPO, LineDontBreak, 270}, - {lbJT, prPO}: {lbPO, LineDontBreak, 270}, - {lbH2, prPO}: {lbPO, LineDontBreak, 270}, - {lbH3, prPO}: {lbPO, LineDontBreak, 270}, - {lbPR, prJL}: {lbJL, LineDontBreak, 270}, - {lbPR, prJV}: {lbJV, LineDontBreak, 270}, - {lbPR, prJT}: {lbJT, LineDontBreak, 270}, - {lbPR, prH2}: {lbH2, LineDontBreak, 270}, - {lbPR, prH3}: {lbH3, LineDontBreak, 270}, + case lbJL | prPO<<32: + return lbPO, LineDontBreak, 270 + case lbJV | prPO<<32: + return lbPO, LineDontBreak, 270 + case lbJT | prPO<<32: + return lbPO, LineDontBreak, 270 + case lbH2 | prPO<<32: + return lbPO, LineDontBreak, 270 + case lbH3 | prPO<<32: + return lbPO, LineDontBreak, 270 + case lbPR | prJL<<32: + return lbJL, LineDontBreak, 270 + case lbPR | prJV<<32: + return lbJV, LineDontBreak, 270 + case lbPR | prJT<<32: + return lbJT, LineDontBreak, 270 + case lbPR | prH2<<32: + return lbH2, LineDontBreak, 270 + case lbPR | prH3<<32: + return lbH3, LineDontBreak, 270 // LB28. - {lbAL, prAL}: {lbAL, LineDontBreak, 280}, - {lbAL, prHL}: {lbHL, LineDontBreak, 280}, - {lbHL, prAL}: {lbAL, LineDontBreak, 280}, - {lbHL, prHL}: {lbHL, LineDontBreak, 280}, + case lbAL | prAL<<32: + return lbAL, LineDontBreak, 280 + case lbAL | prHL<<32: + return lbHL, LineDontBreak, 280 + case lbHL | prAL<<32: + return lbAL, LineDontBreak, 280 + case lbHL | prHL<<32: + return lbHL, LineDontBreak, 280 // LB29. - {lbIS, prAL}: {lbAL, LineDontBreak, 290}, - {lbIS, prHL}: {lbHL, LineDontBreak, 290}, - {lbNUIS, prAL}: {lbAL, LineDontBreak, 290}, - {lbNUIS, prHL}: {lbHL, LineDontBreak, 290}, + case lbIS | prAL<<32: + return lbAL, LineDontBreak, 290 + case lbIS | prHL<<32: + return lbHL, LineDontBreak, 290 + case lbNUIS | prAL<<32: + return lbAL, LineDontBreak, 290 + case lbNUIS | prHL<<32: + return lbHL, LineDontBreak, 290 + + default: + return -1, -1, -1 + } } // transitionLineBreakState determines the new state of the line break parser @@ -290,7 +449,7 @@ var lbTransitions = map[[2]int][3]int{ // further lookups. func transitionLineBreakState(state int, r rune, b []byte, str string) (newState int, lineBreak int) { // Determine the property of the next character. - nextProperty, generalCategory := propertyWithGenCat(lineBreakCodePoints, r) + nextProperty, generalCategory := propertyLineBreak(r) // Prepare. var forceNoBreak, isCPeaFWH bool @@ -306,7 +465,7 @@ func transitionLineBreakState(state int, r rune, b []byte, str string) (newState defer func() { // Transition into LB30. if newState == lbCP || newState == lbNUCP { - ea := property(eastAsianWidth, r) + ea := propertyEastAsianWidth(r) if ea != prF && ea != prW && ea != prH { newState |= lbCPeaFWHBit } @@ -352,30 +511,27 @@ func transitionLineBreakState(state int, r rune, b []byte, str string) (newState // Find the applicable transition in the table. var rule int - transition, ok := lbTransitions[[2]int{state, nextProperty}] - if ok { - // We have a specific transition. We'll use it. - newState, lineBreak, rule = transition[0], transition[1], transition[2] - } else { + newState, lineBreak, rule = lbTransitions(state, nextProperty) + if newState < 0 { // No specific transition found. Try the less specific ones. 
- transAnyProp, okAnyProp := lbTransitions[[2]int{state, prAny}] - transAnyState, okAnyState := lbTransitions[[2]int{lbAny, nextProperty}] - if okAnyProp && okAnyState { + anyPropProp, anyPropLineBreak, anyPropRule := lbTransitions(state, prAny) + anyStateProp, anyStateLineBreak, anyStateRule := lbTransitions(lbAny, nextProperty) + if anyPropProp >= 0 && anyStateProp >= 0 { // Both apply. We'll use a mix (see comments for grTransitions). - newState, lineBreak, rule = transAnyState[0], transAnyState[1], transAnyState[2] - if transAnyProp[2] < transAnyState[2] { - lineBreak, rule = transAnyProp[1], transAnyProp[2] + newState, lineBreak, rule = anyStateProp, anyStateLineBreak, anyStateRule + if anyPropRule < anyStateRule { + lineBreak, rule = anyPropLineBreak, anyPropRule } - } else if okAnyProp { + } else if anyPropProp >= 0 { // We only have a specific state. - newState, lineBreak, rule = transAnyProp[0], transAnyProp[1], transAnyProp[2] + newState, lineBreak, rule = anyPropProp, anyPropLineBreak, anyPropRule // This branch will probably never be reached because okAnyState will // always be true given the current transition map. But we keep it here // for future modifications to the transition map where this may not be // true anymore. - } else if okAnyState { + } else if anyStateProp >= 0 { // We only have a specific property. - newState, lineBreak, rule = transAnyState[0], transAnyState[1], transAnyState[2] + newState, lineBreak, rule = anyStateProp, anyStateLineBreak, anyStateRule } else { // No known transition. LB31: ALL ÷ ALL. newState, lineBreak, rule = lbAny, LineCanBreak, 310 @@ -414,7 +570,7 @@ func transitionLineBreakState(state int, r rune, b []byte, str string) (newState r, _ = utf8.DecodeRuneInString(str) } if r != utf8.RuneError { - pr, _ := propertyWithGenCat(lineBreakCodePoints, r) + pr, _ := propertyLineBreak(r) if pr == prNU { return lbNU, LineDontBreak } @@ -424,7 +580,7 @@ func transitionLineBreakState(state int, r rune, b []byte, str string) (newState // LB30 (part one). if rule > 300 { if (state == lbAL || state == lbHL || state == lbNU || state == lbNUNU) && nextProperty == prOP { - ea := property(eastAsianWidth, r) + ea := propertyEastAsianWidth(r) if ea != prF && ea != prW && ea != prH { return lbOP, LineDontBreak } @@ -460,7 +616,7 @@ func transitionLineBreakState(state int, r rune, b []byte, str string) (newState return prAny, LineDontBreak } } - graphemeProperty := property(graphemeCodePoints, r) + graphemeProperty := propertyGraphemes(r) if graphemeProperty == prExtendedPictographic && generalCategory == gcCn { return lbExtPicCn, LineCanBreak } diff --git a/vendor/github.com/rivo/uniseg/properties.go b/vendor/github.com/rivo/uniseg/properties.go index bc3c7bcf3..6290e6810 100644 --- a/vendor/github.com/rivo/uniseg/properties.go +++ b/vendor/github.com/rivo/uniseg/properties.go @@ -160,9 +160,49 @@ func property(dictionary [][3]int, r rune) int { return propertySearch(dictionary, r)[2] } -// propertyWithGenCat returns the Unicode property value and General Category -// (see constants above) of the given code point. -func propertyWithGenCat(dictionary [][4]int, r rune) (property, generalCategory int) { - entry := propertySearch(dictionary, r) +// propertyLineBreak returns the Unicode property value and General Category +// (see constants above) of the given code point, as listed in the line break +// code points table, while fast tracking ASCII digits and letters. 
+func propertyLineBreak(r rune) (property, generalCategory int) { + if r >= 'a' && r <= 'z' { + return prAL, gcLl + } + if r >= 'A' && r <= 'Z' { + return prAL, gcLu + } + if r >= '0' && r <= '9' { + return prNU, gcNd + } + entry := propertySearch(lineBreakCodePoints, r) return entry[2], entry[3] } + +// propertyGraphemes returns the Unicode grapheme cluster property value of the +// given code point while fast tracking ASCII characters. +func propertyGraphemes(r rune) int { + if r >= 0x20 && r <= 0x7e { + return prAny + } + if r == 0x0a { + return prLF + } + if r == 0x0d { + return prCR + } + if r >= 0 && r <= 0x1f || r == 0x7f { + return prControl + } + return property(graphemeCodePoints, r) +} + +// propertyEastAsianWidth returns the Unicode East Asian Width property value of +// the given code point while fast tracking ASCII characters. +func propertyEastAsianWidth(r rune) int { + if r >= 0x20 && r <= 0x7e { + return prNa + } + if r >= 0 && r <= 0x1f || r == 0x7f { + return prN + } + return property(eastAsianWidth, r) +} diff --git a/vendor/github.com/rivo/uniseg/sentenceproperties.go b/vendor/github.com/rivo/uniseg/sentenceproperties.go index ba0cf2de1..67717ec1f 100644 --- a/vendor/github.com/rivo/uniseg/sentenceproperties.go +++ b/vendor/github.com/rivo/uniseg/sentenceproperties.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. +package uniseg + // sentenceBreakCodePoints are taken from -// https://www.unicode.org/Public/14.0.0/ucd/auxiliary/SentenceBreakProperty.txt +// https://www.unicode.org/Public/15.0.0/ucd/auxiliary/SentenceBreakProperty.txt // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. 
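[editor's note: the central refactor in the uniseg hunks above replaces `map[[2]int][3]int` transition tables with functions that switch on a single packed integer. A map lookup pays for hashing on every character, while a switch over constant keys compiles to compare/jump sequences and keeps the table out of the heap. Below is a minimal, self-contained sketch of just that pattern, with made-up states and properties rather than uniseg's real ones; the key layout (state in the low 32 bits, property in the high 32 bits) matches the diff.]

package main

import "fmt"

// Hypothetical parser states and character properties (not uniseg's
// real values). They are small non-negative ints, so a (state, prop)
// pair can be packed into one uint64 switch key.
const (
	stAny = iota
	stCR
	stLF
)

const (
	prAny = iota
	prCR
	prLF
)

// transition returns (newState, break?, rule) for a state/property
// pair using the packed-key switch pattern from the diff above: the
// low 32 bits hold the state, the high 32 bits the property. A
// negative state signals "no specific transition found".
func transition(state, prop int) (newState int, brk bool, rule int) {
	switch uint64(state) | uint64(prop)<<32 {
	case stAny | prCR<<32:
		return stCR, true, 32
	case stCR | prLF<<32:
		return stLF, false, 30
	default:
		return -1, false, -1
	}
}

func main() {
	st, brk, rule := transition(stAny, prCR)
	fmt.Println(st, brk, rule) // 1 true 32
}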
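[editor's note: the new propertyLineBreak/propertyGraphemes/propertyEastAsianWidth functions above answer ASCII with plain comparisons and only fall back to the large `[][3]int` range tables for everything else. Those tables are sorted, non-overlapping {first, last, property} ranges, so the slow path is presumably a binary search (uniseg's propertySearch). A sketch of the combined pattern follows; the table contents and property names here are invented for illustration.]

package main

import (
	"fmt"
	"sort"
)

// A sorted, non-overlapping range table in the same shape as the
// [][3]int tables in this patch: {first rune, last rune, property}.
// The entries are made up for illustration.
const (
	prOther = iota
	prDigit
	prLetter
)

var table = [][3]int{
	{0x00C0, 0x00D6, prLetter}, // À..Ö
	{0x0660, 0x0669, prDigit},  // ARABIC-INDIC DIGIT ZERO..NINE
}

// lookup mirrors the fast-path pattern added in properties.go: handle
// ASCII digits and letters with direct comparisons, then fall back to
// a binary search over the sorted ranges.
func lookup(r rune) int {
	if r >= 'a' && r <= 'z' || r >= 'A' && r <= 'Z' {
		return prLetter
	}
	if r >= '0' && r <= '9' {
		return prDigit
	}
	// Find the first range whose upper bound is >= r, then check that
	// r actually falls inside it.
	i := sort.Search(len(table), func(i int) bool { return table[i][1] >= int(r) })
	if i < len(table) && int(r) >= table[i][0] {
		return table[i][2]
	}
	return prOther
}

func main() {
	fmt.Println(lookup('x'), lookup('٣'), lookup('€')) // 2 1 0
}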
var sentenceBreakCodePoints = [][3]int{ {0x0009, 0x0009, prSp}, // Cc @@ -843,6 +843,7 @@ var sentenceBreakCodePoints = [][3]int{ {0x0CE2, 0x0CE3, prExtend}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL {0x0CE6, 0x0CEF, prNumeric}, // Nd [10] KANNADA DIGIT ZERO..KANNADA DIGIT NINE {0x0CF1, 0x0CF2, prOLetter}, // Lo [2] KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADHMANIYA + {0x0CF3, 0x0CF3, prExtend}, // Mc KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT {0x0D00, 0x0D01, prExtend}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prExtend}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D04, 0x0D0C, prOLetter}, // Lo [9] MALAYALAM LETTER VEDIC ANUSVARA..MALAYALAM LETTER VOCALIC L @@ -896,7 +897,7 @@ var sentenceBreakCodePoints = [][3]int{ {0x0EBD, 0x0EBD, prOLetter}, // Lo LAO SEMIVOWEL SIGN NYO {0x0EC0, 0x0EC4, prOLetter}, // Lo [5] LAO VOWEL SIGN E..LAO VOWEL SIGN AI {0x0EC6, 0x0EC6, prOLetter}, // Lm LAO KO LA - {0x0EC8, 0x0ECD, prExtend}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0EC8, 0x0ECE, prExtend}, // Mn [7] LAO TONE MAI EK..LAO YAMAKKAN {0x0ED0, 0x0ED9, prNumeric}, // Nd [10] LAO DIGIT ZERO..LAO DIGIT NINE {0x0EDC, 0x0EDF, prOLetter}, // Lo [4] LAO HO NO..LAO LETTER KHMU NYO {0x0F00, 0x0F00, prOLetter}, // Lo TIBETAN SYLLABLE OM @@ -958,7 +959,7 @@ var sentenceBreakCodePoints = [][3]int{ {0x10C7, 0x10C7, prUpper}, // L& GEORGIAN CAPITAL LETTER YN {0x10CD, 0x10CD, prUpper}, // L& GEORGIAN CAPITAL LETTER AEN {0x10D0, 0x10FA, prOLetter}, // L& [43] GEORGIAN LETTER AN..GEORGIAN LETTER AIN - {0x10FC, 0x10FC, prOLetter}, // Lm MODIFIER LETTER GEORGIAN NAR + {0x10FC, 0x10FC, prLower}, // Lm MODIFIER LETTER GEORGIAN NAR {0x10FD, 0x10FF, prOLetter}, // L& [3] GEORGIAN LETTER AEN..GEORGIAN LETTER LABIAL SIGN {0x1100, 0x1248, prOLetter}, // Lo [329] HANGUL CHOSEONG KIYEOK..ETHIOPIC SYLLABLE QWA {0x124A, 0x124D, prOLetter}, // Lo [4] ETHIOPIC SYLLABLE QWI..ETHIOPIC SYLLABLE QWE @@ -2034,7 +2035,7 @@ var sentenceBreakCodePoints = [][3]int{ {0xA7D7, 0xA7D7, prLower}, // L& LATIN SMALL LETTER MIDDLE SCOTS S {0xA7D8, 0xA7D8, prUpper}, // L& LATIN CAPITAL LETTER SIGMOID S {0xA7D9, 0xA7D9, prLower}, // L& LATIN SMALL LETTER SIGMOID S - {0xA7F2, 0xA7F4, prOLetter}, // Lm [3] MODIFIER LETTER CAPITAL C..MODIFIER LETTER CAPITAL Q + {0xA7F2, 0xA7F4, prLower}, // Lm [3] MODIFIER LETTER CAPITAL C..MODIFIER LETTER CAPITAL Q {0xA7F5, 0xA7F5, prUpper}, // L& LATIN CAPITAL LETTER REVERSED HALF H {0xA7F6, 0xA7F6, prLower}, // L& LATIN SMALL LETTER REVERSED HALF H {0xA7F7, 0xA7F7, prOLetter}, // Lo LATIN EPIGRAPHIC LETTER SIDEWAYS I @@ -2140,7 +2141,7 @@ var sentenceBreakCodePoints = [][3]int{ {0xAB30, 0xAB5A, prLower}, // L& [43] LATIN SMALL LETTER BARRED ALPHA..LATIN SMALL LETTER Y WITH SHORT RIGHT LEG {0xAB5C, 0xAB5F, prLower}, // Lm [4] MODIFIER LETTER SMALL HENG..MODIFIER LETTER SMALL U WITH LEFT HOOK {0xAB60, 0xAB68, prLower}, // L& [9] LATIN SMALL LETTER SAKHA YAT..LATIN SMALL LETTER TURNED R WITH MIDDLE TILDE - {0xAB69, 0xAB69, prOLetter}, // Lm MODIFIER LETTER SMALL TURNED W + {0xAB69, 0xAB69, prLower}, // Lm MODIFIER LETTER SMALL TURNED W {0xAB70, 0xABBF, prLower}, // L& [80] CHEROKEE SMALL LETTER A..CHEROKEE SMALL LETTER YA {0xABC0, 0xABE2, prOLetter}, // Lo [35] MEETEI MAYEK LETTER KOK..MEETEI MAYEK LETTER I LONSUM {0xABE3, 0xABE4, prExtend}, // Mc [2] MEETEI MAYEK VOWEL SIGN ONAP..MEETEI MAYEK VOWEL SIGN INAP @@ -2334,6 +2335,7 @@ var sentenceBreakCodePoints = [][3]int{ {0x10E80, 0x10EA9, prOLetter}, // Lo [42] 
YEZIDI LETTER ELIF..YEZIDI LETTER ET {0x10EAB, 0x10EAC, prExtend}, // Mn [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK {0x10EB0, 0x10EB1, prOLetter}, // Lo [2] YEZIDI LETTER LAM WITH DOT ABOVE..YEZIDI LETTER YOT WITH CIRCUMFLEX ABOVE + {0x10EFD, 0x10EFF, prExtend}, // Mn [3] ARABIC SMALL LOW WORD SAKTA..ARABIC SMALL LOW WORD MADDA {0x10F00, 0x10F1C, prOLetter}, // Lo [29] OLD SOGDIAN LETTER ALEPH..OLD SOGDIAN LETTER FINAL TAW WITH VERTICAL TAIL {0x10F27, 0x10F27, prOLetter}, // Lo OLD SOGDIAN LIGATURE AYIN-DALETH {0x10F30, 0x10F45, prOLetter}, // Lo [22] SOGDIAN LETTER ALEPH..SOGDIAN INDEPENDENT SHIN @@ -2408,6 +2410,8 @@ var sentenceBreakCodePoints = [][3]int{ {0x11238, 0x11239, prSTerm}, // Po [2] KHOJKI DANDA..KHOJKI DOUBLE DANDA {0x1123B, 0x1123C, prSTerm}, // Po [2] KHOJKI SECTION MARK..KHOJKI DOUBLE SECTION MARK {0x1123E, 0x1123E, prExtend}, // Mn KHOJKI SIGN SUKUN + {0x1123F, 0x11240, prOLetter}, // Lo [2] KHOJKI LETTER QA..KHOJKI LETTER SHORT I + {0x11241, 0x11241, prExtend}, // Mn KHOJKI VOWEL SIGN VOCALIC R {0x11280, 0x11286, prOLetter}, // Lo [7] MULTANI LETTER A..MULTANI LETTER GA {0x11288, 0x11288, prOLetter}, // Lo MULTANI LETTER GHA {0x1128A, 0x1128D, prOLetter}, // Lo [4] MULTANI LETTER CA..MULTANI LETTER JJA @@ -2603,13 +2607,29 @@ var sentenceBreakCodePoints = [][3]int{ {0x11EF3, 0x11EF4, prExtend}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prExtend}, // Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O {0x11EF7, 0x11EF8, prSTerm}, // Po [2] MAKASAR PASSIMBANG..MAKASAR END OF SECTION + {0x11F00, 0x11F01, prExtend}, // Mn [2] KAWI SIGN CANDRABINDU..KAWI SIGN ANUSVARA + {0x11F02, 0x11F02, prOLetter}, // Lo KAWI SIGN REPHA + {0x11F03, 0x11F03, prExtend}, // Mc KAWI SIGN VISARGA + {0x11F04, 0x11F10, prOLetter}, // Lo [13] KAWI LETTER A..KAWI LETTER O + {0x11F12, 0x11F33, prOLetter}, // Lo [34] KAWI LETTER KA..KAWI LETTER JNYA + {0x11F34, 0x11F35, prExtend}, // Mc [2] KAWI VOWEL SIGN AA..KAWI VOWEL SIGN ALTERNATE AA + {0x11F36, 0x11F3A, prExtend}, // Mn [5] KAWI VOWEL SIGN I..KAWI VOWEL SIGN VOCALIC R + {0x11F3E, 0x11F3F, prExtend}, // Mc [2] KAWI VOWEL SIGN E..KAWI VOWEL SIGN AI + {0x11F40, 0x11F40, prExtend}, // Mn KAWI VOWEL SIGN EU + {0x11F41, 0x11F41, prExtend}, // Mc KAWI SIGN KILLER + {0x11F42, 0x11F42, prExtend}, // Mn KAWI CONJOINER + {0x11F43, 0x11F44, prSTerm}, // Po [2] KAWI DANDA..KAWI DOUBLE DANDA + {0x11F50, 0x11F59, prNumeric}, // Nd [10] KAWI DIGIT ZERO..KAWI DIGIT NINE {0x11FB0, 0x11FB0, prOLetter}, // Lo LISU LETTER YHA {0x12000, 0x12399, prOLetter}, // Lo [922] CUNEIFORM SIGN A..CUNEIFORM SIGN U U {0x12400, 0x1246E, prOLetter}, // Nl [111] CUNEIFORM NUMERIC SIGN TWO ASH..CUNEIFORM NUMERIC SIGN NINE U VARIANT FORM {0x12480, 0x12543, prOLetter}, // Lo [196] CUNEIFORM SIGN AB TIMES NUN TENU..CUNEIFORM SIGN ZU5 TIMES THREE DISH TENU {0x12F90, 0x12FF0, prOLetter}, // Lo [97] CYPRO-MINOAN SIGN CM001..CYPRO-MINOAN SIGN CM114 - {0x13000, 0x1342E, prOLetter}, // Lo [1071] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH AA032 - {0x13430, 0x13438, prFormat}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT + {0x13000, 0x1342F, prOLetter}, // Lo [1072] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH V011D + {0x13430, 0x1343F, prFormat}, // Cf [16] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END WALLED ENCLOSURE + {0x13440, 0x13440, prExtend}, // Mn EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY + {0x13441, 0x13446, prOLetter}, // Lo [6] EGYPTIAN HIEROGLYPH FULL BLANK..EGYPTIAN HIEROGLYPH WIDE 
LOST SIGN + {0x13447, 0x13455, prExtend}, // Mn [15] EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START..EGYPTIAN HIEROGLYPH MODIFIER DAMAGED {0x14400, 0x14646, prOLetter}, // Lo [583] ANATOLIAN HIEROGLYPH A001..ANATOLIAN HIEROGLYPH A530 {0x16800, 0x16A38, prOLetter}, // Lo [569] BAMUM LETTER PHASE-A NGKUE MFON..BAMUM LETTER PHASE-F VUEQ {0x16A40, 0x16A5E, prOLetter}, // Lo [31] MRO LETTER TA..MRO LETTER TEK @@ -2648,7 +2668,9 @@ var sentenceBreakCodePoints = [][3]int{ {0x1AFF5, 0x1AFFB, prOLetter}, // Lm [7] KATAKANA LETTER MINNAN TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-5 {0x1AFFD, 0x1AFFE, prOLetter}, // Lm [2] KATAKANA LETTER MINNAN NASALIZED TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-8 {0x1B000, 0x1B122, prOLetter}, // Lo [291] KATAKANA LETTER ARCHAIC E..KATAKANA LETTER ARCHAIC WU + {0x1B132, 0x1B132, prOLetter}, // Lo HIRAGANA LETTER SMALL KO {0x1B150, 0x1B152, prOLetter}, // Lo [3] HIRAGANA LETTER SMALL WI..HIRAGANA LETTER SMALL WO + {0x1B155, 0x1B155, prOLetter}, // Lo KATAKANA LETTER SMALL KO {0x1B164, 0x1B167, prOLetter}, // Lo [4] KATAKANA LETTER SMALL WI..KATAKANA LETTER SMALL N {0x1B170, 0x1B2FB, prOLetter}, // Lo [396] NUSHU CHARACTER-1B170..NUSHU CHARACTER-1B2FB {0x1BC00, 0x1BC6A, prOLetter}, // Lo [107] DUPLOYAN LETTER H..DUPLOYAN LETTER VOCALIC M @@ -2738,11 +2760,14 @@ var sentenceBreakCodePoints = [][3]int{ {0x1DF00, 0x1DF09, prLower}, // L& [10] LATIN SMALL LETTER FENG DIGRAPH WITH TRILL..LATIN SMALL LETTER T WITH HOOK AND RETROFLEX HOOK {0x1DF0A, 0x1DF0A, prOLetter}, // Lo LATIN LETTER RETROFLEX CLICK WITH RETROFLEX HOOK {0x1DF0B, 0x1DF1E, prLower}, // L& [20] LATIN SMALL LETTER ESH WITH DOUBLE BAR..LATIN SMALL LETTER S WITH CURL + {0x1DF25, 0x1DF2A, prLower}, // L& [6] LATIN SMALL LETTER D WITH MID-HEIGHT LEFT HOOK..LATIN SMALL LETTER T WITH MID-HEIGHT LEFT HOOK {0x1E000, 0x1E006, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE {0x1E008, 0x1E018, prExtend}, // Mn [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU {0x1E01B, 0x1E021, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI {0x1E023, 0x1E024, prExtend}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prExtend}, // Mn [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E030, 0x1E06D, prLower}, // Lm [62] MODIFIER LETTER CYRILLIC SMALL A..MODIFIER LETTER CYRILLIC SMALL STRAIGHT U WITH STROKE + {0x1E08F, 0x1E08F, prExtend}, // Mn COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I {0x1E100, 0x1E12C, prOLetter}, // Lo [45] NYIAKENG PUACHUE HMONG LETTER MA..NYIAKENG PUACHUE HMONG LETTER W {0x1E130, 0x1E136, prExtend}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E137, 0x1E13D, prOLetter}, // Lm [7] NYIAKENG PUACHUE HMONG SIGN FOR PERSON..NYIAKENG PUACHUE HMONG SYLLABLE LENGTHENER @@ -2753,6 +2778,10 @@ var sentenceBreakCodePoints = [][3]int{ {0x1E2C0, 0x1E2EB, prOLetter}, // Lo [44] WANCHO LETTER AA..WANCHO LETTER YIH {0x1E2EC, 0x1E2EF, prExtend}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI {0x1E2F0, 0x1E2F9, prNumeric}, // Nd [10] WANCHO DIGIT ZERO..WANCHO DIGIT NINE + {0x1E4D0, 0x1E4EA, prOLetter}, // Lo [27] NAG MUNDARI LETTER O..NAG MUNDARI LETTER ELL + {0x1E4EB, 0x1E4EB, prOLetter}, // Lm NAG MUNDARI SIGN OJOD + {0x1E4EC, 0x1E4EF, prExtend}, // Mn [4] NAG MUNDARI SIGN MUHOR..NAG MUNDARI SIGN SUTUH + {0x1E4F0, 0x1E4F9, prNumeric}, // Nd [10] NAG MUNDARI DIGIT ZERO..NAG MUNDARI DIGIT NINE 
{0x1E7E0, 0x1E7E6, prOLetter}, // Lo [7] ETHIOPIC SYLLABLE HHYA..ETHIOPIC SYLLABLE HHYO {0x1E7E8, 0x1E7EB, prOLetter}, // Lo [4] ETHIOPIC SYLLABLE GURAGE HHWA..ETHIOPIC SYLLABLE HHWE {0x1E7ED, 0x1E7EE, prOLetter}, // Lo [2] ETHIOPIC SYLLABLE GURAGE MWI..ETHIOPIC SYLLABLE GURAGE MWEE @@ -2803,12 +2832,13 @@ var sentenceBreakCodePoints = [][3]int{ {0x1F676, 0x1F678, prClose}, // So [3] SANS-SERIF HEAVY DOUBLE TURNED COMMA QUOTATION MARK ORNAMENT..SANS-SERIF HEAVY LOW DOUBLE COMMA QUOTATION MARK ORNAMENT {0x1FBF0, 0x1FBF9, prNumeric}, // Nd [10] SEGMENTED DIGIT ZERO..SEGMENTED DIGIT NINE {0x20000, 0x2A6DF, prOLetter}, // Lo [42720] CJK UNIFIED IDEOGRAPH-20000..CJK UNIFIED IDEOGRAPH-2A6DF - {0x2A700, 0x2B738, prOLetter}, // Lo [4153] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B738 + {0x2A700, 0x2B739, prOLetter}, // Lo [4154] CJK UNIFIED IDEOGRAPH-2A700..CJK UNIFIED IDEOGRAPH-2B739 {0x2B740, 0x2B81D, prOLetter}, // Lo [222] CJK UNIFIED IDEOGRAPH-2B740..CJK UNIFIED IDEOGRAPH-2B81D {0x2B820, 0x2CEA1, prOLetter}, // Lo [5762] CJK UNIFIED IDEOGRAPH-2B820..CJK UNIFIED IDEOGRAPH-2CEA1 {0x2CEB0, 0x2EBE0, prOLetter}, // Lo [7473] CJK UNIFIED IDEOGRAPH-2CEB0..CJK UNIFIED IDEOGRAPH-2EBE0 {0x2F800, 0x2FA1D, prOLetter}, // Lo [542] CJK COMPATIBILITY IDEOGRAPH-2F800..CJK COMPATIBILITY IDEOGRAPH-2FA1D {0x30000, 0x3134A, prOLetter}, // Lo [4939] CJK UNIFIED IDEOGRAPH-30000..CJK UNIFIED IDEOGRAPH-3134A + {0x31350, 0x323AF, prOLetter}, // Lo [4192] CJK UNIFIED IDEOGRAPH-31350..CJK UNIFIED IDEOGRAPH-323AF {0xE0001, 0xE0001, prFormat}, // Cf LANGUAGE TAG {0xE0020, 0xE007F, prExtend}, // Cf [96] TAG SPACE..CANCEL TAG {0xE0100, 0xE01EF, prExtend}, // Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256 diff --git a/vendor/github.com/rivo/uniseg/sentencerules.go b/vendor/github.com/rivo/uniseg/sentencerules.go index 58c04794e..0b29c7bdb 100644 --- a/vendor/github.com/rivo/uniseg/sentencerules.go +++ b/vendor/github.com/rivo/uniseg/sentencerules.go @@ -18,104 +18,178 @@ const ( sbSB8aSp ) -// The sentence break parser's breaking instructions. -const ( - sbDontBreak = iota - sbBreak -) - -// The sentence break parser's state transitions. It's anologous to -// grTransitions, see comments there for details. Unicode version 14.0.0. -var sbTransitions = map[[2]int][3]int{ +// sbTransitions implements the sentence break parser's state transitions. It's +// anologous to [grTransitions], see comments there for details. +// +// Unicode version 15.0.0. +func sbTransitions(state, prop int) (newState int, sentenceBreak bool, rule int) { + switch uint64(state) | uint64(prop)<<32 { // SB3. - {sbAny, prCR}: {sbCR, sbDontBreak, 9990}, - {sbCR, prLF}: {sbParaSep, sbDontBreak, 30}, + case sbAny | prCR<<32: + return sbCR, false, 9990 + case sbCR | prLF<<32: + return sbParaSep, false, 30 // SB4. - {sbAny, prSep}: {sbParaSep, sbDontBreak, 9990}, - {sbAny, prLF}: {sbParaSep, sbDontBreak, 9990}, - {sbParaSep, prAny}: {sbAny, sbBreak, 40}, - {sbCR, prAny}: {sbAny, sbBreak, 40}, + case sbAny | prSep<<32: + return sbParaSep, false, 9990 + case sbAny | prLF<<32: + return sbParaSep, false, 9990 + case sbParaSep | prAny<<32: + return sbAny, true, 40 + case sbCR | prAny<<32: + return sbAny, true, 40 // SB6. - {sbAny, prATerm}: {sbATerm, sbDontBreak, 9990}, - {sbATerm, prNumeric}: {sbAny, sbDontBreak, 60}, - {sbSB7, prNumeric}: {sbAny, sbDontBreak, 60}, // Because ATerm also appears in SB7. 
+ case sbAny | prATerm<<32: + return sbATerm, false, 9990 + case sbATerm | prNumeric<<32: + return sbAny, false, 60 + case sbSB7 | prNumeric<<32: + return sbAny, false, 60 // Because ATerm also appears in SB7. // SB7. - {sbAny, prUpper}: {sbUpper, sbDontBreak, 9990}, - {sbAny, prLower}: {sbLower, sbDontBreak, 9990}, - {sbUpper, prATerm}: {sbSB7, sbDontBreak, 70}, - {sbLower, prATerm}: {sbSB7, sbDontBreak, 70}, - {sbSB7, prUpper}: {sbUpper, sbDontBreak, 70}, + case sbAny | prUpper<<32: + return sbUpper, false, 9990 + case sbAny | prLower<<32: + return sbLower, false, 9990 + case sbUpper | prATerm<<32: + return sbSB7, false, 70 + case sbLower | prATerm<<32: + return sbSB7, false, 70 + case sbSB7 | prUpper<<32: + return sbUpper, false, 70 // SB8a. - {sbAny, prSTerm}: {sbSTerm, sbDontBreak, 9990}, - {sbATerm, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbATerm, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbATerm, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSB7, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSB7, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSB7, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSB8Close, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSB8Close, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSB8Close, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSB8Sp, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSB8Sp, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSB8Sp, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSTerm, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSTerm, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSTerm, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSB8aClose, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSB8aClose, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSB8aClose, prSTerm}: {sbSTerm, sbDontBreak, 81}, - {sbSB8aSp, prSContinue}: {sbAny, sbDontBreak, 81}, - {sbSB8aSp, prATerm}: {sbATerm, sbDontBreak, 81}, - {sbSB8aSp, prSTerm}: {sbSTerm, sbDontBreak, 81}, + case sbAny | prSTerm<<32: + return sbSTerm, false, 9990 + case sbATerm | prSContinue<<32: + return sbAny, false, 81 + case sbATerm | prATerm<<32: + return sbATerm, false, 81 + case sbATerm | prSTerm<<32: + return sbSTerm, false, 81 + case sbSB7 | prSContinue<<32: + return sbAny, false, 81 + case sbSB7 | prATerm<<32: + return sbATerm, false, 81 + case sbSB7 | prSTerm<<32: + return sbSTerm, false, 81 + case sbSB8Close | prSContinue<<32: + return sbAny, false, 81 + case sbSB8Close | prATerm<<32: + return sbATerm, false, 81 + case sbSB8Close | prSTerm<<32: + return sbSTerm, false, 81 + case sbSB8Sp | prSContinue<<32: + return sbAny, false, 81 + case sbSB8Sp | prATerm<<32: + return sbATerm, false, 81 + case sbSB8Sp | prSTerm<<32: + return sbSTerm, false, 81 + case sbSTerm | prSContinue<<32: + return sbAny, false, 81 + case sbSTerm | prATerm<<32: + return sbATerm, false, 81 + case sbSTerm | prSTerm<<32: + return sbSTerm, false, 81 + case sbSB8aClose | prSContinue<<32: + return sbAny, false, 81 + case sbSB8aClose | prATerm<<32: + return sbATerm, false, 81 + case sbSB8aClose | prSTerm<<32: + return sbSTerm, false, 81 + case sbSB8aSp | prSContinue<<32: + return sbAny, false, 81 + case sbSB8aSp | prATerm<<32: + return sbATerm, false, 81 + case sbSB8aSp | prSTerm<<32: + return sbSTerm, false, 81 // SB9. 
- {sbATerm, prClose}: {sbSB8Close, sbDontBreak, 90}, - {sbSB7, prClose}: {sbSB8Close, sbDontBreak, 90}, - {sbSB8Close, prClose}: {sbSB8Close, sbDontBreak, 90}, - {sbATerm, prSp}: {sbSB8Sp, sbDontBreak, 90}, - {sbSB7, prSp}: {sbSB8Sp, sbDontBreak, 90}, - {sbSB8Close, prSp}: {sbSB8Sp, sbDontBreak, 90}, - {sbSTerm, prClose}: {sbSB8aClose, sbDontBreak, 90}, - {sbSB8aClose, prClose}: {sbSB8aClose, sbDontBreak, 90}, - {sbSTerm, prSp}: {sbSB8aSp, sbDontBreak, 90}, - {sbSB8aClose, prSp}: {sbSB8aSp, sbDontBreak, 90}, - {sbATerm, prSep}: {sbParaSep, sbDontBreak, 90}, - {sbATerm, prCR}: {sbParaSep, sbDontBreak, 90}, - {sbATerm, prLF}: {sbParaSep, sbDontBreak, 90}, - {sbSB7, prSep}: {sbParaSep, sbDontBreak, 90}, - {sbSB7, prCR}: {sbParaSep, sbDontBreak, 90}, - {sbSB7, prLF}: {sbParaSep, sbDontBreak, 90}, - {sbSB8Close, prSep}: {sbParaSep, sbDontBreak, 90}, - {sbSB8Close, prCR}: {sbParaSep, sbDontBreak, 90}, - {sbSB8Close, prLF}: {sbParaSep, sbDontBreak, 90}, - {sbSTerm, prSep}: {sbParaSep, sbDontBreak, 90}, - {sbSTerm, prCR}: {sbParaSep, sbDontBreak, 90}, - {sbSTerm, prLF}: {sbParaSep, sbDontBreak, 90}, - {sbSB8aClose, prSep}: {sbParaSep, sbDontBreak, 90}, - {sbSB8aClose, prCR}: {sbParaSep, sbDontBreak, 90}, - {sbSB8aClose, prLF}: {sbParaSep, sbDontBreak, 90}, + case sbATerm | prClose<<32: + return sbSB8Close, false, 90 + case sbSB7 | prClose<<32: + return sbSB8Close, false, 90 + case sbSB8Close | prClose<<32: + return sbSB8Close, false, 90 + case sbATerm | prSp<<32: + return sbSB8Sp, false, 90 + case sbSB7 | prSp<<32: + return sbSB8Sp, false, 90 + case sbSB8Close | prSp<<32: + return sbSB8Sp, false, 90 + case sbSTerm | prClose<<32: + return sbSB8aClose, false, 90 + case sbSB8aClose | prClose<<32: + return sbSB8aClose, false, 90 + case sbSTerm | prSp<<32: + return sbSB8aSp, false, 90 + case sbSB8aClose | prSp<<32: + return sbSB8aSp, false, 90 + case sbATerm | prSep<<32: + return sbParaSep, false, 90 + case sbATerm | prCR<<32: + return sbParaSep, false, 90 + case sbATerm | prLF<<32: + return sbParaSep, false, 90 + case sbSB7 | prSep<<32: + return sbParaSep, false, 90 + case sbSB7 | prCR<<32: + return sbParaSep, false, 90 + case sbSB7 | prLF<<32: + return sbParaSep, false, 90 + case sbSB8Close | prSep<<32: + return sbParaSep, false, 90 + case sbSB8Close | prCR<<32: + return sbParaSep, false, 90 + case sbSB8Close | prLF<<32: + return sbParaSep, false, 90 + case sbSTerm | prSep<<32: + return sbParaSep, false, 90 + case sbSTerm | prCR<<32: + return sbParaSep, false, 90 + case sbSTerm | prLF<<32: + return sbParaSep, false, 90 + case sbSB8aClose | prSep<<32: + return sbParaSep, false, 90 + case sbSB8aClose | prCR<<32: + return sbParaSep, false, 90 + case sbSB8aClose | prLF<<32: + return sbParaSep, false, 90 // SB10. - {sbSB8Sp, prSp}: {sbSB8Sp, sbDontBreak, 100}, - {sbSB8aSp, prSp}: {sbSB8aSp, sbDontBreak, 100}, - {sbSB8Sp, prSep}: {sbParaSep, sbDontBreak, 100}, - {sbSB8Sp, prCR}: {sbParaSep, sbDontBreak, 100}, - {sbSB8Sp, prLF}: {sbParaSep, sbDontBreak, 100}, + case sbSB8Sp | prSp<<32: + return sbSB8Sp, false, 100 + case sbSB8aSp | prSp<<32: + return sbSB8aSp, false, 100 + case sbSB8Sp | prSep<<32: + return sbParaSep, false, 100 + case sbSB8Sp | prCR<<32: + return sbParaSep, false, 100 + case sbSB8Sp | prLF<<32: + return sbParaSep, false, 100 // SB11. 
- {sbATerm, prAny}: {sbAny, sbBreak, 110}, - {sbSB7, prAny}: {sbAny, sbBreak, 110}, - {sbSB8Close, prAny}: {sbAny, sbBreak, 110}, - {sbSB8Sp, prAny}: {sbAny, sbBreak, 110}, - {sbSTerm, prAny}: {sbAny, sbBreak, 110}, - {sbSB8aClose, prAny}: {sbAny, sbBreak, 110}, - {sbSB8aSp, prAny}: {sbAny, sbBreak, 110}, + case sbATerm | prAny<<32: + return sbAny, true, 110 + case sbSB7 | prAny<<32: + return sbAny, true, 110 + case sbSB8Close | prAny<<32: + return sbAny, true, 110 + case sbSB8Sp | prAny<<32: + return sbAny, true, 110 + case sbSTerm | prAny<<32: + return sbAny, true, 110 + case sbSB8aClose | prAny<<32: + return sbAny, true, 110 + case sbSB8aSp | prAny<<32: + return sbAny, true, 110 // We'll always break after ParaSep due to SB4. + + default: + return -1, false, -1 + } } // transitionSentenceBreakState determines the new state of the sentence break @@ -141,30 +215,27 @@ func transitionSentenceBreakState(state int, r rune, b []byte, str string) (newS // Find the applicable transition in the table. var rule int - transition, ok := sbTransitions[[2]int{state, nextProperty}] - if ok { - // We have a specific transition. We'll use it. - newState, sentenceBreak, rule = transition[0], transition[1] == sbBreak, transition[2] - } else { + newState, sentenceBreak, rule = sbTransitions(state, nextProperty) + if newState < 0 { // No specific transition found. Try the less specific ones. - transAnyProp, okAnyProp := sbTransitions[[2]int{state, prAny}] - transAnyState, okAnyState := sbTransitions[[2]int{sbAny, nextProperty}] - if okAnyProp && okAnyState { + anyPropState, anyPropProp, anyPropRule := sbTransitions(state, prAny) + anyStateState, anyStateProp, anyStateRule := sbTransitions(sbAny, nextProperty) + if anyPropState >= 0 && anyStateState >= 0 { // Both apply. We'll use a mix (see comments for grTransitions). - newState, sentenceBreak, rule = transAnyState[0], transAnyState[1] == sbBreak, transAnyState[2] - if transAnyProp[2] < transAnyState[2] { - sentenceBreak, rule = transAnyProp[1] == sbBreak, transAnyProp[2] + newState, sentenceBreak, rule = anyStateState, anyStateProp, anyStateRule + if anyPropRule < anyStateRule { + sentenceBreak, rule = anyPropProp, anyPropRule } - } else if okAnyProp { + } else if anyPropState >= 0 { // We only have a specific state. - newState, sentenceBreak, rule = transAnyProp[0], transAnyProp[1] == sbBreak, transAnyProp[2] + newState, sentenceBreak, rule = anyPropState, anyPropProp, anyPropRule // This branch will probably never be reached because okAnyState will // always be true given the current transition map. But we keep it here // for future modifications to the transition map where this may not be // true anymore. - } else if okAnyState { + } else if anyStateState >= 0 { // We only have a specific property. - newState, sentenceBreak, rule = transAnyState[0], transAnyState[1] == sbBreak, transAnyState[2] + newState, sentenceBreak, rule = anyStateState, anyStateProp, anyStateRule } else { // No known transition. SB999: Any × Any. newState, sentenceBreak, rule = sbAny, false, 9990 diff --git a/vendor/github.com/rivo/uniseg/step.go b/vendor/github.com/rivo/uniseg/step.go index 6eca4b5dc..2959b6909 100644 --- a/vendor/github.com/rivo/uniseg/step.go +++ b/vendor/github.com/rivo/uniseg/step.go @@ -100,7 +100,7 @@ func Step(b []byte, state int) (cluster, rest []byte, boundaries int, newState i if len(b) <= length { // If we're already past the end, there is nothing else to parse. 
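[editor's note: the line, sentence, and word break parsers all repeat the same fallback dance after the switch conversion: try the exact (state, prop) transition, then mix the (state, prAny) and (anyState, prop) wildcard entries by rule number. The generic sketch below condenses that shared logic; `resolve` and its parameters are hypothetical, not part of uniseg's API.]

package segment

// resolve condenses the fallback logic repeated in the callers above:
// prefer an exact (state, prop) transition; otherwise combine the
// (state, prAny) and (anyState, prop) entries, letting the entry with
// the lower rule number decide whether to break. trans stands in for
// lbTransitions, sbTransitions, or wbTransitions; anyState, prAny, and
// the def* values are the parser's wildcard and catch-all rule.
func resolve(
	trans func(state, prop int) (int, bool, int),
	state, prop, anyState, prAny int,
	defState int, defBreak bool, defRule int,
) (newState int, doBreak bool, rule int) {
	if s, b, r := trans(state, prop); s >= 0 {
		return s, b, r
	}
	apState, apBreak, apRule := trans(state, prAny)
	asState, asBreak, asRule := trans(anyState, prop)
	switch {
	case apState >= 0 && asState >= 0:
		// Both wildcards apply: the any-state entry supplies the new
		// state, but the lower-numbered rule wins the break decision.
		newState, doBreak, rule = asState, asBreak, asRule
		if apRule < asRule {
			doBreak, rule = apBreak, apRule
		}
		return
	case apState >= 0:
		return apState, apBreak, apRule
	case asState >= 0:
		return asState, asBreak, asRule
	}
	return defState, defBreak, defRule // e.g. WB999: Any ÷ Any.
}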
var prop int if state < 0 { - prop = property(graphemeCodePoints, r) + prop = propertyGraphemes(r) } else { prop = state >> shiftPropState } @@ -179,7 +179,7 @@ func StepString(str string, state int) (cluster, rest string, boundaries int, ne // Extract the first rune. r, length := utf8.DecodeRuneInString(str) if len(str) <= length { // If we're already past the end, there is nothing else to parse. - prop := property(graphemeCodePoints, r) + prop := propertyGraphemes(r) return str, "", LineMustBreak | (1 << shiftWord) | (1 << shiftSentence) | (runeWidth(r, prop) << ShiftWidth), grAny | (wbAny << shiftWordState) | (sbAny << shiftSentenceState) | (lbAny << shiftLineState) } diff --git a/vendor/github.com/rivo/uniseg/width.go b/vendor/github.com/rivo/uniseg/width.go index 12a57cc2e..975a9f134 100644 --- a/vendor/github.com/rivo/uniseg/width.go +++ b/vendor/github.com/rivo/uniseg/width.go @@ -1,5 +1,10 @@ package uniseg +// EastAsianAmbiguousWidth specifies the monospace width for East Asian +// characters classified as Ambiguous. The default is 1 but some rare fonts +// render them with a width of 2. +var EastAsianAmbiguousWidth = 1 + // runeWidth returns the monospace width for the given rune. The provided // grapheme property is a value mapped by the [graphemeCodePoints] table. // @@ -33,9 +38,11 @@ func runeWidth(r rune, graphemeProperty int) int { return 4 } - switch property(eastAsianWidth, r) { + switch propertyEastAsianWidth(r) { case prW, prF: return 2 + case prA: + return EastAsianAmbiguousWidth } return 1 diff --git a/vendor/github.com/rivo/uniseg/wordproperties.go b/vendor/github.com/rivo/uniseg/wordproperties.go index 805cc536c..277ca1006 100644 --- a/vendor/github.com/rivo/uniseg/wordproperties.go +++ b/vendor/github.com/rivo/uniseg/wordproperties.go @@ -1,13 +1,13 @@ -package uniseg - // Code generated via go generate from gen_properties.go. DO NOT EDIT. +package uniseg + // workBreakCodePoints are taken from -// https://www.unicode.org/Public/14.0.0/ucd/auxiliary/WordBreakProperty.txt +// https://www.unicode.org/Public/15.0.0/ucd/auxiliary/WordBreakProperty.txt // and -// https://unicode.org/Public/14.0.0/ucd/emoji/emoji-data.txt +// https://unicode.org/Public/15.0.0/ucd/emoji/emoji-data.txt // ("Extended_Pictographic" only) -// on September 10, 2022. See https://www.unicode.org/license.html for the Unicode +// on September 5, 2023. See https://www.unicode.org/license.html for the Unicode // license agreement. 
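[editor's note: the width.go hunk above introduces the exported EastAsianAmbiguousWidth knob and wires it into runeWidth via the new prA case. Characters with East Asian Width class "Ambiguous" (e.g. U+00B1 ± or Greek α) are narrow by default but rendered double-width by some CJK terminal fonts. A short usage sketch, using uniseg's exported StringWidth helper; the printed values assume the default grapheme properties for these runes.]

package main

import (
	"fmt"

	"github.com/rivo/uniseg"
)

func main() {
	s := "±α" // both runes have East Asian Width class A (Ambiguous)

	// Default: Ambiguous characters count as narrow (width 1 each).
	fmt.Println(uniseg.StringWidth(s)) // 2

	// For terminals/fonts that render Ambiguous characters wide, the
	// new package-level variable switches them to width 2.
	uniseg.EastAsianAmbiguousWidth = 2
	fmt.Println(uniseg.StringWidth(s)) // 4
}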
var workBreakCodePoints = [][3]int{ {0x000A, 0x000A, prLF}, // Cc @@ -318,6 +318,7 @@ var workBreakCodePoints = [][3]int{ {0x0CE2, 0x0CE3, prExtend}, // Mn [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL {0x0CE6, 0x0CEF, prNumeric}, // Nd [10] KANNADA DIGIT ZERO..KANNADA DIGIT NINE {0x0CF1, 0x0CF2, prALetter}, // Lo [2] KANNADA SIGN JIHVAMULIYA..KANNADA SIGN UPADHMANIYA + {0x0CF3, 0x0CF3, prExtend}, // Mc KANNADA SIGN COMBINING ANUSVARA ABOVE RIGHT {0x0D00, 0x0D01, prExtend}, // Mn [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU {0x0D02, 0x0D03, prExtend}, // Mc [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA {0x0D04, 0x0D0C, prALetter}, // Lo [9] MALAYALAM LETTER VEDIC ANUSVARA..MALAYALAM LETTER VOCALIC L @@ -357,7 +358,7 @@ var workBreakCodePoints = [][3]int{ {0x0E50, 0x0E59, prNumeric}, // Nd [10] THAI DIGIT ZERO..THAI DIGIT NINE {0x0EB1, 0x0EB1, prExtend}, // Mn LAO VOWEL SIGN MAI KAN {0x0EB4, 0x0EBC, prExtend}, // Mn [9] LAO VOWEL SIGN I..LAO SEMIVOWEL SIGN LO - {0x0EC8, 0x0ECD, prExtend}, // Mn [6] LAO TONE MAI EK..LAO NIGGAHITA + {0x0EC8, 0x0ECE, prExtend}, // Mn [7] LAO TONE MAI EK..LAO YAMAKKAN {0x0ED0, 0x0ED9, prNumeric}, // Nd [10] LAO DIGIT ZERO..LAO DIGIT NINE {0x0F00, 0x0F00, prALetter}, // Lo TIBETAN SYLLABLE OM {0x0F18, 0x0F19, prExtend}, // Mn [2] TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS @@ -1093,6 +1094,7 @@ var workBreakCodePoints = [][3]int{ {0x10E80, 0x10EA9, prALetter}, // Lo [42] YEZIDI LETTER ELIF..YEZIDI LETTER ET {0x10EAB, 0x10EAC, prExtend}, // Mn [2] YEZIDI COMBINING HAMZA MARK..YEZIDI COMBINING MADDA MARK {0x10EB0, 0x10EB1, prALetter}, // Lo [2] YEZIDI LETTER LAM WITH DOT ABOVE..YEZIDI LETTER YOT WITH CIRCUMFLEX ABOVE + {0x10EFD, 0x10EFF, prExtend}, // Mn [3] ARABIC SMALL LOW WORD SAKTA..ARABIC SMALL LOW WORD MADDA {0x10F00, 0x10F1C, prALetter}, // Lo [29] OLD SOGDIAN LETTER ALEPH..OLD SOGDIAN LETTER FINAL TAW WITH VERTICAL TAIL {0x10F27, 0x10F27, prALetter}, // Lo OLD SOGDIAN LIGATURE AYIN-DALETH {0x10F30, 0x10F45, prALetter}, // Lo [22] SOGDIAN LETTER ALEPH..SOGDIAN INDEPENDENT SHIN @@ -1157,6 +1159,8 @@ var workBreakCodePoints = [][3]int{ {0x11235, 0x11235, prExtend}, // Mc KHOJKI SIGN VIRAMA {0x11236, 0x11237, prExtend}, // Mn [2] KHOJKI SIGN NUKTA..KHOJKI SIGN SHADDA {0x1123E, 0x1123E, prExtend}, // Mn KHOJKI SIGN SUKUN + {0x1123F, 0x11240, prALetter}, // Lo [2] KHOJKI LETTER QA..KHOJKI LETTER SHORT I + {0x11241, 0x11241, prExtend}, // Mn KHOJKI VOWEL SIGN VOCALIC R {0x11280, 0x11286, prALetter}, // Lo [7] MULTANI LETTER A..MULTANI LETTER GA {0x11288, 0x11288, prALetter}, // Lo MULTANI LETTER GHA {0x1128A, 0x1128D, prALetter}, // Lo [4] MULTANI LETTER CA..MULTANI LETTER JJA @@ -1337,13 +1341,28 @@ var workBreakCodePoints = [][3]int{ {0x11EE0, 0x11EF2, prALetter}, // Lo [19] MAKASAR LETTER KA..MAKASAR ANGKA {0x11EF3, 0x11EF4, prExtend}, // Mn [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U {0x11EF5, 0x11EF6, prExtend}, // Mc [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O + {0x11F00, 0x11F01, prExtend}, // Mn [2] KAWI SIGN CANDRABINDU..KAWI SIGN ANUSVARA + {0x11F02, 0x11F02, prALetter}, // Lo KAWI SIGN REPHA + {0x11F03, 0x11F03, prExtend}, // Mc KAWI SIGN VISARGA + {0x11F04, 0x11F10, prALetter}, // Lo [13] KAWI LETTER A..KAWI LETTER O + {0x11F12, 0x11F33, prALetter}, // Lo [34] KAWI LETTER KA..KAWI LETTER JNYA + {0x11F34, 0x11F35, prExtend}, // Mc [2] KAWI VOWEL SIGN AA..KAWI VOWEL SIGN ALTERNATE AA + {0x11F36, 0x11F3A, prExtend}, // Mn [5] KAWI VOWEL SIGN I..KAWI VOWEL SIGN 
VOCALIC R + {0x11F3E, 0x11F3F, prExtend}, // Mc [2] KAWI VOWEL SIGN E..KAWI VOWEL SIGN AI + {0x11F40, 0x11F40, prExtend}, // Mn KAWI VOWEL SIGN EU + {0x11F41, 0x11F41, prExtend}, // Mc KAWI SIGN KILLER + {0x11F42, 0x11F42, prExtend}, // Mn KAWI CONJOINER + {0x11F50, 0x11F59, prNumeric}, // Nd [10] KAWI DIGIT ZERO..KAWI DIGIT NINE {0x11FB0, 0x11FB0, prALetter}, // Lo LISU LETTER YHA {0x12000, 0x12399, prALetter}, // Lo [922] CUNEIFORM SIGN A..CUNEIFORM SIGN U U {0x12400, 0x1246E, prALetter}, // Nl [111] CUNEIFORM NUMERIC SIGN TWO ASH..CUNEIFORM NUMERIC SIGN NINE U VARIANT FORM {0x12480, 0x12543, prALetter}, // Lo [196] CUNEIFORM SIGN AB TIMES NUN TENU..CUNEIFORM SIGN ZU5 TIMES THREE DISH TENU {0x12F90, 0x12FF0, prALetter}, // Lo [97] CYPRO-MINOAN SIGN CM001..CYPRO-MINOAN SIGN CM114 - {0x13000, 0x1342E, prALetter}, // Lo [1071] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH AA032 - {0x13430, 0x13438, prFormat}, // Cf [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT + {0x13000, 0x1342F, prALetter}, // Lo [1072] EGYPTIAN HIEROGLYPH A001..EGYPTIAN HIEROGLYPH V011D + {0x13430, 0x1343F, prFormat}, // Cf [16] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END WALLED ENCLOSURE + {0x13440, 0x13440, prExtend}, // Mn EGYPTIAN HIEROGLYPH MIRROR HORIZONTALLY + {0x13441, 0x13446, prALetter}, // Lo [6] EGYPTIAN HIEROGLYPH FULL BLANK..EGYPTIAN HIEROGLYPH WIDE LOST SIGN + {0x13447, 0x13455, prExtend}, // Mn [15] EGYPTIAN HIEROGLYPH MODIFIER DAMAGED AT TOP START..EGYPTIAN HIEROGLYPH MODIFIER DAMAGED {0x14400, 0x14646, prALetter}, // Lo [583] ANATOLIAN HIEROGLYPH A001..ANATOLIAN HIEROGLYPH A530 {0x16800, 0x16A38, prALetter}, // Lo [569] BAMUM LETTER PHASE-A NGKUE MFON..BAMUM LETTER PHASE-F VUEQ {0x16A40, 0x16A5E, prALetter}, // Lo [31] MRO LETTER TA..MRO LETTER TEK @@ -1374,6 +1393,7 @@ var workBreakCodePoints = [][3]int{ {0x1AFFD, 0x1AFFE, prKatakana}, // Lm [2] KATAKANA LETTER MINNAN NASALIZED TONE-7..KATAKANA LETTER MINNAN NASALIZED TONE-8 {0x1B000, 0x1B000, prKatakana}, // Lo KATAKANA LETTER ARCHAIC E {0x1B120, 0x1B122, prKatakana}, // Lo [3] KATAKANA LETTER ARCHAIC YI..KATAKANA LETTER ARCHAIC WU + {0x1B155, 0x1B155, prKatakana}, // Lo KATAKANA LETTER SMALL KO {0x1B164, 0x1B167, prKatakana}, // Lo [4] KATAKANA LETTER SMALL WI..KATAKANA LETTER SMALL N {0x1BC00, 0x1BC6A, prALetter}, // Lo [107] DUPLOYAN LETTER H..DUPLOYAN LETTER VOCALIC M {0x1BC70, 0x1BC7C, prALetter}, // Lo [13] DUPLOYAN AFFIX LEFT HORIZONTAL SECANT..DUPLOYAN AFFIX ATTACHED TANGENT HOOK @@ -1431,11 +1451,14 @@ var workBreakCodePoints = [][3]int{ {0x1DF00, 0x1DF09, prALetter}, // L& [10] LATIN SMALL LETTER FENG DIGRAPH WITH TRILL..LATIN SMALL LETTER T WITH HOOK AND RETROFLEX HOOK {0x1DF0A, 0x1DF0A, prALetter}, // Lo LATIN LETTER RETROFLEX CLICK WITH RETROFLEX HOOK {0x1DF0B, 0x1DF1E, prALetter}, // L& [20] LATIN SMALL LETTER ESH WITH DOUBLE BAR..LATIN SMALL LETTER S WITH CURL + {0x1DF25, 0x1DF2A, prALetter}, // L& [6] LATIN SMALL LETTER D WITH MID-HEIGHT LEFT HOOK..LATIN SMALL LETTER T WITH MID-HEIGHT LEFT HOOK {0x1E000, 0x1E006, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE {0x1E008, 0x1E018, prExtend}, // Mn [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU {0x1E01B, 0x1E021, prExtend}, // Mn [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI {0x1E023, 0x1E024, prExtend}, // Mn [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS {0x1E026, 0x1E02A, prExtend}, // Mn [5] COMBINING GLAGOLITIC LETTER 
YO..COMBINING GLAGOLITIC LETTER FITA + {0x1E030, 0x1E06D, prALetter}, // Lm [62] MODIFIER LETTER CYRILLIC SMALL A..MODIFIER LETTER CYRILLIC SMALL STRAIGHT U WITH STROKE + {0x1E08F, 0x1E08F, prExtend}, // Mn COMBINING CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I {0x1E100, 0x1E12C, prALetter}, // Lo [45] NYIAKENG PUACHUE HMONG LETTER MA..NYIAKENG PUACHUE HMONG LETTER W {0x1E130, 0x1E136, prExtend}, // Mn [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D {0x1E137, 0x1E13D, prALetter}, // Lm [7] NYIAKENG PUACHUE HMONG SIGN FOR PERSON..NYIAKENG PUACHUE HMONG SYLLABLE LENGTHENER @@ -1446,6 +1469,10 @@ var workBreakCodePoints = [][3]int{ {0x1E2C0, 0x1E2EB, prALetter}, // Lo [44] WANCHO LETTER AA..WANCHO LETTER YIH {0x1E2EC, 0x1E2EF, prExtend}, // Mn [4] WANCHO TONE TUP..WANCHO TONE KOINI {0x1E2F0, 0x1E2F9, prNumeric}, // Nd [10] WANCHO DIGIT ZERO..WANCHO DIGIT NINE + {0x1E4D0, 0x1E4EA, prALetter}, // Lo [27] NAG MUNDARI LETTER O..NAG MUNDARI LETTER ELL + {0x1E4EB, 0x1E4EB, prALetter}, // Lm NAG MUNDARI SIGN OJOD + {0x1E4EC, 0x1E4EF, prExtend}, // Mn [4] NAG MUNDARI SIGN MUHOR..NAG MUNDARI SIGN SUTUH + {0x1E4F0, 0x1E4F9, prNumeric}, // Nd [10] NAG MUNDARI DIGIT ZERO..NAG MUNDARI DIGIT NINE {0x1E7E0, 0x1E7E6, prALetter}, // Lo [7] ETHIOPIC SYLLABLE HHYA..ETHIOPIC SYLLABLE HHYO {0x1E7E8, 0x1E7EB, prALetter}, // Lo [4] ETHIOPIC SYLLABLE GURAGE HHWA..ETHIOPIC SYLLABLE HHWE {0x1E7ED, 0x1E7EE, prALetter}, // Lo [2] ETHIOPIC SYLLABLE GURAGE MWI..ETHIOPIC SYLLABLE GURAGE MWEE @@ -1740,7 +1767,8 @@ var workBreakCodePoints = [][3]int{ {0x1F6D3, 0x1F6D4, prExtendedPictographic}, // E0.0 [2] (🛓..🛔) STUPA..PAGODA {0x1F6D5, 0x1F6D5, prExtendedPictographic}, // E12.0 [1] (🛕) hindu temple {0x1F6D6, 0x1F6D7, prExtendedPictographic}, // E13.0 [2] (🛖..🛗) hut..elevator - {0x1F6D8, 0x1F6DC, prExtendedPictographic}, // E0.0 [5] (🛘..🛜) .. + {0x1F6D8, 0x1F6DB, prExtendedPictographic}, // E0.0 [4] (🛘..🛛) .. + {0x1F6DC, 0x1F6DC, prExtendedPictographic}, // E15.0 [1] (🛜) wireless {0x1F6DD, 0x1F6DF, prExtendedPictographic}, // E14.0 [3] (🛝..🛟) playground slide..ring buoy {0x1F6E0, 0x1F6E5, prExtendedPictographic}, // E0.7 [6] (🛠️..🛥️) hammer and wrench..motor boat {0x1F6E6, 0x1F6E8, prExtendedPictographic}, // E0.0 [3] (🛦..🛨) UP-POINTING MILITARY AIRPLANE..UP-POINTING SMALL AIRPLANE @@ -1757,7 +1785,7 @@ var workBreakCodePoints = [][3]int{ {0x1F6FA, 0x1F6FA, prExtendedPictographic}, // E12.0 [1] (🛺) auto rickshaw {0x1F6FB, 0x1F6FC, prExtendedPictographic}, // E13.0 [2] (🛻..🛼) pickup truck..roller skate {0x1F6FD, 0x1F6FF, prExtendedPictographic}, // E0.0 [3] (🛽..🛿) .. - {0x1F774, 0x1F77F, prExtendedPictographic}, // E0.0 [12] (🝴..🝿) .. + {0x1F774, 0x1F77F, prExtendedPictographic}, // E0.0 [12] (🝴..🝿) LOT OF FORTUNE..ORCUS {0x1F7D5, 0x1F7DF, prExtendedPictographic}, // E0.0 [11] (🟕..🟟) CIRCLED TRIANGLE.. {0x1F7E0, 0x1F7EB, prExtendedPictographic}, // E12.0 [12] (🟠..🟫) orange circle..brown square {0x1F7EC, 0x1F7EF, prExtendedPictographic}, // E0.0 [4] (🟬..🟯) .. @@ -1816,30 +1844,37 @@ var workBreakCodePoints = [][3]int{ {0x1FA00, 0x1FA6F, prExtendedPictographic}, // E0.0 [112] (🨀..🩯) NEUTRAL CHESS KING.. {0x1FA70, 0x1FA73, prExtendedPictographic}, // E12.0 [4] (🩰..🩳) ballet shoes..shorts {0x1FA74, 0x1FA74, prExtendedPictographic}, // E13.0 [1] (🩴) thong sandal - {0x1FA75, 0x1FA77, prExtendedPictographic}, // E0.0 [3] (🩵..🩷) .. 
+ {0x1FA75, 0x1FA77, prExtendedPictographic}, // E15.0 [3] (🩵..🩷) light blue heart..pink heart {0x1FA78, 0x1FA7A, prExtendedPictographic}, // E12.0 [3] (🩸..🩺) drop of blood..stethoscope {0x1FA7B, 0x1FA7C, prExtendedPictographic}, // E14.0 [2] (🩻..🩼) x-ray..crutch {0x1FA7D, 0x1FA7F, prExtendedPictographic}, // E0.0 [3] (🩽..🩿) .. {0x1FA80, 0x1FA82, prExtendedPictographic}, // E12.0 [3] (🪀..🪂) yo-yo..parachute {0x1FA83, 0x1FA86, prExtendedPictographic}, // E13.0 [4] (🪃..🪆) boomerang..nesting dolls - {0x1FA87, 0x1FA8F, prExtendedPictographic}, // E0.0 [9] (🪇..🪏) .. + {0x1FA87, 0x1FA88, prExtendedPictographic}, // E15.0 [2] (🪇..🪈) maracas..flute + {0x1FA89, 0x1FA8F, prExtendedPictographic}, // E0.0 [7] (🪉..🪏) .. {0x1FA90, 0x1FA95, prExtendedPictographic}, // E12.0 [6] (🪐..🪕) ringed planet..banjo {0x1FA96, 0x1FAA8, prExtendedPictographic}, // E13.0 [19] (🪖..🪨) military helmet..rock {0x1FAA9, 0x1FAAC, prExtendedPictographic}, // E14.0 [4] (🪩..🪬) mirror ball..hamsa - {0x1FAAD, 0x1FAAF, prExtendedPictographic}, // E0.0 [3] (🪭..🪯) .. + {0x1FAAD, 0x1FAAF, prExtendedPictographic}, // E15.0 [3] (🪭..🪯) folding hand fan..khanda {0x1FAB0, 0x1FAB6, prExtendedPictographic}, // E13.0 [7] (🪰..🪶) fly..feather {0x1FAB7, 0x1FABA, prExtendedPictographic}, // E14.0 [4] (🪷..🪺) lotus..nest with eggs - {0x1FABB, 0x1FABF, prExtendedPictographic}, // E0.0 [5] (🪻..🪿) .. + {0x1FABB, 0x1FABD, prExtendedPictographic}, // E15.0 [3] (🪻..🪽) hyacinth..wing + {0x1FABE, 0x1FABE, prExtendedPictographic}, // E0.0 [1] (🪾) + {0x1FABF, 0x1FABF, prExtendedPictographic}, // E15.0 [1] (🪿) goose {0x1FAC0, 0x1FAC2, prExtendedPictographic}, // E13.0 [3] (🫀..🫂) anatomical heart..people hugging {0x1FAC3, 0x1FAC5, prExtendedPictographic}, // E14.0 [3] (🫃..🫅) pregnant man..person with crown - {0x1FAC6, 0x1FACF, prExtendedPictographic}, // E0.0 [10] (🫆..🫏) .. + {0x1FAC6, 0x1FACD, prExtendedPictographic}, // E0.0 [8] (🫆..🫍) .. + {0x1FACE, 0x1FACF, prExtendedPictographic}, // E15.0 [2] (🫎..🫏) moose..donkey {0x1FAD0, 0x1FAD6, prExtendedPictographic}, // E13.0 [7] (🫐..🫖) blueberries..teapot {0x1FAD7, 0x1FAD9, prExtendedPictographic}, // E14.0 [3] (🫗..🫙) pouring liquid..jar - {0x1FADA, 0x1FADF, prExtendedPictographic}, // E0.0 [6] (🫚..🫟) .. + {0x1FADA, 0x1FADB, prExtendedPictographic}, // E15.0 [2] (🫚..🫛) ginger root..pea pod + {0x1FADC, 0x1FADF, prExtendedPictographic}, // E0.0 [4] (🫜..🫟) .. {0x1FAE0, 0x1FAE7, prExtendedPictographic}, // E14.0 [8] (🫠..🫧) melting face..bubbles - {0x1FAE8, 0x1FAEF, prExtendedPictographic}, // E0.0 [8] (🫨..🫯) .. + {0x1FAE8, 0x1FAE8, prExtendedPictographic}, // E15.0 [1] (🫨) shaking face + {0x1FAE9, 0x1FAEF, prExtendedPictographic}, // E0.0 [7] (🫩..🫯) .. {0x1FAF0, 0x1FAF6, prExtendedPictographic}, // E14.0 [7] (🫰..🫶) hand with index finger and thumb crossed..heart hands - {0x1FAF7, 0x1FAFF, prExtendedPictographic}, // E0.0 [9] (🫷..🫿) .. + {0x1FAF7, 0x1FAF8, prExtendedPictographic}, // E15.0 [2] (🫷..🫸) leftwards pushing hand..rightwards pushing hand + {0x1FAF9, 0x1FAFF, prExtendedPictographic}, // E0.0 [7] (🫹..🫿) .. {0x1FBF0, 0x1FBF9, prNumeric}, // Nd [10] SEGMENTED DIGIT ZERO..SEGMENTED DIGIT NINE {0x1FC00, 0x1FFFD, prExtendedPictographic}, // E0.0[1022] (🰀..🿽) .. 
{0xE0001, 0xE0001, prFormat}, // Cf LANGUAGE TAG diff --git a/vendor/github.com/rivo/uniseg/wordrules.go b/vendor/github.com/rivo/uniseg/wordrules.go index 325407e40..57a8c6831 100644 --- a/vendor/github.com/rivo/uniseg/wordrules.go +++ b/vendor/github.com/rivo/uniseg/wordrules.go @@ -22,82 +22,121 @@ const ( wbZWJBit = 16 // This bit is set for any states followed by at least one zero-width joiner (see WB4 and WB3c). ) -// The word break parser's breaking instructions. -const ( - wbDontBreak = iota - wbBreak -) - -// The word break parser's state transitions. It's anologous to grTransitions, -// see comments there for details. Unicode version 14.0.0. -var wbTransitions = map[[2]int][3]int{ +// wbTransitions implements the word break parser's state transitions. It's +// anologous to [grTransitions], see comments there for details. +// +// Unicode version 15.0.0. +func wbTransitions(state, prop int) (newState int, wordBreak bool, rule int) { + switch uint64(state) | uint64(prop)<<32 { // WB3b. - {wbAny, prNewline}: {wbNewline, wbBreak, 32}, - {wbAny, prCR}: {wbCR, wbBreak, 32}, - {wbAny, prLF}: {wbLF, wbBreak, 32}, + case wbAny | prNewline<<32: + return wbNewline, true, 32 + case wbAny | prCR<<32: + return wbCR, true, 32 + case wbAny | prLF<<32: + return wbLF, true, 32 // WB3a. - {wbNewline, prAny}: {wbAny, wbBreak, 31}, - {wbCR, prAny}: {wbAny, wbBreak, 31}, - {wbLF, prAny}: {wbAny, wbBreak, 31}, + case wbNewline | prAny<<32: + return wbAny, true, 31 + case wbCR | prAny<<32: + return wbAny, true, 31 + case wbLF | prAny<<32: + return wbAny, true, 31 // WB3. - {wbCR, prLF}: {wbLF, wbDontBreak, 30}, + case wbCR | prLF<<32: + return wbLF, false, 30 // WB3d. - {wbAny, prWSegSpace}: {wbWSegSpace, wbBreak, 9990}, - {wbWSegSpace, prWSegSpace}: {wbWSegSpace, wbDontBreak, 34}, + case wbAny | prWSegSpace<<32: + return wbWSegSpace, true, 9990 + case wbWSegSpace | prWSegSpace<<32: + return wbWSegSpace, false, 34 // WB5. - {wbAny, prALetter}: {wbALetter, wbBreak, 9990}, - {wbAny, prHebrewLetter}: {wbHebrewLetter, wbBreak, 9990}, - {wbALetter, prALetter}: {wbALetter, wbDontBreak, 50}, - {wbALetter, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 50}, - {wbHebrewLetter, prALetter}: {wbALetter, wbDontBreak, 50}, - {wbHebrewLetter, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 50}, + case wbAny | prALetter<<32: + return wbALetter, true, 9990 + case wbAny | prHebrewLetter<<32: + return wbHebrewLetter, true, 9990 + case wbALetter | prALetter<<32: + return wbALetter, false, 50 + case wbALetter | prHebrewLetter<<32: + return wbHebrewLetter, false, 50 + case wbHebrewLetter | prALetter<<32: + return wbALetter, false, 50 + case wbHebrewLetter | prHebrewLetter<<32: + return wbHebrewLetter, false, 50 // WB7. Transitions to wbWB7 handled by transitionWordBreakState(). - {wbWB7, prALetter}: {wbALetter, wbDontBreak, 70}, - {wbWB7, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 70}, + case wbWB7 | prALetter<<32: + return wbALetter, false, 70 + case wbWB7 | prHebrewLetter<<32: + return wbHebrewLetter, false, 70 // WB7a. - {wbHebrewLetter, prSingleQuote}: {wbAny, wbDontBreak, 71}, + case wbHebrewLetter | prSingleQuote<<32: + return wbAny, false, 71 // WB7c. Transitions to wbWB7c handled by transitionWordBreakState(). - {wbWB7c, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 73}, + case wbWB7c | prHebrewLetter<<32: + return wbHebrewLetter, false, 73 // WB8. 
- {wbAny, prNumeric}: {wbNumeric, wbBreak, 9990}, - {wbNumeric, prNumeric}: {wbNumeric, wbDontBreak, 80}, + case wbAny | prNumeric<<32: + return wbNumeric, true, 9990 + case wbNumeric | prNumeric<<32: + return wbNumeric, false, 80 // WB9. - {wbALetter, prNumeric}: {wbNumeric, wbDontBreak, 90}, - {wbHebrewLetter, prNumeric}: {wbNumeric, wbDontBreak, 90}, + case wbALetter | prNumeric<<32: + return wbNumeric, false, 90 + case wbHebrewLetter | prNumeric<<32: + return wbNumeric, false, 90 // WB10. - {wbNumeric, prALetter}: {wbALetter, wbDontBreak, 100}, - {wbNumeric, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 100}, + case wbNumeric | prALetter<<32: + return wbALetter, false, 100 + case wbNumeric | prHebrewLetter<<32: + return wbHebrewLetter, false, 100 // WB11. Transitions to wbWB11 handled by transitionWordBreakState(). - {wbWB11, prNumeric}: {wbNumeric, wbDontBreak, 110}, + case wbWB11 | prNumeric<<32: + return wbNumeric, false, 110 // WB13. - {wbAny, prKatakana}: {wbKatakana, wbBreak, 9990}, - {wbKatakana, prKatakana}: {wbKatakana, wbDontBreak, 130}, + case wbAny | prKatakana<<32: + return wbKatakana, true, 9990 + case wbKatakana | prKatakana<<32: + return wbKatakana, false, 130 // WB13a. - {wbAny, prExtendNumLet}: {wbExtendNumLet, wbBreak, 9990}, - {wbALetter, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131}, - {wbHebrewLetter, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131}, - {wbNumeric, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131}, - {wbKatakana, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131}, - {wbExtendNumLet, prExtendNumLet}: {wbExtendNumLet, wbDontBreak, 131}, + case wbAny | prExtendNumLet<<32: + return wbExtendNumLet, true, 9990 + case wbALetter | prExtendNumLet<<32: + return wbExtendNumLet, false, 131 + case wbHebrewLetter | prExtendNumLet<<32: + return wbExtendNumLet, false, 131 + case wbNumeric | prExtendNumLet<<32: + return wbExtendNumLet, false, 131 + case wbKatakana | prExtendNumLet<<32: + return wbExtendNumLet, false, 131 + case wbExtendNumLet | prExtendNumLet<<32: + return wbExtendNumLet, false, 131 // WB13b. - {wbExtendNumLet, prALetter}: {wbALetter, wbDontBreak, 132}, - {wbExtendNumLet, prHebrewLetter}: {wbHebrewLetter, wbDontBreak, 132}, - {wbExtendNumLet, prNumeric}: {wbNumeric, wbDontBreak, 132}, - {wbExtendNumLet, prKatakana}: {prKatakana, wbDontBreak, 132}, + case wbExtendNumLet | prALetter<<32: + return wbALetter, false, 132 + case wbExtendNumLet | prHebrewLetter<<32: + return wbHebrewLetter, false, 132 + case wbExtendNumLet | prNumeric<<32: + return wbNumeric, false, 132 + case wbExtendNumLet | prKatakana<<32: + return wbKatakana, false, 132 + + default: + return -1, false, -1 + } } // transitionWordBreakState determines the new state of the word break parser @@ -141,30 +180,27 @@ func transitionWordBreakState(state int, r rune, b []byte, str string) (newState // Find the applicable transition in the table. var rule int - transition, ok := wbTransitions[[2]int{state, nextProperty}] - if ok { - // We have a specific transition. We'll use it. - newState, wordBreak, rule = transition[0], transition[1] == wbBreak, transition[2] - } else { + newState, wordBreak, rule = wbTransitions(state, nextProperty) + if newState < 0 { // No specific transition found. Try the less specific ones. 
- transAnyProp, okAnyProp := wbTransitions[[2]int{state, prAny}] - transAnyState, okAnyState := wbTransitions[[2]int{wbAny, nextProperty}] - if okAnyProp && okAnyState { + anyPropState, anyPropWordBreak, anyPropRule := wbTransitions(state, prAny) + anyStateState, anyStateWordBreak, anyStateRule := wbTransitions(wbAny, nextProperty) + if anyPropState >= 0 && anyStateState >= 0 { // Both apply. We'll use a mix (see comments for grTransitions). - newState, wordBreak, rule = transAnyState[0], transAnyState[1] == wbBreak, transAnyState[2] - if transAnyProp[2] < transAnyState[2] { - wordBreak, rule = transAnyProp[1] == wbBreak, transAnyProp[2] + newState, wordBreak, rule = anyStateState, anyStateWordBreak, anyStateRule + if anyPropRule < anyStateRule { + wordBreak, rule = anyPropWordBreak, anyPropRule } - } else if okAnyProp { + } else if anyPropState >= 0 { // We only have a specific state. - newState, wordBreak, rule = transAnyProp[0], transAnyProp[1] == wbBreak, transAnyProp[2] + newState, wordBreak, rule = anyPropState, anyPropWordBreak, anyPropRule // This branch will probably never be reached because okAnyState will // always be true given the current transition map. But we keep it here // for future modifications to the transition map where this may not be // true anymore. - } else if okAnyState { + } else if anyStateState >= 0 { // We only have a specific property. - newState, wordBreak, rule = transAnyState[0], transAnyState[1] == wbBreak, transAnyState[2] + newState, wordBreak, rule = anyStateState, anyStateWordBreak, anyStateRule } else { // No known transition. WB999: Any ÷ Any. newState, wordBreak, rule = wbAny, true, 9990 diff --git a/vendor/go.opentelemetry.io/collector/featuregate/LICENSE b/vendor/go.opentelemetry.io/collector/featuregate/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/featuregate/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/collector/featuregate/Makefile b/vendor/go.opentelemetry.io/collector/featuregate/Makefile new file mode 100644 index 000000000..39734bfae --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/featuregate/Makefile @@ -0,0 +1 @@ +include ../Makefile.Common diff --git a/vendor/go.opentelemetry.io/collector/featuregate/README.md b/vendor/go.opentelemetry.io/collector/featuregate/README.md new file mode 100644 index 000000000..d3e3c802d --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/featuregate/README.md @@ -0,0 +1,77 @@ +# Collector Feature Gates + +This package provides a mechanism that allows operators to enable and disable +experimental or transitional features at deployment time. These flags should +be able to govern the behavior of the application starting as early as possible +and should be available to every component such that decisions may be made +based on flags at the component level. 
+
+## Usage
+
+Feature gates must be defined and registered with the global registry in
+an `init()` function. This makes the `Gate` available to be configured and
+queried with the defined [`Stage`](#feature-lifecycle) default value.
+A `Gate` can have a list of associated issues that allow users to refer to
+the issue and report any additional problems or understand the context of the `Gate`.
+Once a `Gate` has been marked as `Stable`, it must have a `RemovalVersion` set.
+
+```go
+var myFeatureGate = featuregate.GlobalRegistry().MustRegister(
+	"namespaced.uniqueIdentifier",
+	featuregate.StageStable,
+	featuregate.WithRegisterFromVersion("v0.65.0"),
+	featuregate.WithRegisterDescription("A brief description of what the gate controls"),
+	featuregate.WithRegisterReferenceURL("https://github.com/open-telemetry/opentelemetry-collector/issues/6167"),
+	featuregate.WithRegisterToVersion("v0.70.0"))
+```
+
+The status of the gate may later be checked by interrogating the global
+feature gate registry:
+
+```go
+if myFeatureGate.IsEnabled() {
+	setupNewFeature()
+}
+```
+
+Note that `IsEnabled` performs an atomic load on the `Gate` itself rather than
+querying the registry, so it is cheap to call at each decision point and does
+not need to be cached.
+
+## Controlling Gates
+
+Feature gates can be enabled or disabled via the CLI, with the
+`--feature-gates` flag. When using the CLI flag, gate
+identifiers must be presented as a comma-delimited list. Gate identifiers
+prefixed with `-` will disable the gate and prefixing with `+` or with no
+prefix will enable the gate.
+
+```shell
+otelcol --config=config.yaml --feature-gates=gate1,-gate2,+gate3
+```
+
+This will enable `gate1` and `gate3` and disable `gate2`.
+
+## Feature Lifecycle
+
+Features controlled by a `Gate` should follow a three-stage lifecycle,
+modeled after the [system used by Kubernetes](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/#feature-stages):
+
+1. An `alpha` stage where the feature is disabled by default and must be enabled
+   through a `Gate`.
+2. A `beta` stage where the feature has been well tested and is enabled by
+   default but can be disabled through a `Gate`.
+3. A generally available or `stable` stage where the feature is permanently enabled. At this stage
+   the gate should no longer be explicitly used. Disabling the gate will produce an error and
+   explicitly enabling will produce a warning log.
+4. A `stable` feature gate will be removed in the version specified by its `ToVersion` value.
+
+Features that prove unworkable in the `alpha` stage may be discontinued
+without proceeding to the `beta` stage. Instead, they will proceed to the
+`deprecated` stage, in which the feature is permanently disabled. A feature gate will
+be removed once it has been `deprecated` for at least 2 releases of the collector.
+
+Features that make it to the `beta` stage are intended to reach general availability but may still be discontinued.
+If, after wider use, it is determined that the gate should be discontinued it will be reverted to the `alpha` stage
+for 2 releases and then proceed to the `deprecated` stage. If instead it is ready for general availability it will
+proceed to the `stable` stage.
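Tying the lifecycle above to the API this patch vendors, here is a minimal sketch of guarding a `beta`-stage code path. The `scraper.newPipeline` ID, the versions, and the two scrape helpers are hypothetical placeholders, not part of the vendored code:

```go
package scraper

import "go.opentelemetry.io/collector/featuregate"

// Registered at package initialization time, so the gate exists before
// the --feature-gates flag is parsed.
var newPipelineGate = featuregate.GlobalRegistry().MustRegister(
	"scraper.newPipeline", // hypothetical dot-namespaced ID
	featuregate.StageBeta, // enabled by default, still user-disableable
	featuregate.WithRegisterDescription("Route scrapes through the new pipeline."),
	featuregate.WithRegisterFromVersion("v0.90.0"),
)

// Scrape picks a code path per call. IsEnabled is an atomic load on the
// Gate, so checking it on every scrape is inexpensive.
func Scrape() {
	if newPipelineGate.IsEnabled() {
		scrapeNewPipeline()
	} else {
		scrapeLegacy()
	}
}

func scrapeNewPipeline() { /* hypothetical new path */ }
func scrapeLegacy()      { /* hypothetical old path */ }
```

A user could then opt out with `--feature-gates=-scraper.newPipeline` until the gate reaches `stable`.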
diff --git a/vendor/go.opentelemetry.io/collector/featuregate/flag.go b/vendor/go.opentelemetry.io/collector/featuregate/flag.go
new file mode 100644
index 000000000..950129681
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/featuregate/flag.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package featuregate // import "go.opentelemetry.io/collector/featuregate"
+
+import (
+	"flag"
+	"strings"
+
+	"go.uber.org/multierr"
+)
+
+const (
+	featureGatesFlag            = "feature-gates"
+	featureGatesFlagDescription = "Comma-delimited list of feature gate identifiers. Prefix with '-' to disable the feature. '+' or no prefix will enable the feature."
+)
+
+// RegisterFlagsOption is an option for RegisterFlags.
+type RegisterFlagsOption interface {
+	private()
+}
+
+// RegisterFlags registers a flag on flagSet that directly applies feature gate statuses to a Registry.
+func (r *Registry) RegisterFlags(flagSet *flag.FlagSet, _ ...RegisterFlagsOption) {
+	flagSet.Var(&flagValue{reg: r}, featureGatesFlag, featureGatesFlagDescription)
+}
+
+// flagValue implements the flag.Value interface and directly applies feature gate statuses to a Registry.
+type flagValue struct {
+	reg *Registry
+}
+
+func (f *flagValue) String() string {
+	var ids []string
+	f.reg.VisitAll(func(g *Gate) {
+		id := g.ID()
+		if !g.IsEnabled() {
+			id = "-" + id
+		}
+		ids = append(ids, id)
+	})
+	return strings.Join(ids, ",")
+}
+
+func (f *flagValue) Set(s string) error {
+	if s == "" {
+		return nil
+	}
+
+	var errs error
+	ids := strings.Split(s, ",")
+	for i := range ids {
+		id := ids[i]
+		val := true
+		switch id[0] {
+		case '-':
+			id = id[1:]
+			val = false
+		case '+':
+			id = id[1:]
+		}
+		errs = multierr.Append(errs, f.reg.Set(id, val))
+	}
+	return errs
+}
diff --git a/vendor/go.opentelemetry.io/collector/featuregate/gate.go b/vendor/go.opentelemetry.io/collector/featuregate/gate.go
new file mode 100644
index 000000000..a250ceb9a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/collector/featuregate/gate.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package featuregate // import "go.opentelemetry.io/collector/featuregate"
+
+import (
+	"fmt"
+	"sync/atomic"
+
+	"github.com/hashicorp/go-version"
+)
+
+// Gate is an immutable object that is owned by the Registry and represents an individual feature that
+// may be enabled or disabled based on the lifecycle state of the feature and CLI flags specified by the user.
+type Gate struct {
+	id           string
+	description  string
+	referenceURL string
+	fromVersion  *version.Version
+	toVersion    *version.Version
+	stage        Stage
+	enabled      *atomic.Bool
+}
+
+// ID returns the id of the Gate.
+func (g *Gate) ID() string {
+	return g.id
+}
+
+// IsEnabled returns true if the feature described by the Gate is enabled.
+func (g *Gate) IsEnabled() bool {
+	return g.enabled.Load()
+}
+
+// Description returns the description for the Gate.
+func (g *Gate) Description() string {
+	return g.description
+}
+
+// Stage returns the Gate's lifecycle stage.
+func (g *Gate) Stage() Stage {
+	return g.stage
+}
+
+// ReferenceURL returns the URL to the contextual information about the Gate.
+func (g *Gate) ReferenceURL() string {
+	return g.referenceURL
+}
+
+// FromVersion returns the version in which the Gate was added.
+func (g *Gate) FromVersion() string {
+	return fmt.Sprintf("v%s", g.fromVersion)
+}
+
+// ToVersion returns the version in which a Gate in StageStable or StageDeprecated will be removed.
+func (g *Gate) ToVersion() string { + return fmt.Sprintf("v%s", g.toVersion) +} diff --git a/vendor/go.opentelemetry.io/collector/featuregate/registry.go b/vendor/go.opentelemetry.io/collector/featuregate/registry.go new file mode 100644 index 000000000..b43a33e44 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/featuregate/registry.go @@ -0,0 +1,207 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package featuregate // import "go.opentelemetry.io/collector/featuregate" + +import ( + "fmt" + "net/url" + "regexp" + "sort" + "sync" + "sync/atomic" + + "github.com/hashicorp/go-version" +) + +var ( + globalRegistry = NewRegistry() + + // idRegexp is used to validate the ID of a Gate. + // IDs' characters must be alphanumeric or dots. + idRegexp = regexp.MustCompile(`^[0-9a-zA-Z\.]*$`) +) + +// GlobalRegistry returns the global Registry. +func GlobalRegistry() *Registry { + return globalRegistry +} + +type Registry struct { + gates sync.Map +} + +// NewRegistry returns a new empty Registry. +func NewRegistry() *Registry { + return &Registry{} +} + +// RegisterOption allows to configure additional information about a Gate during registration. +type RegisterOption interface { + apply(g *Gate) error +} + +type registerOptionFunc func(g *Gate) error + +func (ro registerOptionFunc) apply(g *Gate) error { + return ro(g) +} + +// WithRegisterDescription adds description for the Gate. +func WithRegisterDescription(description string) RegisterOption { + return registerOptionFunc(func(g *Gate) error { + g.description = description + return nil + }) +} + +// WithRegisterReferenceURL adds a URL that has all the contextual information about the Gate. +// referenceURL must be a valid URL as defined by `net/url.Parse`. +func WithRegisterReferenceURL(referenceURL string) RegisterOption { + return registerOptionFunc(func(g *Gate) error { + if _, err := url.Parse(referenceURL); err != nil { + return fmt.Errorf("WithRegisterReferenceURL: invalid reference URL %q: %w", referenceURL, err) + } + + g.referenceURL = referenceURL + return nil + }) +} + +// WithRegisterFromVersion is used to set the Gate "FromVersion". +// The "FromVersion" contains the Collector release when a feature is introduced. +// fromVersion must be a valid version string: it may start with 'v' and must be in the format Major.Minor.Patch[-PreRelease]. +// PreRelease is optional and may have dashes, tildes and ASCII alphanumeric characters. +func WithRegisterFromVersion(fromVersion string) RegisterOption { + return registerOptionFunc(func(g *Gate) error { + from, err := version.NewVersion(fromVersion) + if err != nil { + return fmt.Errorf("WithRegisterFromVersion: invalid version %q: %w", fromVersion, err) + } + + g.fromVersion = from + return nil + }) +} + +// WithRegisterToVersion is used to set the Gate "ToVersion". +// The "ToVersion", if not empty, contains the last Collector release in which you can still use a feature gate. +// If the feature stage is either "Deprecated" or "Stable", the "ToVersion" is the Collector release when the feature is removed. +// toVersion must be a valid version string: it may start with 'v' and must be in the format Major.Minor.Patch[-PreRelease]. +// PreRelease is optional and may have dashes, tildes and ASCII alphanumeric characters. 
+func WithRegisterToVersion(toVersion string) RegisterOption {
+	return registerOptionFunc(func(g *Gate) error {
+		to, err := version.NewVersion(toVersion)
+		if err != nil {
+			return fmt.Errorf("WithRegisterToVersion: invalid version %q: %w", toVersion, err)
+		}
+
+		g.toVersion = to
+		return nil
+	})
+}
+
+// MustRegister is like Register but panics if an invalid ID or gate options are provided.
+func (r *Registry) MustRegister(id string, stage Stage, opts ...RegisterOption) *Gate {
+	g, err := r.Register(id, stage, opts...)
+	if err != nil {
+		panic(err)
+	}
+	return g
+}
+
+func validateID(id string) error {
+	if id == "" {
+		return fmt.Errorf("empty ID")
+	}
+
+	if !idRegexp.MatchString(id) {
+		return fmt.Errorf("invalid character(s) in ID")
+	}
+	return nil
+}
+
+// Register a Gate and return it. The returned Gate can be used to check if it is enabled or not.
+// id must be an ASCII alphanumeric nonempty string. Dots are allowed for namespacing.
+func (r *Registry) Register(id string, stage Stage, opts ...RegisterOption) (*Gate, error) {
+	if err := validateID(id); err != nil {
+		return nil, fmt.Errorf("invalid ID %q: %w", id, err)
+	}
+
+	g := &Gate{
+		id:    id,
+		stage: stage,
+	}
+	for _, opt := range opts {
+		err := opt.apply(g)
+		if err != nil {
+			return nil, fmt.Errorf("failed to apply option: %w", err)
+		}
+	}
+	switch g.stage {
+	case StageAlpha, StageDeprecated:
+		g.enabled = &atomic.Bool{}
+	case StageBeta, StageStable:
+		enabled := &atomic.Bool{}
+		enabled.Store(true)
+		g.enabled = enabled
+	default:
+		return nil, fmt.Errorf("unknown stage value %q for gate %q", stage, id)
+	}
+	if (g.stage == StageStable || g.stage == StageDeprecated) && g.toVersion == nil {
+		return nil, fmt.Errorf("no removal version set for %v gate %q", g.stage.String(), id)
+	}
+
+	if g.fromVersion != nil && g.toVersion != nil && g.toVersion.LessThan(g.fromVersion) {
+		return nil, fmt.Errorf("toVersion %q is before fromVersion %q", g.toVersion, g.fromVersion)
+	}
+
+	if _, loaded := r.gates.LoadOrStore(id, g); loaded {
+		return nil, fmt.Errorf("attempted to add pre-existing gate %q", id)
+	}
+	return g, nil
+}
+
+// Set sets the enabled value for the Gate identified by the given id.
+func (r *Registry) Set(id string, enabled bool) error {
+	v, ok := r.gates.Load(id)
+	if !ok {
+		validGates := []string{}
+		r.VisitAll(func(g *Gate) {
+			validGates = append(validGates, g.ID())
+		})
+		return fmt.Errorf("no such feature gate %q. valid gates: %v", id, validGates)
+	}
+	g := v.(*Gate)
+
+	switch g.stage {
+	case StageStable:
+		if !enabled {
+			return fmt.Errorf("feature gate %q is stable, can not be disabled", id)
+		}
+		fmt.Printf("Feature gate %q is stable and already enabled. It will be removed in version %v and continued use of the gate after version %v will result in an error.\n", id, g.toVersion, g.toVersion)
+	case StageDeprecated:
+		if enabled {
+			return fmt.Errorf("feature gate %q is deprecated, can not be enabled", id)
+		}
+		fmt.Printf("Feature gate %q is deprecated and already disabled. It will be removed in version %v and continued use of the gate after version %v will result in an error.\n", id, g.toVersion, g.toVersion)
+	default:
+		g.enabled.Store(enabled)
+	}
+	return nil
+}
+
+// VisitAll visits all the gates in lexicographical order, calling fn for each.
+func (r *Registry) VisitAll(fn func(*Gate)) { + var gates []*Gate + r.gates.Range(func(key, value any) bool { + gates = append(gates, value.(*Gate)) + return true + }) + sort.Slice(gates, func(i, j int) bool { + return gates[i].ID() < gates[j].ID() + }) + for i := range gates { + fn(gates[i]) + } +} diff --git a/vendor/go.opentelemetry.io/collector/featuregate/stage.go b/vendor/go.opentelemetry.io/collector/featuregate/stage.go new file mode 100644 index 000000000..f2be1b248 --- /dev/null +++ b/vendor/go.opentelemetry.io/collector/featuregate/stage.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package featuregate // import "go.opentelemetry.io/collector/featuregate" + +// Stage represents the Gate's lifecycle and what is the expected state of it. +type Stage int8 + +const ( + // StageAlpha is used when creating a new feature and the Gate must be explicitly enabled + // by the operator. + // + // The Gate will be disabled by default. + StageAlpha Stage = iota + // StageBeta is used when the feature gate is well tested and is enabled by default, + // but can be disabled by a Gate. + // + // The Gate will be enabled by default. + StageBeta + // StageStable is used when feature is permanently enabled and can not be disabled by a Gate. + // This value is used to provide feedback to the user that the gate will be removed in the next versions. + // + // The Gate will be enabled by default and will return an error if disabled. + StageStable + // StageDeprecated is used when feature is permanently disabled and can not be enabled by a Gate. + // This value is used to provide feedback to the user that the gate will be removed in the next versions. + // + // The Gate will be disabled by default and will return an error if modified. + StageDeprecated +) + +func (s Stage) String() string { + switch s { + case StageAlpha: + return "Alpha" + case StageBeta: + return "Beta" + case StageStable: + return "Stable" + case StageDeprecated: + return "Deprecated" + } + return "Unknown" +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index e41e6df61..bbe5d6585 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -43,7 +43,7 @@ type serverHandler struct { *config } -// NewServerHandler creates a stats.Handler for gRPC server. +// NewServerHandler creates a stats.Handler for a gRPC server. func NewServerHandler(opts ...Option) stats.Handler { h := &serverHandler{ config: newConfig(opts, "server"), @@ -54,9 +54,6 @@ func NewServerHandler(opts ...Option) stats.Handler { // TagConn can attach some information to the given context. func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { - span := trace.SpanFromContext(ctx) - attrs := peerAttr(peerFromCtx(ctx)) - span.SetAttributes(attrs...) return ctx } @@ -85,14 +82,15 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont // HandleRPC processes the RPC stats. func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - h.handleRPC(ctx, rs) + isServer := true + h.handleRPC(ctx, rs, isServer) } type clientHandler struct { *config } -// NewClientHandler creates a stats.Handler for gRPC client. 
+// NewClientHandler creates a stats.Handler for a gRPC client. func NewClientHandler(opts ...Option) stats.Handler { h := &clientHandler{ config: newConfig(opts, "client"), @@ -121,14 +119,12 @@ func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont // HandleRPC processes the RPC stats. func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) { - h.handleRPC(ctx, rs) + isServer := false + h.handleRPC(ctx, rs, isServer) } // TagConn can attach some information to the given context. -func (h *clientHandler) TagConn(ctx context.Context, cti *stats.ConnTagInfo) context.Context { - span := trace.SpanFromContext(ctx) - attrs := peerAttr(cti.RemoteAddr.String()) - span.SetAttributes(attrs...) +func (h *clientHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context { return ctx } @@ -137,20 +133,23 @@ func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) { // no-op } -func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) { +func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool) { // nolint: revive // isServer is not a control flag. span := trace.SpanFromContext(ctx) - gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext) + var metricAttrs []attribute.KeyValue var messageId int64 - metricAttrs := make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) - metricAttrs = append(metricAttrs, gctx.metricAttrs...) - wctx := withoutCancel(ctx) + + gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext) + if gctx != nil { + metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1) + metricAttrs = append(metricAttrs, gctx.metricAttrs...) + } switch rs := rs.(type) { case *stats.Begin: case *stats.InPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesReceived, 1) - c.rpcRequestSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) } if c.ReceivedEvent { @@ -166,7 +165,7 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) { case *stats.OutPayload: if gctx != nil { messageId = atomic.AddInt64(&gctx.messagesSent, 1) - c.rpcResponseSize.Record(wctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) + c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributes(metricAttrs...)) } if c.SentEvent { @@ -185,7 +184,12 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) { if rs.Error != nil { s, _ := status.FromError(rs.Error) - span.SetStatus(codes.Error, s.Message()) + if isServer { + statusCode, msg := serverStatus(s) + span.SetStatus(statusCode, msg) + } else { + span.SetStatus(codes.Error, s.Message()) + } rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(s.Code())) } else { rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(grpc_codes.OK)) @@ -198,41 +202,12 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats) { // Use floating point division here for higher precision (instead of Millisecond method). 
elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond) - c.rpcDuration.Record(wctx, elapsedTime, metric.WithAttributes(metricAttrs...)) - c.rpcRequestsPerRPC.Record(wctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...)) - c.rpcResponsesPerRPC.Record(wctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...)) + c.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributes(metricAttrs...)) + if gctx != nil { + c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), metric.WithAttributes(metricAttrs...)) + c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), metric.WithAttributes(metricAttrs...)) + } default: return } } - -func withoutCancel(parent context.Context) context.Context { - if parent == nil { - panic("cannot create context from nil parent") - } - return withoutCancelCtx{parent} -} - -type withoutCancelCtx struct { - c context.Context -} - -func (withoutCancelCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (withoutCancelCtx) Done() <-chan struct{} { - return nil -} - -func (withoutCancelCtx) Err() error { - return nil -} - -func (w withoutCancelCtx) Value(key any) any { - return w.c.Value(key) -} - -func (w withoutCancelCtx) String() string { - return "withoutCancel" -} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index f47c8a675..ae1f8ea5e 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -16,7 +16,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. 
func Version() string { - return "0.46.1" + return "0.47.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index 9a8260059..af84f0e4b 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -26,7 +26,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/metric" "go.opentelemetry.io/otel/propagation" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" "go.opentelemetry.io/otel/trace" ) @@ -43,10 +43,12 @@ type middleware struct { writeEvent bool filters []Filter spanNameFormatter func(string, *http.Request) string - counters map[string]metric.Int64Counter - valueRecorders map[string]metric.Float64Histogram publicEndpoint bool publicEndpointFn func(*http.Request) bool + + requestBytesCounter metric.Int64Counter + responseBytesCounter metric.Int64Counter + serverLatencyMeasure metric.Float64Histogram } func defaultHandlerFormatter(operation string, _ *http.Request) string { @@ -104,33 +106,27 @@ func handleErr(err error) { } func (h *middleware) createMeasures() { - h.counters = make(map[string]metric.Int64Counter) - h.valueRecorders = make(map[string]metric.Float64Histogram) - - requestBytesCounter, err := h.meter.Int64Counter( + var err error + h.requestBytesCounter, err = h.meter.Int64Counter( RequestContentLength, metric.WithUnit("By"), metric.WithDescription("Measures the size of HTTP request content length (uncompressed)"), ) handleErr(err) - responseBytesCounter, err := h.meter.Int64Counter( + h.responseBytesCounter, err = h.meter.Int64Counter( ResponseContentLength, metric.WithUnit("By"), metric.WithDescription("Measures the size of HTTP response content length (uncompressed)"), ) handleErr(err) - serverLatencyMeasure, err := h.meter.Float64Histogram( + h.serverLatencyMeasure, err = h.meter.Float64Histogram( ServerLatency, metric.WithUnit("ms"), metric.WithDescription("Measures the duration of HTTP request handling"), ) handleErr(err) - - h.counters[RequestContentLength] = requestBytesCounter - h.counters[ResponseContentLength] = responseBytesCounter - h.valueRecorders[ServerLatency] = serverLatencyMeasure } // serveHTTP sets up tracing and calls the given next http.Handler with the span @@ -236,13 +232,13 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode)) } o := metric.WithAttributes(attributes...) - h.counters[RequestContentLength].Add(ctx, bw.read, o) - h.counters[ResponseContentLength].Add(ctx, rww.written, o) + h.requestBytesCounter.Add(ctx, bw.read, o) + h.responseBytesCounter.Add(ctx, rww.written, o) // Use floating point division here for higher precision (instead of Millisecond method). 
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) - h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, o) + h.serverLatencyMeasure.Record(ctx, elapsedTime, o) } func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) { diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go index d3dede9eb..794d4c26a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/httpconv.go @@ -24,7 +24,7 @@ import ( "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) // HTTPClientResponse returns trace attributes for an HTTP response received by a @@ -149,9 +149,7 @@ func HTTPResponseHeader(h http.Header) []attribute.KeyValue { type httpConv struct { NetConv *netConv - EnduserIDKey attribute.Key HTTPClientIPKey attribute.Key - HTTPFlavorKey attribute.Key HTTPMethodKey attribute.Key HTTPRequestContentLengthKey attribute.Key HTTPResponseContentLengthKey attribute.Key @@ -161,15 +159,13 @@ type httpConv struct { HTTPStatusCodeKey attribute.Key HTTPTargetKey attribute.Key HTTPURLKey attribute.Key - HTTPUserAgentKey attribute.Key + UserAgentOriginalKey attribute.Key } var hc = &httpConv{ NetConv: nc, - EnduserIDKey: semconv.EnduserIDKey, HTTPClientIPKey: semconv.HTTPClientIPKey, - HTTPFlavorKey: semconv.HTTPFlavorKey, HTTPMethodKey: semconv.HTTPMethodKey, HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, @@ -179,7 +175,7 @@ var hc = &httpConv{ HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, HTTPTargetKey: semconv.HTTPTargetKey, HTTPURLKey: semconv.HTTPURLKey, - HTTPUserAgentKey: semconv.HTTPUserAgentKey, + UserAgentOriginalKey: semconv.UserAgentOriginalKey, } // ClientResponse returns attributes for an HTTP response received by a client @@ -193,6 +189,10 @@ var hc = &httpConv{ // // append(ClientResponse(resp), ClientRequest(resp.Request)...) func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.status_code int + http.response_content_length int + */ var n int if resp.StatusCode > 0 { n++ @@ -212,11 +212,31 @@ func (c *httpConv) ClientResponse(resp *http.Response) []attribute.KeyValue { } // ClientRequest returns attributes for an HTTP request made by a client. The -// following attributes are always returned: "http.url", "http.flavor", -// "http.method", "net.peer.name". The following attributes are returned if the -// related values are defined in req: "net.peer.port", "http.user_agent", -// "http.request_content_length", "enduser.id". +// following attributes are always returned: "http.url", "http.method", +// "net.peer.name". The following attributes are returned if the related values +// are defined in req: "net.peer.port", "http.user_agent", +// "http.request_content_length", "user_agent.original". 
func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { + /* The following semantic conventions are returned if present: + http.method string + user_agent.original string + http.url string + net.peer.name string + net.peer.port int + http.request_content_length int + */ + + /* The following semantic conventions are not returned: + http.status_code This requires the response. See ClientResponse. + http.response_content_length This requires the response. See ClientResponse. + net.sock.family This requires the socket used. + net.sock.peer.addr This requires the socket used. + net.sock.peer.name This requires the socket used. + net.sock.peer.port This requires the socket used. + http.resend_count This is something outside of a single request. + net.protocol.name The value is the Request is ignored, and the go client will always use "http". + net.protocol.version The value in the Request is ignored, and the go client will always use 1.1 or 2.0. + */ n := 3 // URL, peer name, proto, and method. var h string if req.URL != nil { @@ -234,14 +254,10 @@ func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { if req.ContentLength > 0 { n++ } - userID, _, hasUserID := req.BasicAuth() - if hasUserID { - n++ - } + attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.method(req.Method)) - attrs = append(attrs, c.flavor(req.Proto)) var u string if req.URL != nil { @@ -260,17 +276,13 @@ func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { } if useragent != "" { - attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) + attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) } if l := req.ContentLength; l > 0 { attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) } - if hasUserID { - attrs = append(attrs, c.EnduserIDKey.String(userID)) - } - return attrs } @@ -291,18 +303,35 @@ func (c *httpConv) ClientRequest(req *http.Request) []attribute.KeyValue { // The req Host will be used to determine the server instead. // // The following attributes are always returned: "http.method", "http.scheme", -// "http.flavor", "http.target", "net.host.name". The following attributes are -// returned if they related values are defined in req: "net.host.port", -// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", -// "http.client_ip". +// "http.target", "net.host.name". The following attributes are returned if they +// related values are defined in req: "net.host.port", "net.sock.peer.addr", +// "net.sock.peer.port", "user_agent.original", "http.client_ip", +// "net.protocol.name", "net.protocol.version". func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { - // TODO: This currently does not add the specification required - // `http.target` attribute. It has too high of a cardinality to safely be - // added. An alternate should be added, or this comment removed, when it is - // addressed by the specification. If it is ultimately decided to continue - // not including the attribute, the HTTPTargetKey field of the httpConv - // should be removed as well. + /* The following semantic conventions are returned if present: + http.method string + http.scheme string + net.host.name string + net.host.port int + net.sock.peer.addr string + net.sock.peer.port int + user_agent.original string + http.client_ip string + net.protocol.name string Note: not set if the value is "http". + net.protocol.version string + http.target string Note: doesn't include the query parameter. 
+ */ + /* The following semantic conventions are not returned: + http.status_code This requires the response. + http.request_content_length This requires the len() of body, which can mutate it. + http.response_content_length This requires the response. + http.route This is not available. + net.sock.peer.name This would require a DNS lookup. + net.sock.host.addr The request doesn't have access to the underlying socket. + net.sock.host.port The request doesn't have access to the underlying socket. + + */ n := 4 // Method, scheme, proto, and host name. var host string var p int @@ -330,19 +359,31 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K if useragent != "" { n++ } - userID, _, hasUserID := req.BasicAuth() - if hasUserID { - n++ - } + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) if clientIP != "" { n++ } + + var target string + if req.URL != nil { + target = req.URL.Path + if target != "" { + n++ + } + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" && protoName != "http" { + n++ + } + if protoVersion != "" { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.method(req.Method)) attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.flavor(req.Proto)) attrs = append(attrs, c.NetConv.HostName(host)) if hostPort > 0 { @@ -359,17 +400,24 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K } if useragent != "" { - attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) - } - - if hasUserID { - attrs = append(attrs, c.EnduserIDKey.String(userID)) + attrs = append(attrs, c.UserAgentOriginalKey.String(useragent)) } if clientIP != "" { attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) } + if target != "" { + attrs = append(attrs, c.HTTPTargetKey.String(target)) + } + + if protoName != "" && protoName != "http" { + attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) + } + return attrs } @@ -394,14 +442,18 @@ func (c *httpConv) ServerRequest(server string, req *http.Request) []attribute.K // "http.flavor", "net.host.name". The following attributes are // returned if they related values are defined in req: "net.host.port". func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attribute.KeyValue { - // TODO: This currently does not add the specification required - // `http.target` attribute. It has too high of a cardinality to safely be - // added. An alternate should be added, or this comment removed, when it is - // addressed by the specification. If it is ultimately decided to continue - // not including the attribute, the HTTPTargetKey field of the httpConv - // should be removed as well. + /* The following semantic conventions are returned if present: + http.scheme string + http.route string + http.method string + http.status_code int + net.host.name string + net.host.port int + net.protocol.name string Note: not set if the value is "http". + net.protocol.version string + */ - n := 4 // Method, scheme, proto, and host name. + n := 3 // Method, scheme, and host name. 
var host string var p int if server == "" { @@ -417,16 +469,29 @@ func (c *httpConv) ServerRequestMetrics(server string, req *http.Request) []attr if hostPort > 0 { n++ } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + n++ + } + if protoVersion != "" { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) attrs = append(attrs, c.methodMetric(req.Method)) attrs = append(attrs, c.scheme(req.TLS != nil)) - attrs = append(attrs, c.flavor(req.Proto)) attrs = append(attrs, c.NetConv.HostName(host)) if hostPort > 0 { attrs = append(attrs, c.NetConv.HostPort(hostPort)) } + if protoName != "" { + attrs = append(attrs, c.NetConv.NetProtocolName.String(protoName)) + } + if protoVersion != "" { + attrs = append(attrs, c.NetConv.NetProtocolVersion.String(protoVersion)) + } return attrs } @@ -455,21 +520,6 @@ func (c *httpConv) scheme(https bool) attribute.KeyValue { // nolint:revive return c.HTTPSchemeHTTP } -func (c *httpConv) flavor(proto string) attribute.KeyValue { - switch proto { - case "HTTP/1.0": - return c.HTTPFlavorKey.String("1.0") - case "HTTP/1.1": - return c.HTTPFlavorKey.String("1.1") - case "HTTP/2": - return c.HTTPFlavorKey.String("2.0") - case "HTTP/3": - return c.HTTPFlavorKey.String("3.0") - default: - return c.HTTPFlavorKey.String(proto) - } -} - func serverClientIP(xForwardedFor string) string { if idx := strings.Index(xForwardedFor, ","); idx >= 0 { xForwardedFor = xForwardedFor[:idx] diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index bde889343..cb4cb9355 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -22,7 +22,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + semconv "go.opentelemetry.io/otel/semconv/v1.20.0" ) // NetTransport returns a trace attribute describing the transport protocol of the @@ -57,6 +57,8 @@ type netConv struct { NetHostPortKey attribute.Key NetPeerNameKey attribute.Key NetPeerPortKey attribute.Key + NetProtocolName attribute.Key + NetProtocolVersion attribute.Key NetSockFamilyKey attribute.Key NetSockPeerAddrKey attribute.Key NetSockPeerPortKey attribute.Key @@ -73,6 +75,8 @@ var nc = &netConv{ NetHostPortKey: semconv.NetHostPortKey, NetPeerNameKey: semconv.NetPeerNameKey, NetPeerPortKey: semconv.NetPeerPortKey, + NetProtocolName: semconv.NetProtocolNameKey, + NetProtocolVersion: semconv.NetProtocolVersionKey, NetSockFamilyKey: semconv.NetSockFamilyKey, NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, NetSockPeerPortKey: semconv.NetSockPeerPortKey, @@ -366,3 +370,9 @@ func splitHostPort(hostport string) (host string, port int) { } return host, int(p) } + +func netProtocol(proto string) (name string, version string) { + name, version, _ = strings.Cut(proto, "/") + name = strings.ToLower(name) + return name, version +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index bd41c1804..9a4a02143 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -16,7 +16,7 @@ package otelhttp 
// import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.46.1" + return "0.47.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 24874f856..fe670d79c 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,46 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.22.0/0.45.0] 2024-01-17 + +### Added + +- The `go.opentelemetry.io/otel/semconv/v1.22.0` package. + The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735) +- The `go.opentelemetry.io/otel/semconv/v1.23.0` package. + The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746) +- The `go.opentelemetry.io/otel/semconv/v1.23.1` package. + The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749) +- The `go.opentelemetry.io/otel/semconv/v1.24.0` package. + The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770) +- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733) +- Experimental cardinality limiting is added to the metric SDK. + See [metric documentation](./sdk/metric/EXPERIMENTAL.md#cardinality-limit) for more information about this feature and how to enable it. (#4457) +- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804) + +### Changed + +- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754) +- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.19.0` version of the OpenTelemetry specification. (#4754) +- Record synchronous measurements when the passed context is canceled instead of dropping in `go.opentelemetry.io/otel/sdk/metric`. + If you do not want to make a measurement when the context is cancelled, you need to handle it yourself (e.g `if ctx.Err() != nil`). (#4671) +- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722) +- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721) +- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743) +- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774) +- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775) +- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818) +- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804) + +### Fixed + +- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755) +- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756) +- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. 
(#4772)
+- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
+- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that require escaping during propagation. (#4804)
+- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
+
 ## [1.21.0/0.44.0] 2023-11-16
 
 ### Removed
@@ -2735,7 +2775,8 @@ It contains api and sdk for trace and meter.
 - CircleCI build CI manifest files.
 - CODEOWNERS file to track owners of this project.
 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.22.0...HEAD
+[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0
 [1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
 [1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
 [1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index 850606ae6..31857a617 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -591,6 +591,26 @@ this.
 
 [^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
 
+### Ignoring context cancellation
+
+OpenTelemetry API implementations need to ignore the cancellation of the contexts that are
+passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
+Recording methods should not return an error describing the cancellation state of the context
+when they complete, nor should they abort any work.
+
+This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for
+the method. In that case the context cancellation can be used for the timeout with the
+restriction that this behavior is documented for the method. Otherwise, timeouts
+are expected to be handled by the user calling the API, not the implementation.
+
+Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method
+of a provider. It is assumed the context passed from a user is not used for this purpose.
+
+Outside of the direct recording of telemetry from the API (e.g. exporting telemetry,
+force flushing telemetry, shutting down a signal provider) the context cancellation
+should be honored. This means all work done on behalf of the user provided context
+should be canceled.
+
 ## Approvers and Maintainers
 
 ### Approvers
@@ -610,6 +630,7 @@ this.
 
 ### Emeritus
 
+- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
 - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
 - [Josh MacDonald](https://github.com/jmacd), LightStep
 
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index 2c5b0cc28..44e1bfc9b 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -66,7 +66,7 @@ are made for those systems currently.
 
 ## Getting Started
 
-You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/).
+You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/).
OpenTelemetry's goal is to provide a single set of APIs to capture distributed traces and metrics from your application and send them to an observability diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index 82ce3ee46..d2691d0bd 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -123,12 +123,12 @@ Once verified be sure to [make a release for the `contrib` repository](https://g ### Website Documentation -Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/instrumentation/go]. +Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go]. Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. [OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions -[Go instrumentation documentation]: https://opentelemetry.io/docs/instrumentation/go/ -[content/en/docs/instrumentation/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/go +[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/ +[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go ### Demo Repository diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index 9f9303d4f..7e6765b06 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -279,52 +279,75 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S position-- kvs[offset], kvs[position] = kvs[position], kvs[offset] } + kvs = kvs[position:] + if filter != nil { - return filterSet(kvs[position:], filter) - } - return Set{ - equivalent: computeDistinct(kvs[position:]), - }, nil -} - -// filterSet reorders kvs so that included keys are contiguous at the end of -// the slice, while excluded keys precede the included keys. -func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { - var excluded []KeyValue - - // Move attributes that do not match the filter so they're adjacent before - // calling computeDistinct(). - distinctPosition := len(kvs) - - // Swap indistinct keys forward and distinct keys toward the - // end of the slice. - offset := len(kvs) - 1 - for ; offset >= 0; offset-- { - if filter(kvs[offset]) { - distinctPosition-- - kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset] - continue + if div := filteredToFront(kvs, filter); div != 0 { + return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] } } - excluded = kvs[:distinctPosition] + return Set{equivalent: computeDistinct(kvs)}, nil +} - return Set{ - equivalent: computeDistinct(kvs[distinctPosition:]), - }, excluded +// filteredToFront filters slice in-place using keep function. All KeyValues that need to +// be removed are moved to the front. All KeyValues that need to be kept are +// moved (in-order) to the back. The index for the first KeyValue to be kept is +// returned. +func filteredToFront(slice []KeyValue, keep Filter) int { + n := len(slice) + j := n + for i := n - 1; i >= 0; i-- { + if keep(slice[i]) { + j-- + slice[i], slice[j] = slice[j], slice[i] + } + } + return j } // Filter returns a filtered copy of this Set. 
See the documentation for // NewSetWithSortableFiltered for more details. func (l *Set) Filter(re Filter) (Set, []KeyValue) { if re == nil { - return Set{ - equivalent: l.equivalent, - }, nil + return *l, nil } - // Note: This could be refactored to avoid the temporary slice - // allocation, if it proves to be expensive. - return filterSet(l.ToSlice(), re) + // Iterate in reverse to the first attribute that will be filtered out. + n := l.Len() + first := n - 1 + for ; first >= 0; first-- { + kv, _ := l.Get(first) + if !re(kv) { + break + } + } + + // No attributes will be dropped, return the immutable Set l and nil. + if first < 0 { + return *l, nil + } + + // Copy now that we know we need to return a modified set. + // + // Do not do this in-place on the underlying storage of *Set l. Sets are + // immutable and filtering should not change this. + slice := l.ToSlice() + + // Don't re-iterate the slice if only slice[0] is filtered. + if first == 0 { + // It is safe to assume len(slice) >= 1 given we found at least one + // attribute above that needs to be filtered out. + return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] + } + + // Move the filtered slice[first] to the front (preserving order). + kv := slice[first] + copy(slice[1:first+1], slice[:first]) + slice[0] = kv + + // Do not re-evaluate re(slice[first+1:]). + div := filteredToFront(slice[1:first+1], re) + 1 + return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] } // computeDistinct returns a Distinct using either the fixed- or diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 84532cb1d..7d27cf77d 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -18,7 +18,6 @@ import ( "errors" "fmt" "net/url" - "regexp" "strings" "go.opentelemetry.io/otel/internal/baggage" @@ -32,16 +31,6 @@ const ( listDelimiter = "," keyValueDelimiter = "=" propertyDelimiter = ";" - - keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)` - valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)` - keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*` -) - -var ( - keyRe = regexp.MustCompile(`^` + keyDef + `$`) - valueRe = regexp.MustCompile(`^` + valueDef + `$`) - propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`) ) var ( @@ -67,7 +56,7 @@ type Property struct { // // If key is invalid, an error will be returned. func NewKeyProperty(key string) (Property, error) { - if !keyRe.MatchString(key) { + if !validateKey(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) } @@ -77,14 +66,29 @@ func NewKeyProperty(key string) (Property, error) { // NewKeyValueProperty returns a new Property for key with value. // -// If key or value are invalid, an error will be returned. +// The passed key must be compliant with W3C Baggage specification. +// The passed value must be percent-encoded as defined in W3C Baggage specification. +// +// Notice: Consider using [NewKeyValuePropertyRaw] instead +// that does not require percent-encoding of the value.
func NewKeyValueProperty(key, value string) (Property, error) { - if !keyRe.MatchString(key) { - return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) - } - if !valueRe.MatchString(value) { + if !validateValue(value) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) } + decodedValue, err := url.PathUnescape(value) + if err != nil { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + return NewKeyValuePropertyRaw(key, decodedValue) +} + +// NewKeyValuePropertyRaw returns a new Property for key with value. +// +// The passed key must be compliant with W3C Baggage specification. +func NewKeyValuePropertyRaw(key, value string) (Property, error) { + if !validateKey(key) { + return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) + } p := Property{ key: key, @@ -106,20 +110,11 @@ func parseProperty(property string) (Property, error) { return newInvalidProperty(), nil } - match := propertyRe.FindStringSubmatch(property) - if len(match) != 4 { + p, ok := parsePropertyInternal(property) + if !ok { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property) } - var p Property - if match[1] != "" { - p.key = match[1] - } else { - p.key = match[2] - p.value = match[3] - p.hasValue = true - } - return p, nil } @@ -130,12 +125,9 @@ func (p Property) validate() error { return fmt.Errorf("invalid property: %w", err) } - if !keyRe.MatchString(p.key) { + if !validateKey(p.key) { return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) } - if p.hasValue && !valueRe.MatchString(p.value) { - return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value)) - } if !p.hasValue && p.value != "" { return errFunc(errors.New("inconsistent value")) } @@ -154,11 +146,11 @@ func (p Property) Value() (string, bool) { return p.value, p.hasValue } -// String encodes Property into a string compliant with the W3C Baggage +// String encodes Property into a header string compliant with the W3C Baggage // specification. func (p Property) String() string { if p.hasValue { - return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value) + return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value)) } return p.key } @@ -218,7 +210,7 @@ func (p properties) validate() error { return nil } -// String encodes properties into a string compliant with the W3C Baggage +// String encodes properties into a header string compliant with the W3C Baggage // specification. func (p properties) String() string { props := make([]string, len(p)) @@ -240,11 +232,28 @@ type Member struct { hasData bool } -// NewMember returns a new Member from the passed arguments. The key will be -// used directly while the value will be url decoded after validation. An error -// is returned if the created Member would be invalid according to the W3C -// Baggage specification. +// NewMember returns a new Member from the passed arguments. +// +// The passed key must be compliant with W3C Baggage specification. +// The passed value must be percent-encoded as defined in W3C Baggage specification. +// +// Notice: Consider using [NewMemberRaw] instead +// that does not require percent-encoding of the value.
func NewMember(key, value string, props ...Property) (Member, error) { + if !validateValue(value) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + decodedValue, err := url.PathUnescape(value) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + return NewMemberRaw(key, decodedValue, props...) +} + +// NewMemberRaw returns a new Member from the passed arguments. +// +// The passed key must be compliant with W3C Baggage specification. +func NewMemberRaw(key, value string, props ...Property) (Member, error) { m := Member{ key: key, value: value, @@ -254,11 +263,6 @@ func NewMember(key, value string, props ...Property) (Member, error) { if err := m.validate(); err != nil { return newInvalidMember(), err } - decodedValue, err := url.PathUnescape(value) - if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) - } - m.value = decodedValue return m, nil } @@ -274,11 +278,7 @@ func parseMember(member string) (Member, error) { return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n) } - var ( - key, value string - props properties - ) - + var props properties keyValue, properties, found := strings.Cut(member, propertyDelimiter) if found { // Parse the member properties. @@ -299,36 +299,34 @@ func parseMember(member string) (Member, error) { } // "Leading and trailing whitespaces are allowed but MUST be trimmed // when converting the header into a data structure." - key = strings.TrimSpace(k) - var err error - value, err = url.PathUnescape(strings.TrimSpace(v)) - if err != nil { - return newInvalidMember(), fmt.Errorf("%w: %q", err, value) - } - if !keyRe.MatchString(key) { + key := strings.TrimSpace(k) + if !validateKey(key) { return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) } - if !valueRe.MatchString(value) { - return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + + val := strings.TrimSpace(v) + if !validateValue(val) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v) } + // Decode a percent-encoded value. + value, err := url.PathUnescape(val) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err) + } return Member{key: key, value: value, properties: props, hasData: true}, nil } // validate ensures m conforms to the W3C Baggage specification. -// A key is just an ASCII string, but a value must be URL encoded UTF-8, -// returning an error otherwise. +// A key must be an ASCII string, returning an error otherwise. func (m Member) validate() error { if !m.hasData { return fmt.Errorf("%w: %q", errInvalidMember, m) } - if !keyRe.MatchString(m.key) { + if !validateKey(m.key) { return fmt.Errorf("%w: %q", errInvalidKey, m.key) } - if !valueRe.MatchString(m.value) { - return fmt.Errorf("%w: %q", errInvalidValue, m.value) - } return m.properties.validate() } @@ -341,11 +339,13 @@ func (m Member) Value() string { return m.value } // Properties returns a copy of the Member properties. func (m Member) Properties() []Property { return m.properties.Copy() } -// String encodes Member into a string compliant with the W3C Baggage +// String encodes Member into a header string compliant with the W3C Baggage // specification. func (m Member) String() string { - // A key is just an ASCII string, but a value is URL encoded UTF-8. - s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value)) + // A key is just an ASCII string.
A value is restricted to be + // US-ASCII characters excluding CTLs, whitespace, + // DQUOTE, comma, semicolon, and backslash. + s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value)) if len(m.properties) > 0 { s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) } @@ -536,9 +536,8 @@ func (b Baggage) Len() int { return len(b.list) } -// String encodes Baggage into a string compliant with the W3C Baggage -// specification. The returned string will be invalid if the Baggage contains -// any invalid list-members. +// String encodes Baggage into a header string compliant with the W3C Baggage +// specification. func (b Baggage) String() string { members := make([]string, 0, len(b.list)) for k, v := range b.list { @@ -550,3 +549,196 @@ func (b Baggage) String() string { } return strings.Join(members, listDelimiter) } + +// parsePropertyInternal attempts to decode a Property from the passed string. +// It follows the spec at https://www.w3.org/TR/baggage/#definition. +func parsePropertyInternal(s string) (p Property, ok bool) { + // For the entire function we will use " key = value " as an example. + // Attempting to parse the key. + // First skip spaces at the beginning "< >key = value " (they could be empty). + index := skipSpace(s, 0) + + // Parse the key: " <key>= value ". + keyStart := index + keyEnd := index + for _, c := range s[keyStart:] { + if !validateKeyChar(c) { + break + } + keyEnd++ + } + + // If we couldn't find any valid key character, + // it means the key is either empty or invalid. + if keyStart == keyEnd { + return + } + + // Skip spaces after the key: " key< >= value ". + index = skipSpace(s, keyEnd) + + if index == len(s) { + // A key can have no value, like: " key ". + ok = true + p.key = s[keyStart:keyEnd] + return + } + + // If we have not reached the end and we can't find the '=' delimiter, + // it means the property is invalid. + if s[index] != keyValueDelimiter[0] { + return + } + + // Attempting to parse the value. + // Match: " key =< >value ". + index = skipSpace(s, index+1) + + // Match the value string: " key = <value> ". + // A valid property can be: " key =". + // Therefore, we don't have to check if the value is empty. + valueStart := index + valueEnd := index + for _, c := range s[valueStart:] { + if !validateValueChar(c) { + break + } + valueEnd++ + } + + // Skip all trailing whitespaces: " key = value< >". + index = skipSpace(s, valueEnd) + + // If after looking for the value and skipping whitespaces + // we have not reached the end, it means the property is + // invalid, something like: " key = value value1". + if index != len(s) { + return + } + + // Decode a percent-encoded value.
+ value, err := url.PathUnescape(s[valueStart:valueEnd]) + if err != nil { + return + } + + ok = true + p.key = s[keyStart:keyEnd] + p.hasValue = true + + p.value = value + return +} + +func skipSpace(s string, offset int) int { + i := offset + for ; i < len(s); i++ { + c := s[i] + if c != ' ' && c != '\t' { + break + } + } + return i +} + +func validateKey(s string) bool { + if len(s) == 0 { + return false + } + + for _, c := range s { + if !validateKeyChar(c) { + return false + } + } + + return true +} + +func validateKeyChar(c int32) bool { + return (c >= 0x23 && c <= 0x27) || + (c >= 0x30 && c <= 0x39) || + (c >= 0x41 && c <= 0x5a) || + (c >= 0x5e && c <= 0x7a) || + c == 0x21 || + c == 0x2a || + c == 0x2b || + c == 0x2d || + c == 0x2e || + c == 0x7c || + c == 0x7e +} + +func validateValue(s string) bool { + for _, c := range s { + if !validateValueChar(c) { + return false + } + } + + return true +} + +func validateValueChar(c int32) bool { + return c == 0x21 || + (c >= 0x23 && c <= 0x2b) || + (c >= 0x2d && c <= 0x3a) || + (c >= 0x3c && c <= 0x5b) || + (c >= 0x5d && c <= 0x7e) +} + +// valueEscape escapes the string so it can be safely placed inside a baggage value, +// replacing special characters with %XX sequences as needed. +// +// The implementation is based on: +// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285. +func valueEscape(s string) string { + hexCount := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return s + } + + var buf [64]byte + var t []byte + + required := len(s) + 2*hexCount + if required <= len(buf) { + t = buf[:required] + } else { + t = make([]byte, required) + } + + j := 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(s[i]) { + const upperhex = "0123456789ABCDEF" + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + } else { + t[j] = c + j++ + } + } + + return string(t) +} + +// shouldEscape returns true if the specified byte should be escaped when +// appearing in a baggage value string. +func shouldEscape(c byte) bool { + if c == '%' { + // The percent character must be encoded so that percent-encoding can work. + return true + } + return !validateValueChar(int32(c)) +} diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go index daa36c89d..36d7c24e8 100644 --- a/vendor/go.opentelemetry.io/otel/doc.go +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -22,7 +22,7 @@ transmitted anywhere. An implementation of the OpenTelemetry SDK, like the default SDK implementation (go.opentelemetry.io/otel/sdk), and associated exporters are used to process and transport this data. -To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/. +To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/. To read more about tracing, see go.opentelemetry.io/otel/trace. 
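Taken together, the baggage.go changes above replace the regexp-based validation with hand-rolled `validateKey`/`validateValue` checks and split construction into an encoded form (`NewMember`, which percent-decodes its value) and a raw form (`NewMemberRaw`). A small usage sketch, assuming the vendored `go.opentelemetry.io/otel/baggage` package as patched here; the expected outputs in the comments follow from `valueEscape` percent-encoding the space and semicolon:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// NewMemberRaw takes the value as-is; String() renders the W3C header
	// form, percent-encoding characters such as space and semicolon.
	m, err := baggage.NewMemberRaw("userId", "alice smith;42")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.String()) // expected: userId=alice%20smith%3B42

	// NewMember still accepts a percent-encoded value and decodes it.
	m2, err := baggage.NewMember("userId", "alice%20smith%3B42")
	if err != nil {
		panic(err)
	}
	fmt.Println(m2.Value()) // expected: alice smith;42
}
```

The raw/encoded split keeps `Member.Value` free of encoding concerns while `Member.String` stays spec-compliant on the wire.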
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go index 75a8f3435..63e5d6222 100644 --- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go +++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go @@ -18,7 +18,7 @@ import ( "context" "encoding/hex" "fmt" - "regexp" + "strings" "go.opentelemetry.io/otel/trace" ) @@ -28,6 +28,7 @@ const ( maxVersion = 254 traceparentHeader = "traceparent" tracestateHeader = "tracestate" + delimiter = "-" ) // TraceContext is a propagator that supports the W3C Trace Context format @@ -41,8 +42,8 @@ const ( type TraceContext struct{} var ( - _ TextMapPropagator = TraceContext{} - traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceID>[a-f0-9]{32})-(?P<spanID>[a-f0-9]{16})-(?P<flags>[a-f0-9]{2})(?:-.*)?$") + _ TextMapPropagator = TraceContext{} + versionPart = fmt.Sprintf("%.2X", supportedVersion) ) // Inject sets tracecontext from the Context into the carrier. @@ -59,12 +60,19 @@ func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { // Clear all flags other than the trace-context supported sampling bit. flags := sc.TraceFlags() & trace.FlagsSampled - h := fmt.Sprintf("%.2x-%s-%s-%s", - supportedVersion, - sc.TraceID(), - sc.SpanID(), - flags) - carrier.Set(traceparentHeader, h) + var sb strings.Builder + sb.Grow(2 + 32 + 16 + 2 + 3) + _, _ = sb.WriteString(versionPart) + traceID := sc.TraceID() + spanID := sc.SpanID() + flagByte := [1]byte{byte(flags)} + var buf [32]byte + for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} { + _ = sb.WriteByte(delimiter[0]) + n := hex.Encode(buf[:], src) + _, _ = sb.Write(buf[:n]) + } + carrier.Set(traceparentHeader, sb.String()) } // Extract reads tracecontext from the carrier into a returned Context.
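The rewritten `Inject` above drops the `fmt.Sprintf` call in favor of a `strings.Builder` plus `hex.Encode`, building the `version-traceid-spanid-flags` header by hand. The following standalone sketch mirrors that formatting technique; `buildTraceparent` is an illustrative stand-in, not the vendored method:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// buildTraceparent joins version, trace-id, span-id and flags with '-'
// as lower-case hex, mirroring the builder-based Inject path above.
func buildTraceparent(traceID [16]byte, spanID [8]byte, flags byte) string {
	var sb strings.Builder
	sb.Grow(2 + 32 + 16 + 2 + 3) // hex fields plus three '-' delimiters
	sb.WriteString(fmt.Sprintf("%.2x", 0)) // supported version 00
	var buf [32]byte
	for _, src := range [][]byte{traceID[:], spanID[:], {flags}} {
		sb.WriteByte('-')
		n := hex.Encode(buf[:], src)
		sb.Write(buf[:n])
	}
	return sb.String()
}

func main() {
	traceID := [16]byte{0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36}
	spanID := [8]byte{0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7}
	fmt.Println(buildTraceparent(traceID, spanID, 0x01))
	// 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
}
```

Reusing one 32-byte buffer for all three `hex.Encode` calls avoids the per-call allocations that the old `Sprintf` formatting incurred.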
@@ -86,21 +94,8 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { return trace.SpanContext{} } - matches := traceCtxRegExp.FindStringSubmatch(h) - - if len(matches) == 0 { - return trace.SpanContext{} - } - - if len(matches) < 5 { // four subgroups plus the overall match - return trace.SpanContext{} - } - - if len(matches[1]) != 2 { - return trace.SpanContext{} - } - ver, err := hex.DecodeString(matches[1]) - if err != nil { + var ver [1]byte + if !extractPart(ver[:], &h, 2) { return trace.SpanContext{} } version := int(ver[0]) @@ -108,36 +103,24 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { return trace.SpanContext{} } - if version == 0 && len(matches) != 5 { // four subgroups plus the overall match - return trace.SpanContext{} - } - - if len(matches[2]) != 32 { - return trace.SpanContext{} - } - var scc trace.SpanContextConfig - - scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32]) - if err != nil { + if !extractPart(scc.TraceID[:], &h, 32) { + return trace.SpanContext{} + } + if !extractPart(scc.SpanID[:], &h, 16) { return trace.SpanContext{} } - if len(matches[3]) != 16 { + var opts [1]byte + if !extractPart(opts[:], &h, 2) { return trace.SpanContext{} } - scc.SpanID, err = trace.SpanIDFromHex(matches[3]) - if err != nil { + if version == 0 && (h != "" || opts[0] > 2) { + // version 0 does not allow extra data after the flags field + // and does not allow flag values beyond the defined bits return trace.SpanContext{} } - if len(matches[4]) != 2 { - return trace.SpanContext{} - } - opts, err := hex.DecodeString(matches[4]) - if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) { - return trace.SpanContext{} - } // Clear all flags other than the trace-context supported sampling bit. scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled @@ -155,6 +138,29 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { return sc } +// upperHex reports whether v contains upper-case hexadecimal characters. +func upperHex(v string) bool { + for _, c := range v { + if c >= 'A' && c <= 'F' { + return true + } + } + return false +} + +func extractPart(dst []byte, h *string, n int) bool { + part, left, _ := strings.Cut(*h, delimiter) + *h = left + // hex.Decode accepts upper-case characters, which traceparent does not allow, so exclude them explicitly. + if len(part) != n || upperHex(part) { + return false + } + if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 { + return false + } + return true +} + // Fields returns the keys whose values are set with Inject. func (tc TraceContext) Fields() []string { return []string{traceparentHeader, tracestateHeader} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go new file mode 100644 index 000000000..67d1d4c44 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/attribute_group.go @@ -0,0 +1,1209 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
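Stepping back to the trace_context.go hunk above for a moment: the extract side replaces the traceparent regular expression with `extractPart`, which cuts one dash-delimited field at a time via `strings.Cut`, enforces exact length and lower-case hex (via `upperHex`), and decodes into the caller's buffer. A rough standalone equivalent of that parsing technique; `extractField` is a hypothetical stand-in for the vendored helper:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"strings"
)

// extractField cuts the next '-' delimited field from *h, requires the
// exact length and lower-case hex (ContainsAny plays the role of the
// vendored upperHex check), and decodes it into dst.
func extractField(dst []byte, h *string, n int) bool {
	part, rest, _ := strings.Cut(*h, "-")
	*h = rest
	if len(part) != n || strings.ContainsAny(part, "ABCDEF") {
		return false
	}
	p, err := hex.Decode(dst, []byte(part))
	return err == nil && p == n/2
}

func main() {
	h := "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"
	var ver [1]byte
	var traceID [16]byte
	var spanID [8]byte
	var flags [1]byte
	ok := extractField(ver[:], &h, 2) &&
		extractField(traceID[:], &h, 32) &&
		extractField(spanID[:], &h, 16) &&
		extractField(flags[:], &h, 2)
	fmt.Println(ok, ver, flags, h == "") // true [0] [1] true
}
```

Consuming the header field by field also makes the version 0 "no trailing data" check above a simple `h != ""` comparison.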
+ +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// Describes HTTP attributes. +const ( + // HTTPMethodKey is the attribute Key conforming to the "http.method" + // semantic conventions. It represents the hTTP request method. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + + // HTTPStatusCodeKey is the attribute Key conforming to the + // "http.status_code" semantic conventions. It represents the [HTTP + // response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: ConditionallyRequired (If and only if one was + // received/sent.) + // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") +) + +// HTTPMethod returns an attribute KeyValue conforming to the "http.method" +// semantic conventions. It represents the hTTP request method. +func HTTPMethod(val string) attribute.KeyValue { + return HTTPMethodKey.String(val) +} + +// HTTPStatusCode returns an attribute KeyValue conforming to the +// "http.status_code" semantic conventions. It represents the [HTTP response +// status code](https://tools.ietf.org/html/rfc7231#section-6). +func HTTPStatusCode(val int) attribute.KeyValue { + return HTTPStatusCodeKey.Int(val) +} + +// HTTP Server spans attributes +const ( + // HTTPSchemeKey is the attribute Key conforming to the "http.scheme" + // semantic conventions. It represents the URI scheme identifying the used + // protocol. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + + // HTTPRouteKey is the attribute Key conforming to the "http.route" + // semantic conventions. It represents the matched route (path template in + // the format used by the respective server framework). See note below + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's available) + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and + // the URI path can NOT substitute it. + // SHOULD include the [application + // root](/specification/trace/semantic_conventions/http.md#http-server-definitions) + // if there is one. + HTTPRouteKey = attribute.Key("http.route") +) + +// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme" +// semantic conventions. It represents the URI scheme identifying the used +// protocol. +func HTTPScheme(val string) attribute.KeyValue { + return HTTPSchemeKey.String(val) +} + +// HTTPRoute returns an attribute KeyValue conforming to the "http.route" +// semantic conventions. It represents the matched route (path template in the +// format used by the respective server framework). See note below +func HTTPRoute(val string) attribute.KeyValue { + return HTTPRouteKey.String(val) +} + +// Attributes for Events represented using Log Records. +const ( + // EventNameKey is the attribute Key conforming to the "event.name" + // semantic conventions. It represents the name identifies the event. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'click', 'exception' + EventNameKey = attribute.Key("event.name") + + // EventDomainKey is the attribute Key conforming to the "event.domain" + // semantic conventions. It represents the domain identifies the business + // context for the events. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: Events across different domains may have same `event.name`, yet be + // unrelated events. + EventDomainKey = attribute.Key("event.domain") +) + +var ( + // Events from browser apps + EventDomainBrowser = EventDomainKey.String("browser") + // Events from mobile apps + EventDomainDevice = EventDomainKey.String("device") + // Events from Kubernetes + EventDomainK8S = EventDomainKey.String("k8s") +) + +// EventName returns an attribute KeyValue conforming to the "event.name" +// semantic conventions. It represents the name identifies the event. +func EventName(val string) attribute.KeyValue { + return EventNameKey.String(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetTransportKey is the attribute Key conforming to the "net.transport" + // semantic conventions. It represents the transport protocol used. See + // note below. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + + // NetProtocolNameKey is the attribute Key conforming to the + // "net.protocol.name" semantic conventions. It represents the application + // layer protocol used. The value SHOULD be normalized to lowercase. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetProtocolNameKey = attribute.Key("net.protocol.name") + + // NetProtocolVersionKey is the attribute Key conforming to the + // "net.protocol.version" semantic conventions. It represents the version + // of the application layer protocol used. See note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.protocol.version` refers to the version of the protocol used + // and might be different from the protocol client's version. If the HTTP + // client used has a version of `0.27.2`, but sends HTTP version `1.1`, + // this attribute should be set to `1.1`. + NetProtocolVersionKey = attribute.Key("net.protocol.version") + + // NetSockPeerNameKey is the attribute Key conforming to the + // "net.sock.peer.name" semantic conventions. It represents the remote + // socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from + // `net.peer.name` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + + // NetSockPeerAddrKey is the attribute Key conforming to the + // "net.sock.peer.addr" semantic conventions. It represents the remote + // socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, + // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + + // NetSockPeerPortKey is the attribute Key conforming to the + // "net.sock.peer.port" semantic conventions. It represents the remote + // socket peer port. 
+ // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + + // NetSockFamilyKey is the attribute Key conforming to the + // "net.sock.family" semantic conventions. It represents the protocol + // [address + // family](https://man7.org/linux/man-pages/man7/address_families.7.html) + // which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if + // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers + // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + + // NetPeerNameKey is the attribute Key conforming to the "net.peer.name" + // semantic conventions. It represents the logical remote hostname, see + // note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an + // extra DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + + // NetPeerPortKey is the attribute Key conforming to the "net.peer.port" + // semantic conventions. It represents the logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + + // NetHostNameKey is the attribute Key conforming to the "net.host.name" + // semantic conventions. It represents the logical local hostname or + // similar, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + + // NetHostPortKey is the attribute Key conforming to the "net.host.port" + // semantic conventions. It represents the logical local port number, + // preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + + // NetSockHostAddrKey is the attribute Key conforming to the + // "net.sock.host.addr" semantic conventions. It represents the local + // socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + + // NetSockHostPortKey is the attribute Key conforming to the + // "net.sock.host.port" semantic conventions. It represents the local + // socket port number. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If defined for the address + // family and if different than `net.host.port` and if `net.sock.host.addr` + // is set. In other cases, it is still recommended to set this.) + // Stability: stable + // Examples: 35555 + NetSockHostPortKey = attribute.Key("net.sock.host.port") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe. 
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +// NetProtocolName returns an attribute KeyValue conforming to the +// "net.protocol.name" semantic conventions. It represents the application +// layer protocol used. The value SHOULD be normalized to lowercase. +func NetProtocolName(val string) attribute.KeyValue { + return NetProtocolNameKey.String(val) +} + +// NetProtocolVersion returns an attribute KeyValue conforming to the +// "net.protocol.version" semantic conventions. It represents the version of +// the application layer protocol used. See note below. +func NetProtocolVersion(val string) attribute.KeyValue { + return NetProtocolVersionKey.String(val) +} + +// NetSockPeerName returns an attribute KeyValue conforming to the +// "net.sock.peer.name" semantic conventions. It represents the remote socket +// peer name. +func NetSockPeerName(val string) attribute.KeyValue { + return NetSockPeerNameKey.String(val) +} + +// NetSockPeerAddr returns an attribute KeyValue conforming to the +// "net.sock.peer.addr" semantic conventions. It represents the remote socket +// peer address: IPv4 or IPv6 for internet protocols, path for local +// communication, +// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html). +func NetSockPeerAddr(val string) attribute.KeyValue { + return NetSockPeerAddrKey.String(val) +} + +// NetSockPeerPort returns an attribute KeyValue conforming to the +// "net.sock.peer.port" semantic conventions. It represents the remote socket +// peer port. +func NetSockPeerPort(val int) attribute.KeyValue { + return NetSockPeerPortKey.Int(val) +} + +// NetPeerName returns an attribute KeyValue conforming to the +// "net.peer.name" semantic conventions. It represents the logical remote +// hostname, see note below. +func NetPeerName(val string) attribute.KeyValue { + return NetPeerNameKey.String(val) +} + +// NetPeerPort returns an attribute KeyValue conforming to the +// "net.peer.port" semantic conventions. It represents the logical remote port +// number +func NetPeerPort(val int) attribute.KeyValue { + return NetPeerPortKey.Int(val) +} + +// NetHostName returns an attribute KeyValue conforming to the +// "net.host.name" semantic conventions. It represents the logical local +// hostname or similar, see note below. +func NetHostName(val string) attribute.KeyValue { + return NetHostNameKey.String(val) +} + +// NetHostPort returns an attribute KeyValue conforming to the +// "net.host.port" semantic conventions. It represents the logical local port +// number, preferably the one that the peer used to connect +func NetHostPort(val int) attribute.KeyValue { + return NetHostPortKey.Int(val) +} + +// NetSockHostAddr returns an attribute KeyValue conforming to the +// "net.sock.host.addr" semantic conventions. It represents the local socket +// address. Useful in case of a multi-IP host. +func NetSockHostAddr(val string) attribute.KeyValue { + return NetSockHostAddrKey.String(val) +} + +// NetSockHostPort returns an attribute KeyValue conforming to the +// "net.sock.host.port" semantic conventions. 
It represents the local socket +// port number. +func NetSockHostPort(val int) attribute.KeyValue { + return NetSockHostPortKey.Int(val) +} + +// These attributes may be used for any network related operation. +const ( + // NetHostConnectionTypeKey is the attribute Key conforming to the + // "net.host.connection.type" semantic conventions. It represents the + // internet connection type currently being used by the host. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + + // NetHostConnectionSubtypeKey is the attribute Key conforming to the + // "net.host.connection.subtype" semantic conventions. It represents the + // this describes more details regarding the connection.type. It may be the + // type of cell technology connection, but it could be used for describing + // details about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + + // NetHostCarrierNameKey is the attribute Key conforming to the + // "net.host.carrier.name" semantic conventions. It represents the name of + // the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + + // NetHostCarrierMccKey is the attribute Key conforming to the + // "net.host.carrier.mcc" semantic conventions. It represents the mobile + // carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + + // NetHostCarrierMncKey is the attribute Key conforming to the + // "net.host.carrier.mnc" semantic conventions. It represents the mobile + // carrier network code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + + // NetHostCarrierIccKey is the attribute Key conforming to the + // "net.host.carrier.icc" semantic conventions. It represents the ISO + // 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. 
A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// NetHostCarrierName returns an attribute KeyValue conforming to the +// "net.host.carrier.name" semantic conventions. It represents the name of the +// mobile carrier. +func NetHostCarrierName(val string) attribute.KeyValue { + return NetHostCarrierNameKey.String(val) +} + +// NetHostCarrierMcc returns an attribute KeyValue conforming to the +// "net.host.carrier.mcc" semantic conventions. It represents the mobile +// carrier country code. +func NetHostCarrierMcc(val string) attribute.KeyValue { + return NetHostCarrierMccKey.String(val) +} + +// NetHostCarrierMnc returns an attribute KeyValue conforming to the +// "net.host.carrier.mnc" semantic conventions. It represents the mobile +// carrier network code. +func NetHostCarrierMnc(val string) attribute.KeyValue { + return NetHostCarrierMncKey.String(val) +} + +// NetHostCarrierIcc returns an attribute KeyValue conforming to the +// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1 +// alpha-2 2-character country code associated with the mobile carrier network. +func NetHostCarrierIcc(val string) attribute.KeyValue { + return NetHostCarrierIccKey.String(val) +} + +// Semantic conventions for HTTP client and server Spans. +const ( + // HTTPRequestContentLengthKey is the attribute Key conforming to the + // "http.request_content_length" semantic conventions. It represents the + // size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + + // HTTPResponseContentLengthKey is the attribute Key conforming to the + // "http.response_content_length" semantic conventions. 
It represents the + // size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as + // the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) + // header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +// HTTPRequestContentLength returns an attribute KeyValue conforming to the +// "http.request_content_length" semantic conventions. It represents the size +// of the request payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPRequestContentLength(val int) attribute.KeyValue { + return HTTPRequestContentLengthKey.Int(val) +} + +// HTTPResponseContentLength returns an attribute KeyValue conforming to the +// "http.response_content_length" semantic conventions. It represents the size +// of the response payload body in bytes. This is the number of bytes +// transferred excluding headers and is often, but not always, present as the +// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length) +// header. For requests using transport encoding, this should be the compressed +// size. +func HTTPResponseContentLength(val int) attribute.KeyValue { + return HTTPResponseContentLengthKey.Int(val) +} + +// Semantic convention describing per-message attributes populated on messaging +// spans or links. +const ( + // MessagingMessageIDKey is the attribute Key conforming to the + // "messaging.message.id" semantic conventions. It represents a value used + // by the messaging system as an identifier for the message, represented as + // a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + + // MessagingMessageConversationIDKey is the attribute Key conforming to the + // "messaging.message.conversation_id" semantic conventions. It represents + // the [conversation ID](#conversations) identifying the conversation to + // which the message belongs, represented as a string. Sometimes called + // "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + + // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to + // the "messaging.message.payload_size_bytes" semantic conventions. It + // represents the (uncompressed) size of the message payload in bytes. Also + // use this attribute if it is unknown whether the compressed or + // uncompressed payload size is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + + // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key + // conforming to the "messaging.message.payload_compressed_size_bytes" + // semantic conventions. 
It represents the compressed size of the message + // payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// MessagingMessageID returns an attribute KeyValue conforming to the +// "messaging.message.id" semantic conventions. It represents a value used by +// the messaging system as an identifier for the message, represented as a +// string. +func MessagingMessageID(val string) attribute.KeyValue { + return MessagingMessageIDKey.String(val) +} + +// MessagingMessageConversationID returns an attribute KeyValue conforming +// to the "messaging.message.conversation_id" semantic conventions. It +// represents the [conversation ID](#conversations) identifying the +// conversation to which the message belongs, represented as a string. +// Sometimes called "Correlation ID". +func MessagingMessageConversationID(val string) attribute.KeyValue { + return MessagingMessageConversationIDKey.String(val) +} + +// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming +// to the "messaging.message.payload_size_bytes" semantic conventions. It +// represents the (uncompressed) size of the message payload in bytes. Also use +// this attribute if it is unknown whether the compressed or uncompressed +// payload size is reported. +func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadSizeBytesKey.Int(val) +} + +// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue +// conforming to the "messaging.message.payload_compressed_size_bytes" semantic +// conventions. It represents the compressed size of the message payload in +// bytes. +func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue { + return MessagingMessagePayloadCompressedSizeBytesKey.Int(val) +} + +// Semantic convention for attributes that describe messaging destination on +// broker +const ( + // MessagingDestinationNameKey is the attribute Key conforming to the + // "messaging.destination.name" semantic conventions. It represents the + // message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic + // or other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD + // uniquely identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + + // MessagingDestinationTemplateKey is the attribute Key conforming to the + // "messaging.destination.template" semantic conventions. It represents the + // low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example + // would be a destination name involving a user name or product id. + // Although the destination name in this case is of high cardinality, the + // underlying template is of low cardinality and can be effectively used + // for grouping and aggregation. + MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + + // MessagingDestinationTemporaryKey is the attribute Key conforming to the + // "messaging.destination.temporary" semantic conventions. 
It represents a + // boolean that is true if the message destination is temporary and might + // not exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + + // MessagingDestinationAnonymousKey is the attribute Key conforming to the + // "messaging.destination.anonymous" semantic conventions. It represents a + // boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +// MessagingDestinationName returns an attribute KeyValue conforming to the +// "messaging.destination.name" semantic conventions. It represents the message +// destination name +func MessagingDestinationName(val string) attribute.KeyValue { + return MessagingDestinationNameKey.String(val) +} + +// MessagingDestinationTemplate returns an attribute KeyValue conforming to +// the "messaging.destination.template" semantic conventions. It represents the +// low cardinality representation of the messaging destination name +func MessagingDestinationTemplate(val string) attribute.KeyValue { + return MessagingDestinationTemplateKey.String(val) +} + +// MessagingDestinationTemporary returns an attribute KeyValue conforming to +// the "messaging.destination.temporary" semantic conventions. It represents a +// boolean that is true if the message destination is temporary and might not +// exist anymore after messages are processed. +func MessagingDestinationTemporary(val bool) attribute.KeyValue { + return MessagingDestinationTemporaryKey.Bool(val) +} + +// MessagingDestinationAnonymous returns an attribute KeyValue conforming to +// the "messaging.destination.anonymous" semantic conventions. It represents a +// boolean that is true if the message destination is anonymous (could be +// unnamed or have auto-generated name). +func MessagingDestinationAnonymous(val bool) attribute.KeyValue { + return MessagingDestinationAnonymousKey.Bool(val) +} + +// Semantic convention for attributes that describe messaging source on broker +const ( + // MessagingSourceNameKey is the attribute Key conforming to the + // "messaging.source.name" semantic conventions. It represents the message + // source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or + // other entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely + // identify the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + + // MessagingSourceTemplateKey is the attribute Key conforming to the + // "messaging.source.template" semantic conventions. It represents the low + // cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would + // be a source name involving a user name or product id. Although the + // source name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and + // aggregation. 
+ MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + + // MessagingSourceTemporaryKey is the attribute Key conforming to the + // "messaging.source.temporary" semantic conventions. It represents a + // boolean that is true if the message source is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + + // MessagingSourceAnonymousKey is the attribute Key conforming to the + // "messaging.source.anonymous" semantic conventions. It represents a + // boolean that is true if the message source is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +// MessagingSourceName returns an attribute KeyValue conforming to the +// "messaging.source.name" semantic conventions. It represents the message +// source name +func MessagingSourceName(val string) attribute.KeyValue { + return MessagingSourceNameKey.String(val) +} + +// MessagingSourceTemplate returns an attribute KeyValue conforming to the +// "messaging.source.template" semantic conventions. It represents the low +// cardinality representation of the messaging source name +func MessagingSourceTemplate(val string) attribute.KeyValue { + return MessagingSourceTemplateKey.String(val) +} + +// MessagingSourceTemporary returns an attribute KeyValue conforming to the +// "messaging.source.temporary" semantic conventions. It represents a boolean +// that is true if the message source is temporary and might not exist anymore +// after messages are processed. +func MessagingSourceTemporary(val bool) attribute.KeyValue { + return MessagingSourceTemporaryKey.Bool(val) +} + +// MessagingSourceAnonymous returns an attribute KeyValue conforming to the +// "messaging.source.anonymous" semantic conventions. It represents a boolean +// that is true if the message source is anonymous (could be unnamed or have +// auto-generated name). +func MessagingSourceAnonymous(val bool) attribute.KeyValue { + return MessagingSourceAnonymousKey.Bool(val) +} + +// Attributes for RabbitMQ +const ( + // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key + // conforming to the "messaging.rabbitmq.destination.routing_key" semantic + // conventions. It represents the rabbitMQ message routing key. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If not empty.) + // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key") +) + +// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue +// conforming to the "messaging.rabbitmq.destination.routing_key" semantic +// conventions. It represents the rabbitMQ message routing key. +func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue { + return MessagingRabbitmqDestinationRoutingKeyKey.String(val) +} + +// Attributes for Apache Kafka +const ( + // MessagingKafkaMessageKeyKey is the attribute Key conforming to the + // "messaging.kafka.message.key" semantic conventions. It represents the + // message keys in Kafka are used for grouping alike messages to ensure + // they're processed on the same partition. They differ from + // `messaging.message.id` in that they're not unique. 
If the key is `null`, + // the attribute MUST NOT be set. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to + // be supplied for the attribute. If the key has no unambiguous, canonical + // string form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key") + + // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the + // "messaging.kafka.consumer.group" semantic conventions. It represents the + // name of the Kafka Consumer Group that is handling the message. Only + // applies to consumers, not producers. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group") + + // MessagingKafkaClientIDKey is the attribute Key conforming to the + // "messaging.kafka.client_id" semantic conventions. It represents the + // client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + + // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to + // the "messaging.kafka.destination.partition" semantic conventions. It + // represents the partition the message is sent to. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition") + + // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the + // "messaging.kafka.source.partition" semantic conventions. It represents + // the partition the message is received from. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") + + // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the + // "messaging.kafka.message.offset" semantic conventions. It represents the + // offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + + // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the + // "messaging.kafka.message.tombstone" semantic conventions. It represents + // a boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If value is `true`. When + // missing, the value is assumed to be `false`.) + // Stability: stable + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the +// "messaging.kafka.message.key" semantic conventions. It represents the +// message keys in Kafka are used for grouping alike messages to ensure they're +// processed on the same partition. They differ from `messaging.message.id` in +// that they're not unique. If the key is `null`, the attribute MUST NOT be +// set. 
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaClientID returns an attribute KeyValue conforming to the
+// "messaging.kafka.client_id" semantic conventions. It represents the client
+// ID for the Consumer or Producer that is handling the message.
+func MessagingKafkaClientID(val string) attribute.KeyValue {
+ return MessagingKafkaClientIDKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
+// the "messaging.kafka.source.partition" semantic conventions. It represents
+// the partition the message is received from.
+func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
+ return MessagingKafkaSourcePartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// Attributes for Apache RocketMQ
+const (
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // independent of each other.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqClientIDKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_id" semantic conventions. It represents the
+ // unique identifier for each client.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for a delay message,
+ // which determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that
+ // belong to the same message group are always processed one by one within
+ // the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of the message besides the topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark a message besides the
+ // message ID.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of each other.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+ return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
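+//
+// Example (editorial sketch): the RocketMQ helpers above can be combined
+// when annotating a consumer span; all values below are placeholders:
+//
+//	attrs := []attribute.KeyValue{
+//		MessagingRocketmqNamespace("myNamespace"),
+//		MessagingRocketmqClientGroup("myConsumerGroup"),
+//		MessagingRocketmqMessageTag("tagA"),
+//	}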
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark a message besides the
+// message ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// Describes user-agent attributes.
+const (
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+)
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
new file mode 100644
index 000000000..359c5a696
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.20.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
new file mode 100644
index 000000000..8ac9350d2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/event.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the variant,
+ // which SHOULD be a semantic identifier for a value. If one is
+ // unavailable, a stringified version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means for referring to a value without including the value itself.
+ // This can provide additional context for understanding the meaning behind
+ // a value. For example, the variant `red` may be used for the value
+ // `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the variant,
+// which SHOULD be a semantic identifier for a value. If one is unavailable, a
+// stringified version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It represents the message ID, which MUST be
+ // calculated as two different counters starting from `1`: one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It represents the message ID, which MUST be calculated
+// as two different counters starting from `1`: one for sent messages and one
+// for received messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It SHOULD be set to true if
+ // the exception event is recorded at a point where it is known that the
+ // exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span, if that span is ended while the exception is still logically "in
+ // flight". This may be actually "in flight" in some languages (e.g. if the
+ // exception is passed to a Context manager's `__exit__` method in Python)
+ // but will usually be caught at the point of recording the exception in
+ // most languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown whether it will escape the scope of a span. However, it is
+ // trivial to know that an exception will escape, if one checks for an
+ // active exception just before ending the span, as done in the [example
+ // above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
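+ //
+ // Example (editorial sketch): per the note above, an instrumentation that
+ // finds an in-flight exception just before ending the span could record,
+ // assuming `span` is a trace.Span:
+ //
+ //	span.SetAttributes(ExceptionEscaped(true))
+ //	span.End()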
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It SHOULD be set to true if the
+// exception event is recorded at a point where it is known that the exception
+// is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
new file mode 100644
index 000000000..09ff4dfdb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/exception.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
new file mode 100644
index 000000000..342aede95
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/http.go
@@ -0,0 +1,21 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+// HTTP scheme attributes.
+var (
+ HTTPSchemeHTTP = HTTPSchemeKey.String("http")
+ HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
new file mode 100644
index 000000000..a2b906742
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/resource.go
@@ -0,0 +1,2071 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is +// running. The `browser.*` attributes MUST be used only for resources that +// represent applications running in a web browser (regardless of whether +// running on a mobile or desktop device). +const ( + // BrowserBrandsKey is the attribute Key conforming to the "browser.brands" + // semantic conventions. It represents the array of brand name and version + // separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + + // BrowserPlatformKey is the attribute Key conforming to the + // "browser.platform" semantic conventions. It represents the platform on + // which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute + // SHOULD be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client + // Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in + // the [`os.type` and `os.name` attributes](./os.md). However, for + // consistency, the values in the `browser.platform` attribute should + // capture the exact value that the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + + // BrowserMobileKey is the attribute Key conforming to the "browser.mobile" + // semantic conventions. It represents a boolean that is true if the + // browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute + // SHOULD be left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + + // BrowserLanguageKey is the attribute Key conforming to the + // "browser.language" semantic conventions. It represents the preferred + // language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// BrowserBrands returns an attribute KeyValue conforming to the +// "browser.brands" semantic conventions. 
It represents the array of brand name +// and version separated by a space +func BrowserBrands(val ...string) attribute.KeyValue { + return BrowserBrandsKey.StringSlice(val) +} + +// BrowserPlatform returns an attribute KeyValue conforming to the +// "browser.platform" semantic conventions. It represents the platform on which +// the browser is running +func BrowserPlatform(val string) attribute.KeyValue { + return BrowserPlatformKey.String(val) +} + +// BrowserMobile returns an attribute KeyValue conforming to the +// "browser.mobile" semantic conventions. It represents a boolean that is true +// if the browser is running on a mobile device +func BrowserMobile(val bool) attribute.KeyValue { + return BrowserMobileKey.Bool(val) +} + +// BrowserLanguage returns an attribute KeyValue conforming to the +// "browser.language" semantic conventions. It represents the preferred +// language of the user using the browser +func BrowserLanguage(val string) attribute.KeyValue { + return BrowserLanguageKey.String(val) +} + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // CloudProviderKey is the attribute Key conforming to the "cloud.provider" + // semantic conventions. It represents the name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + + // CloudAccountIDKey is the attribute Key conforming to the + // "cloud.account.id" semantic conventions. It represents the cloud account + // ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + + // CloudRegionKey is the attribute Key conforming to the "cloud.region" + // semantic conventions. It represents the geographical region the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for + // example [Alibaba Cloud + // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS + // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/), + // [Azure + // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/), + // [Google Cloud regions](https://cloud.google.com/about/locations), or + // [Tencent Cloud + // regions](https://www.tencentcloud.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + + // CloudResourceIDKey is the attribute Key conforming to the + // "cloud.resource_id" semantic conventions. It represents the cloud + // provider-specific native identifier of the monitored cloud resource + // (e.g. 
an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+ // '/subscriptions/SUBSCRIPTION_GUID/resourceGroups/RESOURCE_GROUP/providers/Microsoft.Web/sites/FUNCTION_APP/functions/FUNCTION_NAME'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup, so it may be necessary to set `cloud.resource_id` as
+ // a span attribute instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider. The following well-known definitions MUST be used if you set
+ // this attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function, *not* the function app, having the form
+ // `/subscriptions/SUBSCRIPTION_GUID/resourceGroups/RESOURCE_GROUP/providers/Microsoft.Web/sites/FUNCTION_APP/functions/FUNCTION_NAME`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the
+ // availability zone where the resource is running. Cloud regions often
+ // have multiple, isolated locations known as zones to increase
+ // availability.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
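+ //
+ // Example (editorial sketch): a workload on AWS Lambda would carry the
+ // matching provider/platform pair from the enum values defined below:
+ //
+ //	attrs := []attribute.KeyValue{CloudProviderAWS, CloudPlatformAWSLambda}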
+ CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Heroku Platform as a Service + CloudProviderHeroku = CloudProviderKey.String("heroku") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// CloudAccountID returns an attribute KeyValue conforming to the +// 
"cloud.account.id" semantic conventions. It represents the cloud account ID +// the resource is assigned to. +func CloudAccountID(val string) attribute.KeyValue { + return CloudAccountIDKey.String(val) +} + +// CloudRegion returns an attribute KeyValue conforming to the +// "cloud.region" semantic conventions. It represents the geographical region +// the resource is running. +func CloudRegion(val string) attribute.KeyValue { + return CloudRegionKey.String(val) +} + +// CloudResourceID returns an attribute KeyValue conforming to the +// "cloud.resource_id" semantic conventions. It represents the cloud +// provider-specific native identifier of the monitored cloud resource (e.g. an +// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) +// on AWS, a [fully qualified resource +// ID](https://learn.microsoft.com/en-us/rest/api/resources/resources/get-by-id) +// on Azure, a [full resource +// name](https://cloud.google.com/apis/design/resource_names#full_resource_name) +// on GCP) +func CloudResourceID(val string) attribute.KeyValue { + return CloudResourceIDKey.String(val) +} + +// CloudAvailabilityZone returns an attribute KeyValue conforming to the +// "cloud.availability_zone" semantic conventions. It represents the cloud +// regions often have multiple, isolated locations known as zones to increase +// availability. Availability zone represents the zone where the resource is +// running. +func CloudAvailabilityZone(val string) attribute.KeyValue { + return CloudAvailabilityZoneKey.String(val) +} + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // AWSECSContainerARNKey is the attribute Key conforming to the + // "aws.ecs.container.arn" semantic conventions. It represents the Amazon + // Resource Name (ARN) of an [ECS container + // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + + // AWSECSClusterARNKey is the attribute Key conforming to the + // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an + // [ECS + // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + + // AWSECSLaunchtypeKey is the attribute Key conforming to the + // "aws.ecs.launchtype" semantic conventions. It represents the [launch + // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html) + // for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + + // AWSECSTaskARNKey is the attribute Key conforming to the + // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an + // [ECS task + // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + + // AWSECSTaskFamilyKey is the attribute Key conforming to the + // "aws.ecs.task.family" semantic conventions. It represents the task + // definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + + // AWSECSTaskRevisionKey is the attribute Key conforming to the + // "aws.ecs.task.revision" semantic conventions. It represents the revision + // for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// AWSECSContainerARN returns an attribute KeyValue conforming to the +// "aws.ecs.container.arn" semantic conventions. It represents the Amazon +// Resource Name (ARN) of an [ECS container +// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). +func AWSECSContainerARN(val string) attribute.KeyValue { + return AWSECSContainerARNKey.String(val) +} + +// AWSECSClusterARN returns an attribute KeyValue conforming to the +// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS +// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html). +func AWSECSClusterARN(val string) attribute.KeyValue { + return AWSECSClusterARNKey.String(val) +} + +// AWSECSTaskARN returns an attribute KeyValue conforming to the +// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS +// task +// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html). +func AWSECSTaskARN(val string) attribute.KeyValue { + return AWSECSTaskARNKey.String(val) +} + +// AWSECSTaskFamily returns an attribute KeyValue conforming to the +// "aws.ecs.task.family" semantic conventions. It represents the task +// definition family this task definition is a member of. +func AWSECSTaskFamily(val string) attribute.KeyValue { + return AWSECSTaskFamilyKey.String(val) +} + +// AWSECSTaskRevision returns an attribute KeyValue conforming to the +// "aws.ecs.task.revision" semantic conventions. It represents the revision for +// this task definition. +func AWSECSTaskRevision(val string) attribute.KeyValue { + return AWSECSTaskRevisionKey.String(val) +} + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // AWSEKSClusterARNKey is the attribute Key conforming to the + // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an + // EKS cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// AWSEKSClusterARN returns an attribute KeyValue conforming to the +// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS +// cluster. +func AWSEKSClusterARN(val string) attribute.KeyValue { + return AWSEKSClusterARNKey.String(val) +} + +// Resources specific to Amazon Web Services. 
+const (
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
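+//
+// Example (editorial sketch): the variadic helpers above take one value per
+// log group or stream, e.g. using the documented example ARN:
+//
+//	kv := AWSLogGroupARNs("arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*")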
+func AWSLogStreamARNs(val ...string) attribute.KeyValue { + return AWSLogStreamARNsKey.StringSlice(val) +} + +// Heroku dyno metadata +const ( + // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the + // "heroku.release.creation_timestamp" semantic conventions. It represents + // the time and date the release was created + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2022-10-23T18:00:42Z' + HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp") + + // HerokuReleaseCommitKey is the attribute Key conforming to the + // "heroku.release.commit" semantic conventions. It represents the commit + // hash for the current release + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec' + HerokuReleaseCommitKey = attribute.Key("heroku.release.commit") + + // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id" + // semantic conventions. It represents the unique identifier for the + // application + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da' + HerokuAppIDKey = attribute.Key("heroku.app.id") +) + +// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming +// to the "heroku.release.creation_timestamp" semantic conventions. It +// represents the time and date the release was created +func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue { + return HerokuReleaseCreationTimestampKey.String(val) +} + +// HerokuReleaseCommit returns an attribute KeyValue conforming to the +// "heroku.release.commit" semantic conventions. It represents the commit hash +// for the current release +func HerokuReleaseCommit(val string) attribute.KeyValue { + return HerokuReleaseCommitKey.String(val) +} + +// HerokuAppID returns an attribute KeyValue conforming to the +// "heroku.app.id" semantic conventions. It represents the unique identifier +// for the application +func HerokuAppID(val string) attribute.KeyValue { + return HerokuAppIDKey.String(val) +} + +// A container instance. +const ( + // ContainerNameKey is the attribute Key conforming to the "container.name" + // semantic conventions. It represents the container name used by container + // runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + + // ContainerIDKey is the attribute Key conforming to the "container.id" + // semantic conventions. It represents the container ID. Usually a UUID, as + // for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container-identification). + // The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + + // ContainerRuntimeKey is the attribute Key conforming to the + // "container.runtime" semantic conventions. It represents the container + // runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + + // ContainerImageNameKey is the attribute Key conforming to the + // "container.image.name" semantic conventions. 
It represents the name of + // the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + + // ContainerImageTagKey is the attribute Key conforming to the + // "container.image.tag" semantic conventions. It represents the container + // image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// ContainerName returns an attribute KeyValue conforming to the +// "container.name" semantic conventions. It represents the container name used +// by container runtime. +func ContainerName(val string) attribute.KeyValue { + return ContainerNameKey.String(val) +} + +// ContainerID returns an attribute KeyValue conforming to the +// "container.id" semantic conventions. It represents the container ID. Usually +// a UUID, as for example used to [identify Docker +// containers](https://docs.docker.com/engine/reference/run/#container-identification). +// The UUID might be abbreviated. +func ContainerID(val string) attribute.KeyValue { + return ContainerIDKey.String(val) +} + +// ContainerRuntime returns an attribute KeyValue conforming to the +// "container.runtime" semantic conventions. It represents the container +// runtime managing this container. +func ContainerRuntime(val string) attribute.KeyValue { + return ContainerRuntimeKey.String(val) +} + +// ContainerImageName returns an attribute KeyValue conforming to the +// "container.image.name" semantic conventions. It represents the name of the +// image the container was built on. +func ContainerImageName(val string) attribute.KeyValue { + return ContainerImageNameKey.String(val) +} + +// ContainerImageTag returns an attribute KeyValue conforming to the +// "container.image.tag" semantic conventions. It represents the container +// image tag. +func ContainerImageTag(val string) attribute.KeyValue { + return ContainerImageTagKey.String(val) +} + +// The software deployment. +const ( + // DeploymentEnvironmentKey is the attribute Key conforming to the + // "deployment.environment" semantic conventions. It represents the name of + // the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// DeploymentEnvironment returns an attribute KeyValue conforming to the +// "deployment.environment" semantic conventions. It represents the name of the +// [deployment +// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka +// deployment tier). +func DeploymentEnvironment(val string) attribute.KeyValue { + return DeploymentEnvironmentKey.String(val) +} + +// The device on which the process represented by this resource is running. +const ( + // DeviceIDKey is the attribute Key conforming to the "device.id" semantic + // conventions. It represents a unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values + // outlined below. This value is not an advertising identifier and MUST NOT + // be used as such. 
On iOS (Swift or Objective-C), this value MUST be equal
+	// to the [vendor
+	// identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+	// On Android (Java or Kotlin), this value MUST be equal to the Firebase
+	// Installation ID or a globally unique UUID which is persisted across
+	// sessions in your application. More information on best practices and
+	// exact implementation details can be found
+	// [here](https://developer.android.com/training/articles/user-data-ids).
+	// Caution should be taken when storing personal data or anything which
+	// can identify a user. GDPR and data protection laws may apply; ensure
+	// you do your own due diligence.
+	DeviceIDKey = attribute.Key("device.id")
+
+	// DeviceModelIdentifierKey is the attribute Key conforming to the
+	// "device.model.identifier" semantic conventions. It represents the model
+	// identifier for the device
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'iPhone3,4', 'SM-G920F'
+	// Note: It's recommended this value represents a machine readable version
+	// of the model identifier rather than the market or consumer-friendly name
+	// of the device.
+	DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+	// DeviceModelNameKey is the attribute Key conforming to the
+	// "device.model.name" semantic conventions. It represents the marketing
+	// name for the device model
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+	// Note: It's recommended this value represents a human readable version of
+	// the device model rather than a machine readable alternative.
+	DeviceModelNameKey = attribute.Key("device.model.name")
+
+	// DeviceManufacturerKey is the attribute Key conforming to the
+	// "device.manufacturer" semantic conventions. It represents the name of
+	// the device manufacturer
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Apple', 'Samsung'
+	// Note: The Android OS provides this field via
+	// [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+	// iOS apps SHOULD hardcode the value `Apple`.
+	DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+	return DeviceIDKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+	return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+	return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+	return DeviceManufacturerKey.String(val)
+}
+
+// A serverless instance.
+const (
+	// FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+	// conventions. It represents the name of the single function that this
+	// runtime instance executes.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+	// Note: This is the name of the function as configured/deployed on the
+	// FaaS
+	// platform and is usually different from the name of the callback
+	// function (which may be stored in the
+	// [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
+	// span attributes).
+	//
+	// For some cloud providers, the above definition is ambiguous. The
+	// following
+	// definition of function name MUST be used for this attribute
+	// (and consequently the span name) for the listed cloud
+	// providers/products:
+	//
+	// * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+	// followed by a forward slash followed by the function name (this form
+	// can also be seen in the resource JSON for the function).
+	// This means that a span attribute MUST be used, as an Azure function
+	// app can host multiple functions that would usually share
+	// a TracerProvider (see also the `cloud.resource_id` attribute).
+	FaaSNameKey = attribute.Key("faas.name")
+
+	// FaaSVersionKey is the attribute Key conforming to the "faas.version"
+	// semantic conventions. It represents the immutable version of the
+	// function being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '26', 'pinkfroid-00002'
+	// Note: Depending on the cloud provider and platform, use:
+	//
+	// * **AWS Lambda:** The [function
+	// version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+	// (an integer represented as a decimal string).
+	// * **Google Cloud Run:** The
+	// [revision](https://cloud.google.com/run/docs/managing/revisions)
+	// (i.e., the function name plus the revision suffix).
+	// * **Google Cloud Functions:** The value of the
+	// [`K_REVISION` environment
+	// variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+	// * **Azure Functions:** Not applicable. Do not set this attribute.
+	FaaSVersionKey = attribute.Key("faas.version")
+
+	// FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+	// semantic conventions. It represents the execution environment ID as a
+	// string that will potentially be reused for other invocations to the
+	// same function/function version.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+	// Note: * **AWS Lambda:** Use the (full) log stream name.
+	FaaSInstanceKey = attribute.Key("faas.instance")
+
+	// FaaSMaxMemoryKey is the attribute Key conforming to the
+	// "faas.max_memory" semantic conventions. It represents the amount of
+	// memory available to the serverless function converted to Bytes.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 134217728
+	// Note: It's recommended to set this attribute since, e.g., too little
+	// memory can easily stop a Java AWS Lambda function from working
+	// correctly. On AWS Lambda, the environment variable
+	// `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+	// be multiplied by 1,048,576).
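+	// For example (an illustrative calculation, not part of the upstream
+	// spec text): a Lambda function configured with 128 MiB would see
+	// AWS_LAMBDA_FUNCTION_MEMORY_SIZE=128, so the attribute value becomes
+	// 128 * 1,048,576 = 134,217,728 Bytes, matching the example above.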
+	FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+	return FaaSNameKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+	return FaaSVersionKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string that will potentially be reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+	return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+	return FaaSMaxMemoryKey.Int(val)
+}
+
+// A host is defined as a general computing instance.
+const (
+	// HostIDKey is the attribute Key conforming to the "host.id" semantic
+	// conventions. It represents the unique host ID. For Cloud, this must be
+	// the instance_id assigned by the cloud provider. For non-containerized
+	// systems, this should be the `machine-id`. See the table below for the
+	// sources to use to determine the `machine-id` based on operating system.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+	HostIDKey = attribute.Key("host.id")
+
+	// HostNameKey is the attribute Key conforming to the "host.name" semantic
+	// conventions. It represents the name of the host. On Unix systems, it may
+	// contain what the hostname command returns, or the fully qualified
+	// hostname, or another name specified by the user.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-test'
+	HostNameKey = attribute.Key("host.name")
+
+	// HostTypeKey is the attribute Key conforming to the "host.type" semantic
+	// conventions. It represents the type of host. For Cloud, this must be the
+	// machine type.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'n1-standard-1'
+	HostTypeKey = attribute.Key("host.type")
+
+	// HostArchKey is the attribute Key conforming to the "host.arch" semantic
+	// conventions. It represents the CPU architecture the host system is
+	// running on.
+	//
+	// Type: Enum
+	// RequirementLevel: Optional
+	// Stability: stable
+	HostArchKey = attribute.Key("host.arch")
+
+	// HostImageNameKey is the attribute Key conforming to the
+	// "host.image.name" semantic conventions. It represents the name of the VM
+	// image or OS install the host was instantiated from.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+	HostImageNameKey = attribute.Key("host.image.name")
+
+	// HostImageIDKey is the attribute Key conforming to the "host.image.id"
+	// semantic conventions. It represents the VM image ID. For Cloud, this
+	// value is from the provider.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'ami-07b06b442921831e5'
+	HostImageIDKey = attribute.Key("host.image.id")
+
+	// HostImageVersionKey is the attribute Key conforming to the
+	// "host.image.version" semantic conventions. It represents the version
+	// string of the VM image as defined in [Version
+	// Attributes](README.md#version-attributes).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '0.1'
+	HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+	// AMD64
+	HostArchAMD64 = HostArchKey.String("amd64")
+	// ARM32
+	HostArchARM32 = HostArchKey.String("arm32")
+	// ARM64
+	HostArchARM64 = HostArchKey.String("arm64")
+	// Itanium
+	HostArchIA64 = HostArchKey.String("ia64")
+	// 32-bit PowerPC
+	HostArchPPC32 = HostArchKey.String("ppc32")
+	// 64-bit PowerPC
+	HostArchPPC64 = HostArchKey.String("ppc64")
+	// IBM z/Architecture
+	HostArchS390x = HostArchKey.String("s390x")
+	// 32-bit x86
+	HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+	return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+	return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+	return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+	return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the VM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+	return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+	return HostImageVersionKey.String(val)
+}
+
+// A Kubernetes Cluster.
+const (
+	// K8SClusterNameKey is the attribute Key conforming to the
+	// "k8s.cluster.name" semantic conventions. It represents the name of the
+	// cluster.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-cluster'
+	K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+	return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+	// K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+	// semantic conventions. It represents the name of the Node.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'node-1'
+	K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+	// K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+	// semantic conventions. It represents the UID of the Node.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+	K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+	return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+	return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+	// K8SNamespaceNameKey is the attribute Key conforming to the
+	// "k8s.namespace.name" semantic conventions. It represents the name of the
+	// namespace that the pod is running in.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'default'
+	K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+	return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+	// K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+	// semantic conventions. It represents the UID of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+	// K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+	// semantic conventions. It represents the name of the Pod.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry-pod-autoconf'
+	K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+	return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+	return K8SPodNameKey.String(val)
+}
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+	// K8SContainerNameKey is the attribute Key conforming to the
+	// "k8s.container.name" semantic conventions. It represents the name of the
+	// Container from the Pod specification; it must be unique within a Pod.
+	// The container runtime usually uses a different, globally unique name
+	// (`container.name`).
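+	// For example (illustrative): a Pod whose spec declares a container
+	// named `redis` would report k8s.container.name="redis", while the
+	// container runtime may assign the same container a distinct, globally
+	// unique `container.name`.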
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'redis'
+	K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+	// K8SContainerRestartCountKey is the attribute Key conforming to the
+	// "k8s.container.restart_count" semantic conventions. It represents the
+	// number of times the container was restarted. This attribute can be used
+	// to identify a particular container (running or stopped) within a
+	// container spec.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 0, 2
+	K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+	return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+	return K8SContainerRestartCountKey.Int(val)
+}
+
+// A Kubernetes ReplicaSet object.
+const (
+	// K8SReplicaSetUIDKey is the attribute Key conforming to the
+	// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+	// ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+	// K8SReplicaSetNameKey is the attribute Key conforming to the
+	// "k8s.replicaset.name" semantic conventions. It represents the name of
+	// the ReplicaSet.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+	return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+	return K8SReplicaSetNameKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+	// K8SDeploymentUIDKey is the attribute Key conforming to the
+	// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+	// Deployment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+	// K8SDeploymentNameKey is the attribute Key conforming to the
+	// "k8s.deployment.name" semantic conventions. It represents the name of
+	// the Deployment.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// K8SDeploymentUID returns an attribute KeyValue conforming to the +// "k8s.deployment.uid" semantic conventions. It represents the UID of the +// Deployment. +func K8SDeploymentUID(val string) attribute.KeyValue { + return K8SDeploymentUIDKey.String(val) +} + +// K8SDeploymentName returns an attribute KeyValue conforming to the +// "k8s.deployment.name" semantic conventions. It represents the name of the +// Deployment. +func K8SDeploymentName(val string) attribute.KeyValue { + return K8SDeploymentNameKey.String(val) +} + +// A Kubernetes StatefulSet object. +const ( + // K8SStatefulSetUIDKey is the attribute Key conforming to the + // "k8s.statefulset.uid" semantic conventions. It represents the UID of the + // StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + + // K8SStatefulSetNameKey is the attribute Key conforming to the + // "k8s.statefulset.name" semantic conventions. It represents the name of + // the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// K8SStatefulSetUID returns an attribute KeyValue conforming to the +// "k8s.statefulset.uid" semantic conventions. It represents the UID of the +// StatefulSet. +func K8SStatefulSetUID(val string) attribute.KeyValue { + return K8SStatefulSetUIDKey.String(val) +} + +// K8SStatefulSetName returns an attribute KeyValue conforming to the +// "k8s.statefulset.name" semantic conventions. It represents the name of the +// StatefulSet. +func K8SStatefulSetName(val string) attribute.KeyValue { + return K8SStatefulSetNameKey.String(val) +} + +// A Kubernetes DaemonSet object. +const ( + // K8SDaemonSetUIDKey is the attribute Key conforming to the + // "k8s.daemonset.uid" semantic conventions. It represents the UID of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + + // K8SDaemonSetNameKey is the attribute Key conforming to the + // "k8s.daemonset.name" semantic conventions. It represents the name of the + // DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// K8SDaemonSetUID returns an attribute KeyValue conforming to the +// "k8s.daemonset.uid" semantic conventions. It represents the UID of the +// DaemonSet. +func K8SDaemonSetUID(val string) attribute.KeyValue { + return K8SDaemonSetUIDKey.String(val) +} + +// K8SDaemonSetName returns an attribute KeyValue conforming to the +// "k8s.daemonset.name" semantic conventions. It represents the name of the +// DaemonSet. +func K8SDaemonSetName(val string) attribute.KeyValue { + return K8SDaemonSetNameKey.String(val) +} + +// A Kubernetes Job object. +const ( + // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" + // semantic conventions. It represents the UID of the Job. 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+	// K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+	// semantic conventions. It represents the name of the Job.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+	return K8SJobUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+	return K8SJobNameKey.String(val)
+}
+
+// A Kubernetes CronJob object.
+const (
+	// K8SCronJobUIDKey is the attribute Key conforming to the
+	// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+	// CronJob.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+	K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+	// K8SCronJobNameKey is the attribute Key conforming to the
+	// "k8s.cronjob.name" semantic conventions. It represents the name of the
+	// CronJob.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+	return K8SCronJobUIDKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+	return K8SCronJobNameKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+	// OSTypeKey is the attribute Key conforming to the "os.type" semantic
+	// conventions. It represents the operating system type.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	OSTypeKey = attribute.Key("os.type")
+
+	// OSDescriptionKey is the attribute Key conforming to the "os.description"
+	// semantic conventions. It represents the human readable (not intended to
+	// be parsed) OS version information, as reported by, e.g., the `ver` or
+	// `lsb_release -a` commands.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+	// LTS'
+	OSDescriptionKey = attribute.Key("os.description")
+
+	// OSNameKey is the attribute Key conforming to the "os.name" semantic
+	// conventions. It represents the human readable operating system name.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'iOS', 'Android', 'Ubuntu'
+	OSNameKey = attribute.Key("os.name")
+
+	// OSVersionKey is the attribute Key conforming to the "os.version"
+	// semantic conventions. It represents the version string of the operating
+	// system as defined in [Version
+	// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
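+	// For example (illustrative, not spec text): on many Linux
+	// distributions this value could be taken from the VERSION_ID field of
+	// /etc/os-release, e.g. VERSION_ID="18.04" on Ubuntu 18.04.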
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '14.2.1', '18.04.1'
+	OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+	// Microsoft Windows
+	OSTypeWindows = OSTypeKey.String("windows")
+	// Linux
+	OSTypeLinux = OSTypeKey.String("linux")
+	// Apple Darwin
+	OSTypeDarwin = OSTypeKey.String("darwin")
+	// FreeBSD
+	OSTypeFreeBSD = OSTypeKey.String("freebsd")
+	// NetBSD
+	OSTypeNetBSD = OSTypeKey.String("netbsd")
+	// OpenBSD
+	OSTypeOpenBSD = OSTypeKey.String("openbsd")
+	// DragonFly BSD
+	OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+	// HP-UX (Hewlett Packard Unix)
+	OSTypeHPUX = OSTypeKey.String("hpux")
+	// AIX (Advanced Interactive eXecutive)
+	OSTypeAIX = OSTypeKey.String("aix")
+	// SunOS, Oracle Solaris
+	OSTypeSolaris = OSTypeKey.String("solaris")
+	// IBM z/OS
+	OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, as reported by, e.g., the
+// `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+	return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+	return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+	return OSVersionKey.String(val)
+}
+
+// An operating system process.
+const (
+	// ProcessPIDKey is the attribute Key conforming to the "process.pid"
+	// semantic conventions. It represents the process identifier (PID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 1234
+	ProcessPIDKey = attribute.Key("process.pid")
+
+	// ProcessParentPIDKey is the attribute Key conforming to the
+	// "process.parent_pid" semantic conventions. It represents the parent
+	// Process identifier (PID).
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 111
+	ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+	// ProcessExecutableNameKey is the attribute Key conforming to the
+	// "process.executable.name" semantic conventions. It represents the name
+	// of the process executable. On Linux based systems, can be set to the
+	// `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+	// of `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'otelcol'
+	ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+	// ProcessExecutablePathKey is the attribute Key conforming to the
+	// "process.executable.path" semantic conventions. It represents the full
+	// path to the process executable. On Linux based systems, can be set to
+	// the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+	// `GetProcessImageFileNameW`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: '/usr/bin/cmd/otelcol'
+	ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+	// ProcessCommandKey is the attribute Key conforming to the
+	// "process.command" semantic conventions. It represents the command used
+	// to launch the process (i.e. the command name). On Linux based systems,
+	// can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+	// be set to the first parameter extracted from `GetCommandLineW`.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'cmd/otelcol'
+	ProcessCommandKey = attribute.Key("process.command")
+
+	// ProcessCommandLineKey is the attribute Key conforming to the
+	// "process.command_line" semantic conventions. It represents the full
+	// command used to launch the process as a single string. On Windows, can
+	// be set to the result of `GetCommandLineW`. Do not set this if you have
+	// to assemble it just for monitoring; use `process.command_args` instead.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+	ProcessCommandLineKey = attribute.Key("process.command_line")
+
+	// ProcessCommandArgsKey is the attribute Key conforming to the
+	// "process.command_args" semantic conventions. It represents all the
+	// command arguments (including the command/executable itself) as received
+	// by the process. On Linux-based systems (and some other Unixoid systems
+	// supporting procfs), can be set according to the list of null-delimited
+	// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+	// this would be the full argv vector passed to `main`.
+	//
+	// Type: string[]
+	// RequirementLevel: ConditionallyRequired (See alternative attributes
+	// below.)
+	// Stability: stable
+	// Examples: 'cmd/otelcol', '--config=config.yaml'
+	ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+	// ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+	// semantic conventions. It represents the username of the user that owns
+	// the process.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'root'
+	ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+	return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+	return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+	return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+	return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+	return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string. On Windows, can be set to
+// the result of `GetCommandLineW`. Do not set this if you have to assemble it
+// just for monitoring; use `process.command_args` instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+	return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+	return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+	return ProcessOwnerKey.String(val)
+}
+
+// The single (language) runtime instance which is monitored.
+const (
+	// ProcessRuntimeNameKey is the attribute Key conforming to the
+	// "process.runtime.name" semantic conventions. It represents the name of
+	// the runtime of this process. For compiled native binaries, this SHOULD
+	// be the name of the compiler.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'OpenJDK Runtime Environment'
+	ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+	// ProcessRuntimeVersionKey is the attribute Key conforming to the
+	// "process.runtime.version" semantic conventions. It represents the
+	// version of the runtime of this process, as returned by the runtime
+	// without modification.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '14.0.2'
+	ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+	// ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+	// "process.runtime.description" semantic conventions. It represents an
+	// additional description about the runtime of the process, for example a
+	// specific vendor customization of the runtime environment.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+	ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+	return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+	return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+	return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// A service instance.
+const (
+	// ServiceNameKey is the attribute Key conforming to the "service.name"
+	// semantic conventions. It represents the logical name of the service.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'shoppingcart'
+	// Note: MUST be the same for all instances of horizontally scaled
+	// services. If the value was not specified, SDKs MUST fall back to
+	// `unknown_service:` concatenated with
+	// [`process.executable.name`](process.md#process), e.g.
+	// `unknown_service:bash`. If `process.executable.name` is not available,
+	// the value MUST be set to `unknown_service`.
+	ServiceNameKey = attribute.Key("service.name")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+	return ServiceNameKey.String(val)
+}
+
+// A service instance.
+const (
+	// ServiceNamespaceKey is the attribute Key conforming to the
+	// "service.namespace" semantic conventions. It represents a namespace for
+	// `service.name`.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Shop'
+	// Note: A string value having a meaning that helps to distinguish a group
+	// of services, for example the team name that owns a group of services.
+	// `service.name` is expected to be unique within the same namespace. If
+	// `service.namespace` is not specified in the Resource then `service.name`
+	// is expected to be unique for all services that have no explicit
+	// namespace defined (so the empty/unspecified namespace is simply one more
+	// valid namespace). A zero-length namespace string is assumed equal to an
+	// unspecified namespace.
+	ServiceNamespaceKey = attribute.Key("service.namespace")
+
+	// ServiceInstanceIDKey is the attribute Key conforming to the
+	// "service.instance.id" semantic conventions. It represents the string ID
+	// of the service instance.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'my-k8s-pod-deployment-1',
+	// '627cc493-f310-47de-96bd-71410b7dec09'
+	// Note: MUST be unique for each instance of the same
+	// `service.namespace,service.name` pair (in other words
+	// `service.namespace,service.name,service.instance.id` triplet MUST be
+	// globally unique). The ID helps to distinguish instances of the same
+	// service that exist at the same time (e.g. instances of a horizontally
+	// scaled service). It is preferable for the ID to be persistent and stay
+	// the same for the lifetime of the service instance; however, it is
+	// acceptable that the ID is ephemeral and changes during important
+	// lifetime events for the service (e.g. service restarts). If the service
+	// has no inherent unique ID that can be used as the value of this
+	// attribute, it is recommended to generate a random Version 1 or Version 4
+	// RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+	// Version 5, see RFC 4122 for more recommendations).
+	ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+	// ServiceVersionKey is the attribute Key conforming to the
+	// "service.version" semantic conventions. It represents the version string
+	// of the service API or implementation.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '2.0.0'
+	ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+	return ServiceNamespaceKey.String(val)
+}
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+	return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation.
+func ServiceVersion(val string) attribute.KeyValue {
+	return ServiceVersionKey.String(val)
+}
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+	// TelemetrySDKNameKey is the attribute Key conforming to the
+	// "telemetry.sdk.name" semantic conventions. It represents the name of the
+	// telemetry SDK as defined above.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'opentelemetry'
+	TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+	// TelemetrySDKLanguageKey is the attribute Key conforming to the
+	// "telemetry.sdk.language" semantic conventions. It represents the
+	// language of the telemetry SDK.
+	//
+	// Type: Enum
+	// RequirementLevel: Required
+	// Stability: stable
+	TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+	// TelemetrySDKVersionKey is the attribute Key conforming to the
+	// "telemetry.sdk.version" semantic conventions. It represents the version
+	// string of the telemetry SDK.
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// TelemetrySDKName returns an attribute KeyValue conforming to the +// "telemetry.sdk.name" semantic conventions. It represents the name of the +// telemetry SDK as defined above. +func TelemetrySDKName(val string) attribute.KeyValue { + return TelemetrySDKNameKey.String(val) +} + +// TelemetrySDKVersion returns an attribute KeyValue conforming to the +// "telemetry.sdk.version" semantic conventions. It represents the version +// string of the telemetry SDK. +func TelemetrySDKVersion(val string) attribute.KeyValue { + return TelemetrySDKVersionKey.String(val) +} + +// The telemetry SDK used to capture data recorded by the instrumentation +// libraries. +const ( + // TelemetryAutoVersionKey is the attribute Key conforming to the + // "telemetry.auto.version" semantic conventions. It represents the version + // string of the auto instrumentation agent, if used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +// TelemetryAutoVersion returns an attribute KeyValue conforming to the +// "telemetry.auto.version" semantic conventions. It represents the version +// string of the auto instrumentation agent, if used. +func TelemetryAutoVersion(val string) attribute.KeyValue { + return TelemetryAutoVersionKey.String(val) +} + +// Resource describing the packaged software running the application code. Web +// engines are typically executed using process.runtime. +const ( + // WebEngineNameKey is the attribute Key conforming to the "webengine.name" + // semantic conventions. It represents the name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + + // WebEngineVersionKey is the attribute Key conforming to the + // "webengine.version" semantic conventions. It represents the version of + // the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + + // WebEngineDescriptionKey is the attribute Key conforming to the + // "webengine.description" semantic conventions. It represents the + // additional description of the web engine (e.g. detailed version and + // edition information). 
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+	// 2.2.2.Final'
+	WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+	return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+	return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+	return WebEngineDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+	// OTelScopeNameKey is the attribute Key conforming to the
+	// "otel.scope.name" semantic conventions. It represents the name of the
+	// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+	// OTelScopeVersionKey is the attribute Key conforming to the
+	// "otel.scope.version" semantic conventions. It represents the version of
+	// the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0.0'
+	OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+	return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+	return OTelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+	// OTelLibraryNameKey is the attribute Key conforming to the
+	// "otel.library.name" semantic conventions. It is deprecated; use the
+	// `otel.scope.name` attribute instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: 'io.opentelemetry.contrib.mongodb'
+	OTelLibraryNameKey = attribute.Key("otel.library.name")
+
+	// OTelLibraryVersionKey is the attribute Key conforming to the
+	// "otel.library.version" semantic conventions. It is deprecated; use the
+	// `otel.scope.version` attribute instead.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: deprecated
+	// Examples: '1.0.0'
+	OTelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OTelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It is deprecated; use the
+// `otel.scope.name` attribute instead.
+func OTelLibraryName(val string) attribute.KeyValue {
+	return OTelLibraryNameKey.String(val)
+}
+
+// OTelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It is deprecated; use the
+// `otel.scope.version` attribute instead.
+func OTelLibraryVersion(val string) attribute.KeyValue {
+	return OTelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
new file mode 100644
index 000000000..e449e5c3b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/schema.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.20.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
new file mode 100644
index 000000000..851774148
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.20.0/trace.go
@@ -0,0 +1,2610 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.20.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+	// ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+	// semantic conventions. It represents the type of the exception (its
+	// fully-qualified class name, if applicable). The dynamic type of the
+	// exception should be preferred over the static type in languages that
+	// support it.
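+	// For example (illustrative, not spec text): in Go, the dynamic type of
+	// an error can be captured with fmt.Sprintf("%T", err), yielding values
+	// such as "*os.PathError".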
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'java.net.ConnectException', 'OSError'
+	ExceptionTypeKey = attribute.Key("exception.type")
+
+	// ExceptionMessageKey is the attribute Key conforming to the
+	// "exception.message" semantic conventions. It represents the exception
+	// message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Division by zero', "Can't convert 'int' object to str
+	// implicitly"
+	ExceptionMessageKey = attribute.Key("exception.message")
+
+	// ExceptionStacktraceKey is the attribute Key conforming to the
+	// "exception.stacktrace" semantic conventions. It represents a stacktrace
+	// as a string in the natural representation for the language runtime. The
+	// representation is to be determined and documented by each language SIG.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+	// exception\\n at '
+	// 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+	// 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+	// 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+	ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+	return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+	return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+	return ExceptionStacktraceKey.String(val)
+}
+
+// The attributes described in this section are rather generic. They may be
+// used in any Log Record they apply to.
+const (
+	// LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+	// semantic conventions. It represents a unique identifier for the Log
+	// Record.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+	// Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+	// distinguishable log records MUST have different values.
+	// The id MAY be a [Universally Unique Lexicographically Sortable
+	// Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+	// (e.g. UUID) may be used as needed.
+	LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
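+//
+// Illustrative usage (a sketch, not part of the generated documentation;
+// assumes this package is imported under the name `semconv`):
+//
+//	uid := semconv.LogRecordUID("01ARZ3NDEKTSV4RRFFQ69G5FAV")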
+func LogRecordUID(val string) attribute.KeyValue {
+	return LogRecordUIDKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+	// AWSLambdaInvokedARNKey is the attribute Key conforming to the
+	// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+	// invoked ARN as provided on the `Context` passed to the function
+	// (the `Lambda-Runtime-Invoked-Function-ARN` header on the
+	// `/runtime/invocation/next` response, where applicable).
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+	// Note: This may be different from `cloud.resource_id` if an alias is
+	// involved.
+	AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (the `Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` response, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+	return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+	// CloudeventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the
+	// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+	// which uniquely identifies the event.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+	CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+	// CloudeventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the
+	// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+	// which identifies the context in which an event happened.
+	//
+	// Type: string
+	// RequirementLevel: Required
+	// Stability: stable
+	// Examples: 'https://github.com/cloudevents',
+	// '/cloudevents/spec/pull/123', 'my-service'
+	CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+	// CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+	// "cloudevents.event_spec_version" semantic conventions. It represents the
+	// [version of the CloudEvents
+	// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+	// which the event uses.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '1.0'
+	CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+	// CloudeventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+	// which contains a value describing the type of event related to the
+	// originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id),
+// which uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1),
+// which identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type),
+// which contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
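+
+// A usage sketch (illustrative only): a producer span for a CloudEvents
+// publish could carry the attributes above, assuming a trace.Span in scope
+// and this package imported as semconv:
+//
+//	span.SetAttributes(
+//		semconv.CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		semconv.CloudeventsEventSource("https://github.com/cloudevents"),
+//		semconv.CloudeventsEventSpecVersion("1.0"),
+//		semconv.CloudeventsEventType("com.example.object.deleted.v2"),
+//	)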
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed. For
+ // commands that switch the database, this should be set to the target
+ // database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (Should be collected by default only if
+ // there is sanitization that excludes sensitive information.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions.
It represents the name of the operation being + // executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not + // applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to + // attempt any client-side parsing of `db.statement` just to get this + // property, but it should be set if the operation name is provided by the + // library being instrumented. If the SQL statement has an ambiguous + // operation, or performs more than one operation, this value may be + // omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // Microsoft SQL Server Compact + DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = 
DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target database
+// (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
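+
+// A usage sketch (illustrative only): a client span for a SQL query might
+// combine the database attributes above, assuming a trace.Tracer from
+// go.opentelemetry.io/otel and this package imported as semconv:
+//
+//	ctx, span := tracer.Start(ctx, "SELECT customers", trace.WithAttributes(
+//		semconv.DBSystemPostgreSQL,
+//		semconv.DBName("customers"),
+//		semconv.DBOperation("SELECT"),
+//		semconv.DBStatement("SELECT * FROM wuser_table"), // sanitized first
+//	))
+//	defer span.End()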
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // being connected to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+ // longer required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// being connected to. This name is used to determine the port of a named
+// instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents whether
+ // or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue { + return DBCassandraCoordinatorIDKey.String(val) +} + +// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the +// "db.cassandra.coordinator.dc" semantic conventions. It represents the data +// center of the coordinating node for a query. +func DBCassandraCoordinatorDC(val string) attribute.KeyValue { + return DBCassandraCoordinatorDCKey.String(val) +} + +// Call-level attributes for Redis +const ( + // DBRedisDBIndexKey is the attribute Key conforming to the + // "db.redis.database_index" semantic conventions. It represents the index + // of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To + // be used instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default + // database (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// DBRedisDBIndex returns an attribute KeyValue conforming to the +// "db.redis.database_index" semantic conventions. It represents the index of +// the database being accessed as used in the [`SELECT` +// command](https://redis.io/commands/select), provided as an integer. To be +// used instead of the generic `db.name` attribute. +func DBRedisDBIndex(val int) attribute.KeyValue { + return DBRedisDBIndexKey.Int(val) +} + +// Call-level attributes for MongoDB +const ( + // DBMongoDBCollectionKey is the attribute Key conforming to the + // "db.mongodb.collection" semantic conventions. It represents the + // collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// DBMongoDBCollection returns an attribute KeyValue conforming to the +// "db.mongodb.collection" semantic conventions. It represents the collection +// being accessed within the database stated in `db.name`. +func DBMongoDBCollection(val string) attribute.KeyValue { + return DBMongoDBCollectionKey.String(val) +} + +// Call-level attributes for SQL databases +const ( + // DBSQLTableKey is the attribute Key conforming to the "db.sql.table" + // semantic conventions. It represents the name of the primary table that + // the operation is acting upon, including the database name (if + // applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting + // upon an anonymous table, or more than one table, this value MUST NOT be + // set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table" +// semantic conventions. It represents the name of the primary table that the +// operation is acting upon, including the database name (if applicable). +func DBSQLTable(val string) attribute.KeyValue { + return DBSQLTableKey.String(val) +} + +// Call-level attributes for Cosmos DB. +const ( + // DBCosmosDBClientIDKey is the attribute Key conforming to the + // "db.cosmosdb.client_id" semantic conventions. 
It represents the unique
+ // Cosmos client instance id.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+ // "db.cosmosdb.operation_type" semantic conventions. It represents the
+ // Cosmos DB operation type.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (when performing one of the
+ // operations in this list)
+ // Stability: stable
+ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "db.cosmosdb.connection_mode" semantic conventions. It represents the
+ // Cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (if not `direct` (or pick gw as
+ // default))
+ // Stability: stable
+ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+ // DBCosmosDBContainerKey is the attribute Key conforming to the
+ // "db.cosmosdb.container" semantic conventions. It represents the Cosmos
+ // DB container name.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if available)
+ // Stability: stable
+ // Examples: 'anystring'
+ DBCosmosDBContainerKey = attribute.Key("db.cosmosdb.container")
+
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_content_length" semantic conventions. It represents
+ // the request payload size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos
+ // DB status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (if response was received)
+ // Stability: stable
+ // Examples: 200, 201
+ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+ // Cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (when response was received and
+ // contained sub-code.)
+ // Stability: stable
+ // Examples: 1000, 1002
+ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+ // consumed for that operation.
+ //
+ // Type: double
+ // RequirementLevel: ConditionallyRequired (when available)
+ // Stability: stable
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBContainer returns an attribute KeyValue conforming to the
+// "db.cosmosdb.container" semantic conventions. It represents the Cosmos DB
+// container name.
+func DBCosmosDBContainer(val string) attribute.KeyValue {
+ return DBCosmosDBContainerKey.String(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes.
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the Cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the Cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
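+
+// A usage sketch (illustrative only): after receiving a Cosmos DB response, a
+// client span could record the outcome with the attributes above:
+//
+//	span.SetAttributes(
+//		semconv.DBCosmosDBOperationTypeRead,
+//		semconv.DBCosmosDBContainer("myContainer"), // hypothetical container
+//		semconv.DBCosmosDBStatusCode(200),
+//		semconv.DBCosmosDBRequestCharge(2.33), // RUs billed for the call
+//	)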
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
+// consumed for that operation.
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
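+
+// A translation sketch (illustrative only): a non-OTLP exporter might map a
+// finished span's status onto these attributes, where sd is a
+// go.opentelemetry.io/otel/sdk/trace.ReadOnlySpan and attrs is the exporter's
+// outgoing attribute slice (both assumed here):
+//
+//	if st := sd.Status(); st.Code == codes.Error {
+//		attrs = append(attrs,
+//			semconv.OTelStatusCodeError,
+//			semconv.OTelStatusDescription(st.Description),
+//		)
+//	} // nothing is emitted while the status is still UNSET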
+
+// This semantic convention describes an instance of a function that runs
+// without provisioning or managing of servers (also known as serverless
+// functions or Function as a Service (FaaS)) with spans.
+const (
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+ //
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+ // trigger that the corresponding incoming span would have (i.e., this has
+ // nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 this corresponds to the bucket name, and
+ // in Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the type
+ // of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
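+
+// A usage sketch (illustrative only): a span for a datasource-triggered
+// function (for example an object-created notification from S3) could be
+// annotated with the trigger and document attributes above:
+//
+//	span.SetAttributes(
+//		semconv.FaaSTriggerDatasource,
+//		semconv.FaaSDocumentCollection("myBucketName"),
+//		semconv.FaaSDocumentOperationInsert,
+//		semconv.FaaSDocumentName("myFile.txt"),
+//		semconv.FaaSDocumentTime("2020-01-23T13:47:06Z"),
+//	)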
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB to
+// the database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as a
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as a [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue { + return FaaSColdstartKey.Bool(val) +} + +// Contains additional attributes for outgoing FaaS spans. +const ( + // FaaSInvokedNameKey is the attribute Key conforming to the + // "faas.invoked_name" semantic conventions. It represents the name of the + // invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the + // invoked function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + + // FaaSInvokedProviderKey is the attribute Key conforming to the + // "faas.invoked_provider" semantic conventions. It represents the cloud + // provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the + // invoked function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + + // FaaSInvokedRegionKey is the attribute Key conforming to the + // "faas.invoked_region" semantic conventions. It represents the cloud + // region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like + // AWS or GCP, the region in which a function is hosted is essential to + // uniquely identify the function and also part of its endpoint. Since it's + // part of the endpoint being called, the region is always known to + // clients. In these cases, `faas.invoked_region` MUST be set accordingly. + // If the region is unknown to the client or not required for identifying + // the invoked function, setting `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the + // invoked function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// FaaSInvokedName returns an attribute KeyValue conforming to the +// "faas.invoked_name" semantic conventions. It represents the name of the +// invoked function. +func FaaSInvokedName(val string) attribute.KeyValue { + return FaaSInvokedNameKey.String(val) +} + +// FaaSInvokedRegion returns an attribute KeyValue conforming to the +// "faas.invoked_region" semantic conventions. It represents the cloud region +// of the invoked function. +func FaaSInvokedRegion(val string) attribute.KeyValue { + return FaaSInvokedRegionKey.String(val) +} + +// Operations that access some remote service. +const ( + // PeerServiceKey is the attribute Key conforming to the "peer.service" + // semantic conventions. It represents the + // [`service.name`](../../resource/semantic_conventions/README.md#service) + // of the remote service. SHOULD be equal to the actual `service.name` + // resource attribute of the remote service if any. 
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](../../resource/semantic_conventions/README.md#service) of
+// the remote service. SHOULD be equal to the actual `service.name` resource
+// attribute of the remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under, extracted from the token or application
+ // security context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses, extracted from the token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under, extracted from the token or
+// application security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
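+
+// A usage sketch (illustrative only): a server span for an authenticated
+// request might record the caller's identity from a verified token, where
+// claims is a hypothetical parsed token:
+//
+//	span.SetAttributes(
+//		semconv.EnduserID(claims.Subject),
+//		semconv.EnduserRole("admin"),
+//		semconv.EnduserScope(claims.Scope), // e.g. "read:message, write:files"
+//	)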
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses, extracted from the token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
+
+// Semantic Convention for HTTP Client
+const (
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions. It represents the full HTTP request URL in the form
+ // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
+ // not transmitted over HTTP, but if it is known, it should be included
+ // nevertheless.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Note: `http.url` MUST NOT contain credentials passed via URL in the form
+ // of `https://username:password@www.example.com/`. In such a case the
+ // attribute's value should be `https://www.example.com/`.
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPResendCountKey is the attribute Key conforming to the
+ // "http.resend_count" semantic conventions. It represents the ordinal
+ // number of the request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended (if and only if request was retried.)
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Server Unavailable,
+ // network issues, or any other).
+ HTTPResendCountKey = attribute.Key("http.resend_count")
+)
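+
+// A usage sketch (illustrative only): an HTTP client instrumentation might
+// set the URL (with any credentials stripped) when the span starts and bump
+// the resend counter on every retry or redirect, where attempt is the
+// hypothetical retry ordinal:
+//
+//	span.SetAttributes(
+//		semconv.HTTPURL("https://www.example.com/search?q=OpenTelemetry"),
+//	)
+//	span.SetAttributes(semconv.HTTPResendCount(attempt))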
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It represents the full HTTP request URL in the form
+// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
+// transmitted over HTTP, but if it is known, it should be included
+// nevertheless.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of the request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+ return HTTPResendCountKey.Int(val)
+}
+
+// Semantic Convention for HTTP Server
+const (
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions. It represents the full request target as passed in
+ // an HTTP request line or equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '/users/12314/?q=ddds'
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+ // semantic conventions. It represents the IP address of the original
+ // client behind all proxies, if known (e.g. from
+ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '83.164.160.102'
+ // Note: This is not necessarily the same as `net.sock.peer.addr`, which
+ // would identify the network-level peer, which may be a proxy.
+ //
+ // This attribute should be set when a source of information different
+ // from the one used for `net.sock.peer.addr` is available, even if that
+ // other source just confirms the same value as `net.sock.peer.addr`.
+ // Rationale: For `net.sock.peer.addr`, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in an
+// HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPClientIP returns an attribute KeyValue conforming to the
+// "http.client_ip" semantic conventions. It represents the IP address of the
+// original client behind all proxies, if known (e.g. from
+// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+func HTTPClientIP(val string) attribute.KeyValue {
+ return HTTPClientIPKey.String(val)
+}
+
+// The `aws` conventions apply to operations using the AWS SDK. They map
+// request or response parameters in AWS SDK API calls to attributes on a Span.
+// The conventions have been collected over time based on feedback from AWS
+// users of tracing and will continue to evolve as new interesting conventions
+// are found.
+// Some descriptions are also provided for populating general OpenTelemetry
+// semantic conventions based on these APIs.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ' + AWSRequestIDKey = attribute.Key("aws.request_id") +) + +// AWSRequestID returns an attribute KeyValue conforming to the +// "aws.request_id" semantic conventions. It represents the AWS request ID as +// returned in the response headers `x-amz-request-id` or `x-amz-requestid`. +func AWSRequestID(val string) attribute.KeyValue { + return AWSRequestIDKey.String(val) +} + +// Attributes that exist for multiple DynamoDB request types. +const ( + // AWSDynamoDBTableNamesKey is the attribute Key conforming to the + // "aws.dynamodb.table_names" semantic conventions. It represents the keys + // in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + + // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the + // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the + // JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { + // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" : + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table": + // { "CapacityUnits": number, "ReadCapacityUnits": number, + // "WriteCapacityUnits": number }, "TableName": "string", + // "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + + // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to + // the "aws.dynamodb.item_collection_metrics" semantic conventions. It + // represents the JSON-serialized value of the `ItemCollectionMetrics` + // response field. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": + // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { + // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ], + // "NULL": boolean, "S": "string", "SS": [ "string" ] } }, + // "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + + // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to + // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It + // represents the value of the `ProvisionedThroughput.ReadCapacityUnits` + // request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + + // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming + // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. + // It represents the value of the + // `ProvisionedThroughput.WriteCapacityUnits` request parameter. 
+ // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + + // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the + // "aws.dynamodb.consistent_read" semantic conventions. It represents the + // value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + + // AWSDynamoDBProjectionKey is the attribute Key conforming to the + // "aws.dynamodb.projection" semantic conventions. It represents the value + // of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, + // RelatedItems, ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + + // AWSDynamoDBLimitKey is the attribute Key conforming to the + // "aws.dynamodb.limit" semantic conventions. It represents the value of + // the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + + // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the + // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the + // value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + + // AWSDynamoDBIndexNameKey is the attribute Key conforming to the + // "aws.dynamodb.index_name" semantic conventions. It represents the value + // of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + + // AWSDynamoDBSelectKey is the attribute Key conforming to the + // "aws.dynamodb.select" semantic conventions. It represents the value of + // the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_names" semantic conventions. It represents the keys in +// the `RequestItems` object field. +func AWSDynamoDBTableNames(val ...string) attribute.KeyValue { + return AWSDynamoDBTableNamesKey.StringSlice(val) +} + +// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to +// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the +// JSON-serialized value of each item in the `ConsumedCapacity` response field. +func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue { + return AWSDynamoDBConsumedCapacityKey.StringSlice(val) +} + +// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming +// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It +// represents the JSON-serialized value of the `ItemCollectionMetrics` response +// field. 
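A brief sketch (the span value and the concrete parameter values are assumed for illustration, not taken from this diff) of how the DynamoDB attribute constructors above might be applied to an existing client span:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateQuerySpan tags a DynamoDB Query span with the request parameters
// it mirrors; each helper simply wraps the typed attribute.Key defined above.
func annotateQuerySpan(span trace.Span) {
	span.SetAttributes(
		semconv.AWSDynamoDBTableNames("Users"),
		semconv.AWSDynamoDBIndexName("name_to_group"),
		semconv.AWSDynamoDBConsistentRead(true),
		semconv.AWSDynamoDBLimit(10),
	)
}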
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue { + return AWSDynamoDBItemCollectionMetricsKey.String(val) +} + +// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.ReadCapacityUnits` request parameter. +func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedReadCapacityKey.Float64(val) +} + +// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue +// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic +// conventions. It represents the value of the +// `ProvisionedThroughput.WriteCapacityUnits` request parameter. +func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue { + return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val) +} + +// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the +// "aws.dynamodb.consistent_read" semantic conventions. It represents the value +// of the `ConsistentRead` request parameter. +func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue { + return AWSDynamoDBConsistentReadKey.Bool(val) +} + +// AWSDynamoDBProjection returns an attribute KeyValue conforming to the +// "aws.dynamodb.projection" semantic conventions. It represents the value of +// the `ProjectionExpression` request parameter. +func AWSDynamoDBProjection(val string) attribute.KeyValue { + return AWSDynamoDBProjectionKey.String(val) +} + +// AWSDynamoDBLimit returns an attribute KeyValue conforming to the +// "aws.dynamodb.limit" semantic conventions. It represents the value of the +// `Limit` request parameter. +func AWSDynamoDBLimit(val int) attribute.KeyValue { + return AWSDynamoDBLimitKey.Int(val) +} + +// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to +// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the +// value of the `AttributesToGet` request parameter. +func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributesToGetKey.StringSlice(val) +} + +// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the +// "aws.dynamodb.index_name" semantic conventions. It represents the value of +// the `IndexName` request parameter. +func AWSDynamoDBIndexName(val string) attribute.KeyValue { + return AWSDynamoDBIndexNameKey.String(val) +} + +// AWSDynamoDBSelect returns an attribute KeyValue conforming to the +// "aws.dynamodb.select" semantic conventions. It represents the value of the +// `Select` request parameter. +func AWSDynamoDBSelect(val string) attribute.KeyValue { + return AWSDynamoDBSelectKey.String(val) +} + +// DynamoDB.CreateTable +const ( + // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.global_secondary_indexes" semantic conventions. 
It + // represents the JSON-serialized value of each item of the + // `GlobalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": + // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ + // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": { + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + + // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to + // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It + // represents the JSON-serialized value of each item of the + // `LocalSecondaryIndexes` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", + // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_indexes" semantic +// conventions. It represents the JSON-serialized value of each item of the +// `GlobalSecondaryIndexes` request field. +func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val) +} + +// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming +// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It +// represents the JSON-serialized value of each item of the +// `LocalSecondaryIndexes` request field. +func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue { + return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val) +} + +// DynamoDB.ListTables +const ( + // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the + // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents + // the value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + + // AWSDynamoDBTableCountKey is the attribute Key conforming to the + // "aws.dynamodb.table_count" semantic conventions. It represents the + // number of items in the `TableNames` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming +// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It +// represents the value of the `ExclusiveStartTableName` request parameter. +func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue { + return AWSDynamoDBExclusiveStartTableKey.String(val) +} + +// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.table_count" semantic conventions. It represents the +// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue { + return AWSDynamoDBTableCountKey.Int(val) +} + +// DynamoDB.Query +const ( + // AWSDynamoDBScanForwardKey is the attribute Key conforming to the + // "aws.dynamodb.scan_forward" semantic conventions. It represents the + // value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the +// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of +// the `ScanIndexForward` request parameter. +func AWSDynamoDBScanForward(val bool) attribute.KeyValue { + return AWSDynamoDBScanForwardKey.Bool(val) +} + +// DynamoDB.Scan +const ( + // AWSDynamoDBSegmentKey is the attribute Key conforming to the + // "aws.dynamodb.segment" semantic conventions. It represents the value of + // the `Segment` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + + // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the + // "aws.dynamodb.total_segments" semantic conventions. It represents the + // value of the `TotalSegments` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + + // AWSDynamoDBCountKey is the attribute Key conforming to the + // "aws.dynamodb.count" semantic conventions. It represents the value of + // the `Count` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + + // AWSDynamoDBScannedCountKey is the attribute Key conforming to the + // "aws.dynamodb.scanned_count" semantic conventions. It represents the + // value of the `ScannedCount` response parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// AWSDynamoDBSegment returns an attribute KeyValue conforming to the +// "aws.dynamodb.segment" semantic conventions. It represents the value of the +// `Segment` request parameter. +func AWSDynamoDBSegment(val int) attribute.KeyValue { + return AWSDynamoDBSegmentKey.Int(val) +} + +// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the +// "aws.dynamodb.total_segments" semantic conventions. It represents the value +// of the `TotalSegments` request parameter. +func AWSDynamoDBTotalSegments(val int) attribute.KeyValue { + return AWSDynamoDBTotalSegmentsKey.Int(val) +} + +// AWSDynamoDBCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.count" semantic conventions. It represents the value of the +// `Count` response parameter. +func AWSDynamoDBCount(val int) attribute.KeyValue { + return AWSDynamoDBCountKey.Int(val) +} + +// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the +// "aws.dynamodb.scanned_count" semantic conventions. It represents the value +// of the `ScannedCount` response parameter. 
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue { + return AWSDynamoDBScannedCountKey.Int(val) +} + +// DynamoDB.UpdateTable +const ( + // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to + // the "aws.dynamodb.attribute_definitions" semantic conventions. It + // represents the JSON-serialized value of each item in the + // `AttributeDefinitions` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + + // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key + // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic + // conventions. It represents the JSON-serialized value of each item in + // the `GlobalSecondaryIndexUpdates` request field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, + // "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming +// to the "aws.dynamodb.attribute_definitions" semantic conventions. It +// represents the JSON-serialized value of each item in the +// `AttributeDefinitions` request field. +func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue { + return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val) +} + +// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue +// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic +// conventions. It represents the JSON-serialized value of each item in the +// `GlobalSecondaryIndexUpdates` request field. +func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue { + return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val) +} + +// Attributes that exist for S3 request types. +const ( + // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket" + // semantic conventions. It represents the S3 bucket name the request + // refers to. Corresponds to the `--bucket` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'some-bucket-name' + // Note: The `bucket` attribute is applicable to all S3 operations that + // reference a bucket, i.e. that require the bucket name as a mandatory + // parameter. + // This applies to almost all S3 operations except `list-buckets`. + AWSS3BucketKey = attribute.Key("aws.s3.bucket") + + // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic + // conventions. It represents the S3 object key the request refers to. + // Corresponds to the `--key` parameter of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // operations. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `key` attribute is applicable to all object-related S3 + // operations, i.e.
that require the object key as a mandatory parameter. + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // - + // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html) + // - + // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html) + // - + // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html) + // - + // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html) + // - + // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html) + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3KeyKey = attribute.Key("aws.s3.key") + + // AWSS3CopySourceKey is the attribute Key conforming to the + // "aws.s3.copy_source" semantic conventions. It represents the source + // object (in the form `bucket`/`key`) for the copy operation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'someFile.yml' + // Note: The `copy_source` attribute applies to S3 copy operations and + // corresponds to the `--copy-source` parameter + // of the [copy-object operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html). + // This applies in particular to the following operations: + // + // - + // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source") + + // AWSS3UploadIDKey is the attribute Key conforming to the + // "aws.s3.upload_id" semantic conventions. It represents the upload ID + // that identifies the multipart upload. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ' + // Note: The `upload_id` attribute applies to S3 multipart-upload + // operations and corresponds to the `--upload-id` parameter + // of the [S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) + // multipart operations. 
+ // This applies in particular to the following operations: + // + // - + // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html) + // - + // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html) + // - + // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html) + // - + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // - + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id") + + // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete" + // semantic conventions. It represents the delete request container that + // specifies the objects to be deleted. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: + // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean' + // Note: The `delete` attribute is only applicable to the + // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html) + // operation. + // The `delete` attribute corresponds to the `--delete` parameter of the + // [delete-objects operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html). + AWSS3DeleteKey = attribute.Key("aws.s3.delete") + + // AWSS3PartNumberKey is the attribute Key conforming to the + // "aws.s3.part_number" semantic conventions. It represents the part number + // of the part being uploaded in a multipart-upload operation. This is a + // positive integer between 1 and 10,000. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3456 + // Note: The `part_number` attribute is only applicable to the + // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html) + // and + // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html) + // operations. + // The `part_number` attribute corresponds to the `--part-number` parameter + // of the + // [upload-part operation within the S3 + // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html). + AWSS3PartNumberKey = attribute.Key("aws.s3.part_number") +) + +// AWSS3Bucket returns an attribute KeyValue conforming to the +// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the +// request refers to. Corresponds to the `--bucket` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Bucket(val string) attribute.KeyValue { + return AWSS3BucketKey.String(val) +} + +// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" +// semantic conventions. It represents the S3 object key the request refers to. +// Corresponds to the `--key` parameter of the [S3 +// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html) +// operations. +func AWSS3Key(val string) attribute.KeyValue { + return AWSS3KeyKey.String(val) +} + +// AWSS3CopySource returns an attribute KeyValue conforming to the +// "aws.s3.copy_source" semantic conventions. It represents the source object +// (in the form `bucket`/`key`) for the copy operation. 
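For illustration (the span and all values below are assumed, not part of the diff), an `upload-part` span could carry the S3 attributes defined above:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateUploadPartSpan records the bucket, object key, multipart upload ID,
// and part number on a span for an S3 upload-part call.
func annotateUploadPartSpan(span trace.Span) {
	span.SetAttributes(
		semconv.AWSS3Bucket("some-bucket-name"),
		semconv.AWSS3Key("someFile.yml"),
		semconv.AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
		semconv.AWSS3PartNumber(3456),
	)
}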
+func AWSS3CopySource(val string) attribute.KeyValue { + return AWSS3CopySourceKey.String(val) +} + +// AWSS3UploadID returns an attribute KeyValue conforming to the +// "aws.s3.upload_id" semantic conventions. It represents the upload ID that +// identifies the multipart upload. +func AWSS3UploadID(val string) attribute.KeyValue { + return AWSS3UploadIDKey.String(val) +} + +// AWSS3Delete returns an attribute KeyValue conforming to the +// "aws.s3.delete" semantic conventions. It represents the delete request +// container that specifies the objects to be deleted. +func AWSS3Delete(val string) attribute.KeyValue { + return AWSS3DeleteKey.String(val) +} + +// AWSS3PartNumber returns an attribute KeyValue conforming to the +// "aws.s3.part_number" semantic conventions. It represents the part number of +// the part being uploaded in a multipart-upload operation. This is a positive +// integer between 1 and 10,000. +func AWSS3PartNumber(val int) attribute.KeyValue { + return AWSS3PartNumberKey.Int(val) +} + +// Semantic conventions to apply when instrumenting the GraphQL implementation. +// They map GraphQL operations to attributes on a Span. +const ( + // GraphqlOperationNameKey is the attribute Key conforming to the + // "graphql.operation.name" semantic conventions. It represents the name of + // the operation being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'findBookByID' + GraphqlOperationNameKey = attribute.Key("graphql.operation.name") + + // GraphqlOperationTypeKey is the attribute Key conforming to the + // "graphql.operation.type" semantic conventions. It represents the type of + // the operation being executed. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + + // GraphqlDocumentKey is the attribute Key conforming to the + // "graphql.document" semantic conventions. It represents the GraphQL + // document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// GraphqlOperationName returns an attribute KeyValue conforming to the +// "graphql.operation.name" semantic conventions. It represents the name of the +// operation being executed. +func GraphqlOperationName(val string) attribute.KeyValue { + return GraphqlOperationNameKey.String(val) +} + +// GraphqlDocument returns an attribute KeyValue conforming to the +// "graphql.document" semantic conventions. It represents the GraphQL document +// being executed. +func GraphqlDocument(val string) attribute.KeyValue { + return GraphqlDocumentKey.String(val) +} + +// General attributes used in messaging systems. +const ( + // MessagingSystemKey is the attribute Key conforming to the + // "messaging.system" semantic conventions. It represents a string + // identifying the messaging system. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + + // MessagingOperationKey is the attribute Key conforming to the + // "messaging.operation" semantic conventions. It represents a string + // identifying the kind of messaging operation as defined in the [Operation + // names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. + MessagingOperationKey = attribute.Key("messaging.operation") + + // MessagingBatchMessageCountKey is the attribute Key conforming to the + // "messaging.batch.message_count" semantic conventions. It represents the + // number of messages sent, received, or processed in the scope of the + // batching operation. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the span describes an + // operation on a batch of messages.) + // Stability: stable + // Examples: 0, 1, 2 + // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on + // spans that operate with a single message. When a messaging client + // library supports both batch and single-message API for the same + // operation, instrumentations SHOULD use `messaging.batch.message_count` + // for batching APIs and SHOULD NOT use it for single-message APIs. + MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count") +) + +var ( + // publish + MessagingOperationPublish = MessagingOperationKey.String("publish") + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// MessagingSystem returns an attribute KeyValue conforming to the +// "messaging.system" semantic conventions. It represents a string identifying +// the messaging system. +func MessagingSystem(val string) attribute.KeyValue { + return MessagingSystemKey.String(val) +} + +// MessagingBatchMessageCount returns an attribute KeyValue conforming to +// the "messaging.batch.message_count" semantic conventions. It represents the +// number of messages sent, received, or processed in the scope of the batching +// operation. +func MessagingBatchMessageCount(val int) attribute.KeyValue { + return MessagingBatchMessageCountKey.Int(val) +} + +// Semantic convention for a consumer of messages received from a messaging +// system +const ( + // MessagingConsumerIDKey is the attribute Key conforming to the + // "messaging.consumer.id" semantic conventions. It represents the + // identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if + // both are present, or only `messaging.kafka.consumer.group`. For brokers, + // such as RabbitMQ and Artemis, set it to the `client_id` of the client + // consuming the message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer.id") +) + +// MessagingConsumerID returns an attribute KeyValue conforming to the +// "messaging.consumer.id" semantic conventions. It represents the identifier +// for the consumer receiving a message. 
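A minimal sketch (span and values assumed) combining the messaging attributes above; note that enum members such as `MessagingOperationPublish` are ready-made `attribute.KeyValue` values and are passed as-is:

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
	"go.opentelemetry.io/otel/trace"
)

// annotatePublishSpan tags a batch-publish span for a Kafka producer.
func annotatePublishSpan(span trace.Span) {
	span.SetAttributes(
		semconv.MessagingSystem("kafka"),
		semconv.MessagingOperationPublish,     // enum value for "messaging.operation"
		semconv.MessagingBatchMessageCount(2), // only for batch operations
	)
}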
For Kafka, set it to +// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both +// are present, or only `messaging.kafka.consumer.group`. For brokers, such as +// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the +// message. +func MessagingConsumerID(val string) attribute.KeyValue { + return MessagingConsumerIDKey.String(val) +} + +// Semantic conventions for remote procedure calls. +const ( + // RPCSystemKey is the attribute Key conforming to the "rpc.system" + // semantic conventions. It represents a string identifying the remoting + // system. See below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + + // RPCServiceKey is the attribute Key conforming to the "rpc.service" + // semantic conventions. It represents the full (logical) name of the + // service being called, including its package name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing + // class. The `code.namespace` attribute may be used to store the latter + // (despite the attribute name, it may include a class name; e.g., class + // with method actually executing the call on the server side, RPC client + // stub class on the client side). + RPCServiceKey = attribute.Key("rpc.service") + + // RPCMethodKey is the attribute Key conforming to the "rpc.method" + // semantic conventions. It represents the name of the (logical) method + // being called; it must be equal to the $method part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the + // latter (e.g., method actually executing the call on the server side, RPC + // client stub method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") + // Connect RPC + RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc") +) + +// RPCService returns an attribute KeyValue conforming to the "rpc.service" +// semantic conventions. It represents the full (logical) name of the service +// being called, including its package name, if applicable. +func RPCService(val string) attribute.KeyValue { + return RPCServiceKey.String(val) +} + +// RPCMethod returns an attribute KeyValue conforming to the "rpc.method" +// semantic conventions. It represents the name of the (logical) method being +// called; it must be equal to the $method part in the span name. +func RPCMethod(val string) attribute.KeyValue { + return RPCMethodKey.String(val) +} + +// Tech-specific attributes for gRPC. +const ( + // RPCGRPCStatusCodeKey is the attribute Key conforming to the + // "rpc.grpc.status_code" semantic conventions.
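Likewise for the RPC conventions, a sketch with assumed values (the status-code enum members used here are defined just below):

package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
	"go.opentelemetry.io/otel/trace"
)

// annotateGRPCClientSpan tags a gRPC client span per the rpc.* conventions.
func annotateGRPCClientSpan(span trace.Span) {
	span.SetAttributes(
		semconv.RPCSystemGRPC, // enum value for "rpc.system"
		semconv.RPCService("myservice.EchoService"),
		semconv.RPCMethod("Echo"),
		semconv.RPCGRPCStatusCodeOk,
	)
}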
It represents the [numeric status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of + // the gRPC request. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // RPCJsonrpcVersionKey is the attribute Key conforming to the + // "rpc.jsonrpc.version" semantic conventions. It represents the protocol + // version as in the `jsonrpc` property of the request/response. Since JSON-RPC 1.0 + // does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default + // version (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + + // RPCJsonrpcRequestIDKey is the attribute Key conforming to the + // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` + // property of the request or response. Since the protocol allows the id to be int, + // string, `null`, or missing (for notifications), the value is expected to be + // cast to a string for simplicity. Use an empty string in case of a `null` value. + // Omit entirely if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + + // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_code" semantic conventions. It represents the + // `error.code` property of the response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + + // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the + // "rpc.jsonrpc.error_message" semantic conventions. It represents the + // `error.message` property of the response if it is an error response.
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPCJsonrpcVersion returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.version" semantic conventions. It represents the protocol +// version as in the `jsonrpc` property of the request/response. Since JSON-RPC 1.0 +// does not specify this, the value can be omitted. +func RPCJsonrpcVersion(val string) attribute.KeyValue { + return RPCJsonrpcVersionKey.String(val) +} + +// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` +// property of the request or response. Since the protocol allows the id to be int, +// string, `null`, or missing (for notifications), the value is expected to be cast +// to a string for simplicity. Use an empty string in case of a `null` value. Omit +// entirely if this is a notification. +func RPCJsonrpcRequestID(val string) attribute.KeyValue { + return RPCJsonrpcRequestIDKey.String(val) +} + +// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_code" semantic conventions. It represents the +// `error.code` property of the response if it is an error response. +func RPCJsonrpcErrorCode(val int) attribute.KeyValue { + return RPCJsonrpcErrorCodeKey.Int(val) +} + +// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the +// "rpc.jsonrpc.error_message" semantic conventions. It represents the +// `error.message` property of the response if it is an error response. +func RPCJsonrpcErrorMessage(val string) attribute.KeyValue { + return RPCJsonrpcErrorMessageKey.String(val) +} + +// Tech-specific attributes for Connect RPC. +const ( + // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the + // "rpc.connect_rpc.error_code" semantic conventions. It represents the + // [error codes](https://connect.build/docs/protocol/#error-codes) of the + // Connect request. Error codes are always string values. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If response is not successful + // and if error code available.)
+ // Stability: stable + RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code") +) + +var ( + // cancelled + RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled") + // unknown + RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown") + // invalid_argument + RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument") + // deadline_exceeded + RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded") + // not_found + RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found") + // already_exists + RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists") + // permission_denied + RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied") + // resource_exhausted + RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted") + // failed_precondition + RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition") + // aborted + RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted") + // out_of_range + RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range") + // unimplemented + RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented") + // internal + RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal") + // unavailable + RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable") + // data_loss + RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss") + // unauthenticated + RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go index 7cf424855..0318b5ec4 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.21.0/doc.go @@ -15,6 +15,6 @@ // Package semconv implements OpenTelemetry semantic conventions. // // OpenTelemetry semantic conventions are agreed standardized naming -// patterns for OpenTelemetry things. This package represents the conventions -// as of the v1.21.0 version of the OpenTelemetry specification. +// patterns for OpenTelemetry things. This package represents the v1.21.0 +// version of the OpenTelemetry semantic conventions. 
package semconv // import "go.opentelemetry.io/otel/semconv/v1.21.0" diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index d1e47ca2f..db936ba5b 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -17,20 +17,14 @@ package trace // import "go.opentelemetry.io/otel/trace" import ( "encoding/json" "fmt" - "regexp" "strings" ) const ( maxListMembers = 32 - listDelimiter = "," - - // based on the W3C Trace Context specification, see - // https://www.w3.org/TR/trace-context-1/#tracestate-header - noTenantKeyFormat = `[a-z][_0-9a-z\-\*\/]*` - withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]*@[a-z][_0-9a-z\-\*\/]*` - valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]*[\x21-\x2b\x2d-\x3c\x3e-\x7e]` + listDelimiters = "," + memberDelimiter = "=" errInvalidKey errorConst = "invalid tracestate key" errInvalidValue errorConst = "invalid tracestate value" @@ -39,43 +33,128 @@ const ( errDuplicate errorConst = "duplicate list-member in tracestate" ) -var ( - noTenantKeyRe = regexp.MustCompile(`^` + noTenantKeyFormat + `$`) - withTenantKeyRe = regexp.MustCompile(`^` + withTenantKeyFormat + `$`) - valueRe = regexp.MustCompile(`^` + valueFormat + `$`) - memberRe = regexp.MustCompile(`^\s*((?:` + noTenantKeyFormat + `)|(?:` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) -) - type member struct { Key string Value string } +// according to (chr = %x20 / (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E)), +// which means (chr = %x20-2B / %x2D-3C / %x3E-7E). +func checkValueChar(v byte) bool { + return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d' +} + +// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E). +func checkValueLast(v byte) bool { + return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d' +} + +// based on the W3C Trace Context specification +// +// value = (0*255(chr)) nblk-chr +// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E +// chr = %x20 / nblk-chr +// +// see https://www.w3.org/TR/trace-context-1/#value +func checkValue(val string) bool { + n := len(val) + if n == 0 || n > 256 { + return false + } + for i := 0; i < n-1; i++ { + if !checkValueChar(val[i]) { + return false + } + } + return checkValueLast(val[n-1]) +} + +func checkKeyRemain(key string) bool { + // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) + for _, v := range key { + if isAlphaNum(byte(v)) { + continue + } + switch v { + case '_', '-', '*', '/': + continue + } + return false + } + return true +} + +// according to +// +// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// +// param n is the length of the remaining part; it should be 255 for a simple-key or 13 for a system-id. +func checkKeyPart(key string, n int) bool { + if len(key) == 0 { + return false + } + first := key[0] // key's first char + ret := len(key[1:]) <= n + ret = ret && first >= 'a' && first <= 'z' + return ret && checkKeyRemain(key[1:]) +} + +func isAlphaNum(c byte) bool { + if c >= 'a' && c <= 'z' { + return true + } + return c >= '0' && c <= '9' +} + +// according to +// +// tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ) +// +// param n is the length of the remaining part; it should be exactly 240.
+func checkKeyTenant(key string, n int) bool { + if len(key) == 0 { + return false + } + return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:]) +} + +// based on the W3C Trace Context specification +// +// key = simple-key / multi-tenant-key +// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// multi-tenant-key = tenant-id "@" system-id +// tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )) +// lcalpha = %x61-7A ; a-z +// +// see https://www.w3.org/TR/trace-context-1/#tracestate-header. +func checkKey(key string) bool { + tenant, system, ok := strings.Cut(key, "@") + if !ok { + return checkKeyPart(key, 255) + } + return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13) +} + func newMember(key, value string) (member, error) { - if len(key) > 256 { - return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) + if !checkKey(key) { + return member{}, errInvalidKey } - if !noTenantKeyRe.MatchString(key) { - if !withTenantKeyRe.MatchString(key) { - return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) - } - atIndex := strings.LastIndex(key, "@") - if atIndex > 241 || len(key)-1-atIndex > 14 { - return member{}, fmt.Errorf("%w: %s", errInvalidKey, key) - } - } - if len(value) > 256 || !valueRe.MatchString(value) { - return member{}, fmt.Errorf("%w: %s", errInvalidValue, value) + if !checkValue(value) { + return member{}, errInvalidValue } return member{Key: key, Value: value}, nil } func parseMember(m string) (member, error) { - matches := memberRe.FindStringSubmatch(m) - if len(matches) != 3 { + key, val, ok := strings.Cut(m, memberDelimiter) + if !ok { return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) } - result, e := newMember(matches[1], matches[2]) + key = strings.TrimLeft(key, " \t") + val = strings.TrimRight(val, " \t") + result, e := newMember(key, val) if e != nil { return member{}, fmt.Errorf("%w: %s", errInvalidMember, m) } @@ -85,7 +164,7 @@ func parseMember(m string) (member, error) { // String encodes member into a string compliant with the W3C Trace Context // specification. func (m member) String() string { - return fmt.Sprintf("%s=%s", m.Key, m.Value) + return m.Key + "=" + m.Value } // TraceState provides additional vendor-specific trace identification @@ -109,8 +188,8 @@ var _ json.Marshaler = TraceState{} // ParseTraceState attempts to decode a TraceState from the passed // string. It returns an error if the input is invalid according to the W3C // Trace Context specification. -func ParseTraceState(tracestate string) (TraceState, error) { - if tracestate == "" { +func ParseTraceState(ts string) (TraceState, error) { + if ts == "" { return TraceState{}, nil } @@ -120,7 +199,9 @@ func ParseTraceState(tracestate string) (TraceState, error) { var members []member found := make(map[string]struct{}) - for _, memberStr := range strings.Split(tracestate, listDelimiter) { + for ts != "" { + var memberStr string + memberStr, ts, _ = strings.Cut(ts, listDelimiters) if len(memberStr) == 0 { continue } @@ -153,11 +234,29 @@ func (ts TraceState) MarshalJSON() ([]byte, error) { // Trace Context specification. The returned string will be invalid if the // TraceState contains any invalid members. 
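A small sketch (the header values are assumed examples) of the behavior this hand-rolled key/value validation implements at the ParseTraceState level:

package example

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func Example_parseTraceState() {
	// A simple key and a multi-tenant key ("tenant@system") are both valid.
	ts, err := trace.ParseTraceState("rojo=00f067aa0ba902b7,congo@svc=t61rcWkgMzE")
	fmt.Println(ts.Len(), err) // 2 <nil>

	// Keys must start with a lowercase letter (or a digit, for tenant IDs),
	// so an uppercase key is rejected.
	_, err = trace.ParseTraceState("ROJO=00f067aa0ba902b7")
	fmt.Println(err != nil) // true
}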
func (ts TraceState) String() string { - members := make([]string, len(ts.list)) - for i, m := range ts.list { - members[i] = m.String() + if len(ts.list) == 0 { + return "" } - return strings.Join(members, listDelimiter) + var n int + n += len(ts.list) // member delimiters: '=' + n += len(ts.list) - 1 // list delimiters: ',' + for _, mem := range ts.list { + n += len(mem.Key) + n += len(mem.Value) + } + + var sb strings.Builder + sb.Grow(n) + _, _ = sb.WriteString(ts.list[0].Key) + _ = sb.WriteByte('=') + _, _ = sb.WriteString(ts.list[0].Value) + for i := 1; i < len(ts.list); i++ { + _ = sb.WriteByte(listDelimiters[0]) + _, _ = sb.WriteString(ts.list[i].Key) + _ = sb.WriteByte('=') + _, _ = sb.WriteString(ts.list[i].Value) + } + return sb.String() } // Get returns the value paired with key from the corresponding TraceState @@ -189,15 +288,25 @@ func (ts TraceState) Insert(key, value string) (TraceState, error) { if err != nil { return ts, err } - - cTS := ts.Delete(key) - if cTS.Len()+1 <= maxListMembers { - cTS.list = append(cTS.list, member{}) + n := len(ts.list) + found := n + for i := range ts.list { + if ts.list[i].Key == key { + found = i + } + } + cTS := TraceState{} + if found == n && n < maxListMembers { + cTS.list = make([]member, n+1) + } else { + cTS.list = make([]member, n) } - // When the number of members exceeds capacity, drop the "right-most". - copy(cTS.list[1:], cTS.list) cTS.list[0] = m - + // When the number of members exceeds capacity, drop the "right-most". + copy(cTS.list[1:], ts.list[0:found]) + if found < n { + copy(cTS.list[1+found:], ts.list[found+1:]) + } return cTS, nil } diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index e2f743585..c7aba1c3f 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.21.0" + return "1.22.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 3c153c9d6..a9cfb80ae 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -14,7 +14,7 @@ module-sets: stable-v1: - version: v1.21.0 + version: v1.22.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opentracing @@ -34,7 +34,7 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.44.0 + version: v0.45.0 modules: - go.opentelemetry.io/otel/bridge/opencensus - go.opentelemetry.io/otel/bridge/opencensus/test diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 5e8158bba..46ceac343 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -209,25 +209,37 @@ func Insert[S ~[]E, E any](s S, i int, v ...E) S { return s } -// Delete removes the elements s[i:j] from s, returning the modified slice. -// Delete panics if s[i:j] is not a valid slice of s. -// Delete is O(len(s)-j), so if many items must be deleted, it is better to -// make a single call deleting them all together than to delete one at a time. -// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those -// elements contain pointers you might consider zeroing those elements so that -// objects they reference can be garbage collected. 
-func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j] // bounds check +// clearSlice sets all elements up to the length of s to the zero value of E. +// We may use the builtin clear func instead, and remove clearSlice, when upgrading +// to Go 1.21+. +func clearSlice[S ~[]E, E any](s S) { + var zero E + for i := range s { + s[i] = zero + } +} - return append(s[:i], s[j:]...) +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. +// Delete is O(len(s)-i), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +// Delete zeroes the elements s[len(s)-(j-i):len(s)]. +func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j:len(s)] // bounds check + + if i == j { + return s + } + + oldlen := len(s) + s = append(s[:i], s[j:]...) + clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC + return s } // DeleteFunc removes any elements from s for which del returns true, // returning the modified slice. -// When DeleteFunc removes m elements, it might not modify the elements -// s[len(s)-m:len(s)]. If those elements contain pointers you might consider -// zeroing those elements so that objects they reference can be garbage -// collected. +// DeleteFunc zeroes the elements between the new length and the original length. func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { i := IndexFunc(s, del) if i == -1 { @@ -240,11 +252,13 @@ func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. +// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { _ = s[i:j] // verify that i:j is a valid subslice @@ -272,6 +286,7 @@ func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { if i+len(v) != j { copy(r[i+len(v):], s[j:]) } + clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC return r } @@ -345,9 +360,7 @@ func Clone[S ~[]E, E any](s S) S { // This is like the uniq command found on Unix. // Compact modifies the contents of the slice s and returns the modified slice, // which may have a smaller length. -// When Compact discards m elements in total, it might not modify the elements -// s[len(s)-m:len(s)]. If those elements contain pointers you might consider -// zeroing those elements so that objects they reference can be garbage collected. +// Compact zeroes the elements between the new length and the original length. func Compact[S ~[]E, E comparable](s S) S { if len(s) < 2 { return s @@ -361,11 +374,13 @@ func Compact[S ~[]E, E comparable](s S) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } // CompactFunc is like [Compact] but uses an equality function to compare elements. // For runs of elements that compare equal, CompactFunc keeps the first one. +// CompactFunc zeroes the elements between the new length and the original length. 
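A short sketch (assumed values) of the new zeroing behavior shared by Delete, DeleteFunc, Replace, and Compact: after the slice shrinks, the positions between the new and old lengths in the backing array are set to the zero value, so pointers held there no longer keep objects reachable.

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	a, b, c, d := 1, 2, 3, 4
	s := []*int{&a, &b, &c, &d}
	s = slices.Delete(s, 1, 3)        // drop &b and &c
	fmt.Println(len(s), *s[0], *s[1]) // 2 1 4

	// The obsolete tail of the backing array has been cleared to nil,
	// so the deleted pointers can be garbage collected.
	tail := s[:4]
	fmt.Println(tail[2], tail[3]) // <nil> <nil>
}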
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { if len(s) < 2 { return s @@ -379,6 +394,7 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { i++ } } + clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC return s[:i] } diff --git a/vendor/google.golang.org/api/internal/cba.go b/vendor/google.golang.org/api/internal/cba.go index 829383f55..fbf4ef1c6 100644 --- a/vendor/google.golang.org/api/internal/cba.go +++ b/vendor/google.golang.org/api/internal/cba.go @@ -35,6 +35,7 @@ package internal import ( "context" "crypto/tls" + "errors" "net" "net/url" "os" @@ -53,6 +54,12 @@ const ( // Experimental: if true, the code will try MTLS with S2A as the default for transport security. Default value is false. googleAPIUseS2AEnv = "EXPERIMENTAL_GOOGLE_API_USE_S2A" + + universeDomainPlaceholder = "UNIVERSE_DOMAIN" +) + +var ( + errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com") ) // getClientCertificateSourceAndEndpoint is a convenience function that invokes @@ -67,6 +74,14 @@ func getClientCertificateSourceAndEndpoint(settings *DialSettings) (cert.Source, if err != nil { return nil, "", err } + // TODO(chrisdsmith): https://github.com/googleapis/google-api-go-client/issues/2359 + if settings.Endpoint == "" && !settings.IsUniverseDomainGDU() && settings.DefaultEndpointTemplate != "" { + // TODO(chrisdsmith): https://github.com/googleapis/google-api-go-client/issues/2359 + // if settings.DefaultEndpointTemplate == "" { + // return nil, "", errors.New("internaloption.WithDefaultEndpointTemplate is required if option.WithUniverseDomain is not googleapis.com") + // } + endpoint = resolvedDefaultEndpoint(settings) + } return clientCertSource, endpoint, nil } @@ -80,9 +95,7 @@ type transportConfig struct { func getTransportConfig(settings *DialSettings) (*transportConfig, error) { clientCertSource, endpoint, err := getClientCertificateSourceAndEndpoint(settings) if err != nil { - return &transportConfig{ - clientCertSource: nil, endpoint: "", s2aAddress: "", s2aMTLSEndpoint: "", - }, err + return nil, err } defaultTransportConfig := transportConfig{ clientCertSource: clientCertSource, @@ -94,12 +107,10 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) { if !shouldUseS2A(clientCertSource, settings) { return &defaultTransportConfig, nil } - - s2aMTLSEndpoint := settings.DefaultMTLSEndpoint - // If there is endpoint override, honor it. 
- if settings.Endpoint != "" { - s2aMTLSEndpoint = endpoint + if !settings.IsUniverseDomainGDU() { + return nil, errUniverseNotSupportedMTLS } + s2aAddress := GetS2AAddress() if s2aAddress == "" { return &defaultTransportConfig, nil @@ -108,7 +119,7 @@ func getTransportConfig(settings *DialSettings) (*transportConfig, error) { clientCertSource: clientCertSource, endpoint: endpoint, s2aAddress: s2aAddress, - s2aMTLSEndpoint: s2aMTLSEndpoint, + s2aMTLSEndpoint: settings.DefaultMTLSEndpoint, }, nil } @@ -153,24 +164,41 @@ func isClientCertificateEnabled() bool { // WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz" func getEndpoint(settings *DialSettings, clientCertSource cert.Source) (string, error) { if settings.Endpoint == "" { - mtlsMode := getMTLSMode() - if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) { + if isMTLS(clientCertSource) { + if !settings.IsUniverseDomainGDU() { + return "", errUniverseNotSupportedMTLS + } return settings.DefaultMTLSEndpoint, nil } - return settings.DefaultEndpoint, nil + return resolvedDefaultEndpoint(settings), nil } if strings.Contains(settings.Endpoint, "://") { // User passed in a full URL path, use it verbatim. return settings.Endpoint, nil } - if settings.DefaultEndpoint == "" { + if resolvedDefaultEndpoint(settings) == "" { // If DefaultEndpoint is not configured, use the user provided endpoint verbatim. // This allows a naked "host[:port]" URL to be used with GRPC Direct Path. return settings.Endpoint, nil } // Assume user-provided endpoint is host[:port], merge it with the default endpoint. - return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint) + return mergeEndpoints(resolvedDefaultEndpoint(settings), settings.Endpoint) +} + +func isMTLS(clientCertSource cert.Source) bool { + mtlsMode := getMTLSMode() + return mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) +} + +// resolvedDefaultEndpoint returns the DefaultEndpointTemplate merged with the +// Universe Domain if the DefaultEndpointTemplate is set, otherwise returns the +// deprecated DefaultEndpoint value. +func resolvedDefaultEndpoint(settings *DialSettings) string { + if settings.DefaultEndpointTemplate == "" { + return settings.DefaultEndpoint + } + return strings.Replace(settings.DefaultEndpointTemplate, universeDomainPlaceholder, settings.GetUniverseDomain(), 1) } func getMTLSMode() string { @@ -274,25 +302,15 @@ func shouldUseS2A(clientCertSource cert.Source, settings *DialSettings) bool { if !isGoogleS2AEnabled() { return false } - // If DefaultMTLSEndpoint is not set and no endpoint override, skip S2A. - if settings.DefaultMTLSEndpoint == "" && settings.Endpoint == "" { - return false - } - // If MTLS is not enabled for this endpoint, skip S2A. - if !mtlsEndpointEnabledForS2A() { + // If DefaultMTLSEndpoint is not set or has endpoint override, skip S2A. + if settings.DefaultMTLSEndpoint == "" || settings.Endpoint != "" { return false } // If custom HTTP client is provided, skip S2A. if settings.HTTPClient != nil { return false } - return true -} - -// mtlsEndpointEnabledForS2A checks if the endpoint is indeed MTLS-enabled, so that we can use S2A for MTLS connection. -var mtlsEndpointEnabledForS2A = func() bool { - // TODO(xmenxk): determine this via discovery config. 
- return true + return !settings.EnableDirectPath && !settings.EnableDirectPathXds } func isGoogleS2AEnabled() bool { diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index 05165f333..b64893098 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -16,6 +16,7 @@ import ( "time" "golang.org/x/oauth2" + "google.golang.org/api/internal/cert" "google.golang.org/api/internal/impersonate" "golang.org/x/oauth2/google" @@ -90,11 +91,11 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g // Determine configurations for the OAuth2 transport, which is separate from the API transport. // The OAuth2 transport and endpoint will be configured for mTLS if applicable. - clientCertSource, oauth2Endpoint, err := getClientCertificateSourceAndEndpoint(oauth2DialSettings(ds)) + clientCertSource, err := getClientCertificateSource(ds) if err != nil { return nil, err } - params.TokenURL = oauth2Endpoint + params.TokenURL = oAuth2Endpoint(clientCertSource) if clientCertSource != nil { tlsConfig := &tls.Config{ GetClientCertificate: clientCertSource, @@ -124,22 +125,37 @@ func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*g return cred, err } +func oAuth2Endpoint(clientCertSource cert.Source) string { + if isMTLS(clientCertSource) { + return google.MTLSTokenURL + } + return google.Endpoint.TokenURL +} + func isSelfSignedJWTFlow(data []byte, ds *DialSettings) (bool, error) { - if (ds.EnableJwtWithScope || ds.HasCustomAudience()) && - ds.ImpersonationConfig == nil { - // Check if JSON is a service account and if so create a self-signed JWT. - var f struct { - Type string `json:"type"` - // The rest JSON fields are omitted because they are not used. - } - if err := json.Unmarshal(data, &f); err != nil { - return false, err - } - return f.Type == serviceAccountKey, nil + // For non-GDU universe domains, token exchange is impossible and services + // must support self-signed JWTs with scopes. + if !ds.IsUniverseDomainGDU() { + return typeServiceAccount(data) + } + if (ds.EnableJwtWithScope || ds.HasCustomAudience()) && ds.ImpersonationConfig == nil { + return typeServiceAccount(data) } return false, nil } +// typeServiceAccount checks if JSON data is for a service account. +func typeServiceAccount(data []byte) (bool, error) { + var f struct { + Type string `json:"type"` + // The remaining JSON fields are omitted because they are not used. + } + if err := json.Unmarshal(data, &f); err != nil { + return false, err + } + return f.Type == serviceAccountKey, nil +} + func selfSignedJWTTokenSource(data []byte, ds *DialSettings) (oauth2.TokenSource, error) { if len(ds.GetScopes()) > 0 && !ds.HasCustomAudience() { // Scopes are preferred in self-signed JWT unless the scope is not available @@ -188,15 +204,6 @@ func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds * }, nil } -// oauth2DialSettings returns the settings to be used by the OAuth2 transport, which is separate from the API transport. -func oauth2DialSettings(ds *DialSettings) *DialSettings { - var ods DialSettings - ods.DefaultEndpoint = google.Endpoint.TokenURL - ods.DefaultMTLSEndpoint = google.MTLSTokenURL - ods.ClientCertSource = ds.ClientCertSource - return &ods -} - // customHTTPClient constructs an HTTPClient using the provided tlsConfig, to support mTLS. 
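The refactored isSelfSignedJWTFlow above routes every non-GDU universe through typeServiceAccount, which decodes only the top-level "type" field of the credentials JSON. A self-contained sketch of that detection; the "service_account" literal is assumed to match the serviceAccountKey constant used in this package:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // isServiceAccount reports whether data looks like a service account key,
    // ignoring every JSON field except the top-level "type".
    func isServiceAccount(data []byte) (bool, error) {
        var f struct {
            Type string `json:"type"`
        }
        if err := json.Unmarshal(data, &f); err != nil {
            return false, err
        }
        return f.Type == "service_account", nil
    }

    func main() {
        ok, err := isServiceAccount([]byte(`{"type":"service_account","project_id":"demo"}`))
        fmt.Println(ok, err) // true <nil>
    }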
func customHTTPClient(tlsConfig *tls.Config) *http.Client { trans := baseTransport() @@ -219,3 +226,14 @@ func baseTransport() *http.Transport { ExpectContinueTimeout: 1 * time.Second, } } + +// ErrUniverseNotMatch composes an error string from the provided universe +// domain sources (DialSettings and Credentials, respectively). +func ErrUniverseNotMatch(settingsUD, credsUD string) error { + return fmt.Errorf( + "the configured universe domain (%q) does not match the universe "+ + "domain found in the credentials (%q). If you haven't configured "+ + "WithUniverseDomain explicitly, \"googleapis.com\" is the default", + settingsUD, + credsUD) +} diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 285e6e04d..99210ebe0 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -19,7 +19,8 @@ import ( ) const ( - newAuthLibEnVar = "GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB" + newAuthLibEnVar = "GOOGLE_API_GO_EXPERIMENTAL_USE_NEW_AUTH_LIB" + universeDomainDefault = "googleapis.com" ) // DialSettings holds information needed to establish a connection with a @@ -161,3 +162,28 @@ func (ds *DialSettings) Validate() error { } return nil } + +// GetDefaultUniverseDomain returns the default service domain for a given Cloud +// universe, as configured with internaloption.WithDefaultUniverseDomain. +// The default value is "googleapis.com". +func (ds *DialSettings) GetDefaultUniverseDomain() string { + if ds.DefaultUniverseDomain == "" { + return universeDomainDefault + } + return ds.DefaultUniverseDomain +} + +// GetUniverseDomain returns the default service domain for a given Cloud +// universe, as configured with option.WithUniverseDomain. +// The default value is the value of GetDefaultUniverseDomain, as configured +// with internaloption.WithDefaultUniverseDomain. +func (ds *DialSettings) GetUniverseDomain() string { + if ds.UniverseDomain == "" { + return ds.GetDefaultUniverseDomain() + } + return ds.UniverseDomain +} + +func (ds *DialSettings) IsUniverseDomainGDU() bool { + return ds.GetUniverseDomain() == ds.GetDefaultUniverseDomain() +} diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index 8ecad3542..f33c762df 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.156.0" +const Version = "0.161.0" diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index c15be9faa..e6b5c1025 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -38,7 +38,10 @@ func (o defaultEndpointTemplateOption) Apply(settings *internal.DialSettings) { // WithDefaultEndpointTemplate provides a template for creating the endpoint // using a universe domain. See also WithDefaultUniverseDomain and -// option.WithUniverseDomain. +// option.WithUniverseDomain. The placeholder UNIVERSE_DOMAIN should be used +// instead of a concrete universe domain such as "googleapis.com". +// +// Example: WithDefaultEndpointTemplate("https://logging.UNIVERSE_DOMAIN/") // // It should only be used internally by generated clients. 
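GetUniverseDomain and GetDefaultUniverseDomain above form a two-step fallback chain that bottoms out at "googleapis.com", and resolvedDefaultEndpoint (in cba.go earlier in this patch) splices the result into the UNIVERSE_DOMAIN placeholder. A hedged, self-contained sketch of that resolution, with a simplified stand-in for the real DialSettings type:

    package main

    import (
        "fmt"
        "strings"
    )

    const universeDomainDefault = "googleapis.com"

    type settings struct {
        UniverseDomain          string // option.WithUniverseDomain
        DefaultUniverseDomain   string // internaloption.WithDefaultUniverseDomain
        DefaultEndpointTemplate string // e.g. "https://logging.UNIVERSE_DOMAIN/"
    }

    // universeDomain mirrors the GetUniverseDomain fallback chain.
    func (s settings) universeDomain() string {
        if s.UniverseDomain != "" {
            return s.UniverseDomain
        }
        if s.DefaultUniverseDomain != "" {
            return s.DefaultUniverseDomain
        }
        return universeDomainDefault
    }

    // endpoint mirrors resolvedDefaultEndpoint's placeholder substitution.
    func (s settings) endpoint() string {
        return strings.Replace(s.DefaultEndpointTemplate, "UNIVERSE_DOMAIN", s.universeDomain(), 1)
    }

    func main() {
        s := settings{DefaultEndpointTemplate: "https://logging.UNIVERSE_DOMAIN/"}
        fmt.Println(s.endpoint()) // https://logging.googleapis.com/
        s.UniverseDomain = "example.universe"
        fmt.Println(s.endpoint()) // https://logging.example.universe/
    }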
func WithDefaultEndpointTemplate(url string) option.ClientOption { @@ -163,6 +166,11 @@ func (w withDefaultUniverseDomain) Apply(o *internal.DialSettings) { // EnableJwtWithScope returns a ClientOption that specifies if scope can be used // with self-signed JWT. +// +// EnableJwtWithScope is ignored when option.WithUniverseDomain is set +// to a value other than the Google Default Universe (GDU) of "googleapis.com". +// For non-GDU domains, token exchange is impossible and services must +// support self-signed JWTs with scopes. func EnableJwtWithScope() option.ClientOption { return enableJwtWithScope(true) } diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index 10830f016..9e8b5a122 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -177,6 +177,17 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if err != nil { return nil, err } + credsUniverseDomain, err := creds.GetUniverseDomain() + if err != nil { + return nil, err + } + if o.TokenSource == nil { + // We only validate non-tokensource creds, as TokenSource-based credentials + // don't propagate universe. + if o.GetUniverseDomain() != credsUniverseDomain { + return nil, internal.ErrUniverseNotMatch(o.GetUniverseDomain(), credsUniverseDomain) + } + } grpcOpts = append(grpcOpts, grpc.WithPerRPCCredentials(grpcTokenSource{ TokenSource: oauth.TokenSource{TokenSource: creds.TokenSource}, quotaProject: internal.GetQuotaProject(creds, o.QuotaProject), diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go index 7e322a17c..75b266acf 100644 --- a/vendor/google.golang.org/api/transport/http/dial.go +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -88,6 +88,17 @@ func newTransport(ctx context.Context, base http.RoundTripper, settings *interna if err != nil { return nil, err } + credsUniverseDomain, err := creds.GetUniverseDomain() + if err != nil { + return nil, err + } + if settings.TokenSource == nil { + // We only validate non-tokensource creds, as TokenSource-based credentials + // don't propagate universe. + if settings.GetUniverseDomain() != credsUniverseDomain { + return nil, internal.ErrUniverseNotMatch(settings.GetUniverseDomain(), credsUniverseDomain) + } + } paramTransport.quotaProject = internal.GetQuotaProject(creds, settings.QuotaProject) ts := creds.TokenSource if settings.ImpersonationConfig == nil && settings.TokenSource != nil { diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 83774fbcb..d5dccb933 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.21.12 +// protoc v4.24.4 // source: google/api/client.proto package annotations @@ -1033,6 +1033,18 @@ type MethodSettings struct { // total_poll_timeout: // seconds: 54000 # 90 minutes LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` + // List of top-level fields of the request message, that should be + // automatically populated by the client libraries based on their + // (google.api.field_info).format. Currently supported format: UUID4. + // + // Example of a YAML configuration: + // + // publishing: + // method_settings: + // - selector: google.example.v1.ExampleService.CreateExample + // auto_populated_fields: + // - request_id + AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"` } func (x *MethodSettings) Reset() { @@ -1081,6 +1093,13 @@ func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning { return nil } +func (x *MethodSettings) GetAutoPopulatedFields() []string { + if x != nil { + return x.AutoPopulatedFields + } + return nil +} + // Describes settings to use when generating API methods that use the // long-running operation pattern. // All default values below are from those used in the client library @@ -1452,69 +1471,73 @@ var file_google_api_client_proto_rawDesc = []byte{ 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xc2, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, - 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a, - 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, - 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, - 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, - 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, - 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, - 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, - 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 
0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, - 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, - 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, - 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, - 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54, - 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, - 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10, - 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57, - 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, - 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e, - 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18, - 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, - 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, - 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, - 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, - 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, - 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, - 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, - 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, - 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 
0x76, 0x69, 0x63, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, 0x0e, 0x63, - 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, - 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, + 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, + 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, + 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, + 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, + 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, + 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, + 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, + 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, + 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, + 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, + 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, + 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, + 0x49, 0x5a, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, + 0x12, 0x07, 
0x0a, 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, + 0x54, 0x4f, 0x53, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, + 0x56, 0x49, 0x45, 0x57, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, + 0x4e, 0x47, 0x10, 0x05, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, + 0x0d, 0x47, 0x45, 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, + 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, + 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, + 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, + 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, + 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, + 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, + 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, + 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, + 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, + 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, + 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, + 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 595480112..e9e97d451 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -430,7 +430,7 @@ type ClientHeader struct { 
 MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
 // A single process may be used to run multiple virtual
 // servers with different identities.
- // The authority is the name of such a server identitiy.
+ // The authority is the name of such a server identity.
 // It is typically a portion of the URI in the form of
 // <host> or <host>:<port> .
 Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index e6f2625b6..f6e815e6b 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -1860,27 +1860,15 @@ func (cc *ClientConn) determineAuthority() error {
 }
 endpoint := cc.parsedTarget.Endpoint()
- target := cc.target
- switch {
- case authorityFromDialOption != "":
+ if authorityFromDialOption != "" {
 cc.authority = authorityFromDialOption
- case authorityFromCreds != "":
+ } else if authorityFromCreds != "" {
 cc.authority = authorityFromCreds
- case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"):
- // TODO: remove when the unix resolver implements optional interface to
- // return channel authority.
- cc.authority = "localhost"
- case strings.HasPrefix(endpoint, ":"):
+ } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok {
+ cc.authority = auth.OverrideAuthority(cc.parsedTarget)
+ } else if strings.HasPrefix(endpoint, ":") {
 cc.authority = "localhost" + endpoint
- default:
- // TODO: Define an optional interface on the resolver builder to return
- // the channel authority given the user's dial target. For resolvers
- // which don't implement this interface, we will use the endpoint from
- // "scheme://authority/endpoint" as the default authority.
- // Escape the endpoint to handle use cases where the endpoint
- // might not be a valid authority by default.
- // For example an endpoint which has multiple paths like
- // 'a/b/c', which is not a valid authority by default.
+ } else {
 cc.authority = encodeAuthority(endpoint)
 }
 channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority)
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
index 0854e7af6..6c867dd85 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/handshaker/handshaker.go
@@ -25,6 +25,7 @@ import (
 "fmt"
 "io"
 "net"
+ "time"

 "golang.org/x/sync/semaphore"
 grpc "google.golang.org/grpc"
@@ -60,8 +61,6 @@ var (
 // control number of concurrent created (but not closed) handshakes.
 clientHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes))
 serverHandshakes = semaphore.NewWeighted(int64(envconfig.ALTSMaxConcurrentHandshakes))
- // errDropped occurs when maxPendingHandshakes is reached.
- errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached")
 // errOutOfBound occurs when the handshake service returns a consumed
 // bytes value larger than the buffer that was passed to it originally.
 errOutOfBound = errors.New("handshaker service consumed bytes value is out-of-bound")
@@ -155,8 +154,8 @@ func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn,
 // ClientHandshake starts and completes a client ALTS handshake for GCP. Once
 // done, ClientHandshake returns a secure connection.
 func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
- if !clientHandshakes.TryAcquire(1) {
- return nil, nil, errDropped
+ if err := clientHandshakes.Acquire(ctx, 1); err != nil {
+ return nil, nil, err
 }
 defer clientHandshakes.Release(1)
@@ -208,8 +207,8 @@ func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credent
 // ServerHandshake starts and completes a server ALTS handshake for GCP. Once
 // done, ServerHandshake returns a secure connection.
 func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
- if !serverHandshakes.TryAcquire(1) {
- return nil, nil, errDropped
+ if err := serverHandshakes.Acquire(ctx, 1); err != nil {
+ return nil, nil, err
 }
 defer serverHandshakes.Release(1)
@@ -308,8 +307,10 @@ func (h *altsHandshaker) accessHandshakerService(req *altspb.HandshakerReq) (*al
 // the results. Handshaker service takes care of frame parsing, so we read
 // whatever received from the network and send it to the handshaker service.
 func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []byte) (*altspb.HandshakerResult, []byte, error) {
+ var lastWriteTime time.Time
 for {
 if len(resp.OutFrames) > 0 {
+ lastWriteTime = time.Now()
 if _, err := h.conn.Write(resp.OutFrames); err != nil {
 return nil, nil, err
 }
@@ -333,11 +334,15 @@ func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []b
 // Append extra bytes from the previous interaction with the
 // handshaker service with the current buffer read from conn.
 p := append(extra, buf[:n]...)
+ // Compute the time elapsed since the last write to the peer.
+ timeElapsed := time.Since(lastWriteTime)
+ timeElapsedMs := uint32(timeElapsed.Milliseconds())
 // From here on, p and extra point to the same slice.
 resp, err = h.accessHandshakerService(&altspb.HandshakerReq{
 ReqOneof: &altspb.HandshakerReq_Next{
 Next: &altspb.NextHandshakeMessageReq{
- InBytes: p,
+ InBytes: p,
+ NetworkLatencyMs: timeElapsedMs,
 },
 },
 })
diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
index 81d0f1140..00407de75 100644
--- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
+++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go
@@ -613,6 +613,10 @@ type NextHandshakeMessageReq struct {
 // that the peer's out_frames are split into multiple NextHandshakerMessageReq
 // messages.
 InBytes []byte `protobuf:"bytes,1,opt,name=in_bytes,json=inBytes,proto3" json:"in_bytes,omitempty"`
+ // Number of milliseconds between when the application sent the last handshake
+ // message to the peer and when the application received the current handshake
+ // message (in the in_bytes field) from the peer.
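The handshaker change above swaps the fail-fast TryAcquire (which returned errDropped as soon as the concurrency cap was hit) for a blocking Acquire bounded by the caller's context, so handshakes now queue for a slot instead of failing outright. A small sketch of the behavioral difference with a cap of 1:

    package main

    import (
        "context"
        "fmt"
        "time"

        "golang.org/x/sync/semaphore"
    )

    func main() {
        sem := semaphore.NewWeighted(1)
        _ = sem.Acquire(context.Background(), 1) // hold the only slot

        // Old behavior: fail immediately once the cap is reached.
        fmt.Println(sem.TryAcquire(1)) // false

        // New behavior: wait for a slot, giving up only when the context expires.
        ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
        defer cancel()
        fmt.Println(sem.Acquire(ctx, 1)) // context deadline exceeded
    }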
+ NetworkLatencyMs uint32 `protobuf:"varint,2,opt,name=network_latency_ms,json=networkLatencyMs,proto3" json:"network_latency_ms,omitempty"` } func (x *NextHandshakeMessageReq) Reset() { @@ -654,6 +658,13 @@ func (x *NextHandshakeMessageReq) GetInBytes() []byte { return nil } +func (x *NextHandshakeMessageReq) GetNetworkLatencyMs() uint32 { + if x != nil { + return x.NetworkLatencyMs + } + return 0 +} + type HandshakerReq struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1116,89 +1127,92 @@ var file_grpc_gcp_handshaker_proto_rawDesc = []byte{ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x34, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x62, 0x0a, 0x17, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x6e, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xe5, 0x01, 0x0a, - 0x0d, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, - 0x0a, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, - 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, - 0x00, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, - 0x0a, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, - 0x73, 0x68, 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, - 0x00, 0x52, 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, - 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, - 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, - 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, - 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 
0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, - 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, - 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, - 0x72, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, - 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, - 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0f, 0x6b, 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, - 0x12, 0x49, 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, - 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, - 0x52, 0x70, 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, - 0x61, 0x78, 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x22, 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, - 0x61, 0x69, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, - 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, - 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, - 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0d, 0x62, - 0x79, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x12, 0x32, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, - 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, - 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, - 0x44, 0x53, 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 
0x43, 0x4f, 0x4c, 0x5f, - 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, - 0x03, 0x54, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, - 0x2a, 0x45, 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x63, 0x6f, 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, - 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, - 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, - 0x0a, 0x03, 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, - 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, - 0x44, 0x6f, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, - 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, - 0x28, 0x01, 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x61, 0x6c, 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, - 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x61, 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, - 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x69, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, + 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x10, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4d, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x0d, 0x48, + 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x71, 0x12, 0x46, 0x0a, 0x0c, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, + 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x04, + 0x6e, 0x65, 0x78, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x4e, 0x65, 0x78, 0x74, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, + 0x61, 0x6b, 0x65, 0x4d, 0x65, 0x73, 0x73, 
0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x48, 0x00, 0x52, + 0x04, 0x6e, 0x65, 0x78, 0x74, 0x42, 0x0b, 0x0a, 0x09, 0x72, 0x65, 0x71, 0x5f, 0x6f, 0x6e, 0x65, + 0x6f, 0x66, 0x22, 0x9a, 0x03, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, + 0x72, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x31, 0x0a, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, + 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x63, 0x6f, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6b, 0x65, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x37, + 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x0c, 0x70, 0x65, 0x65, 0x72, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0e, 0x6c, 0x6f, 0x63, 0x61, 0x6c, + 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x52, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, + 0x65, 0x6c, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6b, + 0x65, 0x65, 0x70, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4f, 0x70, 0x65, 0x6e, 0x12, 0x49, + 0x0a, 0x11, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x72, 0x70, 0x63, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x52, 0x70, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x52, 0x70, + 0x63, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, + 0x5f, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x46, 0x72, 0x61, 0x6d, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x22, + 0x40, 0x0a, 0x10, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, + 0x73, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x5f, 0x66, 0x72, 0x61, 0x6d, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x46, 0x72, 0x61, + 0x6d, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x6e, + 0x73, 0x75, 0x6d, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 
0x52, 0x0d, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x32, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, + 0x61, 0x6b, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x2a, 0x4a, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x22, 0x0a, 0x1e, 0x48, 0x41, 0x4e, 0x44, 0x53, + 0x48, 0x41, 0x4b, 0x45, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, + 0x4c, 0x53, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x41, 0x4c, 0x54, 0x53, 0x10, 0x02, 0x2a, 0x45, + 0x0a, 0x0f, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, + 0x6c, 0x12, 0x20, 0x0a, 0x1c, 0x4e, 0x45, 0x54, 0x57, 0x4f, 0x52, 0x4b, 0x5f, 0x50, 0x52, 0x4f, + 0x54, 0x4f, 0x43, 0x4f, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x54, 0x43, 0x50, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, + 0x55, 0x44, 0x50, 0x10, 0x02, 0x32, 0x5b, 0x0a, 0x11, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, + 0x6b, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x0b, 0x44, 0x6f, + 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x12, 0x17, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, + 0x65, 0x71, 0x1a, 0x18, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x63, 0x70, 0x2e, 0x48, 0x61, + 0x6e, 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x22, 0x00, 0x28, 0x01, + 0x30, 0x01, 0x42, 0x6b, 0x0a, 0x15, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x61, 0x6c, + 0x74, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x42, 0x0f, 0x48, 0x61, 0x6e, + 0x64, 0x73, 0x68, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, + 0x6c, 0x73, 0x2f, 0x61, 0x6c, 0x74, 0x73, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x67, 0x63, 0x70, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 2549fe8e3..6c7ea6a53 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -57,7 +57,7 @@ var ( // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. 
- GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. @@ -68,11 +68,6 @@ var ( // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. CanonicalString any // func (codes.Code) string - // DrainServerTransports initiates a graceful close of existing connections - // on a gRPC server accepted on the provided listener address. An - // xDS-enabled server invokes this method on a grpc.Server when a particular - // listener moves to "not-serving" mode. - DrainServerTransports any // func(*grpc.Server, string) // IsRegisteredMethod returns whether the passed in method is registered as // a method on the server. IsRegisteredMethod any // func(*grpc.Server, string) bool @@ -188,6 +183,19 @@ var ( ExitIdleModeForTesting any // func(*grpc.ClientConn) error ChannelzTurnOffForTesting func() + + // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found + // error for a given resource type and name. This is usually triggered when + // the associated watch timer fires. For testing purposes, having this + // function makes events more predictable than relying on timer events. + TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error + + // TriggerXDSResourceNotFoundClient invokes the testing xDS Client singleton + // to invoke resource not found for a resource type name and resource name. + TriggerXDSResourceNameNotFoundClient any // func(string, string) error + + // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD. + FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool) ) // HealthChecker defines the signature of the client-side LB channel health checking function. diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 160911687..27cd81af9 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -61,6 +61,10 @@ func (b *builder) Scheme() string { return b.scheme } +func (b *builder) OverrideAuthority(resolver.Target) string { + return "localhost" +} + type nopResolver struct { } diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go similarity index 96% rename from vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go rename to vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go index aeffd3e1c..4f347edd4 100644 --- a/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go @@ -1,4 +1,4 @@ -//go:build !unix +//go:build !unix && !windows /* * Copyright 2023 gRPC authors. diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go new file mode 100644 index 000000000..fd7d43a89 --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go @@ -0,0 +1,54 @@ +//go:build windows + +/* + * Copyright 2023 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/windows" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. +// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. + KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keepalive interval and time parameters. + Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 59f67655a..c33ac5961 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -59,6 +59,8 @@ import ( // atomically. var clientConnectionCounter uint64 +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. @@ -568,7 +570,7 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) } - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { var k string for k, vv := range md { // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. @@ -1323,10 +1325,8 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { for streamID, stream := range t.activeStreams { if streamID > id && streamID <= upperLimit { // The stream was unprocessed by the server.
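For orientation, here is a rough, portable sketch of the dialer shape that the new tcp_keepalive_windows.go above produces. It is illustrative only: the vendored function lives in an internal package, and the SO_KEEPALIVE re-enable via the Control callback is omitted here because the setsockopt constants are platform-specific.

package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

func main() {
	d := &net.Dialer{
		// A negative KeepAlive stops the Go stdlib from enabling or tuning
		// TCP keepalives itself; the vendored dialer then turns keepalives
		// back on at the socket level via a Control callback, so the OS
		// defaults for interval and idle time apply.
		KeepAlive: time.Duration(-1),
	}
	conn, err := d.DialContext(context.Background(), "tcp", "example.org:80")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}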
- if streamID > id && streamID <= upperLimit { - atomic.StoreUint32(&stream.unprocessed, 1) - streamsToClose = append(streamsToClose, stream) - } + atomic.StoreUint32(&stream.unprocessed, 1) + streamsToClose = append(streamsToClose, stream) } } t.mu.Unlock() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 680c9eba0..f6bac0e8a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -960,7 +960,12 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { } } if err := t.writeHeaderLocked(s); err != nil { - return status.Convert(err).Err() + switch e := err.(type) { + case ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + return status.Convert(err).Err() + } } return nil } diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 494468257..1e9485fd6 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -25,8 +25,14 @@ import ( "context" "fmt" "strings" + + "google.golang.org/grpc/internal" ) +func init() { + internal.FromOutgoingContextRaw = fromOutgoingContextRaw +} + // DecodeKeyValue returns k, v, nil. // // Deprecated: use k and v directly instead. @@ -238,16 +244,13 @@ func copyOf(v []string) []string { return vals } -// FromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. +// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD. // // Remember to perform strings.ToLower on the keys, for both the returned MD (MD // is a map, there's no guarantee it's created using our helper functions) and // the extra kv pairs (AppendToOutgoingContext doesn't turn them into // lowercase). -// -// This is intended for gRPC-internal use ONLY. Users should use -// FromOutgoingContext instead. -func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { +func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) if !ok { return nil, nil, false diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index bd1c7d01b..adf89dd9c 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -314,3 +314,13 @@ type Resolver interface { // Close closes the resolver. Close() } + +// AuthorityOverrider is implemented by Builders that wish to override the +// default authority for the ClientConn. +// By default, the authority used is target.Endpoint(). +type AuthorityOverrider interface { + // OverrideAuthority returns the authority to use for a ClientConn with the + // given target. The implementation must generate it without blocking, + // typically inline, and must keep it unchanged. + OverrideAuthority(Target) string +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index b7723aa09..a4b6bc687 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -640,14 +640,18 @@ func encode(c baseCodec, msg any) ([]byte, error) { return b, nil } -// compress returns the input bytes compressed by compressor or cp. If both -// compressors are nil, returns nil.
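To make the new AuthorityOverrider interface concrete: gRPC discovers it via a type assertion on the resolver builder, so implementing the single method is enough, exactly as the unix resolver change earlier in this patch does. A hypothetical builder, sketched against google.golang.org/grpc v1.61.0 (pinnedBuilder is an invented name):

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

// pinnedBuilder embeds resolver.Builder only so this sketch compiles without
// spelling out Build and Scheme; a real builder implements both itself.
type pinnedBuilder struct{ resolver.Builder }

// OverrideAuthority pins the :authority for any ClientConn using this builder,
// mirroring the unix resolver above. It must not block and must be stable.
func (pinnedBuilder) OverrideAuthority(resolver.Target) string { return "localhost" }

func main() {
	var b resolver.Builder = pinnedBuilder{}
	if o, ok := b.(resolver.AuthorityOverrider); ok {
		fmt.Println("authority:", o.OverrideAuthority(resolver.Target{}))
	}
}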
+// compress returns the input bytes compressed by compressor or cp. +// If both compressors are nil, or if the message has zero length, returns nil, +// indicating no compression was done. // // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor. func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) { if compressor == nil && cp == nil { return nil, nil } + if len(in) == 0 { + return nil, nil + } wrapErr := func(err error) error { return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 682fa1831..e89c5ac61 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -74,9 +74,6 @@ func init() { return srv.isRegisteredMethod(method) } internal.ServerFromContext = serverFromContext - internal.DrainServerTransports = func(srv *Server, addr string) { - srv.drainServerTransports(addr) - } internal.AddGlobalServerOptions = func(opt ...ServerOption) { globalServerOptions = append(globalServerOptions, opt...) } @@ -139,7 +136,8 @@ type Server struct { quit *grpcsync.Event done *grpcsync.Event channelzRemoveOnce sync.Once - serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop + handlersWG sync.WaitGroup // counts active method handler goroutines channelzID *channelz.Identifier czData *channelzData @@ -176,6 +174,7 @@ type serverOptions struct { headerTableSize *uint32 numServerWorkers uint32 recvBufferPool SharedBufferPool + waitForHandlers bool } var defaultServerOptions = serverOptions{ @@ -573,6 +572,21 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } +// WaitForHandlers causes Stop to wait until all outstanding method handlers have +// exited before returning. If false, Stop will return as soon as all +// connections have closed, but method handlers may still be running. By +// default, Stop does not wait for method handlers to return. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WaitForHandlers(w bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.waitForHandlers = w + }) +} + // RecvBufferPool returns a ServerOption that configures the server // to use the provided shared buffer pool for parsing incoming messages. Depending // on the application's workload, this could result in reduced memory allocation. @@ -932,6 +946,12 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } + if cc, ok := rawConn.(interface { + PassServerTransport(transport.ServerTransport) + }); ok { + cc.PassServerTransport(st) + } + if !s.addConn(lisAddr, st) { return } @@ -941,15 +961,6 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { }() } -func (s *Server) drainServerTransports(addr string) { - s.mu.Lock() - conns := s.conns[addr] - for st := range conns { - st.Drain("") - } - s.mu.Unlock() -} - // newHTTP2Transport sets up a http/2 transport (using the // gRPC http2 server transport in transport/http2_server.go).
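A short usage sketch for the new WaitForHandlers option (hedged: service registration and traffic are elided, and the option is marked experimental in this release):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	// With WaitForHandlers(true), Stop blocks until in-flight method handlers
	// return, instead of returning as soon as all connections have closed.
	s := grpc.NewServer(grpc.WaitForHandlers(true))
	// ... register services here ...
	go func() {
		if err := s.Serve(lis); err != nil {
			log.Print(err)
		}
	}()
	// ... serve traffic ...
	s.Stop()
}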
func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { @@ -1010,9 +1021,11 @@ func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) st.HandleStreams(ctx, func(stream *transport.Stream) { + s.handlersWG.Add(1) streamQuota.acquire() f := func() { defer streamQuota.release() + defer s.handlersWG.Done() s.handleStream(st, stream) } @@ -1911,6 +1924,10 @@ func (s *Server) stop(graceful bool) { s.serverWorkerChannelClose() } + if graceful || s.opts.waitForHandlers { + s.handlersWG.Wait() + } + if s.events != nil { s.events.Finish() s.events = nil diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index b14b2fbea..d621f52b1 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -48,6 +48,8 @@ import ( "google.golang.org/grpc/status" ) +var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool)) + // StreamHandler defines the handler called by gRPC server to complete the // execution of a streaming RPC. // @@ -184,7 +186,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth // when the RPC completes. opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) - if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok { // validate md if err := imetadata.Validate(md); err != nil { return nil, status.Error(codes.Internal, err.Error()) diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index dc2cea59c..1ad1ba2ad 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.60.1" +const Version = "1.61.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index 896dc38f5..5da38a409 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -88,7 +88,7 @@ not git grep -l 'x/net/context' -- "*.go" git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' # - Do not use "interface{}"; use "any" instead. -git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' +git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc\|grpc_testing_not_regenerate' # - Do not call grpclog directly. Use grpclog.Component instead. git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' @@ -127,7 +127,7 @@ staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" # Exclude underscore checks for generated code. -grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)' +grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)\|\(grpc_testing_not_regenerate\)' # Error for duplicate imports not including grpc protos. 
grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused @@ -152,6 +152,7 @@ grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused XXXXX Protobuf related deprecation errors: "github.com/golang/protobuf .pb.go: +grpc_testing_not_regenerate : ptypes. proto.RegisterType XXXXX gRPC internal usage deprecation errors: @@ -184,9 +185,6 @@ GetSafeRegexMatch GetSuffixMatch GetTlsCertificateCertificateProviderInstance GetValidationContextCertificateProviderInstance -XXXXX TODO: Remove the below deprecation usages: -CloseNotifier -Roots.Subjects XXXXX PleaseIgnoreUnused' echo SUCCESS diff --git a/vendor/modules.txt b/vendor/modules.txt index ad2c33db9..31c1497fa 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,17 +4,17 @@ cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/compute v1.23.3 +# cloud.google.com/go/compute v1.23.4 ## explicit; go 1.19 cloud.google.com/go/compute/internal # cloud.google.com/go/compute/metadata v0.2.3 ## explicit; go 1.19 cloud.google.com/go/compute/metadata -# cloud.google.com/go/iam v1.1.5 +# cloud.google.com/go/iam v1.1.6 ## explicit; go 1.19 cloud.google.com/go/iam cloud.google.com/go/iam/apiv1/iampb -# cloud.google.com/go/storage v1.36.0 +# cloud.google.com/go/storage v1.37.0 ## explicit; go 1.19 cloud.google.com/go/storage cloud.google.com/go/storage/internal @@ -23,6 +23,9 @@ cloud.google.com/go/storage/internal/apiv2/storagepb # github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azcore +github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource +github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/policy +github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/log @@ -39,9 +42,10 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming github.com/Azure/azure-sdk-for-go/sdk/azcore/to github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing -# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 +# github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/azidentity +github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal # github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 ## explicit; go 1.18 github.com/Azure/azure-sdk-for-go/sdk/internal/diag @@ -110,7 +114,7 @@ github.com/VividCortex/ewma # github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 ## explicit; go 1.15 github.com/alecthomas/units -# github.com/aws/aws-sdk-go v1.49.22 +# github.com/aws/aws-sdk-go v1.50.8 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/auth/bearer @@ -186,10 +190,10 @@ github.com/aws/aws-sdk-go-v2/internal/timeconv ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi -# github.com/aws/aws-sdk-go-v2/config v1.26.4 +# github.com/aws/aws-sdk-go-v2/config v1.26.6 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.16.15 +# github.com/aws/aws-sdk-go-v2/credentials v1.16.16 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds @@ -202,7 
+206,7 @@ github.com/aws/aws-sdk-go-v2/credentials/stscreds ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.12 +# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.15 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/feature/s3/manager # github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.10 @@ -211,7 +215,7 @@ github.com/aws/aws-sdk-go-v2/internal/configsources # github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.10 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -# github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 +# github.com/aws/aws-sdk-go-v2/internal/ini v1.7.3 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/internal/ini # github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.10 @@ -233,14 +237,14 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url github.com/aws/aws-sdk-go-v2/service/internal/s3shared github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config -# github.com/aws/aws-sdk-go-v2/service/s3 v1.48.0 +# github.com/aws/aws-sdk-go-v2/service/s3 v1.48.1 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/s3 github.com/aws/aws-sdk-go-v2/service/s3/internal/arn github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints github.com/aws/aws-sdk-go-v2/service/s3/types -# github.com/aws/aws-sdk-go-v2/service/sso v1.18.6 +# github.com/aws/aws-sdk-go-v2/service/sso v1.18.7 ## explicit; go 1.19 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints @@ -291,7 +295,7 @@ github.com/bmatcuk/doublestar/v4 # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/cheggaaa/pb/v3 v3.1.4 +# github.com/cheggaaa/pb/v3 v3.1.5 ## explicit; go 1.17 github.com/cheggaaa/pb/v3 github.com/cheggaaa/pb/v3/termutil @@ -372,7 +376,7 @@ github.com/google/s2a-go/internal/v2/remotesigner github.com/google/s2a-go/internal/v2/tlsconfigstore github.com/google/s2a-go/retry github.com/google/s2a-go/stream -# github.com/google/uuid v1.5.0 +# github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid # github.com/googleapis/enterprise-certificate-proxy v0.3.2 @@ -390,6 +394,9 @@ github.com/googleapis/gax-go/v2/internal ## explicit; go 1.17 github.com/grafana/regexp github.com/grafana/regexp/syntax +# github.com/hashicorp/go-version v1.6.0 +## explicit +github.com/hashicorp/go-version # github.com/influxdata/influxdb v1.11.4 ## explicit; go 1.20 github.com/influxdata/influxdb/client/v2 @@ -404,7 +411,7 @@ github.com/jpillora/backoff # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/klauspost/compress v1.17.4 +# github.com/klauspost/compress v1.17.5 ## explicit; go 1.19 github.com/klauspost/compress github.com/klauspost/compress/flate @@ -415,6 +422,7 @@ github.com/klauspost/compress/gzhttp/writer/gzkp github.com/klauspost/compress/gzip github.com/klauspost/compress/huff0 github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/race github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/s2 github.com/klauspost/compress/zlib @@ -480,7 +488,7 @@ github.com/prometheus/common/sigv4 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# 
github.com/prometheus/prometheus v0.48.1 +# github.com/prometheus/prometheus v0.49.1 ## explicit; go 1.20 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -521,7 +529,7 @@ github.com/prometheus/prometheus/util/osutil github.com/prometheus/prometheus/util/pool github.com/prometheus/prometheus/util/testutil github.com/prometheus/prometheus/util/zeropool -# github.com/rivo/uniseg v0.4.4 +# github.com/rivo/uniseg v0.4.6 ## explicit; go 1.18 github.com/rivo/uniseg # github.com/russross/blackfriday/v2 v2.1.0 @@ -578,6 +586,9 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate +# go.opentelemetry.io/collector/featuregate v1.0.1 +## explicit; go 1.20 +go.opentelemetry.io/collector/featuregate # go.opentelemetry.io/collector/pdata v1.0.1 ## explicit; go 1.20 go.opentelemetry.io/collector/pdata/internal @@ -595,18 +606,18 @@ go.opentelemetry.io/collector/pdata/internal/otlp go.opentelemetry.io/collector/pdata/pcommon go.opentelemetry.io/collector/pdata/pmetric go.opentelemetry.io/collector/pdata/pmetric/pmetricotlp -# go.opentelemetry.io/collector/semconv v0.92.0 +# go.opentelemetry.io/collector/semconv v0.93.0 ## explicit; go 1.20 go.opentelemetry.io/collector/semconv/v1.6.1 -# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 ## explicit; go 1.20 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal -# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 ## explicit; go 1.20 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil -# go.opentelemetry.io/otel v1.21.0 +# go.opentelemetry.io/otel v1.22.0 ## explicit; go 1.20 go.opentelemetry.io/otel go.opentelemetry.io/otel/attribute @@ -618,12 +629,13 @@ go.opentelemetry.io/otel/internal/baggage go.opentelemetry.io/otel/internal/global go.opentelemetry.io/otel/propagation go.opentelemetry.io/otel/semconv/v1.17.0 +go.opentelemetry.io/otel/semconv/v1.20.0 go.opentelemetry.io/otel/semconv/v1.21.0 -# go.opentelemetry.io/otel/metric v1.21.0 +# go.opentelemetry.io/otel/metric v1.22.0 ## explicit; go 1.20 go.opentelemetry.io/otel/metric go.opentelemetry.io/otel/metric/embedded -# go.opentelemetry.io/otel/trace v1.21.0 +# go.opentelemetry.io/otel/trace v1.22.0 ## explicit; go 1.20 go.opentelemetry.io/otel/trace go.opentelemetry.io/otel/trace/embedded @@ -648,7 +660,7 @@ golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 golang.org/x/crypto/pkcs12 golang.org/x/crypto/pkcs12/internal/rc2 -# golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 +# golang.org/x/exp v0.0.0-20240119083558-1b970713d09a ## explicit; go 1.20 golang.org/x/exp/constraints golang.org/x/exp/slices @@ -693,9 +705,7 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.5.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 -## explicit; go 1.18 -# google.golang.org/api v0.156.0 +# google.golang.org/api v0.161.0 ## explicit; go 1.19 google.golang.org/api/googleapi google.golang.org/api/googleapi/transport @@ -725,21 +735,21 @@ google.golang.org/appengine/internal/modules 
google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20240108191215-35c7eff3a6b1 +# google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe ## explicit; go 1.19 google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/internal -# google.golang.org/genproto/googleapis/api v0.0.0-20240108191215-35c7eff3a6b1 +# google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240108191215-35c7eff3a6b1 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240125205218-1f4bbc51befe ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.60.1 +# google.golang.org/grpc v1.61.0 ## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes