lib/protoparser/opentelemetry: use github.com/VictoriaMetrics/easyproto for protobuf message unmarshaling and marshaling
This reduces VictoriaMetrics binary size by 100KB.

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2570
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2424
This commit is contained in:
parent
0597718435
commit
dd25049858
19 changed files with 1030 additions and 7588 deletions
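For orientation, a minimal round-trip sketch of the easyproto-based API introduced by this commit. It uses only the MarshalProtobuf/UnmarshalProtobuf methods defined in lib/protoparser/opentelemetry/pb/pb.go below; the request value and the error handling are illustrative.

    req := &pb.ExportMetricsServiceRequest{
        ResourceMetrics: []*pb.ResourceMetrics{{}},
    }
    // MarshalProtobuf appends the serialized message to the passed buffer.
    buf := req.MarshalProtobuf(nil)

    var parsed pb.ExportMetricsServiceRequest
    // UnmarshalProtobuf resets parsed and decodes the protobuf payload.
    if err := parsed.UnmarshalProtobuf(buf); err != nil {
        // handle malformed OTLP payload
    }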
1 lib/protoparser/opentelemetry/pb/README.md (new file)
@@ -0,0 +1 @@
The original protobuf definition is located at https://github.com/open-telemetry/opentelemetry-proto/tree/34d29fe5ad4689b5db0259d3750de2bfa195bc85/opentelemetry/proto
@@ -1,120 +0,0 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
//	protoc-gen-go v1.28.1
//	protoc        v3.21.12
// source: lib/protoparser/opentelemetry/proto/common.proto

package pb

// AnyValue is used to represent any type of attribute value. AnyValue may contain a
// primitive value such as a string or integer or it may contain an arbitrary nested
// object containing arrays, key-value lists and primitives.
type AnyValue struct {
	unknownFields []byte

	// The value is one of the listed fields. It is valid for all values to be unspecified
	// in which case this AnyValue is considered to be "empty".
	//
	// Types that are assignable to Value:
	//
	//	*AnyValue_StringValue
	//	*AnyValue_BoolValue
	//	*AnyValue_IntValue
	//	*AnyValue_DoubleValue
	//	*AnyValue_ArrayValue
	//	*AnyValue_KvlistValue
	//	*AnyValue_BytesValue
	Value isAnyValue_Value `protobuf_oneof:"value"`
}

type isAnyValue_Value interface {
	isAnyValue_Value()
}

type AnyValue_StringValue struct {
	StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
}

type AnyValue_BoolValue struct {
	BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
}

type AnyValue_IntValue struct {
	IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof"`
}

type AnyValue_DoubleValue struct {
	DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"`
}

type AnyValue_ArrayValue struct {
	ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof"`
}

type AnyValue_KvlistValue struct {
	KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof"`
}

type AnyValue_BytesValue struct {
	BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
}

func (*AnyValue_StringValue) isAnyValue_Value() {}

func (*AnyValue_BoolValue) isAnyValue_Value() {}

func (*AnyValue_IntValue) isAnyValue_Value() {}

func (*AnyValue_DoubleValue) isAnyValue_Value() {}

func (*AnyValue_ArrayValue) isAnyValue_Value() {}

func (*AnyValue_KvlistValue) isAnyValue_Value() {}

func (*AnyValue_BytesValue) isAnyValue_Value() {}

// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
// since oneof in AnyValue does not allow repeated fields.
type ArrayValue struct {
	unknownFields []byte

	// Array of values. The array may be empty (contain 0 elements).
	Values []*AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
}

// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
// are semantically equivalent.
type KeyValueList struct {
	unknownFields []byte

	// A collection of key/value pairs. The list may be empty (may
	// contain 0 elements).
	// The keys MUST be unique (it is not allowed to have more than one
	// value with the same key).
	Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
}

// KeyValue is a key-value pair that is used to store Span attributes, Link
// attributes, etc.
type KeyValue struct {
	unknownFields []byte

	Key   string    `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
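As a reader aid, a minimal sketch of how an attribute was constructed with the oneof wrapper types from this (now deleted) generated code; the literal values are illustrative:

    // Sketch: the attribute foo="bar" using the oneof wrappers above.
    attr := &KeyValue{
        Key:   "foo",
        Value: &AnyValue{Value: &AnyValue_StringValue{StringValue: "bar"}},
    }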
File diff suppressed because it is too large.
@@ -8,32 +8,25 @@ import (
 	"strconv"
 )
 
-// FormatString formats strings
-func (x *AnyValue) FormatString() string {
-	switch v := x.Value.(type) {
-	case *AnyValue_StringValue:
-		return v.StringValue
-
-	case *AnyValue_BoolValue:
-		return strconv.FormatBool(v.BoolValue)
-
-	case *AnyValue_DoubleValue:
-		return float64AsString(v.DoubleValue)
-
-	case *AnyValue_IntValue:
-		return strconv.FormatInt(v.IntValue, 10)
-
-	case *AnyValue_KvlistValue:
-		jsonStr, _ := json.Marshal(v.KvlistValue.Values)
-		return string(jsonStr)
-
-	case *AnyValue_BytesValue:
-		return base64.StdEncoding.EncodeToString(v.BytesValue)
-
-	case *AnyValue_ArrayValue:
-		jsonStr, _ := json.Marshal(v.ArrayValue.Values)
-		return string(jsonStr)
+// FormatString returns string representation for av.
+func (av *AnyValue) FormatString() string {
+	switch {
+	case av.StringValue != nil:
+		return *av.StringValue
+	case av.BoolValue != nil:
+		return strconv.FormatBool(*av.BoolValue)
+	case av.IntValue != nil:
+		return strconv.FormatInt(*av.IntValue, 10)
+	case av.DoubleValue != nil:
+		return float64AsString(*av.DoubleValue)
+	case av.ArrayValue != nil:
+		jsonStr, _ := json.Marshal(av.ArrayValue.Values)
+		return string(jsonStr)
+	case av.KeyValueList != nil:
+		jsonStr, _ := json.Marshal(av.KeyValueList.Values)
+		return string(jsonStr)
+	case av.BytesValue != nil:
+		return base64.StdEncoding.EncodeToString(*av.BytesValue)
 	default:
 		return ""
 	}
 }
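The rewritten type drops the oneof wrapper structs in favor of plain pointer fields, which is why nil checks replace the type switch above. A minimal usage sketch under that assumption (field layout per the new code in pb.go):

    // Sketch: with pointer fields, populating a string value is direct.
    s := "bar"
    av := &AnyValue{StringValue: &s}
    _ = av.FormatString() // returns "bar"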
@@ -1,736 +0,0 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
//	protoc-gen-go v1.28.1
//	protoc        v3.21.12
// source: lib/protoparser/opentelemetry/proto/metrics.proto

package pb

// AggregationTemporality defines how a metric aggregator reports aggregated
// values. It describes how those values relate to the time interval over
// which they are aggregated.
type AggregationTemporality int32

const (
	// UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
	AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0
	// DELTA is an AggregationTemporality for a metric aggregator which reports
	// changes since last report time. Successive metrics contain aggregation of
	// values from continuous and non-overlapping intervals.
	//
	// The values for a DELTA metric are based only on the time interval
	// associated with one measurement cycle. There is no dependency on
	// previous measurements like is the case for CUMULATIVE metrics.
	//
	// For example, consider a system measuring the number of requests that
	// it receives and reports the sum of these requests every second as a
	// DELTA metric:
	//
	//  1. The system starts receiving at time=t_0.
	//  2. A request is received, the system measures 1 request.
	//  3. A request is received, the system measures 1 request.
	//  4. A request is received, the system measures 1 request.
	//  5. The 1 second collection cycle ends. A metric is exported for the
	//     number of requests received over the interval of time t_0 to
	//     t_0+1 with a value of 3.
	//  6. A request is received, the system measures 1 request.
	//  7. A request is received, the system measures 1 request.
	//  8. The 1 second collection cycle ends. A metric is exported for the
	//     number of requests received over the interval of time t_0+1 to
	//     t_0+2 with a value of 2.
	AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1
	// CUMULATIVE is an AggregationTemporality for a metric aggregator which
	// reports changes since a fixed start time. This means that current values
	// of a CUMULATIVE metric depend on all previous measurements since the
	// start time. Because of this, the sender is required to retain this state
	// in some form. If this state is lost or invalidated, the CUMULATIVE metric
	// values MUST be reset and a new fixed start time following the last
	// reported measurement time sent MUST be used.
	//
	// For example, consider a system measuring the number of requests that
	// it receives and reports the sum of these requests every second as a
	// CUMULATIVE metric:
	//
	//  1. The system starts receiving at time=t_0.
	//  2. A request is received, the system measures 1 request.
	//  3. A request is received, the system measures 1 request.
	//  4. A request is received, the system measures 1 request.
	//  5. The 1 second collection cycle ends. A metric is exported for the
	//     number of requests received over the interval of time t_0 to
	//     t_0+1 with a value of 3.
	//  6. A request is received, the system measures 1 request.
	//  7. A request is received, the system measures 1 request.
	//  8. The 1 second collection cycle ends. A metric is exported for the
	//     number of requests received over the interval of time t_0 to
	//     t_0+2 with a value of 5.
	//  9. The system experiences a fault and loses state.
	//  10. The system recovers and resumes receiving at time=t_1.
	//  11. A request is received, the system measures 1 request.
	//  12. The 1 second collection cycle ends. A metric is exported for the
	//      number of requests received over the interval of time t_1 to
	//      t_0+1 with a value of 1.
	//
	// Note: Even though, when reporting changes since last report time, using
	// CUMULATIVE is valid, it is not recommended. This may cause problems for
	// systems that do not use start_time to determine when the aggregation
	// value was reset (e.g. Prometheus).
	AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2
)

// Enum value maps for AggregationTemporality.
var (
	AggregationTemporality_name = map[int32]string{
		0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
		1: "AGGREGATION_TEMPORALITY_DELTA",
		2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
	}
	AggregationTemporality_value = map[string]int32{
		"AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
		"AGGREGATION_TEMPORALITY_DELTA":       1,
		"AGGREGATION_TEMPORALITY_CUMULATIVE":  2,
	}
)

func (x AggregationTemporality) Enum() *AggregationTemporality {
	p := new(AggregationTemporality)
	*p = x
	return p
}

// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
// bit-field representing 32 distinct boolean flags. Each flag defined in this
// enum is a bit-mask. To test the presence of a single flag in the flags of
// a data point, for example, use an expression like:
//
//	(point.flags & FLAG_NO_RECORDED_VALUE) == FLAG_NO_RECORDED_VALUE
type DataPointFlags int32

const (
	DataPointFlags_FLAG_NONE DataPointFlags = 0
	// This DataPoint is valid but has no recorded value. This value
	// SHOULD be used to reflect explicitly missing data in a series, as
	// for an equivalent to the Prometheus "staleness marker".
	DataPointFlags_FLAG_NO_RECORDED_VALUE DataPointFlags = 1
)

// Enum value maps for DataPointFlags.
var (
	DataPointFlags_name = map[int32]string{
		0: "FLAG_NONE",
		1: "FLAG_NO_RECORDED_VALUE",
	}
	DataPointFlags_value = map[string]int32{
		"FLAG_NONE":              0,
		"FLAG_NO_RECORDED_VALUE": 1,
	}
)

func (x DataPointFlags) Enum() *DataPointFlags {
	p := new(DataPointFlags)
	*p = x
	return p
}

// MetricsData represents the metrics data that can be stored in a persistent
// storage, OR can be embedded by other protocols that transfer OTLP metrics
// data but do not implement the OTLP protocol.
//
// The main difference between this message and collector protocol is that
// in this message there will not be any "control" or "metadata" specific to
// OTLP protocol.
//
// When new fields are added into this message, the OTLP request MUST be updated
// as well.
type MetricsData struct {
	unknownFields []byte

	// An array of ResourceMetrics.
	// For data coming from a single resource this array will typically contain
	// one element. Intermediary nodes that receive data from multiple origins
	// typically batch the data before forwarding further and in that case this
	// array will contain multiple elements.
	ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
}

// A collection of ScopeMetrics from a Resource.
type ResourceMetrics struct {
	unknownFields []byte

	// The resource for the metrics in this message.
	// If this field is not set then no resource info is known.
	Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
	// A list of metrics that originate from a resource.
	ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"`
	// This schema_url applies to the data in the "resource" field. It does not apply
	// to the data in the "scope_metrics" field which have their own schema_url field.
	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}

// A collection of Metrics produced by a Scope.
type ScopeMetrics struct {
	unknownFields []byte

	// A list of metrics that originate from an instrumentation library.
	Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
	// This schema_url applies to all metrics in the "metrics" field.
	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}

// Defines a Metric which has one or more timeseries. The following is a
// brief summary of the Metric data model. For more details, see:
//
//	https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
//
// The data model and relation between entities is shown in the
// diagram below. Here, "DataPoint" is the term used to refer to any
// one of the specific data point value types, and "points" is the term used
// to refer to any one of the lists of points contained in the Metric.
//
//   - Metric is composed of a metadata and data.
//
//   - Metadata part contains a name, description, unit.
//
//   - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
//
//   - DataPoint contains timestamps, attributes, and one of the possible value type
//     fields.
//
//	    Metric
//	+------------+
//	|name        |
//	|description |
//	|unit        |     +------------------------------------+
//	|data        |---> |Gauge, Sum, Histogram, Summary, ... |
//	+------------+     +------------------------------------+
//
//	  Data [One of Gauge, Sum, Histogram, Summary, ...]
//	+-----------+
//	|...        |  // Metadata about the Data.
//	|points     |--+
//	+-----------+  |
//	               |      +---------------------------+
//	               |      |DataPoint 1                |
//	               v      |+------+------+   +------+ |
//	            +-----+   ||label |label |...|label | |
//	            |  1  |-->||value1|value2|...|valueN| |
//	            +-----+   |+------+------+   +------+ |
//	            |  .  |   |+-----+                    |
//	            |  .  |   ||value|                    |
//	            |  .  |   |+-----+                    |
//	            |  .  |   +---------------------------+
//	            |  .  |                   .
//	            |  .  |                   .
//	            |  .  |                   .
//	            |  .  |   +---------------------------+
//	            |  .  |   |DataPoint M                |
//	            +-----+   |+------+------+   +------+ |
//	            |  M  |-->||label |label |...|label | |
//	            +-----+   ||value1|value2|...|valueN| |
//	                      |+------+------+   +------+ |
//	                      |+-----+                    |
//	                      ||value|                    |
//	                      |+-----+                    |
//	                      +---------------------------+
//
// Each distinct type of DataPoint represents the output of a specific
// aggregation function, the result of applying the DataPoint's
// associated function to one or more measurements.
//
// All DataPoint types have three common fields:
//   - Attributes includes key-value pairs associated with the data point
//   - TimeUnixNano is required, set to the end time of the aggregation
//   - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
//     having an AggregationTemporality field, as discussed below.
//
// Both TimeUnixNano and StartTimeUnixNano values are expressed as
// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
//
// # TimeUnixNano
//
// This field is required, having consistent interpretation across
// DataPoint types. TimeUnixNano is the moment corresponding to when
// the data point's aggregate value was captured.
//
// Data points with the 0 value for TimeUnixNano SHOULD be rejected
// by consumers.
//
// # StartTimeUnixNano
//
// StartTimeUnixNano in general allows detecting when a sequence of
// observations is unbroken. This field indicates to consumers the
// start time for points with cumulative and delta
// AggregationTemporality, and it should be included whenever possible
// to support correct rate calculation. Although it may be omitted
// when the start time is truly unknown, setting StartTimeUnixNano is
// strongly encouraged.
type Metric struct {
	unknownFields []byte

	// name of the metric, including its DNS name prefix. It must be unique.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// description of the metric, which can be used in documentation.
	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
	// unit in which the metric value is reported. Follows the format
	// described by http://unitsofmeasure.org/ucum.html.
	Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
	// Data determines the aggregation type (if any) of the metric, what is the
	// reported value type for the data points, as well as the relationship to
	// the time interval over which they are reported.
	//
	// Types that are assignable to Data:
	//
	//	*Metric_Gauge
	//	*Metric_Sum
	//	*Metric_Histogram
	//	*Metric_ExponentialHistogram
	//	*Metric_Summary
	Data isMetric_Data `protobuf_oneof:"data"`
}

type isMetric_Data interface {
	isMetric_Data()
}

type Metric_Gauge struct {
	Gauge *Gauge `protobuf:"bytes,5,opt,name=gauge,proto3,oneof"`
}

type Metric_Sum struct {
	Sum *Sum `protobuf:"bytes,7,opt,name=sum,proto3,oneof"`
}

type Metric_Histogram struct {
	Histogram *Histogram `protobuf:"bytes,9,opt,name=histogram,proto3,oneof"`
}

type Metric_ExponentialHistogram struct {
	ExponentialHistogram *ExponentialHistogram `protobuf:"bytes,10,opt,name=exponential_histogram,json=exponentialHistogram,proto3,oneof"`
}

type Metric_Summary struct {
	Summary *Summary `protobuf:"bytes,11,opt,name=summary,proto3,oneof"`
}

func (*Metric_Gauge) isMetric_Data() {}

func (*Metric_Sum) isMetric_Data() {}

func (*Metric_Histogram) isMetric_Data() {}

func (*Metric_ExponentialHistogram) isMetric_Data() {}

func (*Metric_Summary) isMetric_Data() {}

// Gauge represents the type of a scalar metric that always exports the
// "current value" for every data point. It should be used for an "unknown"
// aggregation.
//
// A Gauge does not support different aggregation temporalities. Given the
// aggregation is unknown, points cannot be combined using the same
// aggregation, regardless of aggregation temporalities. Therefore,
// AggregationTemporality is not included. Consequently, this also means
// "StartTimeUnixNano" is ignored for all data points.
type Gauge struct {
	unknownFields []byte

	DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
}

// Sum represents the type of a scalar metric that is calculated as a sum of all
// reported measurements over a time interval.
type Sum struct {
	unknownFields []byte

	DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
	// aggregation_temporality describes if the aggregator reports delta changes
	// since last report time, or cumulative changes since a fixed start time.
	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.AggregationTemporality" json:"aggregation_temporality,omitempty"`
	// If "true" means that the sum is monotonic.
	IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"`
}

// Histogram represents the type of a metric that is calculated by aggregating
// as a Histogram of all reported measurements over a time interval.
type Histogram struct {
	unknownFields []byte

	DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
	// aggregation_temporality describes if the aggregator reports delta changes
	// since last report time, or cumulative changes since a fixed start time.
	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.AggregationTemporality" json:"aggregation_temporality,omitempty"`
}

// ExponentialHistogram represents the type of a metric that is calculated by aggregating
// as an ExponentialHistogram of all reported double measurements over a time interval.
type ExponentialHistogram struct {
	unknownFields []byte

	DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
	// aggregation_temporality describes if the aggregator reports delta changes
	// since last report time, or cumulative changes since a fixed start time.
	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.AggregationTemporality" json:"aggregation_temporality,omitempty"`
}

// Summary metric data are used to convey quantile summaries,
// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
// data type. These data points cannot always be merged in a meaningful way.
// While they can be useful in some applications, histogram data points are
// recommended for new applications.
type Summary struct {
	unknownFields []byte

	DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
}

// NumberDataPoint is a single data point in a timeseries that describes the
// time-varying scalar value of a metric.
type NumberDataPoint struct {
	unknownFields []byte

	// The set of key/value pairs that uniquely identify the timeseries from
	// where this point belongs. The list may be empty (may contain 0 elements).
	// Attribute keys MUST be unique (it is not allowed to have more than one
	// attribute with the same key).
	Attributes []*KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
	// StartTimeUnixNano is optional but strongly encouraged, see the
	// detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
	// TimeUnixNano is required, see the detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
	// The value itself. A point is considered invalid when one of the recognized
	// value fields is not present inside this oneof.
	//
	// Types that are assignable to Value:
	//
	//	*NumberDataPoint_AsDouble
	//	*NumberDataPoint_AsInt
	Value isNumberDataPoint_Value `protobuf_oneof:"value"`
	// (Optional) List of exemplars collected from
	// measurements that were used to form the data point
	Exemplars []*Exemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
	// Flags that apply to this specific data point. See DataPointFlags
	// for the available flags and their meaning.
	Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
}

type isNumberDataPoint_Value interface {
	isNumberDataPoint_Value()
}

type NumberDataPoint_AsDouble struct {
	AsDouble float64 `protobuf:"fixed64,4,opt,name=as_double,json=asDouble,proto3,oneof"`
}

type NumberDataPoint_AsInt struct {
	AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"`
}

func (*NumberDataPoint_AsDouble) isNumberDataPoint_Value() {}

func (*NumberDataPoint_AsInt) isNumberDataPoint_Value() {}

// HistogramDataPoint is a single data point in a timeseries that describes the
// time-varying values of a Histogram. A Histogram contains summary statistics
// for a population of values, it may optionally contain the distribution of
// those values across a set of buckets.
//
// If the histogram contains the distribution of values, then both
// "explicit_bounds" and "bucket counts" fields must be defined.
// If the histogram does not contain the distribution of values, then both
// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
// "sum" are known.
type HistogramDataPoint struct {
	unknownFields []byte

	// The set of key/value pairs that uniquely identify the timeseries from
	// where this point belongs. The list may be empty (may contain 0 elements).
	// Attribute keys MUST be unique (it is not allowed to have more than one
	// attribute with the same key).
	Attributes []*KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"`
	// StartTimeUnixNano is optional but strongly encouraged, see the
	// detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
	// TimeUnixNano is required, see the detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
	// count is the number of values in the population. Must be non-negative. This
	// value must be equal to the sum of the "count" fields in buckets if a
	// histogram is provided.
	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
	// sum of the values in the population. If count is zero then this field
	// must be zero.
	//
	// Note: Sum should only be filled out when measuring non-negative discrete
	// events, and is assumed to be monotonic over the values of these events.
	// Negative events *can* be recorded, but sum should not be filled out when
	// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
	// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
	Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
	// bucket_counts is an optional field that contains the count values of the
	// histogram for each bucket.
	//
	// The sum of the bucket_counts must equal the value in the count field.
	//
	// The number of elements in the bucket_counts array must be one greater than
	// the number of elements in the explicit_bounds array.
	BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
	// explicit_bounds specifies buckets with explicitly defined bounds for values.
	//
	// The boundaries for bucket at index i are:
	//
	//	(-infinity, explicit_bounds[i]] for i == 0
	//	(explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
	//	(explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
	//
	// The values in the explicit_bounds array must be strictly increasing.
	//
	// Histogram buckets are inclusive of their upper boundary, except the last
	// bucket where the boundary is at infinity. This format is intentionally
	// compatible with the OpenMetrics histogram definition.
	ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"`
	// (Optional) List of exemplars collected from
	// measurements that were used to form the data point
	Exemplars []*Exemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
	// Flags that apply to this specific data point. See DataPointFlags
	// for the available flags and their meaning.
	Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
	// min is the minimum value over (start_time, end_time].
	Min *float64 `protobuf:"fixed64,11,opt,name=min,proto3,oneof" json:"min,omitempty"`
	// max is the maximum value over (start_time, end_time].
	Max *float64 `protobuf:"fixed64,12,opt,name=max,proto3,oneof" json:"max,omitempty"`
}

// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram
// contains summary statistics for a population of values, it may optionally contain the
// distribution of those values across a set of buckets.
type ExponentialHistogramDataPoint struct {
	unknownFields []byte

	// The set of key/value pairs that uniquely identify the timeseries from
	// where this point belongs. The list may be empty (may contain 0 elements).
	// Attribute keys MUST be unique (it is not allowed to have more than one
	// attribute with the same key).
	Attributes []*KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
	// StartTimeUnixNano is optional but strongly encouraged, see the
	// detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
	// TimeUnixNano is required, see the detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
	// count is the number of values in the population. Must be
	// non-negative. This value must be equal to the sum of the "bucket_counts"
	// values in the positive and negative Buckets plus the "zero_count" field.
	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
	// sum of the values in the population. If count is zero then this field
	// must be zero.
	//
	// Note: Sum should only be filled out when measuring non-negative discrete
	// events, and is assumed to be monotonic over the values of these events.
	// Negative events *can* be recorded, but sum should not be filled out when
	// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
	// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
	Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
	// scale describes the resolution of the histogram. Boundaries are
	// located at powers of the base, where:
	//
	//	base = (2^(2^-scale))
	//
	// The histogram bucket identified by `index`, a signed integer,
	// contains values that are greater than (base^index) and
	// less than or equal to (base^(index+1)).
	//
	// The positive and negative ranges of the histogram are expressed
	// separately. Negative values are mapped by their absolute value
	// into the negative range using the same scale as the positive range.
	//
	// scale is not restricted by the protocol, as the permissible
	// values depend on the range of the data.
	Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"`
	// zero_count is the count of values that are either exactly zero or
	// within the region considered zero by the instrumentation at the
	// tolerated degree of precision. This bucket stores values that
	// cannot be expressed using the standard exponential formula as
	// well as values that have been rounded to zero.
	//
	// Implementations MAY consider the zero bucket to have probability
	// mass equal to (zero_count / count).
	ZeroCount uint64 `protobuf:"fixed64,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"`
	// positive carries the positive range of exponential bucket counts.
	Positive *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,8,opt,name=positive,proto3" json:"positive,omitempty"`
	// negative carries the negative range of exponential bucket counts.
	Negative *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,9,opt,name=negative,proto3" json:"negative,omitempty"`
	// Flags that apply to this specific data point. See DataPointFlags
	// for the available flags and their meaning.
	Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
	// (Optional) List of exemplars collected from
	// measurements that were used to form the data point
	Exemplars []*Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
	// min is the minimum value over (start_time, end_time].
	Min *float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"`
	// max is the maximum value over (start_time, end_time].
	Max *float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"`
}

// SummaryDataPoint is a single data point in a timeseries that describes the
// time-varying values of a Summary metric.
type SummaryDataPoint struct {
	unknownFields []byte

	// The set of key/value pairs that uniquely identify the timeseries from
	// where this point belongs. The list may be empty (may contain 0 elements).
	// Attribute keys MUST be unique (it is not allowed to have more than one
	// attribute with the same key).
	Attributes []*KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
	// StartTimeUnixNano is optional but strongly encouraged, see the
	// detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
	// TimeUnixNano is required, see the detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
	// count is the number of values in the population. Must be non-negative.
	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
	// sum of the values in the population. If count is zero then this field
	// must be zero.
	//
	// Note: Sum should only be filled out when measuring non-negative discrete
	// events, and is assumed to be monotonic over the values of these events.
	// Negative events *can* be recorded, but sum should not be filled out when
	// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
	// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
	Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"`
	// (Optional) list of values at different quantiles of the distribution calculated
	// from the current snapshot. The quantiles must be strictly increasing.
	QuantileValues []*SummaryDataPoint_ValueAtQuantile `protobuf:"bytes,6,rep,name=quantile_values,json=quantileValues,proto3" json:"quantile_values,omitempty"`
	// Flags that apply to this specific data point. See DataPointFlags
	// for the available flags and their meaning.
	Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
}

// A representation of an exemplar, which is a sample input measurement.
// Exemplars also hold information about the environment when the measurement
// was recorded, for example the span and trace ID of the active span when the
// exemplar was recorded.
type Exemplar struct {
	unknownFields []byte

	// The set of key/value pairs that were filtered out by the aggregator, but
	// recorded alongside the original measurement. Only key/value pairs that were
	// filtered out by the aggregator should be included
	FilteredAttributes []*KeyValue `protobuf:"bytes,7,rep,name=filtered_attributes,json=filteredAttributes,proto3" json:"filtered_attributes,omitempty"`
	// time_unix_nano is the exact time when this exemplar was recorded
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
	// The value of the measurement that was recorded. An exemplar is
	// considered invalid when one of the recognized value fields is not present
	// inside this oneof.
	//
	// Types that are assignable to Value:
	//
	//	*Exemplar_AsDouble
	//	*Exemplar_AsInt
	Value isExemplar_Value `protobuf_oneof:"value"`
	// (Optional) Span ID of the exemplar trace.
	// span_id may be missing if the measurement is not recorded inside a trace
	// or if the trace is not sampled.
	SpanId []byte `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
	// (Optional) Trace ID of the exemplar trace.
	// trace_id may be missing if the measurement is not recorded inside a trace
	// or if the trace is not sampled.
	TraceId []byte `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
}

type isExemplar_Value interface {
	isExemplar_Value()
}

type Exemplar_AsDouble struct {
	AsDouble float64 `protobuf:"fixed64,3,opt,name=as_double,json=asDouble,proto3,oneof"`
}

type Exemplar_AsInt struct {
	AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"`
}

func (*Exemplar_AsDouble) isExemplar_Value() {}

func (*Exemplar_AsInt) isExemplar_Value() {}

// Buckets are a set of bucket counts, encoded in a contiguous array
// of counts.
type ExponentialHistogramDataPoint_Buckets struct {
	unknownFields []byte

	// Offset is the bucket index of the first entry in the bucket_counts array.
	//
	// Note: This uses a varint encoding as a simple form of compression.
	Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
	// Count is an array of counts, where count[i] carries the count
	// of the bucket at index (offset+i). count[i] is the count of
	// values greater than base^(offset+i) and less than or equal to
	// base^(offset+i+1).
	//
	// Note: By contrast, the explicit HistogramDataPoint uses
	// fixed64. This field is expected to have many buckets,
	// especially zeros, so uint64 has been selected to ensure
	// varint encoding.
	BucketCounts []uint64 `protobuf:"varint,2,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
}

// Represents the value at a given quantile of a distribution.
//
// To record Min and Max values, the following conventions are used:
//   - The 1.0 quantile is equivalent to the maximum value observed.
//   - The 0.0 quantile is equivalent to the minimum value observed.
//
// See the following issue for more context:
// https://github.com/open-telemetry/opentelemetry-proto/issues/125
type SummaryDataPoint_ValueAtQuantile struct {
	unknownFields []byte

	// The quantile of a distribution. Must be in the interval
	// [0.0, 1.0].
	Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"`
	// The value at the given quantile of a distribution.
	//
	// Quantile values must NOT be negative.
	Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
}
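A short worked example of the scale/base relationship documented on ExponentialHistogramDataPoint above; the chosen numbers are illustrative:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        // base = 2^(2^-scale); bucket `index` covers (base^index, base^(index+1)].
        scale := 3
        base := math.Pow(2, math.Pow(2, -float64(scale))) // ≈ 1.0905
        index := 8
        lower := math.Pow(base, float64(index))   // 2^(8/8) = 2.0
        upper := math.Pow(base, float64(index+1)) // 2^(9/8) ≈ 2.1810
        fmt.Printf("scale=%d base=%.4f bucket %d covers (%.4f, %.4f]\n",
            scale, base, index, lower, upper)
    }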
@@ -1,32 +0,0 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
//	protoc-gen-go v1.28.1
//	protoc        v3.21.12
// source: lib/protoparser/opentelemetry/proto/metrics_service.proto

package pb

type ExportMetricsServiceRequest struct {
	unknownFields []byte

	// An array of ResourceMetrics.
	// For data coming from a single resource this array will typically contain one
	// element. Intermediary nodes (such as OpenTelemetry Collector) that receive
	// data from multiple origins typically batch the data before forwarding further and
	// in that case this array will contain multiple elements.
	ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
}
@@ -1,157 +0,0 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
// protoc-gen-go-vtproto version: v0.4.0
// source: lib/protoparser/opentelemetry/proto/metrics_service.proto

package pb

import (
	fmt "fmt"
	io "io"
)

func (m *ExportMetricsServiceRequest) MarshalVT() (dAtA []byte, err error) {
	if m == nil {
		return nil, nil
	}
	size := m.SizeVT()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *ExportMetricsServiceRequest) MarshalToVT(dAtA []byte) (int, error) {
	size := m.SizeVT()
	return m.MarshalToSizedBufferVT(dAtA[:size])
}

func (m *ExportMetricsServiceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
	if m == nil {
		return 0, nil
	}
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.unknownFields != nil {
		i -= len(m.unknownFields)
		copy(dAtA[i:], m.unknownFields)
	}
	if len(m.ResourceMetrics) > 0 {
		for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- {
			size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarint(dAtA, i, uint64(size))
			i--
			dAtA[i] = 0xa
		}
	}
	return len(dAtA) - i, nil
}

func (m *ExportMetricsServiceRequest) SizeVT() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.ResourceMetrics) > 0 {
		for _, e := range m.ResourceMetrics {
			l = e.SizeVT()
			n += 1 + l + sov(uint64(l))
		}
	}
	n += len(m.unknownFields)
	return n
}

func (m *ExportMetricsServiceRequest) UnmarshalVT(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflow
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ExportMetricsServiceRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ExportMetricsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflow
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLength
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLength
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ResourceMetrics = append(m.ResourceMetrics, &ResourceMetrics{})
			if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skip(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLength
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
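For context on the hand-rolled loops in the deleted UnmarshalVT above, a self-contained sketch of protobuf base-128 varint decoding, which is the same wire-level scheme those loops implement (the helper name is hypothetical):

    // decodeVarint reads a protobuf base-128 varint from b and returns
    // the decoded value plus the number of bytes consumed (0 on truncation).
    func decodeVarint(b []byte) (v uint64, n int) {
        for shift := uint(0); n < len(b); shift += 7 {
            c := b[n]
            n++
            v |= uint64(c&0x7F) << shift
            if c < 0x80 { // high bit clear marks the last byte
                return v, n
            }
        }
        return 0, 0
    }

    // Example: 300 is encoded as 0xAC 0x02, so
    // decodeVarint([]byte{0xAC, 0x02}) returns (300, 2).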
File diff suppressed because it is too large.
976
lib/protoparser/opentelemetry/pb/pb.go
Normal file
976
lib/protoparser/opentelemetry/pb/pb.go
Normal file
|
@ -0,0 +1,976 @@
|
||||||
|
package pb

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/VictoriaMetrics/easyproto"
)

// ExportMetricsServiceRequest represents the corresponding OTEL protobuf message
type ExportMetricsServiceRequest struct {
	ResourceMetrics []*ResourceMetrics
}

// UnmarshalProtobuf unmarshals r from protobuf message at src.
func (r *ExportMetricsServiceRequest) UnmarshalProtobuf(src []byte) error {
	r.ResourceMetrics = nil
	return r.unmarshalProtobuf(src)
}

// MarshalProtobuf marshals r to protobuf message, appends it to dst and returns the result.
func (r *ExportMetricsServiceRequest) MarshalProtobuf(dst []byte) []byte {
	m := mp.Get()
	r.marshalProtobuf(m.MessageMarshaler())
	dst = m.Marshal(dst)
	mp.Put(m)
	return dst
}

// mp is a pool of easyproto marshalers shared by MarshalProtobuf calls
// in order to reduce memory allocations.
var mp easyproto.MarshalerPool

func (r *ExportMetricsServiceRequest) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	for _, rm := range r.ResourceMetrics {
		rm.marshalProtobuf(mm.AppendMessage(1))
	}
}

func (r *ExportMetricsServiceRequest) unmarshalProtobuf(src []byte) (err error) {
	// message ExportMetricsServiceRequest {
	//   repeated ResourceMetrics resource_metrics = 1;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in ExportMetricsServiceRequest: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read ResourceMetrics data")
			}
			r.ResourceMetrics = append(r.ResourceMetrics, &ResourceMetrics{})
			rm := r.ResourceMetrics[len(r.ResourceMetrics)-1]
			if err := rm.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal ResourceMetrics: %w", err)
			}
		}
	}
	return nil
}

// ResourceMetrics represents the corresponding OTEL protobuf message
type ResourceMetrics struct {
	Resource     *Resource
	ScopeMetrics []*ScopeMetrics
}

func (rm *ResourceMetrics) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	if rm.Resource != nil {
		rm.Resource.marshalProtobuf(mm.AppendMessage(1))
	}
	for _, sm := range rm.ScopeMetrics {
		sm.marshalProtobuf(mm.AppendMessage(2))
	}
}

func (rm *ResourceMetrics) unmarshalProtobuf(src []byte) (err error) {
	// message ResourceMetrics {
	//   Resource resource = 1;
	//   repeated ScopeMetrics scope_metrics = 2;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in ResourceMetrics: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read Resource data")
			}
			rm.Resource = &Resource{}
			if err := rm.Resource.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal Resource: %w", err)
			}
		case 2:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read ScopeMetrics data")
			}
			rm.ScopeMetrics = append(rm.ScopeMetrics, &ScopeMetrics{})
			sm := rm.ScopeMetrics[len(rm.ScopeMetrics)-1]
			if err := sm.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal ScopeMetrics: %w", err)
			}
		}
	}
	return nil
}

// Resource represents the corresponding OTEL protobuf message
type Resource struct {
	Attributes []*KeyValue
}

func (r *Resource) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	for _, a := range r.Attributes {
		a.marshalProtobuf(mm.AppendMessage(1))
	}
}

func (r *Resource) unmarshalProtobuf(src []byte) (err error) {
	// message Resource {
	//   repeated KeyValue attributes = 1;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in Resource: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read Attribute data")
			}
			r.Attributes = append(r.Attributes, &KeyValue{})
			a := r.Attributes[len(r.Attributes)-1]
			if err := a.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal Attribute: %w", err)
			}
		}
	}
	return nil
}

// ScopeMetrics represents the corresponding OTEL protobuf message
type ScopeMetrics struct {
	Metrics []*Metric
}

func (sm *ScopeMetrics) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	for _, m := range sm.Metrics {
		m.marshalProtobuf(mm.AppendMessage(2))
	}
}

func (sm *ScopeMetrics) unmarshalProtobuf(src []byte) (err error) {
	// message ScopeMetrics {
	//   repeated Metric metrics = 2;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in ScopeMetrics: %w", err)
		}
		switch fc.FieldNum {
		case 2:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read Metric data")
			}
			sm.Metrics = append(sm.Metrics, &Metric{})
			m := sm.Metrics[len(sm.Metrics)-1]
			if err := m.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal Metric: %w", err)
			}
		}
	}
	return nil
}

// Metric represents the corresponding OTEL protobuf message
type Metric struct {
	Name      string
	Gauge     *Gauge
	Sum       *Sum
	Histogram *Histogram
	Summary   *Summary
}

func (m *Metric) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	mm.AppendString(1, m.Name)
	switch {
	case m.Gauge != nil:
		m.Gauge.marshalProtobuf(mm.AppendMessage(5))
	case m.Sum != nil:
		m.Sum.marshalProtobuf(mm.AppendMessage(7))
	case m.Histogram != nil:
		m.Histogram.marshalProtobuf(mm.AppendMessage(9))
	case m.Summary != nil:
		m.Summary.marshalProtobuf(mm.AppendMessage(11))
	}
}

func (m *Metric) unmarshalProtobuf(src []byte) (err error) {
	// message Metric {
	//   string name = 1;
	//   oneof data {
	//     Gauge gauge = 5;
	//     Sum sum = 7;
	//     Histogram histogram = 9;
	//     Summary summary = 11;
	//   }
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in Metric: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			name, ok := fc.String()
			if !ok {
				return fmt.Errorf("cannot read metric name")
			}
			m.Name = strings.Clone(name)
		case 5:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read Gauge data")
			}
			m.Gauge = &Gauge{}
			if err := m.Gauge.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal Gauge: %w", err)
			}
		case 7:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read Sum data")
			}
			m.Sum = &Sum{}
			if err := m.Sum.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal Sum: %w", err)
			}
		case 9:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read Histogram data")
			}
			m.Histogram = &Histogram{}
			if err := m.Histogram.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal Histogram: %w", err)
			}
		case 11:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read Summary data")
			}
			m.Summary = &Summary{}
			if err := m.Summary.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal Summary: %w", err)
			}
		}
	}
	return nil
}

// KeyValue represents the corresponding OTEL protobuf message
type KeyValue struct {
	Key   string
	Value *AnyValue
}

func (kv *KeyValue) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	mm.AppendString(1, kv.Key)
	if kv.Value != nil {
		kv.Value.marshalProtobuf(mm.AppendMessage(2))
	}
}

func (kv *KeyValue) unmarshalProtobuf(src []byte) (err error) {
	// message KeyValue {
	//   string key = 1;
	//   AnyValue value = 2;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in KeyValue: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			key, ok := fc.String()
			if !ok {
				return fmt.Errorf("cannot read Key")
			}
			kv.Key = strings.Clone(key)
		case 2:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read Value")
			}
			kv.Value = &AnyValue{}
			if err := kv.Value.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal Value: %w", err)
			}
		}
	}
	return nil
}

// AnyValue represents the corresponding OTEL protobuf message
type AnyValue struct {
	StringValue  *string
	BoolValue    *bool
	IntValue     *int64
	DoubleValue  *float64
	ArrayValue   *ArrayValue
	KeyValueList *KeyValueList
	BytesValue   *[]byte
}

func (av *AnyValue) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	switch {
	case av.StringValue != nil:
		mm.AppendString(1, *av.StringValue)
	case av.BoolValue != nil:
		mm.AppendBool(2, *av.BoolValue)
	case av.IntValue != nil:
		mm.AppendInt64(3, *av.IntValue)
	case av.DoubleValue != nil:
		mm.AppendDouble(4, *av.DoubleValue)
	case av.ArrayValue != nil:
		av.ArrayValue.marshalProtobuf(mm.AppendMessage(5))
	case av.KeyValueList != nil:
		av.KeyValueList.marshalProtobuf(mm.AppendMessage(6))
	case av.BytesValue != nil:
		mm.AppendBytes(7, *av.BytesValue)
	}
}

func (av *AnyValue) unmarshalProtobuf(src []byte) (err error) {
	// message AnyValue {
	//   oneof value {
	//     string string_value = 1;
	//     bool bool_value = 2;
	//     int64 int_value = 3;
	//     double double_value = 4;
	//     ArrayValue array_value = 5;
	//     KeyValueList kvlist_value = 6;
	//     bytes bytes_value = 7;
	//   }
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in AnyValue: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			stringValue, ok := fc.String()
			if !ok {
				return fmt.Errorf("cannot read StringValue")
			}
			stringValue = strings.Clone(stringValue)
			av.StringValue = &stringValue
		case 2:
			boolValue, ok := fc.Bool()
			if !ok {
				return fmt.Errorf("cannot read BoolValue")
			}
			av.BoolValue = &boolValue
		case 3:
			intValue, ok := fc.Int64()
			if !ok {
				return fmt.Errorf("cannot read IntValue")
			}
			av.IntValue = &intValue
		case 4:
			doubleValue, ok := fc.Double()
			if !ok {
				return fmt.Errorf("cannot read DoubleValue")
			}
			av.DoubleValue = &doubleValue
		case 5:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read ArrayValue")
			}
			av.ArrayValue = &ArrayValue{}
			if err := av.ArrayValue.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal ArrayValue: %w", err)
			}
		case 6:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read KeyValueList")
			}
			av.KeyValueList = &KeyValueList{}
			if err := av.KeyValueList.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal KeyValueList: %w", err)
			}
		case 7:
			bytesValue, ok := fc.Bytes()
			if !ok {
				return fmt.Errorf("cannot read BytesValue")
			}
			bytesValue = bytes.Clone(bytesValue)
			av.BytesValue = &bytesValue
		}
	}
	return nil
}

// ArrayValue represents the corresponding OTEL protobuf message
type ArrayValue struct {
	Values []*AnyValue
}

func (av *ArrayValue) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	for _, v := range av.Values {
		v.marshalProtobuf(mm.AppendMessage(1))
	}
}

func (av *ArrayValue) unmarshalProtobuf(src []byte) (err error) {
	// message ArrayValue {
	//   repeated AnyValue values = 1;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in ArrayValue: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read Value data")
			}
			av.Values = append(av.Values, &AnyValue{})
			v := av.Values[len(av.Values)-1]
			if err := v.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal Value: %w", err)
			}
		}
	}
	return nil
}

// KeyValueList represents the corresponding OTEL protobuf message
type KeyValueList struct {
	Values []*KeyValue
}

func (kvl *KeyValueList) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	for _, v := range kvl.Values {
		v.marshalProtobuf(mm.AppendMessage(1))
	}
}

func (kvl *KeyValueList) unmarshalProtobuf(src []byte) (err error) {
	// message KeyValueList {
	//   repeated KeyValue values = 1;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in KeyValueList: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read Value data")
			}
			kvl.Values = append(kvl.Values, &KeyValue{})
			v := kvl.Values[len(kvl.Values)-1]
			if err := v.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal Value: %w", err)
			}
		}
	}
	return nil
}

// Gauge represents the corresponding OTEL protobuf message
type Gauge struct {
	DataPoints []*NumberDataPoint
}

func (g *Gauge) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	for _, dp := range g.DataPoints {
		dp.marshalProtobuf(mm.AppendMessage(1))
	}
}

func (g *Gauge) unmarshalProtobuf(src []byte) (err error) {
	// message Gauge {
	//   repeated NumberDataPoint data_points = 1;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in Gauge: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read DataPoint data")
			}
			g.DataPoints = append(g.DataPoints, &NumberDataPoint{})
			dp := g.DataPoints[len(g.DataPoints)-1]
			if err := dp.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal DataPoint: %w", err)
			}
		}
	}
	return nil
}

// NumberDataPoint represents the corresponding OTEL protobuf message
type NumberDataPoint struct {
	Attributes   []*KeyValue
	TimeUnixNano uint64
	DoubleValue  *float64
	IntValue     *int64
	Flags        uint32
}

func (ndp *NumberDataPoint) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	for _, a := range ndp.Attributes {
		a.marshalProtobuf(mm.AppendMessage(7))
	}
	mm.AppendFixed64(3, ndp.TimeUnixNano)
	switch {
	case ndp.DoubleValue != nil:
		mm.AppendDouble(4, *ndp.DoubleValue)
	case ndp.IntValue != nil:
		mm.AppendSfixed64(6, *ndp.IntValue)
	}
	mm.AppendUint32(8, ndp.Flags)
}

func (ndp *NumberDataPoint) unmarshalProtobuf(src []byte) (err error) {
	// message NumberDataPoint {
	//   repeated KeyValue attributes = 7;
	//   fixed64 time_unix_nano = 3;
	//   oneof value {
	//     double as_double = 4;
	//     sfixed64 as_int = 6;
	//   }
	//   uint32 flags = 8;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in NumberDataPoint: %w", err)
		}
		switch fc.FieldNum {
		case 7:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read Attribute")
			}
			ndp.Attributes = append(ndp.Attributes, &KeyValue{})
			a := ndp.Attributes[len(ndp.Attributes)-1]
			if err := a.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal Attribute: %w", err)
			}
		case 3:
			timeUnixNano, ok := fc.Fixed64()
			if !ok {
				return fmt.Errorf("cannot read TimeUnixNano")
			}
			ndp.TimeUnixNano = timeUnixNano
		case 4:
			doubleValue, ok := fc.Double()
			if !ok {
				return fmt.Errorf("cannot read DoubleValue")
			}
			ndp.DoubleValue = &doubleValue
		case 6:
			intValue, ok := fc.Sfixed64()
			if !ok {
				return fmt.Errorf("cannot read IntValue")
			}
			ndp.IntValue = &intValue
		case 8:
			flags, ok := fc.Uint32()
			if !ok {
				return fmt.Errorf("cannot read Flags")
			}
			ndp.Flags = flags
		}
	}
	return nil
}

// Sum represents the corresponding OTEL protobuf message
type Sum struct {
	DataPoints             []*NumberDataPoint
	AggregationTemporality AggregationTemporality
}

// AggregationTemporality represents the corresponding OTEL protobuf enum
type AggregationTemporality int

const (
	// AggregationTemporalityUnspecified is enum value for AggregationTemporality
	AggregationTemporalityUnspecified = AggregationTemporality(0)
	// AggregationTemporalityDelta is enum value for AggregationTemporality
	AggregationTemporalityDelta = AggregationTemporality(1)
	// AggregationTemporalityCumulative is enum value for AggregationTemporality
	AggregationTemporalityCumulative = AggregationTemporality(2)
)

func (s *Sum) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	for _, dp := range s.DataPoints {
		dp.marshalProtobuf(mm.AppendMessage(1))
	}
	mm.AppendInt64(2, int64(s.AggregationTemporality))
}

func (s *Sum) unmarshalProtobuf(src []byte) (err error) {
	// message Sum {
	//   repeated NumberDataPoint data_points = 1;
	//   AggregationTemporality aggregation_temporality = 2;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in Sum: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read DataPoint data")
			}
			s.DataPoints = append(s.DataPoints, &NumberDataPoint{})
			dp := s.DataPoints[len(s.DataPoints)-1]
			if err := dp.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal DataPoint: %w", err)
			}
		case 2:
			at, ok := fc.Int64()
			if !ok {
				return fmt.Errorf("cannot read AggregationTemporality")
			}
			s.AggregationTemporality = AggregationTemporality(at)
		}
	}
	return nil
}

// Histogram represents the corresponding OTEL protobuf message
type Histogram struct {
	DataPoints             []*HistogramDataPoint
	AggregationTemporality AggregationTemporality
}

func (h *Histogram) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	for _, dp := range h.DataPoints {
		dp.marshalProtobuf(mm.AppendMessage(1))
	}
	mm.AppendInt64(2, int64(h.AggregationTemporality))
}

func (h *Histogram) unmarshalProtobuf(src []byte) (err error) {
	// message Histogram {
	//   repeated HistogramDataPoint data_points = 1;
	//   AggregationTemporality aggregation_temporality = 2;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in Histogram: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read DataPoint")
			}
			h.DataPoints = append(h.DataPoints, &HistogramDataPoint{})
			dp := h.DataPoints[len(h.DataPoints)-1]
			if err := dp.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal DataPoint: %w", err)
			}
		case 2:
			at, ok := fc.Int64()
			if !ok {
				return fmt.Errorf("cannot read AggregationTemporality")
			}
			h.AggregationTemporality = AggregationTemporality(at)
		}
	}
	return nil
}

// Summary represents the corresponding OTEL protobuf message
type Summary struct {
	DataPoints []*SummaryDataPoint
}

func (s *Summary) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	for _, dp := range s.DataPoints {
		dp.marshalProtobuf(mm.AppendMessage(1))
	}
}

func (s *Summary) unmarshalProtobuf(src []byte) (err error) {
	// message Summary {
	//   repeated SummaryDataPoint data_points = 1;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in Summary: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read DataPoint")
			}
			s.DataPoints = append(s.DataPoints, &SummaryDataPoint{})
			dp := s.DataPoints[len(s.DataPoints)-1]
			if err := dp.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal DataPoint: %w", err)
			}
		}
	}
	return nil
}

// HistogramDataPoint represents the corresponding OTEL protobuf message
type HistogramDataPoint struct {
	Attributes     []*KeyValue
	TimeUnixNano   uint64
	Count          uint64
	Sum            *float64
	BucketCounts   []uint64
	ExplicitBounds []float64
	Flags          uint32
}

func (dp *HistogramDataPoint) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	for _, a := range dp.Attributes {
		a.marshalProtobuf(mm.AppendMessage(9))
	}
	mm.AppendFixed64(3, dp.TimeUnixNano)
	mm.AppendFixed64(4, dp.Count)
	if dp.Sum != nil {
		mm.AppendDouble(5, *dp.Sum)
	}
	mm.AppendFixed64s(6, dp.BucketCounts)
	mm.AppendDoubles(7, dp.ExplicitBounds)
	mm.AppendUint32(10, dp.Flags)
}

func (dp *HistogramDataPoint) unmarshalProtobuf(src []byte) (err error) {
	// message HistogramDataPoint {
	//   repeated KeyValue attributes = 9;
	//   fixed64 time_unix_nano = 3;
	//   fixed64 count = 4;
	//   optional double sum = 5;
	//   repeated fixed64 bucket_counts = 6;
	//   repeated double explicit_bounds = 7;
	//   uint32 flags = 10;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in HistogramDataPoint: %w", err)
		}
		switch fc.FieldNum {
		case 9:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read Attribute")
			}
			dp.Attributes = append(dp.Attributes, &KeyValue{})
			a := dp.Attributes[len(dp.Attributes)-1]
			if err := a.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal Attribute: %w", err)
			}
		case 3:
			timeUnixNano, ok := fc.Fixed64()
			if !ok {
				return fmt.Errorf("cannot read TimeUnixNano")
			}
			dp.TimeUnixNano = timeUnixNano
		case 4:
			count, ok := fc.Fixed64()
			if !ok {
				return fmt.Errorf("cannot read Count")
			}
			dp.Count = count
		case 5:
			sum, ok := fc.Double()
			if !ok {
				return fmt.Errorf("cannot read Sum")
			}
			dp.Sum = &sum
		case 6:
			bucketCounts, ok := fc.UnpackFixed64s(dp.BucketCounts)
			if !ok {
				return fmt.Errorf("cannot read BucketCounts")
			}
			dp.BucketCounts = bucketCounts
		case 7:
			explicitBounds, ok := fc.UnpackDoubles(dp.ExplicitBounds)
			if !ok {
				return fmt.Errorf("cannot read ExplicitBounds")
			}
			dp.ExplicitBounds = explicitBounds
		case 10:
			flags, ok := fc.Uint32()
			if !ok {
				return fmt.Errorf("cannot read Flags")
			}
			dp.Flags = flags
		}
	}
	return nil
}

// SummaryDataPoint represents the corresponding OTEL protobuf message
type SummaryDataPoint struct {
	Attributes     []*KeyValue
	TimeUnixNano   uint64
	Count          uint64
	Sum            float64
	QuantileValues []*ValueAtQuantile
	Flags          uint32
}

func (dp *SummaryDataPoint) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	for _, a := range dp.Attributes {
		a.marshalProtobuf(mm.AppendMessage(7))
	}
	mm.AppendFixed64(3, dp.TimeUnixNano)
	mm.AppendFixed64(4, dp.Count)
	mm.AppendDouble(5, dp.Sum)
	for _, v := range dp.QuantileValues {
		v.marshalProtobuf(mm.AppendMessage(6))
	}
	mm.AppendUint32(8, dp.Flags)
}

func (dp *SummaryDataPoint) unmarshalProtobuf(src []byte) (err error) {
	// message SummaryDataPoint {
	//   repeated KeyValue attributes = 7;
	//   fixed64 time_unix_nano = 3;
	//   fixed64 count = 4;
	//   double sum = 5;
	//   repeated ValueAtQuantile quantile_values = 6;
	//   uint32 flags = 8;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in SummaryDataPoint: %w", err)
		}
		switch fc.FieldNum {
		case 7:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read Attribute")
			}
			dp.Attributes = append(dp.Attributes, &KeyValue{})
			a := dp.Attributes[len(dp.Attributes)-1]
			if err := a.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal Attribute: %w", err)
			}
		case 3:
			timeUnixNano, ok := fc.Fixed64()
			if !ok {
				return fmt.Errorf("cannot read TimeUnixNano")
			}
			dp.TimeUnixNano = timeUnixNano
		case 4:
			count, ok := fc.Fixed64()
			if !ok {
				return fmt.Errorf("cannot read Count")
			}
			dp.Count = count
		case 5:
			sum, ok := fc.Double()
			if !ok {
				return fmt.Errorf("cannot read Sum")
			}
			dp.Sum = sum
		case 6:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read QuantileValue")
			}
			dp.QuantileValues = append(dp.QuantileValues, &ValueAtQuantile{})
			v := dp.QuantileValues[len(dp.QuantileValues)-1]
			if err := v.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal QuantileValue: %w", err)
			}
		case 8:
			flags, ok := fc.Uint32()
			if !ok {
				return fmt.Errorf("cannot read Flags")
			}
			dp.Flags = flags
		}
	}
	return nil
}

// ValueAtQuantile represents the corresponding OTEL protobuf message
type ValueAtQuantile struct {
	Quantile float64
	Value    float64
}

func (v *ValueAtQuantile) marshalProtobuf(mm *easyproto.MessageMarshaler) {
	mm.AppendDouble(1, v.Quantile)
	mm.AppendDouble(2, v.Value)
}

func (v *ValueAtQuantile) unmarshalProtobuf(src []byte) (err error) {
	// message ValueAtQuantile {
	//   double quantile = 1;
	//   double value = 2;
	// }
	var fc easyproto.FieldContext
	for len(src) > 0 {
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in ValueAtQuantile: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			quantile, ok := fc.Double()
			if !ok {
				return fmt.Errorf("cannot read Quantile")
			}
			v.Quantile = quantile
		case 2:
			value, ok := fc.Double()
			if !ok {
				return fmt.Errorf("cannot read Value")
			}
			v.Value = value
		}
	}
	return nil
}
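For context, here is a minimal round-trip sketch of the easyproto-based API defined above. The import path follows the repository layout and the metric values are made up for illustration; only `MarshalProtobuf` and `UnmarshalProtobuf` come from the file itself:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
)

func main() {
	v := float64(42)
	req := &pb.ExportMetricsServiceRequest{
		ResourceMetrics: []*pb.ResourceMetrics{{
			ScopeMetrics: []*pb.ScopeMetrics{{
				Metrics: []*pb.Metric{{
					Name:  "requests_total",
					Gauge: &pb.Gauge{DataPoints: []*pb.NumberDataPoint{{DoubleValue: &v}}},
				}},
			}},
		}},
	}
	// MarshalProtobuf appends the serialized message to the given buffer.
	data := req.MarshalProtobuf(nil)

	var parsed pb.ExportMetricsServiceRequest
	// UnmarshalProtobuf resets parsed.ResourceMetrics before parsing.
	if err := parsed.UnmarshalProtobuf(data); err != nil {
		panic(err)
	}
	fmt.Println(parsed.ResourceMetrics[0].ScopeMetrics[0].Metrics[0].Name)
}
```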
@@ -1,48 +0,0 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.28.1
// 	protoc        v3.21.12
// source: lib/protoparser/opentelemetry/proto/resource.proto

package pb

// Resource information.
type Resource struct {
	unknownFields []byte

	// Set of attributes that describe the resource.
	// Attribute keys MUST be unique (it is not allowed to have more than one
	// attribute with the same key).
	Attributes []*KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
	// dropped_attributes_count is the number of dropped attributes. If the value is 0, then
	// no attributes were dropped.
	DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
}

func (x *Resource) GetAttributes() []*KeyValue {
	if x != nil {
		return x.Attributes
	}
	return nil
}

func (x *Resource) GetDroppedAttributesCount() uint32 {
	if x != nil {
		return x.DroppedAttributesCount
	}
	return 0
}
@@ -1,184 +0,0 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
// protoc-gen-go-vtproto version: v0.4.0
// source: lib/protoparser/opentelemetry/proto/resource.proto

package pb

import (
	fmt "fmt"
	io "io"
)

func (m *Resource) MarshalVT() (dAtA []byte, err error) {
	if m == nil {
		return nil, nil
	}
	size := m.SizeVT()
	dAtA = make([]byte, size)
	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *Resource) MarshalToVT(dAtA []byte) (int, error) {
	size := m.SizeVT()
	return m.MarshalToSizedBufferVT(dAtA[:size])
}

func (m *Resource) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
	if m == nil {
		return 0, nil
	}
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.unknownFields != nil {
		i -= len(m.unknownFields)
		copy(dAtA[i:], m.unknownFields)
	}
	if m.DroppedAttributesCount != 0 {
		i = encodeVarint(dAtA, i, uint64(m.DroppedAttributesCount))
		i--
		dAtA[i] = 0x10
	}
	if len(m.Attributes) > 0 {
		for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
			size, err := m.Attributes[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarint(dAtA, i, uint64(size))
			i--
			dAtA[i] = 0xa
		}
	}
	return len(dAtA) - i, nil
}

func (m *Resource) SizeVT() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	if len(m.Attributes) > 0 {
		for _, e := range m.Attributes {
			l = e.SizeVT()
			n += 1 + l + sov(uint64(l))
		}
	}
	if m.DroppedAttributesCount != 0 {
		n += 1 + sov(uint64(m.DroppedAttributesCount))
	}
	n += len(m.unknownFields)
	return n
}

func (m *Resource) UnmarshalVT(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflow
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= uint64(b&0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Resource: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflow
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= int(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLength
			}
			postIndex := iNdEx + msglen
			if postIndex < 0 {
				return ErrInvalidLength
			}
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Attributes = append(m.Attributes, &KeyValue{})
			if err := m.Attributes[len(m.Attributes)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
			}
			m.DroppedAttributesCount = 0
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflow
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				m.DroppedAttributesCount |= uint32(b&0x7F) << shift
				if b < 0x80 {
					break
				}
			}
		default:
			iNdEx = preIndex
			skippy, err := skip(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if (skippy < 0) || (iNdEx+skippy) < 0 {
				return ErrInvalidLength
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
@@ -1,32 +0,0 @@
# Opentelemetry proto files

Content copied from https://github.com/open-telemetry/opentelemetry-proto/tree/main/opentelemetry/proto

## Requirements
- protoc binary [link](http://google.github.io/proto-lens/installing-protoc.html)
- golang-proto-gen [link](https://developers.google.com/protocol-buffers/docs/reference/go-generated)
- custom marshaller [link](https://github.com/planetscale/vtprotobuf)

## Modifications

The original proto files were modified as follows:
1) changed the package name to `package opentelemetry`.
2) changed import paths - changed directory names.
3) changed go_package to `opentelemetry/pb`.

## How to generate pbs

Run the following command:
```bash
export GOBIN=~/go/bin protoc
protoc -I=. --go_out=./lib/protoparser/opentelemetry --go-vtproto_out=./lib/protoparser/opentelemetry --plugin protoc-gen-go-vtproto="$GOBIN/protoc-gen-go-vtproto" --go-vtproto_opt=features=marshal+unmarshal+size lib/protoparser/opentelemetry/proto/*.proto
```

The generated code will be at `lib/protoparser/opentelemetry/opentelemetry/`.

Then manually edit it:
1) remove all external imports
2) remove all unneeded methods
3) replace `unknownFields` with `unknownFields []byte` (see the sketch after this list)
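To make step 3 concrete: stock protoc-gen-go output types this field as `protoimpl.UnknownFields` (an assumption about the upstream generator; that type is a byte-slice alias from google.golang.org/protobuf), so the manual edit keeps the same data as a plain `[]byte` and drops the external import, as in the `Resource` struct shown earlier:

```go
package pb

// Before (as generated; imports google.golang.org/protobuf/runtime/protoimpl):
//
//	unknownFields protoimpl.UnknownFields
//
// After the manual edit the struct keeps the same bytes without the external
// dependency (KeyValue is defined alongside in the same package):
type Resource struct {
	unknownFields []byte // raw bytes of fields this package does not know about

	Attributes             []*KeyValue
	DroppedAttributesCount uint32
}
```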
@@ -1,67 +0,0 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package opentelemetry;

option csharp_namespace = "OpenTelemetry.Proto.Common.V1";
option java_multiple_files = true;
option java_package = "io.opentelemetry.proto.common.v1";
option java_outer_classname = "CommonProto";
option go_package = "opentelemetry/pb";

// AnyValue is used to represent any type of attribute value. AnyValue may contain a
// primitive value such as a string or integer or it may contain an arbitrary nested
// object containing arrays, key-value lists and primitives.
message AnyValue {
  // The value is one of the listed fields. It is valid for all values to be unspecified
  // in which case this AnyValue is considered to be "empty".
  oneof value {
    string string_value = 1;
    bool bool_value = 2;
    int64 int_value = 3;
    double double_value = 4;
    ArrayValue array_value = 5;
    KeyValueList kvlist_value = 6;
    bytes bytes_value = 7;
  }
}

// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
// since oneof in AnyValue does not allow repeated fields.
message ArrayValue {
  // Array of values. The array may be empty (contain 0 elements).
  repeated AnyValue values = 1;
}

// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
// are semantically equivalent.
message KeyValueList {
  // A collection of key/value pairs of key-value pairs. The list may be empty (may
  // contain 0 elements).
  // The keys MUST be unique (it is not allowed to have more than one
  // value with the same key).
  repeated KeyValue values = 1;
}

// KeyValue is a key-value pair that is used to store Span attributes, Link
// attributes, etc.
message KeyValue {
  string key = 1;
  AnyValue value = 2;
}
@ -1,661 +0,0 @@
|
||||||
// Copyright 2019, OpenTelemetry Authors
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package opentelemetry;
|
|
||||||
|
|
||||||
import "lib/protoparser/opentelemetry/proto/common.proto";
|
|
||||||
import "lib/protoparser/opentelemetry/proto/resource.proto";
|
|
||||||
|
|
||||||
option csharp_namespace = "OpenTelemetry.Proto.Metrics.V1";
|
|
||||||
option java_multiple_files = true;
|
|
||||||
option java_package = "io.opentelemetry.proto.metrics.v1";
|
|
||||||
option java_outer_classname = "MetricsProto";
|
|
||||||
option go_package = "opentelemetry/pb";
|
|
||||||
|
|
||||||
// MetricsData represents the metrics data that can be stored in a persistent
|
|
||||||
// storage, OR can be embedded by other protocols that transfer OTLP metrics
|
|
||||||
// data but do not implement the OTLP protocol.
|
|
||||||
//
|
|
||||||
// The main difference between this message and collector protocol is that
|
|
||||||
// in this message there will not be any "control" or "metadata" specific to
|
|
||||||
// OTLP protocol.
|
|
||||||
//
|
|
||||||
// When new fields are added into this message, the OTLP request MUST be updated
|
|
||||||
// as well.
|
|
||||||
message MetricsData {
|
|
||||||
// An array of ResourceMetrics.
|
|
||||||
// For data coming from a single resource this array will typically contain
|
|
||||||
// one element. Intermediary nodes that receive data from multiple origins
|
|
||||||
// typically batch the data before forwarding further and in that case this
|
|
||||||
// array will contain multiple elements.
|
|
||||||
repeated ResourceMetrics resource_metrics = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// A collection of ScopeMetrics from a Resource.
|
|
||||||
message ResourceMetrics {
|
|
||||||
reserved 1000;
|
|
||||||
|
|
||||||
// The resource for the metrics in this message.
|
|
||||||
// If this field is not set then no resource info is known.
|
|
||||||
Resource resource = 1;
|
|
||||||
|
|
||||||
// A list of metrics that originate from a resource.
|
|
||||||
repeated ScopeMetrics scope_metrics = 2;
|
|
||||||
|
|
||||||
// This schema_url applies to the data in the "resource" field. It does not apply
|
|
||||||
// to the data in the "scope_metrics" field which have their own schema_url field.
|
|
||||||
string schema_url = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
// A collection of Metrics produced by an Scope.
|
|
||||||
message ScopeMetrics {
|
|
||||||
// A list of metrics that originate from an instrumentation library.
|
|
||||||
repeated Metric metrics = 2;
|
|
||||||
|
|
||||||
// This schema_url applies to all metrics in the "metrics" field.
|
|
||||||
string schema_url = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Defines a Metric which has one or more timeseries. The following is a
|
|
||||||
// brief summary of the Metric data model. For more details, see:
|
|
||||||
//
|
|
||||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// The data model and relation between entities is shown in the
|
|
||||||
// diagram below. Here, "DataPoint" is the term used to refer to any
|
|
||||||
// one of the specific data point value types, and "points" is the term used
|
|
||||||
// to refer to any one of the lists of points contained in the Metric.
|
|
||||||
//
|
|
||||||
// - Metric is composed of a metadata and data.
|
|
||||||
// - Metadata part contains a name, description, unit.
|
|
||||||
// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
|
|
||||||
// - DataPoint contains timestamps, attributes, and one of the possible value type
|
|
||||||
// fields.
|
|
||||||
//
|
|
||||||
// Metric
|
|
||||||
// +------------+
|
|
||||||
// |name |
|
|
||||||
// |description |
|
|
||||||
// |unit | +------------------------------------+
|
|
||||||
// |data |---> |Gauge, Sum, Histogram, Summary, ... |
|
|
||||||
// +------------+ +------------------------------------+
|
|
||||||
//
|
|
||||||
// Data [One of Gauge, Sum, Histogram, Summary, ...]
|
|
||||||
// +-----------+
|
|
||||||
// |... | // Metadata about the Data.
|
|
||||||
// |points |--+
|
|
||||||
// +-----------+ |
|
|
||||||
// | +---------------------------+
|
|
||||||
// | |DataPoint 1 |
|
|
||||||
// v |+------+------+ +------+ |
|
|
||||||
// +-----+ ||label |label |...|label | |
|
|
||||||
// | 1 |-->||value1|value2|...|valueN| |
|
|
||||||
// +-----+ |+------+------+ +------+ |
|
|
||||||
// | . | |+-----+ |
|
|
||||||
// | . | ||value| |
|
|
||||||
// | . | |+-----+ |
|
|
||||||
// | . | +---------------------------+
|
|
||||||
// | . | .
|
|
||||||
// | . | .
|
|
||||||
// | . | .
|
|
||||||
// | . | +---------------------------+
|
|
||||||
// | . | |DataPoint M |
|
|
||||||
// +-----+ |+------+------+ +------+ |
|
|
||||||
// | M |-->||label |label |...|label | |
|
|
||||||
// +-----+ ||value1|value2|...|valueN| |
|
|
||||||
// |+------+------+ +------+ |
|
|
||||||
// |+-----+ |
|
|
||||||
// ||value| |
|
|
||||||
// |+-----+ |
|
|
||||||
// +---------------------------+
|
|
||||||
//
|
|
||||||
// Each distinct type of DataPoint represents the output of a specific
|
|
||||||
// aggregation function, the result of applying the DataPoint's
|
|
||||||
// associated function of to one or more measurements.
|
|
||||||
//
|
|
||||||
// All DataPoint types have three common fields:
|
|
||||||
// - Attributes includes key-value pairs associated with the data point
|
|
||||||
// - TimeUnixNano is required, set to the end time of the aggregation
|
|
||||||
// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
|
|
||||||
// having an AggregationTemporality field, as discussed below.
|
|
||||||
//
|
|
||||||
// Both TimeUnixNano and StartTimeUnixNano values are expressed as
|
|
||||||
// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
|
|
||||||
//
|
|
||||||
// # TimeUnixNano
|
|
||||||
//
|
|
||||||
// This field is required, having consistent interpretation across
|
|
||||||
// DataPoint types. TimeUnixNano is the moment corresponding to when
|
|
||||||
// the data point's aggregate value was captured.
|
|
||||||
//
|
|
||||||
// Data points with the 0 value for TimeUnixNano SHOULD be rejected
|
|
||||||
// by consumers.
|
|
||||||
//
|
|
||||||
// # StartTimeUnixNano
|
|
||||||
//
|
|
||||||
// StartTimeUnixNano in general allows detecting when a sequence of
|
|
||||||
// observations is unbroken. This field indicates to consumers the
|
|
||||||
// start time for points with cumulative and delta
|
|
||||||
// AggregationTemporality, and it should be included whenever possible
|
|
||||||
// to support correct rate calculation. Although it may be omitted
|
|
||||||
// when the start time is truly unknown, setting StartTimeUnixNano is
|
|
||||||
// strongly encouraged.
|
|
||||||
message Metric {
|
|
||||||
reserved 4, 6, 8;
|
|
||||||
|
|
||||||
// name of the metric, including its DNS name prefix. It must be unique.
|
|
||||||
string name = 1;
|
|
||||||
|
|
||||||
// description of the metric, which can be used in documentation.
|
|
||||||
string description = 2;
|
|
||||||
|
|
||||||
// unit in which the metric value is reported. Follows the format
|
|
||||||
// described by http://unitsofmeasure.org/ucum.html.
|
|
||||||
string unit = 3;
|
|
||||||
|
|
||||||
// Data determines the aggregation type (if any) of the metric, what is the
|
|
||||||
// reported value type for the data points, as well as the relatationship to
|
|
||||||
// the time interval over which they are reported.
|
|
||||||
oneof data {
|
|
||||||
Gauge gauge = 5;
|
|
||||||
Sum sum = 7;
|
|
||||||
Histogram histogram = 9;
|
|
||||||
ExponentialHistogram exponential_histogram = 10;
|
|
||||||
Summary summary = 11;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Gauge represents the type of a scalar metric that always exports the
|
|
||||||
// "current value" for every data point. It should be used for an "unknown"
|
|
||||||
// aggregation.
|
|
||||||
//
|
|
||||||
// A Gauge does not support different aggregation temporalities. Given the
|
|
||||||
// aggregation is unknown, points cannot be combined using the same
|
|
||||||
// aggregation, regardless of aggregation temporalities. Therefore,
|
|
||||||
// AggregationTemporality is not included. Consequently, this also means
|
|
||||||
// "StartTimeUnixNano" is ignored for all data points.
|
|
||||||
message Gauge {
|
|
||||||
repeated NumberDataPoint data_points = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sum represents the type of a scalar metric that is calculated as a sum of all
|
|
||||||
// reported measurements over a time interval.
|
|
||||||
message Sum {
|
|
||||||
repeated NumberDataPoint data_points = 1;
|
|
||||||
|
|
||||||
// aggregation_temporality describes if the aggregator reports delta changes
|
|
||||||
// since last report time, or cumulative changes since a fixed start time.
|
|
||||||
AggregationTemporality aggregation_temporality = 2;
|
|
||||||
|
|
||||||
// If "true" means that the sum is monotonic.
|
|
||||||
bool is_monotonic = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Histogram represents the type of a metric that is calculated by aggregating
// as a Histogram of all reported measurements over a time interval.
message Histogram {
  repeated HistogramDataPoint data_points = 1;

  // aggregation_temporality describes whether the aggregator reports delta changes
  // since the last report time, or cumulative changes since a fixed start time.
  AggregationTemporality aggregation_temporality = 2;
}

// ExponentialHistogram represents the type of a metric that is calculated by aggregating
// as an ExponentialHistogram of all reported double measurements over a time interval.
message ExponentialHistogram {
  repeated ExponentialHistogramDataPoint data_points = 1;

  // aggregation_temporality describes whether the aggregator reports delta changes
  // since the last report time, or cumulative changes since a fixed start time.
  AggregationTemporality aggregation_temporality = 2;
}
// Summary metric data are used to convey quantile summaries,
// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
// data type. These data points cannot always be merged in a meaningful way.
// While they can be useful in some applications, histogram data points are
// recommended for new applications.
message Summary {
  repeated SummaryDataPoint data_points = 1;
}
// AggregationTemporality defines how a metric aggregator reports aggregated
// values. It describes how those values relate to the time interval over
// which they are aggregated.
enum AggregationTemporality {
  // UNSPECIFIED is the default AggregationTemporality; it MUST NOT be used.
  AGGREGATION_TEMPORALITY_UNSPECIFIED = 0;

  // DELTA is an AggregationTemporality for a metric aggregator which reports
  // changes since the last report time. Successive metrics contain aggregation of
  // values from continuous and non-overlapping intervals.
  //
  // The values for a DELTA metric are based only on the time interval
  // associated with one measurement cycle. There is no dependency on
  // previous measurements, as is the case for CUMULATIVE metrics.
  //
  // For example, consider a system that measures the number of requests it
  // receives and reports the sum of these requests every second as a
  // DELTA metric:
  //
  //   1. The system starts receiving at time=t_0.
  //   2. A request is received, the system measures 1 request.
  //   3. A request is received, the system measures 1 request.
  //   4. A request is received, the system measures 1 request.
  //   5. The 1 second collection cycle ends. A metric is exported for the
  //      number of requests received over the interval of time t_0 to
  //      t_0+1 with a value of 3.
  //   6. A request is received, the system measures 1 request.
  //   7. A request is received, the system measures 1 request.
  //   8. The 1 second collection cycle ends. A metric is exported for the
  //      number of requests received over the interval of time t_0+1 to
  //      t_0+2 with a value of 2.
  AGGREGATION_TEMPORALITY_DELTA = 1;

  // CUMULATIVE is an AggregationTemporality for a metric aggregator which
  // reports changes since a fixed start time. This means that current values
  // of a CUMULATIVE metric depend on all previous measurements since the
  // start time. Because of this, the sender is required to retain this state
  // in some form. If this state is lost or invalidated, the CUMULATIVE metric
  // values MUST be reset, and a new fixed start time following the last
  // reported measurement time MUST be used.
  //
  // For example, consider a system that measures the number of requests it
  // receives and reports the sum of these requests every second as a
  // CUMULATIVE metric:
  //
  //   1. The system starts receiving at time=t_0.
  //   2. A request is received, the system measures 1 request.
  //   3. A request is received, the system measures 1 request.
  //   4. A request is received, the system measures 1 request.
  //   5. The 1 second collection cycle ends. A metric is exported for the
  //      number of requests received over the interval of time t_0 to
  //      t_0+1 with a value of 3.
  //   6. A request is received, the system measures 1 request.
  //   7. A request is received, the system measures 1 request.
  //   8. The 1 second collection cycle ends. A metric is exported for the
  //      number of requests received over the interval of time t_0 to
  //      t_0+2 with a value of 5.
  //   9. The system experiences a fault and loses state.
  //   10. The system recovers and resumes receiving at time=t_1.
  //   11. A request is received, the system measures 1 request.
  //   12. The 1 second collection cycle ends. A metric is exported for the
  //       number of requests received over the interval of time t_1 to
  //       t_1+1 with a value of 1.
  //
  // Note: even though it is valid to use CUMULATIVE when reporting changes
  // since the last report time, doing so is not recommended. It may cause
  // problems for systems that do not use start_time to determine when the
  // aggregation value was reset (e.g. Prometheus).
  AGGREGATION_TEMPORALITY_CUMULATIVE = 2;
}
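The parser changes below drop non-cumulative sums and histograms. As a hypothetical alternative, a consumer could fold DELTA points into cumulative totals itself; this sketch only illustrates the temporality semantics, and all names in it are made up:

```go
// deltaAccumulator turns DELTA values into CUMULATIVE ones by keeping a
// running total per series, with the accumulator's creation acting as the
// fixed start time.
type deltaAccumulator struct {
	totals map[string]float64 // keyed by series identity (name + sorted attributes)
}

func (a *deltaAccumulator) add(seriesKey string, delta float64) float64 {
	if a.totals == nil {
		a.totals = make(map[string]float64)
	}
	a.totals[seriesKey] += delta
	return a.totals[seriesKey] // cumulative value since the accumulator started
}
```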
// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
// bit field representing 32 distinct boolean flags. Each flag defined in this
// enum is a bit mask. To test the presence of a single flag in the flags of
// a data point, for example, use an expression like:
//
//   (point.flags & FLAG_NO_RECORDED_VALUE) == FLAG_NO_RECORDED_VALUE
//
enum DataPointFlags {
  FLAG_NONE = 0;

  // This DataPoint is valid but has no recorded value. This value
  // SHOULD be used to reflect explicitly missing data in a series, as
  // an equivalent to the Prometheus "staleness marker".
  FLAG_NO_RECORDED_VALUE = 1;

  // Bits 2-31 are reserved for future use.
}
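The flag test from the comment above, written out in Go (the constant mirrors FLAG_NO_RECORDED_VALUE = 1):

```go
const flagNoRecordedValue uint32 = 1

// hasNoRecordedValue checks a single bit-mask flag with bitwise AND,
// as suggested by the DataPointFlags comment.
func hasNoRecordedValue(flags uint32) bool {
	return flags&flagNoRecordedValue == flagNoRecordedValue
}
```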
// NumberDataPoint is a single data point in a timeseries that describes the
// time-varying scalar value of a metric.
message NumberDataPoint {
  reserved 1;

  // The set of key/value pairs that uniquely identify the timeseries to
  // which this point belongs. The list may be empty (may contain 0 elements).
  // Attribute keys MUST be unique (it is not allowed to have more than one
  // attribute with the same key).
  repeated KeyValue attributes = 7;

  // StartTimeUnixNano is optional but strongly encouraged, see the
  // detailed comments above Metric.
  //
  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  // 1970.
  fixed64 start_time_unix_nano = 2;

  // TimeUnixNano is required, see the detailed comments above Metric.
  //
  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  // 1970.
  fixed64 time_unix_nano = 3;

  // The value itself. A point is considered invalid when one of the recognized
  // value fields is not present inside this oneof.
  oneof value {
    double as_double = 4;
    sfixed64 as_int = 6;
  }

  // (Optional) List of exemplars collected from
  // measurements that were used to form the data point.
  repeated Exemplar exemplars = 5;

  // Flags that apply to this specific data point. See DataPointFlags
  // for the available flags and their meaning.
  uint32 flags = 8;
}
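For reference, this is how the parser changes below read a NumberDataPoint with the new pointer-based fields (IntValue/DoubleValue replacing the oneof wrappers) — a condensed sketch of appendSampleFromNumericPoint:

```go
// numericSample extracts the value and a millisecond timestamp from a point.
func numericSample(p *pb.NumberDataPoint) (v float64, tsMs int64) {
	switch {
	case p.IntValue != nil:
		v = float64(*p.IntValue)
	case p.DoubleValue != nil:
		v = *p.DoubleValue
	}
	// TimeUnixNano is nanoseconds since the UNIX epoch; 1e6 ns per millisecond.
	tsMs = int64(p.TimeUnixNano / 1e6)
	return v, tsMs
}
```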
// HistogramDataPoint is a single data point in a timeseries that describes the
// time-varying values of a Histogram. A Histogram contains summary statistics
// for a population of values; it may optionally contain the distribution of
// those values across a set of buckets.
//
// If the histogram contains the distribution of values, then both
// "explicit_bounds" and "bucket_counts" fields must be defined.
// If the histogram does not contain the distribution of values, then both
// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
// "sum" are known.
message HistogramDataPoint {
  reserved 1;

  // The set of key/value pairs that uniquely identify the timeseries to
  // which this point belongs. The list may be empty (may contain 0 elements).
  // Attribute keys MUST be unique (it is not allowed to have more than one
  // attribute with the same key).
  repeated KeyValue attributes = 9;

  // StartTimeUnixNano is optional but strongly encouraged, see the
  // detailed comments above Metric.
  //
  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  // 1970.
  fixed64 start_time_unix_nano = 2;

  // TimeUnixNano is required, see the detailed comments above Metric.
  //
  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  // 1970.
  fixed64 time_unix_nano = 3;

  // count is the number of values in the population. Must be non-negative. This
  // value must be equal to the sum of the "count" fields in buckets if a
  // histogram is provided.
  fixed64 count = 4;

  // sum of the values in the population. If count is zero then this field
  // must be zero.
  //
  // Note: Sum should only be filled out when measuring non-negative discrete
  // events, and is assumed to be monotonic over the values of these events.
  // Negative events *can* be recorded, but sum should not be filled out when
  // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
  // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
  optional double sum = 5;

  // bucket_counts is an optional field that contains the count values of the
  // histogram for each bucket.
  //
  // The sum of the bucket_counts must equal the value in the count field.
  //
  // The number of elements in the bucket_counts array must be one greater than
  // the number of elements in the explicit_bounds array.
  repeated fixed64 bucket_counts = 6;

  // explicit_bounds specifies buckets with explicitly defined bounds for values.
  //
  // The boundaries for bucket at index i are:
  //
  //   (-infinity, explicit_bounds[i]] for i == 0
  //   (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
  //   (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
  //
  // The values in the explicit_bounds array must be strictly increasing.
  //
  // Histogram buckets are inclusive of their upper boundary, except the last
  // bucket where the boundary is at infinity. This format is intentionally
  // compatible with the OpenMetrics histogram definition.
  repeated double explicit_bounds = 7;

  // (Optional) List of exemplars collected from
  // measurements that were used to form the data point.
  repeated Exemplar exemplars = 8;

  // Flags that apply to this specific data point. See DataPointFlags
  // for the available flags and their meaning.
  uint32 flags = 10;

  // min is the minimum value over (start_time, end_time].
  optional double min = 11;

  // max is the maximum value over (start_time, end_time].
  optional double max = 12;
}
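Since the buckets above are per-interval counts with an implicit +Inf bucket at the end, converting them to Prometheus-style cumulative "le" buckets looks roughly like this (a sketch, not the parser's exact code; uses strconv):

```go
// cumulativeLeBuckets assumes len(p.BucketCounts) == len(p.ExplicitBounds)+1,
// as the field comments above require.
func cumulativeLeBuckets(p *pb.HistogramDataPoint) (les []string, counts []uint64) {
	var cum uint64
	for i, c := range p.BucketCounts {
		cum += c         // OTLP counts are per bucket; Prometheus buckets are cumulative
		le := "+Inf"     // the last bucket has no explicit upper bound
		if i < len(p.ExplicitBounds) {
			le = strconv.FormatFloat(p.ExplicitBounds[i], 'g', -1, 64)
		}
		les = append(les, le)
		counts = append(counts, cum)
	}
	return les, counts
}
```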
// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram
// contains summary statistics for a population of values; it may optionally contain the
// distribution of those values across a set of buckets.
message ExponentialHistogramDataPoint {
  // The set of key/value pairs that uniquely identify the timeseries to
  // which this point belongs. The list may be empty (may contain 0 elements).
  // Attribute keys MUST be unique (it is not allowed to have more than one
  // attribute with the same key).
  repeated KeyValue attributes = 1;

  // StartTimeUnixNano is optional but strongly encouraged, see the
  // detailed comments above Metric.
  //
  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  // 1970.
  fixed64 start_time_unix_nano = 2;

  // TimeUnixNano is required, see the detailed comments above Metric.
  //
  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  // 1970.
  fixed64 time_unix_nano = 3;

  // count is the number of values in the population. Must be
  // non-negative. This value must be equal to the sum of the "bucket_counts"
  // values in the positive and negative Buckets plus the "zero_count" field.
  fixed64 count = 4;

  // sum of the values in the population. If count is zero then this field
  // must be zero.
  //
  // Note: Sum should only be filled out when measuring non-negative discrete
  // events, and is assumed to be monotonic over the values of these events.
  // Negative events *can* be recorded, but sum should not be filled out when
  // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
  // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
  optional double sum = 5;

  // scale describes the resolution of the histogram. Boundaries are
  // located at powers of the base, where:
  //
  //   base = (2^(2^-scale))
  //
  // The histogram bucket identified by `index`, a signed integer,
  // contains values that are greater than (base^index) and
  // less than or equal to (base^(index+1)).
  //
  // The positive and negative ranges of the histogram are expressed
  // separately. Negative values are mapped by their absolute value
  // into the negative range using the same scale as the positive range.
  //
  // scale is not restricted by the protocol, as the permissible
  // values depend on the range of the data.
  sint32 scale = 6;

  // zero_count is the count of values that are either exactly zero or
  // within the region considered zero by the instrumentation at the
  // tolerated degree of precision. This bucket stores values that
  // cannot be expressed using the standard exponential formula as
  // well as values that have been rounded to zero.
  //
  // Implementations MAY consider the zero bucket to have probability
  // mass equal to (zero_count / count).
  fixed64 zero_count = 7;

  // positive carries the positive range of exponential bucket counts.
  Buckets positive = 8;

  // negative carries the negative range of exponential bucket counts.
  Buckets negative = 9;

  // Buckets are a set of bucket counts, encoded in a contiguous array
  // of counts.
  message Buckets {
    // Offset is the bucket index of the first entry in the bucket_counts array.
    //
    // Note: This uses a varint encoding as a simple form of compression.
    sint32 offset = 1;

    // Count is an array of counts, where count[i] carries the count
    // of the bucket at index (offset+i). count[i] is the count of
    // values greater than base^(offset+i) and less than or equal to
    // base^(offset+i+1).
    //
    // Note: By contrast, the explicit HistogramDataPoint uses
    // fixed64. This field is expected to have many buckets,
    // especially zeros, so uint64 has been selected to ensure
    // varint encoding.
    repeated uint64 bucket_counts = 2;
  }

  // Flags that apply to this specific data point. See DataPointFlags
  // for the available flags and their meaning.
  uint32 flags = 10;

  // (Optional) List of exemplars collected from
  // measurements that were used to form the data point.
  repeated Exemplar exemplars = 11;

  // min is the minimum value over (start_time, end_time].
  optional double min = 12;

  // max is the maximum value over (start_time, end_time].
  optional double max = 13;
}
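The scale/offset arithmetic above can be made concrete in a few lines of Go (uses math; this is a sketch of the formula from the comments, not code from this commit):

```go
// bucketBounds returns the (lower, upper] boundaries of entry i in a Buckets
// range: base = 2^(2^-scale), and the bucket index is offset + i.
func bucketBounds(scale, offset int32, i int) (lower, upper float64) {
	base := math.Exp2(math.Exp2(-float64(scale)))
	lower = math.Pow(base, float64(offset)+float64(i))
	upper = math.Pow(base, float64(offset)+float64(i)+1)
	return lower, upper
}
```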
// SummaryDataPoint is a single data point in a timeseries that describes the
// time-varying values of a Summary metric.
message SummaryDataPoint {
  reserved 1;

  // The set of key/value pairs that uniquely identify the timeseries to
  // which this point belongs. The list may be empty (may contain 0 elements).
  // Attribute keys MUST be unique (it is not allowed to have more than one
  // attribute with the same key).
  repeated KeyValue attributes = 7;

  // StartTimeUnixNano is optional but strongly encouraged, see the
  // detailed comments above Metric.
  //
  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  // 1970.
  fixed64 start_time_unix_nano = 2;

  // TimeUnixNano is required, see the detailed comments above Metric.
  //
  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  // 1970.
  fixed64 time_unix_nano = 3;

  // count is the number of values in the population. Must be non-negative.
  fixed64 count = 4;

  // sum of the values in the population. If count is zero then this field
  // must be zero.
  //
  // Note: Sum should only be filled out when measuring non-negative discrete
  // events, and is assumed to be monotonic over the values of these events.
  // Negative events *can* be recorded, but sum should not be filled out when
  // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
  // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
  double sum = 5;

  // Represents the value at a given quantile of a distribution.
  //
  // To record Min and Max values, the following conventions are used:
  // - The 1.0 quantile is equivalent to the maximum value observed.
  // - The 0.0 quantile is equivalent to the minimum value observed.
  //
  // See the following issue for more context:
  // https://github.com/open-telemetry/opentelemetry-proto/issues/125
  message ValueAtQuantile {
    // The quantile of a distribution. Must be in the interval
    // [0.0, 1.0].
    double quantile = 1;

    // The value at the given quantile of a distribution.
    //
    // Quantile values must NOT be negative.
    double value = 2;
  }

  // (Optional) list of values at different quantiles of the distribution calculated
  // from the current snapshot. The quantiles must be strictly increasing.
  repeated ValueAtQuantile quantile_values = 6;

  // Flags that apply to this specific data point. See DataPointFlags
  // for the available flags and their meaning.
  uint32 flags = 8;
}
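In Prometheus exposition terms, one SummaryDataPoint fans out into a series per quantile plus `_sum` and `_count` series; a rough sketch (label formatting is illustrative, uses fmt):

```go
func summaryLines(name string, p *pb.SummaryDataPoint) []string {
	lines := []string{
		fmt.Sprintf("%s_sum %g", name, p.Sum),
		fmt.Sprintf("%s_count %d", name, p.Count),
	}
	for _, q := range p.QuantileValues {
		// each quantile becomes its own series, distinguished by the "quantile" label
		lines = append(lines, fmt.Sprintf(`%s{quantile="%g"} %g`, name, q.Quantile, q.Value))
	}
	return lines
}
```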
// A representation of an exemplar, which is a sample input measurement.
// Exemplars also hold information about the environment when the measurement
// was recorded, for example the span and trace ID of the active span when the
// exemplar was recorded.
message Exemplar {
  reserved 1;

  // The set of key/value pairs that were filtered out by the aggregator, but
  // recorded alongside the original measurement. Only key/value pairs that were
  // filtered out by the aggregator should be included.
  repeated KeyValue filtered_attributes = 7;

  // time_unix_nano is the exact time when this exemplar was recorded.
  //
  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  // 1970.
  fixed64 time_unix_nano = 2;

  // The value of the measurement that was recorded. An exemplar is
  // considered invalid when one of the recognized value fields is not present
  // inside this oneof.
  oneof value {
    double as_double = 3;
    sfixed64 as_int = 6;
  }

  // (Optional) Span ID of the exemplar trace.
  // span_id may be missing if the measurement is not recorded inside a trace
  // or if the trace is not sampled.
  bytes span_id = 4;

  // (Optional) Trace ID of the exemplar trace.
  // trace_id may be missing if the measurement is not recorded inside a trace
  // or if the trace is not sampled.
  bytes trace_id = 5;
}
@@ -1,30 +0,0 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package opentelemetry;

import "lib/protoparser/opentelemetry/proto/metrics.proto";

option go_package = "opentelemetry/pb";

message ExportMetricsServiceRequest {
  // An array of ResourceMetrics.
  // For data coming from a single resource this array will typically contain one
  // element. Intermediary nodes (such as OpenTelemetry Collector) that receive
  // data from multiple origins typically batch the data before forwarding further and
  // in that case this array will contain multiple elements.
  repeated ResourceMetrics resource_metrics = 1;
}
@@ -1,37 +0,0 @@
// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package opentelemetry;

import "lib/protoparser/opentelemetry/proto/common.proto";

option csharp_namespace = "OpenTelemetry.Proto.Resource.V1";
option java_multiple_files = true;
option java_package = "io.opentelemetry.proto.resource.v1";
option java_outer_classname = "ResourceProto";
option go_package = "opentelemetry/pb";

// Resource information.
message Resource {
  // Set of attributes that describe the resource.
  // Attribute keys MUST be unique (it is not allowed to have more than one
  // attribute with the same key).
  repeated KeyValue attributes = 1;

  // dropped_attributes_count is the number of dropped attributes. If the value is 0, then
  // no attributes were dropped.
  uint32 dropped_attributes_count = 2;
}
@@ -56,34 +56,34 @@ func (wr *writeContext) appendSamplesFromScopeMetrics(sc *pb.ScopeMetrics) {
 			// skip metrics without names
 			continue
 		}
-		switch t := m.Data.(type) {
-		case *pb.Metric_Gauge:
-			for _, p := range t.Gauge.DataPoints {
+		switch {
+		case m.Gauge != nil:
+			for _, p := range m.Gauge.DataPoints {
 				wr.appendSampleFromNumericPoint(m.Name, p)
 			}
-		case *pb.Metric_Sum:
-			if t.Sum.AggregationTemporality != pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE {
+		case m.Sum != nil:
+			if m.Sum.AggregationTemporality != pb.AggregationTemporalityCumulative {
 				rowsDroppedUnsupportedSum.Inc()
 				continue
 			}
-			for _, p := range t.Sum.DataPoints {
+			for _, p := range m.Sum.DataPoints {
 				wr.appendSampleFromNumericPoint(m.Name, p)
 			}
-		case *pb.Metric_Summary:
-			for _, p := range t.Summary.DataPoints {
+		case m.Summary != nil:
+			for _, p := range m.Summary.DataPoints {
 				wr.appendSamplesFromSummary(m.Name, p)
 			}
-		case *pb.Metric_Histogram:
-			if t.Histogram.AggregationTemporality != pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE {
+		case m.Histogram != nil:
+			if m.Histogram.AggregationTemporality != pb.AggregationTemporalityCumulative {
 				rowsDroppedUnsupportedHistogram.Inc()
 				continue
 			}
-			for _, p := range t.Histogram.DataPoints {
+			for _, p := range m.Histogram.DataPoints {
 				wr.appendSamplesFromHistogram(m.Name, p)
 			}
 		default:
 			rowsDroppedUnsupportedMetricType.Inc()
-			logger.Warnf("unsupported type %T for metric %q", t, m.Name)
+			logger.Warnf("unsupported type for metric %q", m.Name)
 		}
 	}
 }
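The switch now tests plain pointer fields instead of asserting oneof wrapper types, which works because the hand-written easyproto structs flatten the oneof. A sketch of the implied shape (field names taken from this diff; the full definition, including exponential histograms, is not shown here):

```go
type Metric struct {
	Name      string     // field 1
	Gauge     *Gauge     // field 5; nil unless the gauge variant is set
	Sum       *Sum       // field 7
	Histogram *Histogram // field 9
	Summary   *Summary   // field 11
}
```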
@@ -91,11 +91,11 @@ func (wr *writeContext) appendSamplesFromScopeMetrics(sc *pb.ScopeMetrics) {
 // appendSampleFromNumericPoint appends p to wr.tss
 func (wr *writeContext) appendSampleFromNumericPoint(metricName string, p *pb.NumberDataPoint) {
 	var v float64
-	switch t := p.Value.(type) {
-	case *pb.NumberDataPoint_AsInt:
-		v = float64(t.AsInt)
-	case *pb.NumberDataPoint_AsDouble:
-		v = t.AsDouble
+	switch {
+	case p.IntValue != nil:
+		v = float64(*p.IntValue)
+	case p.DoubleValue != nil:
+		v = *p.DoubleValue
 	}

 	t := int64(p.TimeUnixNano / 1e6)
@@ -264,7 +264,7 @@ func (wr *writeContext) readAndUnpackRequest(r io.Reader) (*pb.ExportMetricsServ
 		return nil, fmt.Errorf("cannot read request: %w", err)
 	}
 	var req pb.ExportMetricsServiceRequest
-	if err := req.UnmarshalVT(wr.bb.B); err != nil {
+	if err := req.UnmarshalProtobuf(wr.bb.B); err != nil {
 		return nil, fmt.Errorf("cannot unmarshal request from %d bytes: %w", len(wr.bb.B), err)
 	}
 	return &req, nil
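UnmarshalProtobuf is hand-written on top of github.com/VictoriaMetrics/easyproto instead of being generated. A simplified sketch of the decoding loop such methods follow, using the easyproto FieldContext API (the real code in this commit covers every field and more error cases):

```go
func (m *Metric) unmarshalProtobuf(src []byte) error {
	var fc easyproto.FieldContext
	for len(src) > 0 {
		var err error
		src, err = fc.NextField(src)
		if err != nil {
			return fmt.Errorf("cannot read next field in Metric: %w", err)
		}
		switch fc.FieldNum {
		case 1:
			name, ok := fc.String()
			if !ok {
				return fmt.Errorf("cannot read metric name")
			}
			m.Name = strings.Clone(name) // name aliases src; copy it to own the memory
		case 5:
			data, ok := fc.MessageData()
			if !ok {
				return fmt.Errorf("cannot read gauge data")
			}
			m.Gauge = &Gauge{}
			if err := m.Gauge.unmarshalProtobuf(data); err != nil {
				return fmt.Errorf("cannot unmarshal gauge: %w", err)
			}
		}
	}
	return nil
}
```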
@@ -60,10 +60,7 @@ func TestParseStream(t *testing.T) {
 	}

 	// Verify protobuf parsing
-	pbData, err := req.MarshalVT()
-	if err != nil {
-		t.Fatalf("cannot marshal to protobuf: %s", err)
-	}
+	pbData := req.MarshalProtobuf(nil)
 	if err := checkParseStream(pbData, checkSeries); err != nil {
 		t.Fatalf("cannot parse protobuf: %s", err)
 	}
@@ -149,28 +146,25 @@ func attributesFromKV(k, v string) []*pb.KeyValue {
 		{
 			Key: k,
 			Value: &pb.AnyValue{
-				Value: &pb.AnyValue_StringValue{
-					StringValue: v,
-				},
+				StringValue: &v,
 			},
 		},
 	}
 }

 func generateGauge(name string) *pb.Metric {
+	n := int64(15)
 	points := []*pb.NumberDataPoint{
 		{
 			Attributes:   attributesFromKV("label1", "value1"),
-			Value:        &pb.NumberDataPoint_AsInt{AsInt: 15},
+			IntValue:     &n,
 			TimeUnixNano: uint64(15 * time.Second),
 		},
 	}
 	return &pb.Metric{
 		Name: name,
-		Data: &pb.Metric_Gauge{
-			Gauge: &pb.Gauge{
-				DataPoints: points,
-			},
+		Gauge: &pb.Gauge{
+			DataPoints: points,
 		},
 	}
 }
@@ -189,30 +183,27 @@ func generateHistogram(name string) *pb.Metric {
 	}
 	return &pb.Metric{
 		Name: name,
-		Data: &pb.Metric_Histogram{
-			Histogram: &pb.Histogram{
-				AggregationTemporality: pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
-				DataPoints:             points,
-			},
+		Histogram: &pb.Histogram{
+			AggregationTemporality: pb.AggregationTemporalityCumulative,
+			DataPoints:             points,
 		},
 	}
 }

 func generateSum(name string) *pb.Metric {
+	d := float64(15.5)
 	points := []*pb.NumberDataPoint{
 		{
 			Attributes:   attributesFromKV("label5", "value5"),
-			Value:        &pb.NumberDataPoint_AsDouble{AsDouble: 15.5},
+			DoubleValue:  &d,
 			TimeUnixNano: uint64(150 * time.Second),
 		},
 	}
 	return &pb.Metric{
 		Name: name,
-		Data: &pb.Metric_Sum{
-			Sum: &pb.Sum{
-				AggregationTemporality: pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
-				DataPoints:             points,
-			},
+		Sum: &pb.Sum{
+			AggregationTemporality: pb.AggregationTemporalityCumulative,
+			DataPoints:             points,
 		},
 	}
 }
@@ -224,7 +215,7 @@ func generateSummary(name string) *pb.Metric {
 			TimeUnixNano: uint64(35 * time.Second),
 			Sum:          32.5,
 			Count:        5,
-			QuantileValues: []*pb.SummaryDataPoint_ValueAtQuantile{
+			QuantileValues: []*pb.ValueAtQuantile{
 				{
 					Quantile: 0.1,
 					Value:    7.5,
@@ -242,10 +233,8 @@ func generateSummary(name string) *pb.Metric {
 	}
 	return &pb.Metric{
 		Name: name,
-		Data: &pb.Metric_Summary{
-			Summary: &pb.Summary{
-				DataPoints: points,
-			},
+		Summary: &pb.Summary{
+			DataPoints: points,
 		},
 	}
 }
@@ -21,10 +21,7 @@ func BenchmarkParseStream(b *testing.B) {
 		pbRequest := pb.ExportMetricsServiceRequest{
 			ResourceMetrics: []*pb.ResourceMetrics{generateOTLPSamples(samples)},
 		}
-		data, err := pbRequest.MarshalVT()
-		if err != nil {
-			b.Fatalf("cannot marshal data: %s", err)
-		}
+		data := pbRequest.MarshalProtobuf(nil)

 		for p.Next() {
 			err := ParseStream(bytes.NewBuffer(data), false, func(tss []prompbmarshal.TimeSeries) error {