// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.28.1
// 	protoc        v3.21.12
// source: lib/protoparser/opentelemetry/proto/metrics.proto

package pb

// AggregationTemporality defines how a metric aggregator reports aggregated
// values. It describes how those values relate to the time interval over
// which they are aggregated.
type AggregationTemporality int32

const (
	// UNSPECIFIED is the default AggregationTemporality; it MUST NOT be used.
	AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0
	// DELTA is an AggregationTemporality for a metric aggregator which reports
	// changes since last report time. Successive metrics contain aggregation of
	// values from continuous and non-overlapping intervals.
	//
	// The values for a DELTA metric are based only on the time interval
	// associated with one measurement cycle. Unlike CUMULATIVE metrics,
	// there is no dependency on previous measurements.
	//
	// For example, consider a system that measures the number of requests
	// it receives and reports the sum of these requests every second as a
	// DELTA metric:
	//
	//  1. The system starts receiving at time=t_0.
	//  2. A request is received, the system measures 1 request.
	//  3. A request is received, the system measures 1 request.
	//  4. A request is received, the system measures 1 request.
	//  5. The 1 second collection cycle ends. A metric is exported for the
	//     number of requests received over the interval of time t_0 to
	//     t_0+1 with a value of 3.
	//  6. A request is received, the system measures 1 request.
	//  7. A request is received, the system measures 1 request.
	//  8. The 1 second collection cycle ends. A metric is exported for the
	//     number of requests received over the interval of time t_0+1 to
	//     t_0+2 with a value of 2.
	AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1
	// CUMULATIVE is an AggregationTemporality for a metric aggregator which
	// reports changes since a fixed start time. This means that current values
	// of a CUMULATIVE metric depend on all previous measurements since the
	// start time. Because of this, the sender is required to retain this state
	// in some form. If this state is lost or invalidated, the CUMULATIVE metric
	// values MUST be reset and a new fixed start time following the last
	// reported measurement time sent MUST be used.
	//
	// For example, consider a system that measures the number of requests
	// it receives and reports the sum of these requests every second as a
	// CUMULATIVE metric:
	//
	//  1. The system starts receiving at time=t_0.
	//  2. A request is received, the system measures 1 request.
	//  3. A request is received, the system measures 1 request.
	//  4. A request is received, the system measures 1 request.
	//  5. The 1 second collection cycle ends. A metric is exported for the
	//     number of requests received over the interval of time t_0 to
	//     t_0+1 with a value of 3.
	//  6. A request is received, the system measures 1 request.
	//  7. A request is received, the system measures 1 request.
	//  8. The 1 second collection cycle ends. A metric is exported for the
	//     number of requests received over the interval of time t_0 to
	//     t_0+2 with a value of 5.
	//  9. The system experiences a fault and loses state.
	//  10. The system recovers and resumes receiving at time=t_1.
	//  11. A request is received, the system measures 1 request.
	//  12. The 1 second collection cycle ends. A metric is exported for the
	//      number of requests received over the interval of time t_1 to
	//      t_1+1 with a value of 1.
	//
	// Note: Even though reporting changes since the last report time using
	// CUMULATIVE is valid, it is not recommended. It may cause problems for
	// systems that do not use start_time to determine when the aggregation
	// value was reset (e.g. Prometheus).
	AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2
)

// Enum value maps for AggregationTemporality.
var (
	AggregationTemporality_name = map[int32]string{
		0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
		1: "AGGREGATION_TEMPORALITY_DELTA",
		2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
	}
	AggregationTemporality_value = map[string]int32{
		"AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
		"AGGREGATION_TEMPORALITY_DELTA":       1,
		"AGGREGATION_TEMPORALITY_CUMULATIVE":  2,
	}
)

func (x AggregationTemporality) Enum() *AggregationTemporality {
	p := new(AggregationTemporality)
	*p = x
	return p
}
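
// A minimal usage sketch (illustrative only): the AggregationTemporality_name
// map above can render a temporality value as a human-readable label:
//
//	at := AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA
//	label := AggregationTemporality_name[int32(at)]
//	// label == "AGGREGATION_TEMPORALITY_DELTA"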

// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
// bit-field representing 32 distinct boolean flags.  Each flag defined in this
// enum is a bit-mask.  To test the presence of a single flag in the flags of
// a data point, for example, use an expression like:
//
//	(point.flags & FLAG_NO_RECORDED_VALUE) == FLAG_NO_RECORDED_VALUE
type DataPointFlags int32

const (
	DataPointFlags_FLAG_NONE DataPointFlags = 0
	// This DataPoint is valid but has no recorded value.  This value
	// SHOULD be used to reflect explicitly missing data in a series, as
	// an equivalent to the Prometheus "staleness marker".
	DataPointFlags_FLAG_NO_RECORDED_VALUE DataPointFlags = 1
)

// Enum value maps for DataPointFlags.
var (
	DataPointFlags_name = map[int32]string{
		0: "FLAG_NONE",
		1: "FLAG_NO_RECORDED_VALUE",
	}
	DataPointFlags_value = map[string]int32{
		"FLAG_NONE":              0,
		"FLAG_NO_RECORDED_VALUE": 1,
	}
)

func (x DataPointFlags) Enum() *DataPointFlags {
	p := new(DataPointFlags)
	*p = x
	return p
}
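
// A minimal sketch of the flag test described above, assuming a data point
// variable `point` whose uint32 Flags field is defined later in this file:
//
//	noRecordedValue := point.Flags&uint32(DataPointFlags_FLAG_NO_RECORDED_VALUE) != 0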

// MetricsData represents the metrics data that can be stored in a persistent
// storage, OR can be embedded by other protocols that transfer OTLP metrics
// data but do not implement the OTLP protocol.
//
// The main difference between this message and collector protocol is that
// in this message there will not be any "control" or "metadata" specific to
// OTLP protocol.
//
// When new fields are added into this message, the OTLP request MUST be updated
// as well.
type MetricsData struct {
	unknownFields []byte

	// An array of ResourceMetrics.
	// For data coming from a single resource this array will typically contain
	// one element. Intermediary nodes that receive data from multiple origins
	// typically batch the data before forwarding further and in that case this
	// array will contain multiple elements.
	ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
}

// A collection of ScopeMetrics from a Resource.
type ResourceMetrics struct {
	unknownFields []byte

	// The resource for the metrics in this message.
	// If this field is not set then no resource info is known.
	Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
	// A list of metrics that originate from a resource.
	ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"`
	// This schema_url applies to the data in the "resource" field. It does not apply
	// to the data in the "scope_metrics" field which have their own schema_url field.
	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}

// A collection of Metrics produced by a Scope.
type ScopeMetrics struct {
	unknownFields []byte

	// A list of metrics that originate from an instrumentation library.
	Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
	// This schema_url applies to all metrics in the "metrics" field.
	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
}

// Defines a Metric which has one or more timeseries.  The following is a
// brief summary of the Metric data model.  For more details, see:
//
//	https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
//
// The data model and relation between entities is shown in the
// diagram below. Here, "DataPoint" is the term used to refer to any
// one of the specific data point value types, and "points" is the term used
// to refer to any one of the lists of points contained in the Metric.
//
//   - Metric is composed of metadata and data.
//
//   - The metadata part contains a name, description, and unit.
//
//   - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
//
//   - DataPoint contains timestamps, attributes, and one of the possible value type
//     fields.
//
//	   Metric
//	+------------+
//	|name        |
//	|description |
//	|unit        |     +------------------------------------+
//	|data        |---> |Gauge, Sum, Histogram, Summary, ... |
//	+------------+     +------------------------------------+
//
//	  Data [One of Gauge, Sum, Histogram, Summary, ...]
//	+-----------+
//	|...        |  // Metadata about the Data.
//	|points     |--+
//	+-----------+  |
//	               |      +---------------------------+
//	               |      |DataPoint 1                |
//	               v      |+------+------+   +------+ |
//	            +-----+   ||label |label |...|label | |
//	            |  1  |-->||value1|value2|...|valueN| |
//	            +-----+   |+------+------+   +------+ |
//	            |  .  |   |+-----+                    |
//	            |  .  |   ||value|                    |
//	            |  .  |   |+-----+                    |
//	            |  .  |   +---------------------------+
//	            |  .  |                   .
//	            |  .  |                   .
//	            |  .  |                   .
//	            |  .  |   +---------------------------+
//	            |  .  |   |DataPoint M                |
//	            +-----+   |+------+------+   +------+ |
//	            |  M  |-->||label |label |...|label | |
//	            +-----+   ||value1|value2|...|valueN| |
//	                      |+------+------+   +------+ |
//	                      |+-----+                    |
//	                      ||value|                    |
//	                      |+-----+                    |
//	                      +---------------------------+
//
// Each distinct type of DataPoint represents the output of a specific
// aggregation function, the result of applying the DataPoint's
// associated function to one or more measurements.
//
// All DataPoint types have three common fields:
//   - Attributes includes key-value pairs associated with the data point
//   - TimeUnixNano is required, set to the end time of the aggregation
//   - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
//     having an AggregationTemporality field, as discussed below.
//
// Both TimeUnixNano and StartTimeUnixNano values are expressed as
// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
//
// # TimeUnixNano
//
// This field is required, having consistent interpretation across
// DataPoint types.  TimeUnixNano is the moment corresponding to when
// the data point's aggregate value was captured.
//
// Data points with the 0 value for TimeUnixNano SHOULD be rejected
// by consumers.
//
// # StartTimeUnixNano
//
// StartTimeUnixNano in general allows detecting when a sequence of
// observations is unbroken.  This field indicates to consumers the
// start time for points with cumulative and delta
// AggregationTemporality, and it should be included whenever possible
// to support correct rate calculation.  Although it may be omitted
// when the start time is truly unknown, setting StartTimeUnixNano is
// strongly encouraged.
type Metric struct {
	unknownFields []byte

	// name of the metric, including its DNS name prefix. It must be unique.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// description of the metric, which can be used in documentation.
	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
	// unit in which the metric value is reported. Follows the format
	// described by http://unitsofmeasure.org/ucum.html.
	Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
	// Data determines the aggregation type (if any) of the metric, the
	// reported value type for the data points, and the relationship to
	// the time interval over which they are reported.
	//
	// Types that are assignable to Data:
	//
	//	*Metric_Gauge
	//	*Metric_Sum
	//	*Metric_Histogram
	//	*Metric_ExponentialHistogram
	//	*Metric_Summary
	Data isMetric_Data `protobuf_oneof:"data"`
}

type isMetric_Data interface {
	isMetric_Data()
}

type Metric_Gauge struct {
	Gauge *Gauge `protobuf:"bytes,5,opt,name=gauge,proto3,oneof"`
}

type Metric_Sum struct {
	Sum *Sum `protobuf:"bytes,7,opt,name=sum,proto3,oneof"`
}

type Metric_Histogram struct {
	Histogram *Histogram `protobuf:"bytes,9,opt,name=histogram,proto3,oneof"`
}

type Metric_ExponentialHistogram struct {
	ExponentialHistogram *ExponentialHistogram `protobuf:"bytes,10,opt,name=exponential_histogram,json=exponentialHistogram,proto3,oneof"`
}

type Metric_Summary struct {
	Summary *Summary `protobuf:"bytes,11,opt,name=summary,proto3,oneof"`
}

func (*Metric_Gauge) isMetric_Data() {}

func (*Metric_Sum) isMetric_Data() {}

func (*Metric_Histogram) isMetric_Data() {}

func (*Metric_ExponentialHistogram) isMetric_Data() {}

func (*Metric_Summary) isMetric_Data() {}
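
// A minimal sketch of consuming the Data oneof via a type switch, assuming a
// *Metric variable `m`:
//
//	switch d := m.Data.(type) {
//	case *Metric_Gauge:
//		_ = d.Gauge.DataPoints
//	case *Metric_Sum:
//		_ = d.Sum.DataPoints
//	case *Metric_Histogram:
//		_ = d.Histogram.DataPoints
//	case *Metric_ExponentialHistogram:
//		_ = d.ExponentialHistogram.DataPoints
//	case *Metric_Summary:
//		_ = d.Summary.DataPoints
//	}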

// Gauge represents the type of a scalar metric that always exports the
// "current value" for every data point. It should be used for an "unknown"
// aggregation.
//
// A Gauge does not support different aggregation temporalities. Given the
// aggregation is unknown, points cannot be combined using the same
// aggregation, regardless of aggregation temporalities. Therefore,
// AggregationTemporality is not included. Consequently, this also means
// "StartTimeUnixNano" is ignored for all data points.
type Gauge struct {
	unknownFields []byte

	DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
}

// Sum represents the type of a scalar metric that is calculated as a sum of all
// reported measurements over a time interval.
type Sum struct {
	unknownFields []byte

	DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
	// aggregation_temporality describes whether the aggregator reports delta
	// changes since the last report time, or cumulative changes since a fixed
	// start time.
	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.AggregationTemporality" json:"aggregation_temporality,omitempty"`
	// If "true" means that the sum is monotonic.
	IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"`
}

// Histogram represents the type of a metric that is calculated by aggregating
// as a Histogram of all reported measurements over a time interval.
type Histogram struct {
	unknownFields []byte

	DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
	// aggregation_temporality describes whether the aggregator reports delta
	// changes since the last report time, or cumulative changes since a fixed
	// start time.
	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.AggregationTemporality" json:"aggregation_temporality,omitempty"`
}

// ExponentialHistogram represents the type of a metric that is calculated by aggregating
// as an ExponentialHistogram of all reported double measurements over a time interval.
type ExponentialHistogram struct {
	unknownFields []byte

	DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
	// aggregation_temporality describes whether the aggregator reports delta
	// changes since the last report time, or cumulative changes since a fixed
	// start time.
	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.AggregationTemporality" json:"aggregation_temporality,omitempty"`
}

// Summary metric data are used to convey quantile summaries,
// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
// data type. These data points cannot always be merged in a meaningful way.
// While they can be useful in some applications, histogram data points are
// recommended for new applications.
type Summary struct {
	unknownFields []byte

	DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
}

// NumberDataPoint is a single data point in a timeseries that describes the
// time-varying scalar value of a metric.
type NumberDataPoint struct {
	unknownFields []byte

	// The set of key/value pairs that uniquely identify the timeseries
	// to which this point belongs. The list may be empty (may contain 0 elements).
	// Attribute keys MUST be unique (it is not allowed to have more than one
	// attribute with the same key).
	Attributes []*KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
	// StartTimeUnixNano is optional but strongly encouraged, see the
	// detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
	// TimeUnixNano is required, see the detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
	// The value itself.  A point is considered invalid when one of the recognized
	// value fields is not present inside this oneof.
	//
	// Types that are assignable to Value:
	//
	//	*NumberDataPoint_AsDouble
	//	*NumberDataPoint_AsInt
	Value isNumberDataPoint_Value `protobuf_oneof:"value"`
	// (Optional) List of exemplars collected from
	// measurements that were used to form the data point
	Exemplars []*Exemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
	// Flags that apply to this specific data point.  See DataPointFlags
	// for the available flags and their meaning.
	Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
}

type isNumberDataPoint_Value interface {
	isNumberDataPoint_Value()
}

type NumberDataPoint_AsDouble struct {
	AsDouble float64 `protobuf:"fixed64,4,opt,name=as_double,json=asDouble,proto3,oneof"`
}

type NumberDataPoint_AsInt struct {
	AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"`
}

func (*NumberDataPoint_AsDouble) isNumberDataPoint_Value() {}

func (*NumberDataPoint_AsInt) isNumberDataPoint_Value() {}
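
// A minimal sketch of extracting the numeric value from the oneof, assuming a
// *NumberDataPoint variable `p`:
//
//	var v float64
//	switch x := p.Value.(type) {
//	case *NumberDataPoint_AsDouble:
//		v = x.AsDouble
//	case *NumberDataPoint_AsInt:
//		v = float64(x.AsInt)
//	default:
//		// No recognized value field is present: the point is invalid.
//	}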

// HistogramDataPoint is a single data point in a timeseries that describes the
// time-varying values of a Histogram. A Histogram contains summary statistics
// for a population of values; it may optionally contain the distribution of
// those values across a set of buckets.
//
// If the histogram contains the distribution of values, then both
// "explicit_bounds" and "bucket counts" fields must be defined.
// If the histogram does not contain the distribution of values, then both
// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
// "sum" are known.
type HistogramDataPoint struct {
	unknownFields []byte

	// The set of key/value pairs that uniquely identify the timeseries
	// to which this point belongs. The list may be empty (may contain 0 elements).
	// Attribute keys MUST be unique (it is not allowed to have more than one
	// attribute with the same key).
	Attributes []*KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"`
	// StartTimeUnixNano is optional but strongly encouraged, see the
	// detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
	// TimeUnixNano is required, see the detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
	// count is the number of values in the population. Must be non-negative. This
	// value must be equal to the sum of the "bucket_counts" values if a
	// histogram is provided.
	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
	// sum of the values in the population. If count is zero then this field
	// must be zero.
	//
	// Note: Sum should only be filled out when measuring non-negative discrete
	// events, and is assumed to be monotonic over the values of these events.
	// Negative events *can* be recorded, but sum should not be filled out when
	// doing so.  This is specifically to enforce compatibility w/ OpenMetrics,
	// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
	Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
	// bucket_counts is an optional field containing the count values of the
	// histogram for each bucket.
	//
	// The sum of the bucket_counts must equal the value in the count field.
	//
	// The number of elements in the bucket_counts array must be one greater than
	// the number of elements in the explicit_bounds array.
	BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
	// explicit_bounds specifies buckets with explicitly defined bounds for values.
	//
	// The boundaries for the bucket at index i are:
	//
	// (-infinity, explicit_bounds[i]] for i == 0
	// (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
	// (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
	//
	// The values in the explicit_bounds array must be strictly increasing.
	//
	// Histogram buckets are inclusive of their upper boundary, except the last
	// bucket where the boundary is at infinity. This format is intentionally
	// compatible with the OpenMetrics histogram definition.
	ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"`
	// (Optional) List of exemplars collected from
	// measurements that were used to form the data point
	Exemplars []*Exemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
	// Flags that apply to this specific data point.  See DataPointFlags
	// for the available flags and their meaning.
	Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
	// min is the minimum value over (start_time, end_time].
	Min *float64 `protobuf:"fixed64,11,opt,name=min,proto3,oneof" json:"min,omitempty"`
	// max is the maximum value over (start_time, end_time].
	Max *float64 `protobuf:"fixed64,12,opt,name=max,proto3,oneof" json:"max,omitempty"`
}
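
// A minimal sketch of locating the bucket for a value v under the
// (lower, upper] boundary rule above, assuming a *HistogramDataPoint variable
// `p` whose explicit_bounds invariants hold (sort is the standard library
// package):
//
//	idx := sort.SearchFloat64s(p.ExplicitBounds, v) // first bound >= v
//	count := p.BucketCounts[idx]                    // idx == len(bounds) selects the +infinity bucket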

// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram
// contains summary statistics for a population of values; it may optionally contain the
// distribution of those values across a set of buckets.
type ExponentialHistogramDataPoint struct {
	unknownFields []byte

	// The set of key/value pairs that uniquely identify the timeseries
	// to which this point belongs. The list may be empty (may contain 0 elements).
	// Attribute keys MUST be unique (it is not allowed to have more than one
	// attribute with the same key).
	Attributes []*KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
	// StartTimeUnixNano is optional but strongly encouraged, see the
	// detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
	// TimeUnixNano is required, see the detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
	// count is the number of values in the population. Must be
	// non-negative. This value must be equal to the sum of the "bucket_counts"
	// values in the positive and negative Buckets plus the "zero_count" field.
	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
	// sum of the values in the population. If count is zero then this field
	// must be zero.
	//
	// Note: Sum should only be filled out when measuring non-negative discrete
	// events, and is assumed to be monotonic over the values of these events.
	// Negative events *can* be recorded, but sum should not be filled out when
	// doing so.  This is specifically to enforce compatibility w/ OpenMetrics,
	// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
	Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
	// scale describes the resolution of the histogram.  Boundaries are
	// located at powers of the base, where:
	//
	//	base = (2^(2^-scale))
	//
	// The histogram bucket identified by `index`, a signed integer,
	// contains values that are greater than (base^index) and
	// less than or equal to (base^(index+1)).
	//
	// The positive and negative ranges of the histogram are expressed
	// separately.  Negative values are mapped by their absolute value
	// into the negative range using the same scale as the positive range.
	//
	// scale is not restricted by the protocol, as the permissible
	// values depend on the range of the data.
	Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"`
	// zero_count is the count of values that are either exactly zero or
	// within the region considered zero by the instrumentation at the
	// tolerated degree of precision.  This bucket stores values that
	// cannot be expressed using the standard exponential formula as
	// well as values that have been rounded to zero.
	//
	// Implementations MAY consider the zero bucket to have probability
	// mass equal to (zero_count / count).
	ZeroCount uint64 `protobuf:"fixed64,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"`
	// positive carries the positive range of exponential bucket counts.
	Positive *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,8,opt,name=positive,proto3" json:"positive,omitempty"`
	// negative carries the negative range of exponential bucket counts.
	Negative *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,9,opt,name=negative,proto3" json:"negative,omitempty"`
	// Flags that apply to this specific data point.  See DataPointFlags
	// for the available flags and their meaning.
	Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
	// (Optional) List of exemplars collected from
	// measurements that were used to form the data point
	Exemplars []*Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
	// min is the minimum value over (start_time, end_time].
	Min *float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"`
	// max is the maximum value over (start_time, end_time].
	Max *float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"`
}
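
// A minimal sketch of the scale/index mapping described above for a positive
// value v (math is the standard library package; exact powers of base may
// need extra care due to floating-point rounding):
//
//	base := math.Pow(2, math.Pow(2, -float64(p.Scale)))
//	index := int64(math.Ceil(math.Log(v)/math.Log(base))) - 1
//	// v falls into the bucket covering (base^index, base^(index+1)].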

// SummaryDataPoint is a single data point in a timeseries that describes the
// time-varying values of a Summary metric.
type SummaryDataPoint struct {
	unknownFields []byte

	// The set of key/value pairs that uniquely identify the timeseries
	// to which this point belongs. The list may be empty (may contain 0 elements).
	// Attribute keys MUST be unique (it is not allowed to have more than one
	// attribute with the same key).
	Attributes []*KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
	// StartTimeUnixNano is optional but strongly encouraged, see the
	// detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
	// TimeUnixNano is required, see the detailed comments above Metric.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
	// count is the number of values in the population. Must be non-negative.
	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
	// sum of the values in the population. If count is zero then this field
	// must be zero.
	//
	// Note: Sum should only be filled out when measuring non-negative discrete
	// events, and is assumed to be monotonic over the values of these events.
	// Negative events *can* be recorded, but sum should not be filled out when
	// doing so.  This is specifically to enforce compatibility w/ OpenMetrics,
	// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
	Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"`
	// (Optional) list of values at different quantiles of the distribution calculated
	// from the current snapshot. The quantiles must be strictly increasing.
	QuantileValues []*SummaryDataPoint_ValueAtQuantile `protobuf:"bytes,6,rep,name=quantile_values,json=quantileValues,proto3" json:"quantile_values,omitempty"`
	// Flags that apply to this specific data point.  See DataPointFlags
	// for the available flags and their meaning.
	Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
}

// A representation of an exemplar, which is a sample input measurement.
// Exemplars also hold information about the environment when the measurement
// was recorded, for example the span and trace ID of the active span when the
// exemplar was recorded.
type Exemplar struct {
	unknownFields []byte

	// The set of key/value pairs that were filtered out by the aggregator, but
	// recorded alongside the original measurement. Only key/value pairs that were
	// filtered out by the aggregator should be included.
	FilteredAttributes []*KeyValue `protobuf:"bytes,7,rep,name=filtered_attributes,json=filteredAttributes,proto3" json:"filtered_attributes,omitempty"`
	// time_unix_nano is the exact time when this exemplar was recorded.
	//
	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
	// 1970.
	TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
	// The value of the measurement that was recorded. An exemplar is
	// considered invalid when one of the recognized value fields is not present
	// inside this oneof.
	//
	// Types that are assignable to Value:
	//
	//	*Exemplar_AsDouble
	//	*Exemplar_AsInt
	Value isExemplar_Value `protobuf_oneof:"value"`
	// (Optional) Span ID of the exemplar trace.
	// span_id may be missing if the measurement is not recorded inside a trace
	// or if the trace is not sampled.
	SpanId []byte `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
	// (Optional) Trace ID of the exemplar trace.
	// trace_id may be missing if the measurement is not recorded inside a trace
	// or if the trace is not sampled.
	TraceId []byte `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
}

type isExemplar_Value interface {
	isExemplar_Value()
}

type Exemplar_AsDouble struct {
	AsDouble float64 `protobuf:"fixed64,3,opt,name=as_double,json=asDouble,proto3,oneof"`
}

type Exemplar_AsInt struct {
	AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"`
}

func (*Exemplar_AsDouble) isExemplar_Value() {}

func (*Exemplar_AsInt) isExemplar_Value() {}

// Buckets are a set of bucket counts, encoded in a contiguous array
// of counts.
type ExponentialHistogramDataPoint_Buckets struct {
	unknownFields []byte

	// Offset is the bucket index of the first entry in the bucket_counts array.
	//
	// Note: This uses a varint encoding as a simple form of compression.
	Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
	// Count is an array of counts, where count[i] carries the count
	// of the bucket at index (offset+i).  count[i] is the count of
	// values greater than base^(offset+i) and less than or equal to
	// base^(offset+i+1).
	//
	// Note: By contrast, the explicit HistogramDataPoint uses
	// fixed64.  This field is expected to have many buckets,
	// especially zeros, so uint64 has been selected to ensure
	// varint encoding.
	BucketCounts []uint64 `protobuf:"varint,2,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
}
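
// A minimal sketch of walking a Buckets message, assuming a variable `b` of
// type *ExponentialHistogramDataPoint_Buckets:
//
//	for i, c := range b.BucketCounts {
//		index := b.Offset + int32(i)
//		// c values fall into the bucket covering (base^index, base^(index+1)].
//		_, _ = index, c
//	}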

// Represents the value at a given quantile of a distribution.
//
// To record Min and Max values, the following conventions are used:
// - The 1.0 quantile is equivalent to the maximum value observed.
// - The 0.0 quantile is equivalent to the minimum value observed.
//
// See the following issue for more context:
// https://github.com/open-telemetry/opentelemetry-proto/issues/125
type SummaryDataPoint_ValueAtQuantile struct {
	unknownFields []byte

	// The quantile of a distribution. Must be in the interval
	// [0.0, 1.0].
	Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"`
	// The value at the given quantile of a distribution.
	//
	// Quantile values must NOT be negative.
	Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
}
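
// A minimal sketch of recovering min and max via the 0.0 and 1.0 quantile
// conventions above, assuming a *SummaryDataPoint variable `sp`:
//
//	var min, max float64
//	for _, qv := range sp.QuantileValues {
//		switch qv.Quantile {
//		case 0.0:
//			min = qv.Value
//		case 1.0:
//			max = qv.Value
//		}
//	}
//	_, _ = min, max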