mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2025-03-11 15:34:56 +00:00
Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files
commit 8033f1705c
64 changed files with 9382 additions and 663 deletions
8 .github/dependabot.yml vendored
@@ -6,12 +6,18 @@ updates:
      interval: "daily"
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 0
  - package-ecosystem: "bundler"
    directory: "/docs"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 0
  - package-ecosystem: "gomod"
    directory: "/app/vmui/packages/vmui/web"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 0
  - package-ecosystem: "docker"
    directory: "/"
@@ -19,4 +25,6 @@ updates:
      interval: "daily"
  - package-ecosystem: "npm"
    directory: "/app/vmui/packages/vmui"
    schedule:
      interval: "weekly"
    open-pull-requests-limit: 0

@@ -86,6 +86,7 @@ VictoriaMetrics has the following prominent features:
  * [Arbitrary CSV data](#how-to-import-csv-data).
  * [Native binary format](#how-to-import-data-in-native-format).
  * [DataDog agent or DogStatsD](#how-to-send-data-from-datadog-agent).
  * [OpenTelemetry metrics format](#sending-data-via-opentelemetry).
* It supports powerful [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html), which can be used as a [statsd](https://github.com/statsd/statsd) alternative.
* It supports metrics [relabeling](#relabeling).
* It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and
@@ -1173,6 +1174,7 @@ Additionally, VictoriaMetrics can accept metrics via the following popular data
* DataDog `submit metrics` API. See [these docs](#how-to-send-data-from-datadog-agent) for details.
* InfluxDB line protocol. See [these docs](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details.
* Graphite plaintext protocol. See [these docs](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) for details.
* OpenTelemetry http API. See [these docs](#sending-data-via-opentelemetry) for details.
* OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details.
* OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details.
* `/api/v1/import` for importing data obtained from [/api/v1/export](#how-to-export-data-in-json-line-format).
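
As an illustration of the `/api/v1/import` endpoint above, a minimal Go sketch pushing one series in the JSON line format produced by `/api/v1/export` (the address, metric name and sample values are placeholders):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// One JSON line per series: labels, sample values and millisecond timestamps.
	line := `{"metric":{"__name__":"foo","job":"bar"},"values":[10,20],"timestamps":[1690000000000,1690000010000]}` + "\n"
	resp, err := http.Post("http://localhost:8428/api/v1/import", "application/json", strings.NewReader(line))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode) // any 2xx status means the line was accepted
}
```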

@@ -1356,6 +1358,13 @@ Note that it could be required to flush response cache after importing historical data.

VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter).

## Sending data via OpenTelemetry

VictoriaMetrics supports data ingestion via [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md) at the `/opentelemetry/api/v1/push` path.

VictoriaMetrics expects `protobuf`-encoded requests at `/opentelemetry/api/v1/push`.
Set the HTTP request header `Content-Encoding: gzip` when sending gzip-compressed data to `/opentelemetry/api/v1/push`.
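
For illustration, a minimal Go sketch of such a push against a single-node instance; the helper name and the `application/x-protobuf` content type are assumptions, and marshaling the OpenTelemetry `ExportMetricsServiceRequest` payload itself (via the OpenTelemetry protobuf types) is left out:

```go
package otelexample

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"net/http"
)

// pushOpenTelemetry gzip-compresses a protobuf-encoded OpenTelemetry
// metrics payload and posts it to VictoriaMetrics.
func pushOpenTelemetry(vmAddr string, payload []byte) error {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(payload); err != nil {
		return err
	}
	if err := zw.Close(); err != nil {
		return err
	}
	req, err := http.NewRequest(http.MethodPost, vmAddr+"/opentelemetry/api/v1/push", &buf)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/x-protobuf") // assumption: protobuf body
	req.Header.Set("Content-Encoding", "gzip")               // body is gzip-compressed
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}
	return nil
}
```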

## Relabeling

VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points

@@ -93,6 +93,7 @@ additionally to pull-based Prometheus-compatible targets' scraping:
* DataDog "submit metrics" API. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent).
* InfluxDB line protocol via `http://<vmagent>:8429/write`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf).
* Graphite plaintext protocol if `-graphiteListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-graphite-compatible-agents-such-as-statsd).
* OpenTelemetry http API. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-data-via-opentelemetry).
* OpenTSDB telnet and http protocols if `-opentsdbListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-opentsdb-compatible-agents).
* Prometheus remote write protocol via `http://<vmagent>:8429/api/v1/write`.
* JSON lines import protocol via `http://<vmagent>:8429/api/v1/import`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-json-line-format).
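
For example, a minimal Go sketch pushing a single InfluxDB line protocol sample to the `vmagent` `/write` endpoint listed above (address, measurement and tag are placeholders):

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// InfluxDB line protocol: measurement,tag=value field=value [timestamp].
	body := "cpu_usage,host=host1 value=42.5\n"
	resp, err := http.Post("http://localhost:8429/write", "text/plain", strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode) // a 2xx status means the sample was accepted
}
```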

@@ -16,6 +16,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/graphite"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/influx"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/native"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/opentelemetry"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/opentsdb"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/opentsdbhttp"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/prometheusimport"
@@ -308,6 +309,15 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
		influxQueryRequests.Inc()
		influxutils.WriteDatabaseNames(w)
		return true
	case "/opentelemetry/api/v1/push":
		opentelemetryPushRequests.Inc()
		if err := opentelemetry.InsertHandler(nil, r); err != nil {
			opentelemetryPushErrors.Inc()
			httpserver.Errorf(w, r, "%s", err)
			return true
		}
		w.WriteHeader(http.StatusOK)
		return true
	case "/datadog/api/v1/series":
		datadogWriteRequests.Inc()
		if err := datadog.InsertHandlerForHTTP(nil, r); err != nil {
@@ -499,6 +509,15 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
		influxQueryRequests.Inc()
		influxutils.WriteDatabaseNames(w)
		return true
	case "opentelemetry/api/v1/push":
		opentelemetryPushRequests.Inc()
		if err := opentelemetry.InsertHandler(at, r); err != nil {
			opentelemetryPushErrors.Inc()
			httpserver.Errorf(w, r, "%s", err)
			return true
		}
		w.WriteHeader(http.StatusOK)
		return true
	case "datadog/api/v1/series":
		datadogWriteRequests.Inc()
		if err := datadog.InsertHandlerForHTTP(at, r); err != nil {
@@ -568,6 +587,9 @@ var (
	datadogIntakeRequests   = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/intake", protocol="datadog"}`)
	datadogMetadataRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/v1/metadata", protocol="datadog"}`)

	opentelemetryPushRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)
	opentelemetryPushErrors   = metrics.NewCounter(`vmagent_http_request_errors_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)

	promscrapeTargetsRequests          = metrics.NewCounter(`vmagent_http_requests_total{path="/targets"}`)
	promscrapeServiceDiscoveryRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/service-discovery"}`)
65 app/vmagent/opentelemetry/request_handler.go Normal file
@@ -0,0 +1,65 @@
package opentelemetry

import (
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/stream"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
	"github.com/VictoriaMetrics/metrics"
)

var (
	rowsInserted       = metrics.NewCounter(`vmagent_rows_inserted_total{type="opentelemetry"}`)
	rowsTenantInserted = tenantmetrics.NewCounterMap(`vmagent_tenant_inserted_rows_total{type="opentelemetry"}`)
	rowsPerInsert      = metrics.NewHistogram(`vmagent_rows_per_insert{type="opentelemetry"}`)
)

// InsertHandler processes opentelemetry metrics.
func InsertHandler(at *auth.Token, req *http.Request) error {
	extraLabels, err := parserCommon.GetExtraLabels(req)
	if err != nil {
		return err
	}
	isGzipped := req.Header.Get("Content-Encoding") == "gzip"
	return stream.ParseStream(req.Body, isGzipped, func(tss []prompbmarshal.TimeSeries) error {
		return insertRows(at, tss, extraLabels)
	})
}

func insertRows(at *auth.Token, tss []prompbmarshal.TimeSeries, extraLabels []prompbmarshal.Label) error {
	ctx := common.GetPushCtx()
	defer common.PutPushCtx(ctx)

	rowsTotal := 0
	tssDst := ctx.WriteRequest.Timeseries[:0]
	labels := ctx.Labels[:0]
	samples := ctx.Samples[:0]
	for i := range tss {
		ts := &tss[i]
		rowsTotal += len(ts.Samples)
		labelsLen := len(labels)
		labels = append(labels, ts.Labels...)
		labels = append(labels, extraLabels...)
		samplesLen := len(samples)
		samples = append(samples, ts.Samples...)
		tssDst = append(tssDst, prompbmarshal.TimeSeries{
			Labels:  labels[labelsLen:],
			Samples: samples[samplesLen:],
		})
	}
	ctx.WriteRequest.Timeseries = tssDst
	ctx.Labels = labels
	ctx.Samples = samples
	remotewrite.Push(at, &ctx.WriteRequest)
	rowsInserted.Add(rowsTotal)
	if at != nil {
		rowsTenantInserted.Get(at).Add(rowsTotal)
	}
	rowsPerInsert.Update(float64(rowsTotal))
	return nil
}

@@ -78,8 +78,9 @@ test-vmalert:

run-vmalert: vmalert
	./bin/vmalert -rule=app/vmalert/config/testdata/rules/rules2-good.rules \
		-datasource.url=http://demo.robustperception.io:9090 \
		-notifier.blackhole \
		-datasource.url=http://localhost:8428 \
		-notifier.url=http://localhost:9093 \
		-notifier.url=http://127.0.0.1:9093 \
		-remoteWrite.url=http://localhost:8428 \
		-remoteRead.url=http://localhost:8428 \
		-external.label=cluster=east-1 \

@@ -203,6 +203,10 @@ expr: <string>
# as firing once they return.
[ for: <duration> | default = 0s ]

# Alert will continue firing for this long even when the alerting expression no longer has results.
# This allows you to delay alert resolution.
[ keep_firing_for: <duration> | default = 0s ]

# Whether to print debug information into logs.
# Information includes alerts state changes and requests sent to the datasource.
# Please note that if rule's query params contain sensitive
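
The timing rule behind `keep_firing_for` reduces to a single comparison, shown in this hypothetical Go sketch (identifiers are illustrative, not the actual vmalert API):

```go
package rules

import "time"

// shouldKeepFiring reports whether an alert whose expression stopped
// returning results at keepFiringSince must still be reported as firing
// at the current evaluation time now.
func shouldKeepFiring(now, keepFiringSince time.Time, keepFiringFor time.Duration) bool {
	if keepFiringFor <= 0 || keepFiringSince.IsZero() {
		return false
	}
	return now.Sub(keepFiringSince) <= keepFiringFor
}
```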

@@ -357,19 +361,24 @@ For recording rules to work `-remoteWrite.url` must be specified.

### Alerts state on restarts

`vmalert` has no local storage, so alerts state is stored in the process memory. Hence, after restart of `vmalert`
the process alerts state will be lost. To avoid this situation, `vmalert` should be configured via the following flags:
`vmalert` is stateless, it holds alerts state in the process memory. Restarting of `vmalert` process
will reset alerts state in memory. To prevent `vmalert` from losing alerts state it should be configured
to persist the state to the remote destination via the following flags:

* `-remoteWrite.url` - URL to VictoriaMetrics (Single) or vminsert (Cluster). `vmalert` will persist alerts state
  into the configured address in the form of time series named `ALERTS` and `ALERTS_FOR_STATE` via remote-write protocol.
  These are regular time series and maybe queried from VM just as any other time series.
  The state is stored to the configured address on every rule evaluation.
  to the configured address in the form of [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series)
  `ALERTS` and `ALERTS_FOR_STATE` via remote-write protocol.
  These time series can be queried from VictoriaMetrics just as any other time series.
  The state will be persisted to the configured address on each evaluation.
* `-remoteRead.url` - URL to VictoriaMetrics (Single) or vmselect (Cluster). `vmalert` will try to restore alerts state
  from configured address by querying time series with name `ALERTS_FOR_STATE`.
  from the configured address by querying time series with name `ALERTS_FOR_STATE`. The restore happens only once when
  `vmalert` process starts, and only for the configured rules. Config [hot reload](#hot-config-reload) doesn't trigger
  state restore.

Both flags are required for proper state restoration. Restore process may fail if time series are missing
in configured `-remoteRead.url`, weren't updated in the last `1h` (controlled by `-remoteRead.lookback`)
or received state doesn't match current `vmalert` rules configuration.
or received state doesn't match current `vmalert` rules configuration. `vmalert` marks successfully restored rules
with `restored` label in [web UI](#WEB).
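
Because the persisted state consists of regular time series, it can be inspected via the standard query API; a minimal Go sketch against a single-node address (the address is a placeholder):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Query the state series written by vmalert via -remoteWrite.url.
	q := url.Values{"query": {"ALERTS_FOR_STATE"}}
	resp, err := http.Get("http://localhost:8428/api/v1/query?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // Prometheus-compatible JSON response
}
```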

### Multitenancy

@@ -731,6 +740,7 @@ See full description for these flags in `./vmalert -help`.
* Graphite engine isn't supported yet;
* `query` template function is disabled for performance reasons (might be changed in future);
* `limit` group's param has no effect during replay (might be changed in future);
* `keep_firing_for` alerting rule param has no effect during replay (might be changed in future).

## Unit Testing for Rules

@@ -21,17 +21,18 @@ import (

// AlertingRule is basic alert entity
type AlertingRule struct {
	Type         config.Type
	RuleID       uint64
	Name         string
	Expr         string
	For          time.Duration
	Labels       map[string]string
	Annotations  map[string]string
	GroupID      uint64
	GroupName    string
	EvalInterval time.Duration
	Debug        bool
	Type          config.Type
	RuleID        uint64
	Name          string
	Expr          string
	For           time.Duration
	KeepFiringFor time.Duration
	Labels        map[string]string
	Annotations   map[string]string
	GroupID       uint64
	GroupName     string
	EvalInterval  time.Duration
	Debug         bool

	q datasource.Querier
@@ -56,17 +57,18 @@ type alertingRuleMetrics struct {

func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule) *AlertingRule {
	ar := &AlertingRule{
		Type:         group.Type,
		RuleID:       cfg.ID,
		Name:         cfg.Alert,
		Expr:         cfg.Expr,
		For:          cfg.For.Duration(),
		Labels:       cfg.Labels,
		Annotations:  cfg.Annotations,
		GroupID:      group.ID(),
		GroupName:    group.Name,
		EvalInterval: group.Interval,
		Debug:        cfg.Debug,
		Type:          group.Type,
		RuleID:        cfg.ID,
		Name:          cfg.Alert,
		Expr:          cfg.Expr,
		For:           cfg.For.Duration(),
		KeepFiringFor: cfg.KeepFiringFor.Duration(),
		Labels:        cfg.Labels,
		Annotations:   cfg.Annotations,
		GroupID:       group.ID(),
		GroupName:     group.Name,
		EvalInterval:  group.Interval,
		Debug:         cfg.Debug,
		q: qb.BuildWithParams(datasource.QuerierParams{
			DataSourceType:     group.Type.String(),
			EvaluationInterval: group.Interval,
@@ -366,6 +368,7 @@ func (ar *AlertingRule) Exec(ctx context.Context, ts time.Time, limit int) ([]pr
			if err != nil {
				return nil, err
			}
			a.KeepFiringSince = time.Time{}
			continue
		}
		a, err := ar.newAlert(m, ls, start, qFn)
@@ -391,12 +394,24 @@ func (ar *AlertingRule) Exec(ctx context.Context, ts time.Time, limit int) ([]pr
			ar.logDebugf(ts, a, "PENDING => DELETED: is absent in current evaluation round")
			continue
		}
		// check if alert should keep StateFiring if rule has
		// `keep_firing_for` field
		if a.State == notifier.StateFiring {
			a.State = notifier.StateInactive
			a.ResolvedAt = ts
			ar.logDebugf(ts, a, "FIRING => INACTIVE: is absent in current evaluation round")
			if ar.KeepFiringFor > 0 {
				if a.KeepFiringSince.IsZero() {
					a.KeepFiringSince = ts
				}
			}
			// alerts with ar.KeepFiringFor>0 may remain FIRING
			// even if their expression isn't true anymore
			if ts.Sub(a.KeepFiringSince) > ar.KeepFiringFor {
				a.State = notifier.StateInactive
				a.ResolvedAt = ts
				ar.logDebugf(ts, a, "FIRING => INACTIVE: is absent in current evaluation round")
				continue
			}
			ar.logDebugf(ts, a, "KEEP_FIRING: will keep firing for %fs since %v", ar.KeepFiringFor.Seconds(), a.KeepFiringSince)
		}
		continue
	}
	numActivePending++
	if a.State == notifier.StatePending && ts.Sub(a.ActiveAt) >= ar.For {
@@ -436,6 +451,7 @@ func (ar *AlertingRule) UpdateWith(r Rule) error {
	}
	ar.Expr = nr.Expr
	ar.For = nr.For
	ar.KeepFiringFor = nr.KeepFiringFor
	ar.Labels = nr.Labels
	ar.Annotations = nr.Annotations
	ar.EvalInterval = nr.EvalInterval
@@ -508,6 +524,7 @@ func (ar *AlertingRule) ToAPI() APIRule {
		Name:           ar.Name,
		Query:          ar.Expr,
		Duration:       ar.For.Seconds(),
		KeepFiringFor:  ar.KeepFiringFor.Seconds(),
		Labels:         ar.Labels,
		Annotations:    ar.Annotations,
		LastEvaluation: lastState.time,
@@ -576,6 +593,9 @@ func (ar *AlertingRule) newAlertAPI(a notifier.Alert) *APIAlert {
	if alertURLGeneratorFn != nil {
		aa.SourceLink = alertURLGeneratorFn(a)
	}
	if a.State == notifier.StateFiring && !a.KeepFiringSince.IsZero() {
		aa.Stabilizing = true
	}
	return aa
}

@@ -113,7 +113,7 @@ func TestAlertingRule_Exec(t *testing.T) {
	testCases := []struct {
		rule      *AlertingRule
		steps     [][]datasource.Metric
		expAlerts []testAlert
		expAlerts map[int][]testAlert
	}{
		{
			newTestAlertingRule("empty", 0),
@@ -125,50 +125,8 @@ func TestAlertingRule_Exec(t *testing.T) {
			[][]datasource.Metric{
				{datasource.Metric{Values: []float64{1}, Timestamps: []int64{1}}},
			},
			[]testAlert{
				{alert: &notifier.Alert{State: notifier.StateFiring}},
			},
		},
		{
			newTestAlertingRule("single-firing", 0),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
			},
		},
		{
			newTestAlertingRule("single-firing=>inactive", 0),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
			},
		},
		{
			newTestAlertingRule("single-firing=>inactive=>firing", 0),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{},
				{metricWithLabels(t, "name", "foo")},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
			},
		},
		{
			newTestAlertingRule("single-firing=>inactive=>firing=>inactive", 0),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{},
				{metricWithLabels(t, "name", "foo")},
				{},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
			map[int][]testAlert{
				0: {{alert: &notifier.Alert{State: notifier.StateFiring}}},
			},
		},
		{
@@ -180,12 +138,16 @@ func TestAlertingRule_Exec(t *testing.T) {
				{},
				{},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
			map[int][]testAlert{
				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
				1: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
				2: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
				3: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
				4: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
			},
		},
		{
			newTestAlertingRule("single-firing=>inactive=>firing=>inactive=>empty=>firing", 0),
			newTestAlertingRule("single-firing=>inactive=>firing=>inactive=>inactive=>firing", 0),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{},
@@ -194,8 +156,13 @@ func TestAlertingRule_Exec(t *testing.T) {
				{},
				{metricWithLabels(t, "name", "foo")},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
			map[int][]testAlert{
				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
				1: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
				2: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
				3: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
				4: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
				5: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
			},
		},
		{
@@ -207,10 +174,12 @@ func TestAlertingRule_Exec(t *testing.T) {
					metricWithLabels(t, "name", "foo2"),
				},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
				{labels: []string{"name", "foo1"}, alert: &notifier.Alert{State: notifier.StateFiring}},
				{labels: []string{"name", "foo2"}, alert: &notifier.Alert{State: notifier.StateFiring}},
			map[int][]testAlert{
				0: {
					{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
					{labels: []string{"name", "foo1"}, alert: &notifier.Alert{State: notifier.StateFiring}},
					{labels: []string{"name", "foo2"}, alert: &notifier.Alert{State: notifier.StateFiring}},
				},
			},
		},
		{
@@ -223,10 +192,19 @@ func TestAlertingRule_Exec(t *testing.T) {
			// 1: fire first alert
			// 2: fire second alert, set first inactive
			// 3: fire third alert, set second inactive
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
				{labels: []string{"name", "foo1"}, alert: &notifier.Alert{State: notifier.StateInactive}},
				{labels: []string{"name", "foo2"}, alert: &notifier.Alert{State: notifier.StateFiring}},
			map[int][]testAlert{
				0: {
					{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
				},
				1: {
					{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
					{labels: []string{"name", "foo1"}, alert: &notifier.Alert{State: notifier.StateFiring}},
				},
				2: {
					{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
					{labels: []string{"name", "foo1"}, alert: &notifier.Alert{State: notifier.StateInactive}},
					{labels: []string{"name", "foo2"}, alert: &notifier.Alert{State: notifier.StateFiring}},
				},
			},
		},
		{
@@ -234,8 +212,8 @@ func TestAlertingRule_Exec(t *testing.T) {
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}},
			map[int][]testAlert{
				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
			},
		},
		{
@@ -244,8 +222,9 @@ func TestAlertingRule_Exec(t *testing.T) {
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo")},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
			map[int][]testAlert{
				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
				1: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
			},
		},
		{
@@ -253,34 +232,13 @@ func TestAlertingRule_Exec(t *testing.T) {
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo")},
				// empty step to reset and delete pending alerts
				// empty step to delete pending alerts
				{},
			},
			nil,
		},
		{
			newTestAlertingRule("for-pending=>firing=>inactive", defaultStep),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo")},
				// empty step to reset pending alerts
				{},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
			},
		},
		{
			newTestAlertingRule("for-pending=>firing=>inactive=>pending", defaultStep),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo")},
				// empty step to reset pending alerts
				{},
				{metricWithLabels(t, "name", "foo")},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}},
			map[int][]testAlert{
				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
				1: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
				2: {},
			},
		},
		{
@@ -288,13 +246,57 @@ func TestAlertingRule_Exec(t *testing.T) {
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo")},
				// empty step to reset pending alerts
				// empty step to set alert inactive
				{},
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo")},
			},
			[]testAlert{
				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
			map[int][]testAlert{
				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
				1: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
				2: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
				3: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
				4: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
			},
		},
		{
			newTestAlertingRuleWithKeepFiring("for-pending=>firing=>keepfiring=>firing", defaultStep, defaultStep),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo")},
				// empty step to keep firing
				{},
				{metricWithLabels(t, "name", "foo")},
			},
			map[int][]testAlert{
				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
				1: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
				2: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
				3: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
			},
		},
		{
			newTestAlertingRuleWithKeepFiring("for-pending=>firing=>keepfiring=>keepfiring=>inactive=>pending=>firing", defaultStep, 2*defaultStep),
			[][]datasource.Metric{
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo")},
				// empty step to keep firing
				{},
				// another empty step to keep firing
				{},
				// empty step to set alert inactive
				{},
				{metricWithLabels(t, "name", "foo")},
				{metricWithLabels(t, "name", "foo")},
			},
			map[int][]testAlert{
				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
				1: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
				2: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
				3: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
				4: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
				5: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
				6: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
			},
		},
	}
@@ -304,7 +306,7 @@ func TestAlertingRule_Exec(t *testing.T) {
			fq := &fakeQuerier{}
			tc.rule.q = fq
			tc.rule.GroupID = fakeGroup.ID()
			for _, step := range tc.steps {
			for i, step := range tc.steps {
				fq.reset()
				fq.add(step...)
				if _, err := tc.rule.Exec(context.TODO(), time.Now(), 0); err != nil {
@@ -312,28 +314,31 @@ func TestAlertingRule_Exec(t *testing.T) {
				}
				// artificial delay between applying steps
				time.Sleep(defaultStep)
			}
			if len(tc.rule.alerts) != len(tc.expAlerts) {
				t.Fatalf("expected %d alerts; got %d", len(tc.expAlerts), len(tc.rule.alerts))
			}
			expAlerts := make(map[uint64]*notifier.Alert)
			for _, ta := range tc.expAlerts {
				labels := make(map[string]string)
				for i := 0; i < len(ta.labels); i += 2 {
					k, v := ta.labels[i], ta.labels[i+1]
					labels[k] = v
				if _, ok := tc.expAlerts[i]; !ok {
					continue
				}
				labels[alertNameLabel] = tc.rule.Name
				h := hash(labels)
				expAlerts[h] = ta.alert
			}
			for key, exp := range expAlerts {
				got, ok := tc.rule.alerts[key]
				if !ok {
					t.Fatalf("expected to have key %d", key)
				if len(tc.rule.alerts) != len(tc.expAlerts[i]) {
					t.Fatalf("evalIndex %d: expected %d alerts; got %d", i, len(tc.expAlerts[i]), len(tc.rule.alerts))
				}
				if got.State != exp.State {
					t.Fatalf("expected state %d; got %d", exp.State, got.State)
				expAlerts := make(map[uint64]*notifier.Alert)
				for _, ta := range tc.expAlerts[i] {
					labels := make(map[string]string)
					for i := 0; i < len(ta.labels); i += 2 {
						k, v := ta.labels[i], ta.labels[i+1]
						labels[k] = v
					}
					labels[alertNameLabel] = tc.rule.Name
					h := hash(labels)
					expAlerts[h] = ta.alert
				}
				for key, exp := range expAlerts {
					got, ok := tc.rule.alerts[key]
					if !ok {
						t.Fatalf("evalIndex %d: expected to have key %d", i, key)
					}
					if got.State != exp.State {
						t.Fatalf("evalIndex %d: expected state %d; got %d", i, exp.State, got.State)
					}
				}
			}
		})
@@ -970,11 +975,18 @@ func newTestRuleWithLabels(name string, labels ...string) *AlertingRule {
}

func newTestAlertingRule(name string, waitFor time.Duration) *AlertingRule {
	return &AlertingRule{
	rule := AlertingRule{
		Name:         name,
		For:          waitFor,
		EvalInterval: waitFor,
		alerts:       make(map[uint64]*notifier.Alert),
		state:        newRuleState(10),
	}
	return &rule
}

func newTestAlertingRuleWithKeepFiring(name string, waitFor, keepFiringFor time.Duration) *AlertingRule {
	rule := newTestAlertingRule(name, waitFor)
	rule.KeepFiringFor = keepFiringFor
	return rule
}

@@ -105,14 +105,16 @@ func (g *Group) Validate(validateTplFn ValidateTplFn, validateExpressions bool)
// Rule describes entity that represent either
// recording rule or alerting rule.
type Rule struct {
	ID          uint64
	Record      string              `yaml:"record,omitempty"`
	Alert       string              `yaml:"alert,omitempty"`
	Expr        string              `yaml:"expr"`
	For         *promutils.Duration `yaml:"for,omitempty"`
	Labels      map[string]string   `yaml:"labels,omitempty"`
	Annotations map[string]string   `yaml:"annotations,omitempty"`
	Debug       bool                `yaml:"debug,omitempty"`
	ID     uint64
	Record string              `yaml:"record,omitempty"`
	Alert  string              `yaml:"alert,omitempty"`
	Expr   string              `yaml:"expr"`
	For    *promutils.Duration `yaml:"for,omitempty"`
	// Alert will continue firing for this long even when the alerting expression no longer has results.
	KeepFiringFor *promutils.Duration `yaml:"keep_firing_for,omitempty"`
	Labels        map[string]string   `yaml:"labels,omitempty"`
	Annotations   map[string]string   `yaml:"annotations,omitempty"`
	Debug         bool                `yaml:"debug,omitempty"`
	// UpdateEntriesLimit defines max number of rule's state updates stored in memory.
	// Overrides `-rule.updateEntriesLimit`.
	UpdateEntriesLimit *int `yaml:"update_entries_limit,omitempty"`
@@ -404,7 +404,7 @@ func TestHashRule(t *testing.T) {
			true,
		},
		{
			Rule{Alert: "alert", Expr: "up == 1", For: promutils.NewDuration(time.Minute)},
			Rule{Alert: "alert", Expr: "up == 1", For: promutils.NewDuration(time.Minute), KeepFiringFor: promutils.NewDuration(time.Minute)},
			Rule{Alert: "alert", Expr: "up == 1"},
			true,
		},
@@ -46,18 +46,36 @@ func TestUpdateWith(t *testing.T) {
				"summary":     "{{ $value|humanize }}",
				"description": "{{$labels}}",
			},
		}},
		[]config.Rule{{
			Alert: "foo",
			Expr:  "up > 10",
			For:   promutils.NewDuration(time.Second),
			Labels: map[string]string{
				"baz": "bar",
			},
			{
				Alert: "bar",
				Expr:  "up > 0",
				For:   promutils.NewDuration(time.Second),
				Labels: map[string]string{
					"bar": "baz",
				},
			}},
		[]config.Rule{
			{
				Alert: "foo",
				Expr:  "up > 10",
				For:   promutils.NewDuration(time.Second),
				Labels: map[string]string{
					"baz": "bar",
				},
				Annotations: map[string]string{
					"summary": "none",
				},
			},
			Annotations: map[string]string{
				"summary": "none",
			},
		}},
			{
				Alert:         "bar",
				Expr:          "up > 0",
				For:           promutils.NewDuration(2 * time.Second),
				KeepFiringFor: promutils.NewDuration(time.Minute),
				Labels: map[string]string{
					"bar": "baz",
				},
			}},
	},
	{
		"update recording rule",
@@ -272,6 +272,9 @@ func compareAlertingRules(t *testing.T, a, b *AlertingRule) error {
	if a.For != b.For {
		return fmt.Errorf("expected to have for %q; got %q", a.For, b.For)
	}
	if a.KeepFiringFor != b.KeepFiringFor {
		return fmt.Errorf("expected to have KeepFiringFor %q; got %q", a.KeepFiringFor, b.KeepFiringFor)
	}
	if !reflect.DeepEqual(a.Annotations, b.Annotations) {
		return fmt.Errorf("expected to have annotations %#v; got %#v", a.Annotations, b.Annotations)
	}
@@ -39,6 +39,8 @@ type Alert struct {
	ResolvedAt time.Time
	// LastSent defines the moment when Alert was sent last time
	LastSent time.Time
	// KeepFiringSince defines the moment when StateFiring was kept because of `keep_firing_for` instead of real alert
	KeepFiringSince time.Time
	// Value stores the value returned from evaluating expression from Expr field
	Value float64
	// ID is the unique identifier for the Alert
@@ -116,7 +116,11 @@ btn-primary
        <div class="row">
            <div class="col-12 mb-2">
                {% if r.Type == "alerting" %}
                {% if r.KeepFiringFor > 0 %}
                <b>alert:</b> {%s r.Name %} (for: {%v r.Duration %} seconds, keep_firing_for: {%v r.KeepFiringFor %} seconds)
                {% else %}
                <b>alert:</b> {%s r.Name %} (for: {%v r.Duration %} seconds)
                {% endif %}
                {% else %}
                <b>record:</b> {%s r.Name %}
                {% endif %}
@@ -225,6 +229,7 @@ btn-primary
                <td>
                    {%s ar.ActiveAt.Format("2006-01-02T15:04:05Z07:00") %}
                    {% if ar.Restored %}{%= badgeRestored() %}{% endif %}
                    {% if ar.Stabilizing %}{%= badgeStabilizing() %}{% endif %}
                </td>
                <td>{%s ar.Value %}</td>
                <td>
@@ -442,6 +447,18 @@ btn-primary
            </div>
        </div>
    </div>
    {% if rule.KeepFiringFor > 0 %}
    <div class="container border-bottom p-2">
        <div class="row">
            <div class="col-2">
                Keep firing for
            </div>
            <div class="col">
                {%v rule.KeepFiringFor %} seconds
            </div>
        </div>
    </div>
    {% endif %}
    {% endif %}
    <div class="container border-bottom p-2">
        <div class="row">
@@ -561,6 +578,10 @@ btn-primary
    <span class="badge bg-warning text-dark" title="Alert state was restored after the service restart from remote storage">restored</span>
{% endfunc %}

{% func badgeStabilizing() %}
    <span class="badge bg-warning text-dark" title="This firing state is kept because of `keep_firing_for`">stabilizing</span>
{% endfunc %}

{% func seriesFetchedWarn(r APIRule) %}
    {% if isNoMatch(r) %}
    <svg xmlns="http://www.w3.org/2000/svg"

File diff suppressed because it is too large

@@ -32,6 +32,9 @@ type APIAlert struct {
	SourceLink string `json:"source"`
	// Restored shows whether Alert's state was restored on restart
	Restored bool `json:"restored"`
	// Stabilizing shows when firing state is kept because of
	// `keep_firing_for` instead of real alert
	Stabilizing bool `json:"stabilizing"`
}

// WebLink returns a link to the alert which can be used in UI.
@@ -96,9 +99,11 @@ type APIRule struct {
	// Query represents Rule's `expression` field
	Query string `json:"query"`
	// Duration represents Rule's `for` field
	Duration    float64           `json:"duration"`
	Labels      map[string]string `json:"labels,omitempty"`
	Annotations map[string]string `json:"annotations,omitempty"`
	Duration float64 `json:"duration"`
	// Alert will continue firing for this long even when the alerting expression no longer has results.
	KeepFiringFor float64           `json:"keep_firing_for"`
	Labels        map[string]string `json:"labels,omitempty"`
	Annotations   map[string]string `json:"annotations,omitempty"`
	// LastError contains the error faced while executing the rule.
	LastError string `json:"lastError"`
	// EvaluationTime is the time taken to completely evaluate the rule in float seconds.
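
Since `keep_firing_for` is exposed through the rules API, clients can read it back; a hypothetical Go sketch decoding only the fields shown above from `/api/v1/rules` (the `name` field, the default `:8880` port and the Prometheus-style `data.groups[].rules[]` envelope are assumptions):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// apiRule mirrors a subset of the APIRule fields above.
type apiRule struct {
	Name          string  `json:"name"` // assumption: tag mirrors the Prometheus rules API
	Query         string  `json:"query"`
	Duration      float64 `json:"duration"`
	KeepFiringFor float64 `json:"keep_firing_for"`
}

func main() {
	resp, err := http.Get("http://localhost:8880/api/v1/rules")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var out struct {
		Data struct {
			Groups []struct {
				Rules []apiRule `json:"rules"`
			} `json:"groups"`
		} `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	for _, g := range out.Data.Groups {
		for _, r := range g.Rules {
			fmt.Printf("%s: for=%gs keep_firing_for=%gs\n", r.Name, r.Duration, r.KeepFiringFor)
		}
	}
}
```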

@@ -15,6 +15,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/graphite"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/influx"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/native"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentelemetry"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentsdb"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentsdbhttp"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/prometheusimport"
@@ -210,6 +211,15 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
		addInfluxResponseHeaders(w)
		influxutils.WriteDatabaseNames(w)
		return true
	case "/opentelemetry/api/v1/push":
		opentelemetryPushRequests.Inc()
		if err := opentelemetry.InsertHandler(r); err != nil {
			opentelemetryPushErrors.Inc()
			httpserver.Errorf(w, r, "%s", err)
			return true
		}
		w.WriteHeader(http.StatusOK)
		return true
	case "/datadog/api/v1/series":
		datadogWriteRequests.Inc()
		if err := datadog.InsertHandlerForHTTP(r); err != nil {
@@ -344,6 +354,9 @@ var (
	datadogIntakeRequests   = metrics.NewCounter(`vm_http_requests_total{path="/datadog/intake", protocol="datadog"}`)
	datadogMetadataRequests = metrics.NewCounter(`vm_http_requests_total{path="/datadog/api/v1/metadata", protocol="datadog"}`)

	opentelemetryPushRequests = metrics.NewCounter(`vm_http_requests_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)
	opentelemetryPushErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)

	promscrapeTargetsRequests          = metrics.NewCounter(`vm_http_requests_total{path="/targets"}`)
	promscrapeServiceDiscoveryRequests = metrics.NewCounter(`vm_http_requests_total{path="/service-discovery"}`)
74 app/vminsert/opentelemetry/request_handler.go Normal file
@@ -0,0 +1,74 @@
package opentelemetry

import (
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/stream"
	"github.com/VictoriaMetrics/metrics"
)

var (
	rowsInserted  = metrics.NewCounter(`vm_rows_inserted_total{type="opentelemetry"}`)
	rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="opentelemetry"}`)
)

// InsertHandler processes opentelemetry metrics.
func InsertHandler(req *http.Request) error {
	extraLabels, err := parserCommon.GetExtraLabels(req)
	if err != nil {
		return err
	}
	isGzipped := req.Header.Get("Content-Encoding") == "gzip"
	return stream.ParseStream(req.Body, isGzipped, func(tss []prompbmarshal.TimeSeries) error {
		return insertRows(tss, extraLabels)
	})
}

func insertRows(tss []prompbmarshal.TimeSeries, extraLabels []prompbmarshal.Label) error {
	ctx := common.GetInsertCtx()
	defer common.PutInsertCtx(ctx)

	rowsLen := 0
	for i := range tss {
		rowsLen += len(tss[i].Samples)
	}
	ctx.Reset(rowsLen)
	rowsTotal := 0
	hasRelabeling := relabel.HasRelabeling()
	for i := range tss {
		ts := &tss[i]
		rowsTotal += len(ts.Samples)
		ctx.Labels = ctx.Labels[:0]
		for _, label := range ts.Labels {
			ctx.AddLabel(label.Name, label.Value)
		}
		for _, label := range extraLabels {
			ctx.AddLabel(label.Name, label.Value)
		}
		if hasRelabeling {
			ctx.ApplyRelabeling()
		}
		if len(ctx.Labels) == 0 {
			// Skip metric without labels.
			continue
		}
		ctx.SortLabelsIfNeeded()
		var metricNameRaw []byte
		var err error
		samples := ts.Samples
		for i := range samples {
			r := &samples[i]
			metricNameRaw, err = ctx.WriteDataPointExt(metricNameRaw, ctx.Labels, r.Timestamp, r.Value)
			if err != nil {
				return err
			}
		}
	}
	rowsInserted.Add(rowsTotal)
	rowsPerInsert.Update(float64(rowsTotal))
	return ctx.FlushBufs()
}
@@ -2,7 +2,7 @@ version: '3.5'
services:
  vmagent:
    container_name: vmagent
    image: victoriametrics/vmagent:v1.91.3
    image: victoriametrics/vmagent:v1.92.0
    depends_on:
      - "vminsert"
    ports:
@@ -32,7 +32,7 @@ services:

  vmstorage-1:
    container_name: vmstorage-1
    image: victoriametrics/vmstorage:v1.91.3-cluster
    image: victoriametrics/vmstorage:v1.92.0-cluster
    ports:
      - 8482
      - 8400
@@ -44,7 +44,7 @@ services:
    restart: always
  vmstorage-2:
    container_name: vmstorage-2
    image: victoriametrics/vmstorage:v1.91.3-cluster
    image: victoriametrics/vmstorage:v1.92.0-cluster
    ports:
      - 8482
      - 8400
@@ -56,7 +56,7 @@ services:
    restart: always
  vminsert:
    container_name: vminsert
    image: victoriametrics/vminsert:v1.91.3-cluster
    image: victoriametrics/vminsert:v1.92.0-cluster
    depends_on:
      - "vmstorage-1"
      - "vmstorage-2"
@@ -68,7 +68,7 @@ services:
    restart: always
  vmselect:
    container_name: vmselect
    image: victoriametrics/vmselect:v1.91.3-cluster
    image: victoriametrics/vmselect:v1.92.0-cluster
    depends_on:
      - "vmstorage-1"
      - "vmstorage-2"
@@ -82,7 +82,7 @@ services:

  vmalert:
    container_name: vmalert
    image: victoriametrics/vmalert:v1.91.3
    image: victoriametrics/vmalert:v1.92.0
    depends_on:
      - "vmselect"
    ports:
@@ -2,7 +2,7 @@ version: "3.5"
services:
  vmagent:
    container_name: vmagent
    image: victoriametrics/vmagent:v1.91.3
    image: victoriametrics/vmagent:v1.92.0
    depends_on:
      - "victoriametrics"
    ports:
@@ -18,7 +18,7 @@ services:
    restart: always
  victoriametrics:
    container_name: victoriametrics
    image: victoriametrics/victoria-metrics:v1.91.3
    image: victoriametrics/victoria-metrics:v1.92.0
    ports:
      - 8428:8428
      - 8089:8089
@@ -56,7 +56,7 @@ services:
    restart: always
  vmalert:
    container_name: vmalert
    image: victoriametrics/vmalert:v1.91.3
    image: victoriametrics/vmalert:v1.92.0
    depends_on:
      - "victoriametrics"
      - "alertmanager"
@@ -105,7 +105,7 @@ services:
      - '--config=/config.yml'

  vmsingle:
    image: victoriametrics/victoria-metrics:v1.91.3
    image: victoriametrics/victoria-metrics:v1.92.0
    ports:
      - '8428:8428'
    command:
@@ -8,7 +8,7 @@
4. Set variables `DIGITALOCEAN_API_TOKEN` with `VM_VERSION` for `packer` environment and run make from example below:

```console
make release-victoria-metrics-digitalocean-oneclick-droplet DIGITALOCEAN_API_TOKEN="dop_v23_2e46f4759ceeeba0d0248" VM_VERSION="1.91.3"
make release-victoria-metrics-digitalocean-oneclick-droplet DIGITALOCEAN_API_TOKEN="dop_v23_2e46f4759ceeeba0d0248" VM_VERSION="1.92.0"
```

@@ -19,8 +19,8 @@ On the server:
* VictoriaMetrics is running on ports: 8428, 8089, 4242, 2003 and they are bound to the local interface.

********************************************************************************
# This image includes 1.91.3 version of VictoriaMetrics.
# See Release notes https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.91.3
# This image includes 1.92.0 version of VictoriaMetrics.
# See Release notes https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.92.0

# Welcome to VictoriaMetrics droplet!

@@ -24,8 +24,12 @@ The following `tip` changes can be tested by building VictoriaMetrics components

## tip

**Update note: starting from this release, [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html) writes
to the configured storage the following samples by default:
## [v1.92.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.92.0)

Released at 2023-07-27

**Update note**: starting from this release, [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html) writes
the following samples to the configured remote storage by default:

- aggregated samples;
- the original input samples, which match zero `match` options from the provided [config](https://docs.victoriametrics.com/stream-aggregation.html#stream-aggregation-config).
@@ -34,7 +38,7 @@ Previously only aggregated samples were written to the storage by default.
The previous behavior can be restored in the following ways:

- by passing `-streamAggr.dropInput` command-line flag to single-node VictoriaMetrics;
- by passing `-remoteWrite.streamAggr.dropInput` command-line flag per each configured `-remoteWrite.streamAggr.config` at `vmagent`.**
- by passing `-remoteWrite.streamAggr.dropInput` command-line flag per each configured `-remoteWrite.streamAggr.config` at `vmagent`.

* SECURITY: upgrade base docker image (alpine) from 3.18.0 to 3.18.2. See [alpine 3.18.2 release notes](https://alpinelinux.org/posts/Alpine-3.15.9-3.16.6-3.17.4-3.18.2-released.html).
* SECURITY: upgrade Go builder from Go1.20.5 to Go1.20.6. See [the list of issues addressed in Go1.20.6](https://github.com/golang/go/issues?q=milestone%3AGo1.20.6+label%3ACherryPickApproved).
@@ -49,6 +53,7 @@ The previous behavior can be restored in the following ways:
  - `WITH (f(window, step, off) = m[window:step] offset off) f(5m, 10s, 1h)` is automatically transformed to `m[5m:10s] offset 1h`
  Thanks to @lujiajing1126 for the initial idea and [implementation](https://github.com/VictoriaMetrics/metricsql/pull/13). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4025).
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): added a new page with the list of currently running queries. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4598) and [these docs](https://docs.victoriametrics.com/#active-queries).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for data ingestion via [OpenTelemetry protocol](https://opentelemetry.io/docs/reference/specification/metrics/). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-data-via-opentelemetry), [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2424) and [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2570).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow sharding outgoing time series among the configured remote storage systems. This can be useful for building horizontally scalable [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html), when samples for the same time series must be aggregated by the same `vmagent` instance at the second level. See [these docs](https://docs.victoriametrics.com/vmagent.html#sharding-among-remote-storages) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4637) for details.
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow configuring staleness interval in [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html) config. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4667) for details.
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow specifying a list of [series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering) inside `if` option of relabeling rules. The corresponding relabeling rule is executed when at least a single series selector matches. See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling-enhancements).
@@ -66,7 +71,8 @@ The previous behavior can be restored in the following ways:
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): allow disabling of `step` param attached to [instant queries](https://docs.victoriametrics.com/keyConcepts.html#instant-query). This might be useful for using vmalert with datasources that do not support this param, unlike VictoriaMetrics. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4573) for details.
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): support option for "blackholing" alerting notifications if `-notifier.blackhole` cmd-line flag is set. Enable this flag if you want vmalert to evaluate alerting rules without sending any notifications to external receivers (e.g. alertmanager). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4122) for details. Thanks to @venkatbvc for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4639).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add unit test for alerting and recording rules, see more [details](https://docs.victoriametrics.com/vmalert.html#unit-testing-for-rules) here. Thanks to @Haleygo for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4596).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): allow overriding default GET params for rules with `graphite` datasource type, in the same way as it happens for `prometheus` type. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4685).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): support `keep_firing_for` field for alerting rules. See docs updated [here](https://docs.victoriametrics.com/vmalert.html#alerting-rules) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4529). Thanks to @Haleygo for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4669).
* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): expose `vmauth_user_request_duration_seconds` and `vmauth_unauthorized_user_request_duration_seconds` summary metrics for measuring requests latency per user.
* FEATURE: [vmbackup](https://docs.victoriametrics.com/vmbackup.html): show backup progress percentage in log during backup uploading. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4460).
* FEATURE: [vmrestore](https://docs.victoriametrics.com/vmrestore.html): show restoring progress percentage in log during backup downloading. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4460).
@@ -86,7 +92,9 @@ The previous behavior can be restored in the following ways:
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): correctly calculate evaluation time for rules. Before, there was a low probability for discrepancy between actual time and rules evaluation time if evaluation interval was lower than the execution time for rules within the group.
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): reset evaluation timestamp after modifying group interval. Before, there could be latency on rule evaluation time.
* BUGFIX: vmselect: fix timestamp alignment for Prometheus querying API if time argument is less than 10m from the beginning of Unix epoch.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): close HTTP connections to [service discovery](https://docs.victoriametrics.com/sd_configs.html) servers when they are no longer needed. This should prevent from possible connection exhaustion in some cases. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4724).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): do not show [relabel debug](https://docs.victoriametrics.com/vmagent.html#relabel-debug) links at the `/targets` page when `vmagent` runs with `-promscrape.dropOriginalLabels` command-line flag, since it doesn't have the original labels needed for relabel debug. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4597).
* BUGFIX: vminsert: fixed decoding of label values with slash when accepting data via [pushgateway protocol](https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format). This fixes Prometheus golang client compatibility. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4692).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly parse binary operations with reserved words on the right side such as `foo + (on{bar="baz"})`. Previously such queries could lead to panic. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4422).
* BUGFIX: [Official Grafana dashboards for VictoriaMetrics](https://grafana.com/orgs/victoriametrics): display cache usage for all components on panel `Cache usage % by type` for cluster dashboard. Before, only vmstorage caches were shown.

@ -335,13 +335,14 @@ Check practical examples of VictoriaMetrics API [here](https://docs.victoriametr
The `<accountID>` can be set to `multitenant` string, e.g. `http://<vminsert>:8480/insert/multitenant/<suffix>`. Such URLs accept data from multiple tenants
specified via `vm_account_id` and `vm_project_id` labels. See [multitenancy via labels](#multitenancy-via-labels) for more details.
- `<suffix>` may have the following values:
  - `prometheus` and `prometheus/api/v1/write` - for ingesting data with [Prometheus remote write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write).
  - `prometheus/api/v1/import` - for importing data obtained via `api/v1/export` at `vmselect` (see below), JSON line format.
  - `prometheus/api/v1/import/native` - for importing data obtained via `api/v1/export/native` on `vmselect` (see below).
  - `prometheus/api/v1/import/csv` - for importing arbitrary CSV data. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-csv-data) for details.
  - `prometheus/api/v1/import/prometheus` - for importing data in [Prometheus text exposition format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format) and in [OpenMetrics format](https://github.com/OpenObservability/OpenMetrics/blob/master/specification/OpenMetrics.md). This endpoint also supports [Pushgateway protocol](https://github.com/prometheus/pushgateway#url). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-prometheus-exposition-format) for details.
  - `opentelemetry/api/v1/push` - for ingesting data via [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-data-via-opentelemetry).
  - `datadog/api/v1/series` - for ingesting data with [DataDog submit metrics API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent) for details.
  - `influx/write` and `influx/api/v2/write` - for ingesting data with [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details.
  - `opentsdb/api/put` - for accepting [OpenTSDB HTTP /api/put requests](http://opentsdb.net/docs/build/html/api_http/put.html). This handler is disabled by default. It is exposed on a distinct TCP address set via `-opentsdbHTTPListenAddr` command-line flag. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-opentsdb-data-via-http-apiput-requests) for details.

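For example, a hypothetical `curl` command (host and tenant are placeholders) that pushes a single sample through the Prometheus text exposition import endpoint listed above:

```console
curl -d 'foo{bar="baz"} 123' -X POST 'http://<vminsert>:8480/insert/0/prometheus/api/v1/import/prometheus'
```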
- URLs for [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/): `http://<vmselect>:8481/select/<accountID>/prometheus/<suffix>`, where:
@ -89,6 +89,7 @@ VictoriaMetrics has the following prominent features:
* [Arbitrary CSV data](#how-to-import-csv-data).
* [Native binary format](#how-to-import-data-in-native-format).
* [DataDog agent or DogStatsD](#how-to-send-data-from-datadog-agent).
* [OpenTelemetry metrics format](#sending-data-via-opentelemetry).
* It supports powerful [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html), which can be used as a [statsd](https://github.com/statsd/statsd) alternative.
* It supports metrics [relabeling](#relabeling).
* It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and
@ -1176,6 +1177,7 @@ Additionally, VictoriaMetrics can accept metrics via the following popular data
* DataDog `submit metrics` API. See [these docs](#how-to-send-data-from-datadog-agent) for details.
* InfluxDB line protocol. See [these docs](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details.
* Graphite plaintext protocol. See [these docs](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) for details.
* OpenTelemetry http API. See [these docs](#sending-data-via-opentelemetry) for details.
* OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details.
* OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details.
* `/api/v1/import` for importing data obtained from [/api/v1/export](#how-to-export-data-in-json-line-format).
@ -1359,6 +1361,13 @@ Note that it could be required to flush response cache after importing historica
VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter).

## Sending data via OpenTelemetry

VictoriaMetrics supports data ingestion via [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md) at the `/opentelemetry/api/v1/push` path.

VictoriaMetrics expects `protobuf`-encoded requests at `/opentelemetry/api/v1/push`.
Set the HTTP request header `Content-Encoding: gzip` when sending gzip-compressed data to `/opentelemetry/api/v1/push`.
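As a minimal sketch (not part of the docs; the endpoint assumes a single-node VictoriaMetrics on `localhost:8428`, the content type is an assumption from the OTLP/HTTP convention, and `payload` is assumed to already hold a protobuf-encoded OTLP `MetricsData` message), a gzip-compressed push could look like this in Go:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"log"
	"net/http"
)

// pushOTLP gzip-compresses an already protobuf-encoded OTLP metrics payload
// and POSTs it to VictoriaMetrics.
func pushOTLP(payload []byte) error {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(payload); err != nil {
		return err
	}
	if err := zw.Close(); err != nil {
		return err
	}
	req, err := http.NewRequest(http.MethodPost, "http://localhost:8428/opentelemetry/api/v1/push", &buf)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/x-protobuf") // assumption: OTLP/HTTP protobuf content type
	req.Header.Set("Content-Encoding", "gzip")               // tells the server the body is gzip-compressed
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		log.Printf("unexpected status: %s", resp.Status)
	}
	return nil
}

func main() {
	_ = pushOTLP(nil) // replace nil with serialized MetricsData bytes
}
```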

## Relabeling

VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points
@ -97,6 +97,7 @@ VictoriaMetrics has the following prominent features:
* [Arbitrary CSV data](#how-to-import-csv-data).
* [Native binary format](#how-to-import-data-in-native-format).
* [DataDog agent or DogStatsD](#how-to-send-data-from-datadog-agent).
* [OpenTelemetry metrics format](#sending-data-via-opentelemetry).
* It supports powerful [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html), which can be used as a [statsd](https://github.com/statsd/statsd) alternative.
* It supports metrics [relabeling](#relabeling).
* It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and
@ -1184,6 +1185,7 @@ Additionally, VictoriaMetrics can accept metrics via the following popular data
* DataDog `submit metrics` API. See [these docs](#how-to-send-data-from-datadog-agent) for details.
* InfluxDB line protocol. See [these docs](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details.
* Graphite plaintext protocol. See [these docs](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) for details.
* OpenTelemetry http API. See [these docs](#sending-data-via-opentelemetry) for details.
* OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details.
* OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details.
* `/api/v1/import` for importing data obtained from [/api/v1/export](#how-to-export-data-in-json-line-format).
@ -1367,6 +1369,13 @@ Note that it could be required to flush response cache after importing historica
VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter).

## Sending data via OpenTelemetry

VictoriaMetrics supports data ingestion via [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md) at the `/opentelemetry/api/v1/push` path.

VictoriaMetrics expects `protobuf`-encoded requests at `/opentelemetry/api/v1/push`.
Set the HTTP request header `Content-Encoding: gzip` when sending gzip-compressed data to `/opentelemetry/api/v1/push`.

## Relabeling

VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points
@ -104,6 +104,7 @@ additionally to pull-based Prometheus-compatible targets' scraping:
* DataDog "submit metrics" API. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent).
* InfluxDB line protocol via `http://<vmagent>:8429/write`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf).
* Graphite plaintext protocol if `-graphiteListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-graphite-compatible-agents-such-as-statsd).
* OpenTelemetry http API. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-data-via-opentelemetry).
* OpenTSDB telnet and http protocols if `-opentsdbListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-opentsdb-compatible-agents).
* Prometheus remote write protocol via `http://<vmagent>:8429/api/v1/write`.
* JSON lines import protocol via `http://<vmagent>:8429/api/v1/import`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-json-line-format).
@ -214,6 +214,10 @@ expr: <string>
# as firing once they return.
[ for: <duration> | default = 0s ]

# Alert will continue firing for this long even when the alerting expression no longer has results.
# This allows you to delay alert resolution.
[ keep_firing_for: <duration> | default = 0s ]

# Whether to print debug information into logs.
# Information includes alerts state changes and requests sent to the datasource.
# Please note, that if rule's query params contain sensitive
@ -368,19 +372,24 @@ For recording rules to work `-remoteWrite.url` must be specified.

### Alerts state on restarts

`vmalert` is stateless, it holds alerts state in the process memory. Restarting the `vmalert` process
resets the alerts state in memory. To prevent `vmalert` from losing alerts state, it should be configured
to persist the state to a remote destination via the following flags:

* `-remoteWrite.url` - URL to VictoriaMetrics (Single) or vminsert (Cluster). `vmalert` will persist alerts state
  to the configured address in the form of [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series)
  `ALERTS` and `ALERTS_FOR_STATE` via remote-write protocol.
  These time series can be queried from VictoriaMetrics just as any other time series.
  The state will be persisted to the configured address on each evaluation.
* `-remoteRead.url` - URL to VictoriaMetrics (Single) or vmselect (Cluster). `vmalert` will try to restore alerts state
  from the configured address by querying time series with name `ALERTS_FOR_STATE`. The restore happens only once when
  the `vmalert` process starts, and only for the configured rules. Config [hot reload](#hot-config-reload) doesn't trigger
  state restore.

Both flags are required for proper state restoration. The restore process may fail if time series are missing
in the configured `-remoteRead.url`, weren't updated in the last `1h` (controlled by `-remoteRead.lookback`),
or the received state doesn't match the current `vmalert` rules configuration. `vmalert` marks successfully restored rules
with the `restored` label in the [web UI](#WEB).
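For illustration, a hypothetical invocation wiring both flags to a single-node VictoriaMetrics (all addresses and the rule file are placeholders):

```console
./vmalert -rule=alerts.yml \
    -datasource.url=http://victoriametrics:8428 \
    -remoteWrite.url=http://victoriametrics:8428 \
    -remoteRead.url=http://victoriametrics:8428 \
    -notifier.url=http://alertmanager:9093
```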

### Multitenancy

@ -742,6 +751,7 @@ See full description for these flags in `./vmalert -help`.
* Graphite engine isn't supported yet;
* `query` template function is disabled for performance reasons (might be changed in future);
* `limit` group's param has no effect during replay (might be changed in future);
* `keep_firing_for` alerting rule param has no effect during replay (might be changed in future).

## Unit Testing for Rules

@ -58,7 +58,11 @@ func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {

// MustStop stops further usage for sdc.
func (sdc *SDConfig) MustStop() {
	v := configMap.Delete(sdc)
	if v != nil {
		cfg := v.(*apiConfig)
		cfg.c.Stop()
	}
}

func appendMachineLabels(vms []virtualMachine, port int, sdc *SDConfig) []*promutils.Labels {
@ -71,6 +71,7 @@ func TestGetVirtualMachinesSuccess(t *testing.T) {
	if err != nil {
		t.Fatalf("unexpected error at client create: %s", err)
	}
	defer c.Stop()
	ac := &apiConfig{
		c:              c,
		subscriptionID: "some-id",
@ -90,6 +90,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
	}
	dc, err := getDatacenter(client, sdc.Datacenter)
	if err != nil {
		client.Stop()
		return nil, fmt.Errorf("cannot obtain consul datacenter: %w", err)
	}
@ -84,6 +84,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
	}
	agent, err := consul.GetAgentInfo(client)
	if err != nil {
		client.Stop()
		return nil, fmt.Errorf("cannot obtain consul datacenter: %w", err)
	}
	dc := sdc.Datacenter
@ -155,5 +155,9 @@ func addDropletLabels(droplets []droplet, defaultPort int) []*promutils.Labels {

// MustStop stops further usage for sdc.
func (sdc *SDConfig) MustStop() {
	v := configMap.Delete(sdc)
	if v != nil {
		cfg := v.(*apiConfig)
		cfg.client.Stop()
	}
}
@ -47,5 +47,9 @@ func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {

// MustStop stops further usage for sdc.
func (sdc *SDConfig) MustStop() {
	v := configMap.Delete(sdc)
	if v != nil {
		cfg := v.(*apiConfig)
		cfg.client.Stop()
	}
}
@ -56,5 +56,9 @@ func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {

// MustStop stops further usage for sdc.
func (sdc *SDConfig) MustStop() {
	v := configMap.Delete(sdc)
	if v != nil {
		cfg := v.(*apiConfig)
		cfg.client.Stop()
	}
}
@ -101,7 +101,11 @@ func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {

// MustStop stops further usage for sdc.
func (sdc *SDConfig) MustStop() {
	v := configMap.Delete(sdc)
	if v != nil {
		cfg := v.(*apiConfig)
		cfg.client.Stop()
	}
}

func addInstanceLabels(apps *applications) []*promutils.Labels {
@ -42,6 +42,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
	if len(project) == 0 {
		proj, err := getCurrentProject()
		if err != nil {
			client.CloseIdleConnections()
			return nil, fmt.Errorf("cannot determine the current project; make sure `vmagent` runs inside GCE; error: %w", err)
		}
		project = proj
@ -52,6 +53,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
		// Autodetect the current zone.
		zone, err := getCurrentZone()
		if err != nil {
			client.CloseIdleConnections()
			return nil, fmt.Errorf("cannot determine the current zone; make sure `vmagent` runs inside GCE; error: %w", err)
		}
		zones = append(zones, zone)
@ -62,6 +64,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3202
		zs, err := getZonesForProject(client, project)
		if err != nil {
			client.CloseIdleConnections()
			return nil, fmt.Errorf("cannot obtain zones for project %q: %w", project, err)
		}
		zones = zs
@ -73,5 +73,9 @@ func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {

// MustStop stops further usage for sdc.
func (sdc *SDConfig) MustStop() {
	v := configMap.Delete(sdc)
	if v != nil {
		cfg := v.(*apiConfig)
		cfg.client.CloseIdleConnections()
	}
}
@ -57,5 +57,9 @@ func addHTTPTargetLabels(src []httpGroupTarget, sourceURL string) []*promutils.L

// MustStop stops further usage for sdc.
func (sdc *SDConfig) MustStop() {
	v := configMap.Delete(sdc)
	if v != nil {
		cfg := v.(*apiConfig)
		cfg.client.Stop()
	}
}
@ -80,6 +80,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
	// The synchronous targets' update is needed for returning non-empty list of targets
	// just after the initialization.
	if err := cfg.updateTargetsLabels(ctx); err != nil {
		client.Stop()
		return nil, fmt.Errorf("cannot discover Kuma targets: %w", err)
	}
	cfg.wg.Add(1)
@ -91,6 +91,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
	}
	ac, err := opts.NewConfig()
	if err != nil {
		cfg.client.CloseIdleConnections()
		return nil, err
	}
	cfg.client.Transport = &http.Transport{
@ -111,6 +112,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
		sdcAuth = readCredentialsFromEnv()
	}
	if strings.HasSuffix(sdcAuth.IdentityEndpoint, "v2.0") {
		cfg.client.CloseIdleConnections()
		return nil, errors.New("identity_endpoint v2.0 is not supported")
	}
	// trim .0 from v3.0 for prometheus cfg compatibility
@ -118,11 +120,13 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {

	parsedURL, err := url.Parse(sdcAuth.IdentityEndpoint)
	if err != nil {
		cfg.client.CloseIdleConnections()
		return nil, fmt.Errorf("cannot parse identity_endpoint: %s as url, err: %w", sdcAuth.IdentityEndpoint, err)
	}
	cfg.endpoint = parsedURL
	tokenReq, err := buildAuthRequestBody(&sdcAuth)
	if err != nil {
		cfg.client.CloseIdleConnections()
		return nil, err
	}
	cfg.authTokenReq = tokenReq
@ -57,5 +57,9 @@ func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {

// MustStop stops further usage for sdc.
func (sdc *SDConfig) MustStop() {
	v := configMap.Delete(sdc)
	if v != nil {
		cfg := v.(*apiConfig)
		cfg.client.CloseIdleConnections()
	}
}
@ -81,6 +81,12 @@ type HTTPClient struct {
	ReadTimeout time.Duration
}

func (hc *HTTPClient) stop() {
	// Close idle connections to server in order to free up resources.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4724
	hc.client.CloseIdleConnections()
}

var defaultDialer = &net.Dialer{}

// NewClient returns new Client for the given args.
@ -276,6 +282,8 @@ func (c *Client) APIServer() string {
// Stop cancels all in-flight requests
func (c *Client) Stop() {
	c.clientCancel()
	c.client.stop()
	c.blockingClient.stop()
}

func doRequestWithPossibleRetry(hc *HTTPClient, req *http.Request) (*http.Response, error) {
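For context, a minimal standalone sketch of the pattern used above (all names are illustrative, not the actual `Client` type): cancel a shared context to abort in-flight requests, then drop idle keep-alive connections so they don't accumulate:

```go
package main

import (
	"context"
	"net/http"
)

type client struct {
	c      *http.Client
	ctx    context.Context
	cancel context.CancelFunc
}

func newClient() *client {
	ctx, cancel := context.WithCancel(context.Background())
	return &client{c: &http.Client{}, ctx: ctx, cancel: cancel}
}

// Stop aborts requests created with cl.ctx and frees pooled TCP connections.
func (cl *client) Stop() {
	cl.cancel()
	cl.c.CloseIdleConnections()
}

func main() {
	cl := newClient()
	// Requests would be created with http.NewRequestWithContext(cl.ctx, ...).
	cl.Stop()
}
```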
@ -64,7 +64,7 @@ func getPushgatewayLabels(path string) ([]prompbmarshal.Label, error) {
		s = s[n+1:]
	}
	if isBase64 {
		data, err := base64.RawURLEncoding.DecodeString(strings.TrimRight(value, "="))
		if err != nil {
			return nil, fmt.Errorf("cannot base64-decode value=%q for label=%q: %w", value, name, err)
		}
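To see why the fix switches to `RawURLEncoding` plus explicit padding trimming, here is a small standalone sketch (not part of the commit): it accepts unpadded values such as those in the tests below, which the padded `URLEncoding` decoder would reject:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// "Zm9v" decodes to "foo"; "PT0vPT0" decodes to "==/==" (contains a slash);
	// a bare "=" decodes to an empty value.
	for _, v := range []string{"Zm9v", "PT0vPT0", "="} {
		data, err := base64.RawURLEncoding.DecodeString(strings.TrimRight(v, "="))
		fmt.Printf("%q -> %q err=%v\n", v, data, err)
	}
}
```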
@ -62,6 +62,9 @@ func TestGetPushgatewayLabelsSuccess(t *testing.T) {
	f("/foo/metrics/job@base64/Zm9v", `{job="foo"}`)
	f("/foo/metrics/job/x/a/foo/aaa/bar", `{a="foo",aaa="bar",job="x"}`)
	f("/foo/metrics/job/x/a@base64/Zm9v", `{a="foo",job="x"}`)
	f("/metrics/job/test/region@base64/YXotc291dGhlYXN0LTEtZjAxL3d6eS1hei1zb3V0aGVhc3QtMQ", `{job="test",region="az-southeast-1-f01/wzy-az-southeast-1"}`)
	f("/metrics/job/test/empty@base64/=", `{job="test"}`)
	f("/metrics/job/test/test@base64/PT0vPT0", `{job="test",test="==/=="}`)
}

func TestGetPushgatewayLabelsFailure(t *testing.T) {
120
lib/protoparser/opentelemetry/pb/common.pb.go
Normal file
@ -0,0 +1,120 @@
|
|||
// Copyright 2019, OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.21.12
|
||||
// source: lib/protoparser/opentelemetry/proto/common.proto
|
||||
|
||||
package pb
|
||||
|
||||
// AnyValue is used to represent any type of attribute value. AnyValue may contain a
|
||||
// primitive value such as a string or integer or it may contain an arbitrary nested
|
||||
// object containing arrays, key-value lists and primitives.
|
||||
type AnyValue struct {
|
||||
unknownFields []byte
|
||||
|
||||
// The value is one of the listed fields. It is valid for all values to be unspecified
|
||||
// in which case this AnyValue is considered to be "empty".
|
||||
//
|
||||
// Types that are assignable to Value:
|
||||
//
|
||||
// *AnyValue_StringValue
|
||||
// *AnyValue_BoolValue
|
||||
// *AnyValue_IntValue
|
||||
// *AnyValue_DoubleValue
|
||||
// *AnyValue_ArrayValue
|
||||
// *AnyValue_KvlistValue
|
||||
// *AnyValue_BytesValue
|
||||
Value isAnyValue_Value `protobuf_oneof:"value"`
|
||||
}
|
||||
|
||||
type isAnyValue_Value interface {
|
||||
isAnyValue_Value()
|
||||
}
|
||||
|
||||
type AnyValue_StringValue struct {
|
||||
StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type AnyValue_BoolValue struct {
|
||||
BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type AnyValue_IntValue struct {
|
||||
IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type AnyValue_DoubleValue struct {
|
||||
DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type AnyValue_ArrayValue struct {
|
||||
ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type AnyValue_KvlistValue struct {
|
||||
KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type AnyValue_BytesValue struct {
|
||||
BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*AnyValue_StringValue) isAnyValue_Value() {}
|
||||
|
||||
func (*AnyValue_BoolValue) isAnyValue_Value() {}
|
||||
|
||||
func (*AnyValue_IntValue) isAnyValue_Value() {}
|
||||
|
||||
func (*AnyValue_DoubleValue) isAnyValue_Value() {}
|
||||
|
||||
func (*AnyValue_ArrayValue) isAnyValue_Value() {}
|
||||
|
||||
func (*AnyValue_KvlistValue) isAnyValue_Value() {}
|
||||
|
||||
func (*AnyValue_BytesValue) isAnyValue_Value() {}
|
||||
|
||||
// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
|
||||
// since oneof in AnyValue does not allow repeated fields.
|
||||
type ArrayValue struct {
|
||||
unknownFields []byte
|
||||
// Array of values. The array may be empty (contain 0 elements).
|
||||
Values []*AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
|
||||
}
|
||||
|
||||
// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
|
||||
// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
|
||||
// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
|
||||
// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
|
||||
// are semantically equivalent.
|
||||
type KeyValueList struct {
|
||||
unknownFields []byte
|
||||
|
||||
// A collection of key/value pairs. The list may be empty (may
|
||||
// contain 0 elements).
|
||||
// The keys MUST be unique (it is not allowed to have more than one
|
||||
// value with the same key).
|
||||
Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
|
||||
}
|
||||
|
||||
// KeyValue is a key-value pair that is used to store Span attributes, Link
|
||||
// attributes, etc.
|
||||
type KeyValue struct {
|
||||
unknownFields []byte
|
||||
|
||||
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
1079
lib/protoparser/opentelemetry/pb/common_vtproto.pb.go
Normal file
File diff suppressed because it is too large
69
lib/protoparser/opentelemetry/pb/helpers.go
Normal file
@ -0,0 +1,69 @@
package pb

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"math"
	"strconv"
)

// FormatString returns a string representation of the value stored in x.
func (x *AnyValue) FormatString() string {
	switch v := x.Value.(type) {
	case *AnyValue_StringValue:
		return v.StringValue

	case *AnyValue_BoolValue:
		return strconv.FormatBool(v.BoolValue)

	case *AnyValue_DoubleValue:
		return float64AsString(v.DoubleValue)

	case *AnyValue_IntValue:
		return strconv.FormatInt(v.IntValue, 10)

	case *AnyValue_KvlistValue:
		jsonStr, _ := json.Marshal(v.KvlistValue.Values)
		return string(jsonStr)

	case *AnyValue_BytesValue:
		return base64.StdEncoding.EncodeToString(v.BytesValue)

	case *AnyValue_ArrayValue:
		jsonStr, _ := json.Marshal(v.ArrayValue.Values)
		return string(jsonStr)

	default:
		return ""
	}
}

func float64AsString(f float64) string {
	if math.IsInf(f, 0) || math.IsNaN(f) {
		return fmt.Sprintf("json: unsupported value: %s", strconv.FormatFloat(f, 'g', -1, 64))
	}

	// Convert as if by ES6 number to string conversion.
	// This matches most other JSON generators.
	// See golang.org/issue/6384 and golang.org/issue/14135.
	// Like fmt %g, but the exponent cutoffs are different
	// and exponents themselves are not padded to two digits.
	scratch := [64]byte{}
	b := scratch[:0]
	abs := math.Abs(f)
	fmt := byte('f')
	if abs != 0 && (abs < 1e-6 || abs >= 1e21) {
		fmt = 'e'
	}
	b = strconv.AppendFloat(b, f, fmt, -1, 64)
	if fmt == 'e' {
		// clean up e-09 to e-9
		n := len(b)
		if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' {
			b[n-2] = b[n-1]
			b = b[:n-1]
		}
	}
	return string(b)
}
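A quick usage illustration (not part of the commit; the import path assumes this repository layout):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
)

func main() {
	v := &pb.AnyValue{Value: &pb.AnyValue_DoubleValue{DoubleValue: 1e-7}}
	fmt.Println(v.FormatString()) // "1e-7": the ES6-style formatter trims the zero-padded exponent
}
```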
736
lib/protoparser/opentelemetry/pb/metrics.pb.go
Normal file
@ -0,0 +1,736 @@
|
|||
// Copyright 2019, OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.21.12
|
||||
// source: lib/protoparser/opentelemetry/proto/metrics.proto
|
||||
|
||||
package pb
|
||||
|
||||
// AggregationTemporality defines how a metric aggregator reports aggregated
|
||||
// values. It describes how those values relate to the time interval over
|
||||
// which they are aggregated.
|
||||
type AggregationTemporality int32
|
||||
|
||||
const (
|
||||
// UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
|
||||
AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0
|
||||
// DELTA is an AggregationTemporality for a metric aggregator which reports
|
||||
// changes since last report time. Successive metrics contain aggregation of
|
||||
// values from continuous and non-overlapping intervals.
|
||||
//
|
||||
// The values for a DELTA metric are based only on the time interval
|
||||
// associated with one measurement cycle. There is no dependency on
|
||||
// previous measurements like is the case for CUMULATIVE metrics.
|
||||
//
|
||||
// For example, consider a system measuring the number of requests that
|
||||
// it receives and reports the sum of these requests every second as a
|
||||
// DELTA metric:
|
||||
//
|
||||
// 1. The system starts receiving at time=t_0.
|
||||
// 2. A request is received, the system measures 1 request.
|
||||
// 3. A request is received, the system measures 1 request.
|
||||
// 4. A request is received, the system measures 1 request.
|
||||
// 5. The 1 second collection cycle ends. A metric is exported for the
|
||||
// number of requests received over the interval of time t_0 to
|
||||
// t_0+1 with a value of 3.
|
||||
// 6. A request is received, the system measures 1 request.
|
||||
// 7. A request is received, the system measures 1 request.
|
||||
// 8. The 1 second collection cycle ends. A metric is exported for the
|
||||
// number of requests received over the interval of time t_0+1 to
|
||||
// t_0+2 with a value of 2.
|
||||
AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1
|
||||
// CUMULATIVE is an AggregationTemporality for a metric aggregator which
|
||||
// reports changes since a fixed start time. This means that current values
|
||||
// of a CUMULATIVE metric depend on all previous measurements since the
|
||||
// start time. Because of this, the sender is required to retain this state
|
||||
// in some form. If this state is lost or invalidated, the CUMULATIVE metric
|
||||
// values MUST be reset and a new fixed start time following the last
|
||||
// reported measurement time sent MUST be used.
|
||||
//
|
||||
// For example, consider a system measuring the number of requests that
|
||||
// it receives and reports the sum of these requests every second as a
|
||||
// CUMULATIVE metric:
|
||||
//
|
||||
// 1. The system starts receiving at time=t_0.
|
||||
// 2. A request is received, the system measures 1 request.
|
||||
// 3. A request is received, the system measures 1 request.
|
||||
// 4. A request is received, the system measures 1 request.
|
||||
// 5. The 1 second collection cycle ends. A metric is exported for the
|
||||
// number of requests received over the interval of time t_0 to
|
||||
// t_0+1 with a value of 3.
|
||||
// 6. A request is received, the system measures 1 request.
|
||||
// 7. A request is received, the system measures 1 request.
|
||||
// 8. The 1 second collection cycle ends. A metric is exported for the
|
||||
// number of requests received over the interval of time t_0 to
|
||||
// t_0+2 with a value of 5.
|
||||
// 9. The system experiences a fault and loses state.
|
||||
// 10. The system recovers and resumes receiving at time=t_1.
|
||||
// 11. A request is received, the system measures 1 request.
|
||||
// 12. The 1 second collection cycle ends. A metric is exported for the
|
||||
// number of requests received over the interval of time t_1 to
|
||||
// t_0+1 with a value of 1.
|
||||
//
|
||||
// Note: Even though, when reporting changes since last report time, using
|
||||
// CUMULATIVE is valid, it is not recommended. This may cause problems for
|
||||
// systems that do not use start_time to determine when the aggregation
|
||||
// value was reset (e.g. Prometheus).
|
||||
AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2
|
||||
)
|
||||
|
||||
// Enum value maps for AggregationTemporality.
|
||||
var (
|
||||
AggregationTemporality_name = map[int32]string{
|
||||
0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
|
||||
1: "AGGREGATION_TEMPORALITY_DELTA",
|
||||
2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
|
||||
}
|
||||
AggregationTemporality_value = map[string]int32{
|
||||
"AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
|
||||
"AGGREGATION_TEMPORALITY_DELTA": 1,
|
||||
"AGGREGATION_TEMPORALITY_CUMULATIVE": 2,
|
||||
}
|
||||
)
|
||||
|
||||
func (x AggregationTemporality) Enum() *AggregationTemporality {
|
||||
p := new(AggregationTemporality)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
|
||||
// bit-field representing 32 distinct boolean flags. Each flag defined in this
|
||||
// enum is a bit-mask. To test the presence of a single flag in the flags of
|
||||
// a data point, for example, use an expression like:
|
||||
//
|
||||
// (point.flags & FLAG_NO_RECORDED_VALUE) == FLAG_NO_RECORDED_VALUE
|
||||
type DataPointFlags int32
|
||||
|
||||
const (
|
||||
DataPointFlags_FLAG_NONE DataPointFlags = 0
|
||||
// This DataPoint is valid but has no recorded value. This value
|
||||
// SHOULD be used to reflect explicitly missing data in a series, as
|
||||
// for an equivalent to the Prometheus "staleness marker".
|
||||
DataPointFlags_FLAG_NO_RECORDED_VALUE DataPointFlags = 1
|
||||
)
|
||||
|
||||
// Enum value maps for DataPointFlags.
|
||||
var (
|
||||
DataPointFlags_name = map[int32]string{
|
||||
0: "FLAG_NONE",
|
||||
1: "FLAG_NO_RECORDED_VALUE",
|
||||
}
|
||||
DataPointFlags_value = map[string]int32{
|
||||
"FLAG_NONE": 0,
|
||||
"FLAG_NO_RECORDED_VALUE": 1,
|
||||
}
|
||||
)
|
||||
|
||||
func (x DataPointFlags) Enum() *DataPointFlags {
|
||||
p := new(DataPointFlags)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
|
||||
// MetricsData represents the metrics data that can be stored in a persistent
|
||||
// storage, OR can be embedded by other protocols that transfer OTLP metrics
|
||||
// data but do not implement the OTLP protocol.
|
||||
//
|
||||
// The main difference between this message and collector protocol is that
|
||||
// in this message there will not be any "control" or "metadata" specific to
|
||||
// OTLP protocol.
|
||||
//
|
||||
// When new fields are added into this message, the OTLP request MUST be updated
|
||||
// as well.
|
||||
type MetricsData struct {
|
||||
unknownFields []byte
|
||||
|
||||
// An array of ResourceMetrics.
|
||||
// For data coming from a single resource this array will typically contain
|
||||
// one element. Intermediary nodes that receive data from multiple origins
|
||||
// typically batch the data before forwarding further and in that case this
|
||||
// array will contain multiple elements.
|
||||
ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
|
||||
}
|
||||
|
||||
// A collection of ScopeMetrics from a Resource.
|
||||
type ResourceMetrics struct {
|
||||
unknownFields []byte
|
||||
|
||||
// The resource for the metrics in this message.
|
||||
// If this field is not set then no resource info is known.
|
||||
Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
|
||||
// A list of metrics that originate from a resource.
|
||||
ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"`
|
||||
// This schema_url applies to the data in the "resource" field. It does not apply
|
||||
// to the data in the "scope_metrics" field which have their own schema_url field.
|
||||
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
|
||||
}
|
||||
|
||||
// A collection of Metrics produced by a Scope.
|
||||
type ScopeMetrics struct {
|
||||
unknownFields []byte
|
||||
|
||||
// A list of metrics that originate from an instrumentation library.
|
||||
Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
|
||||
// This schema_url applies to all metrics in the "metrics" field.
|
||||
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
|
||||
}
|
||||
|
||||
// Defines a Metric which has one or more timeseries. The following is a
|
||||
// brief summary of the Metric data model. For more details, see:
|
||||
//
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
|
||||
//
|
||||
// The data model and relation between entities is shown in the
|
||||
// diagram below. Here, "DataPoint" is the term used to refer to any
|
||||
// one of the specific data point value types, and "points" is the term used
|
||||
// to refer to any one of the lists of points contained in the Metric.
|
||||
//
|
||||
// - Metric is composed of a metadata and data.
|
||||
//
|
||||
// - Metadata part contains a name, description, unit.
|
||||
//
|
||||
// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
|
||||
//
|
||||
// - DataPoint contains timestamps, attributes, and one of the possible value type
|
||||
// fields.
|
||||
//
|
||||
// Metric
|
||||
// +------------+
|
||||
// |name |
|
||||
// |description |
|
||||
// |unit | +------------------------------------+
|
||||
// |data |---> |Gauge, Sum, Histogram, Summary, ... |
|
||||
// +------------+ +------------------------------------+
|
||||
//
|
||||
// Data [One of Gauge, Sum, Histogram, Summary, ...]
|
||||
// +-----------+
|
||||
// |... | // Metadata about the Data.
|
||||
// |points |--+
|
||||
// +-----------+ |
|
||||
// | +---------------------------+
|
||||
// | |DataPoint 1 |
|
||||
// v |+------+------+ +------+ |
|
||||
// +-----+ ||label |label |...|label | |
|
||||
// | 1 |-->||value1|value2|...|valueN| |
|
||||
// +-----+ |+------+------+ +------+ |
|
||||
// | . | |+-----+ |
|
||||
// | . | ||value| |
|
||||
// | . | |+-----+ |
|
||||
// | . | +---------------------------+
|
||||
// | . | .
|
||||
// | . | .
|
||||
// | . | .
|
||||
// | . | +---------------------------+
|
||||
// | . | |DataPoint M |
|
||||
// +-----+ |+------+------+ +------+ |
|
||||
// | M |-->||label |label |...|label | |
|
||||
// +-----+ ||value1|value2|...|valueN| |
|
||||
// |+------+------+ +------+ |
|
||||
// |+-----+ |
|
||||
// ||value| |
|
||||
// |+-----+ |
|
||||
// +---------------------------+
|
||||
//
|
||||
// Each distinct type of DataPoint represents the output of a specific
|
||||
// aggregation function, the result of applying the DataPoint's
|
||||
// associated function to one or more measurements.
|
||||
//
|
||||
// All DataPoint types have three common fields:
|
||||
// - Attributes includes key-value pairs associated with the data point
|
||||
// - TimeUnixNano is required, set to the end time of the aggregation
|
||||
// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
|
||||
// having an AggregationTemporality field, as discussed below.
|
||||
//
|
||||
// Both TimeUnixNano and StartTimeUnixNano values are expressed as
|
||||
// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
|
||||
//
|
||||
// # TimeUnixNano
|
||||
//
|
||||
// This field is required, having consistent interpretation across
|
||||
// DataPoint types. TimeUnixNano is the moment corresponding to when
|
||||
// the data point's aggregate value was captured.
|
||||
//
|
||||
// Data points with the 0 value for TimeUnixNano SHOULD be rejected
|
||||
// by consumers.
|
||||
//
|
||||
// # StartTimeUnixNano
|
||||
//
|
||||
// StartTimeUnixNano in general allows detecting when a sequence of
|
||||
// observations is unbroken. This field indicates to consumers the
|
||||
// start time for points with cumulative and delta
|
||||
// AggregationTemporality, and it should be included whenever possible
|
||||
// to support correct rate calculation. Although it may be omitted
|
||||
// when the start time is truly unknown, setting StartTimeUnixNano is
|
||||
// strongly encouraged.
|
||||
type Metric struct {
|
||||
unknownFields []byte
|
||||
|
||||
// name of the metric, including its DNS name prefix. It must be unique.
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
// description of the metric, which can be used in documentation.
|
||||
Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
|
||||
// unit in which the metric value is reported. Follows the format
|
||||
// described by http://unitsofmeasure.org/ucum.html.
|
||||
Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
|
||||
// Data determines the aggregation type (if any) of the metric, what is the
|
||||
// reported value type for the data points, as well as the relationship to
|
||||
// the time interval over which they are reported.
|
||||
//
|
||||
// Types that are assignable to Data:
|
||||
//
|
||||
// *Metric_Gauge
|
||||
// *Metric_Sum
|
||||
// *Metric_Histogram
|
||||
// *Metric_ExponentialHistogram
|
||||
// *Metric_Summary
|
||||
Data isMetric_Data `protobuf_oneof:"data"`
|
||||
}
|
||||
|
||||
type isMetric_Data interface {
|
||||
isMetric_Data()
|
||||
}
|
||||
|
||||
type Metric_Gauge struct {
|
||||
Gauge *Gauge `protobuf:"bytes,5,opt,name=gauge,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Metric_Sum struct {
|
||||
Sum *Sum `protobuf:"bytes,7,opt,name=sum,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Metric_Histogram struct {
|
||||
Histogram *Histogram `protobuf:"bytes,9,opt,name=histogram,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Metric_ExponentialHistogram struct {
|
||||
ExponentialHistogram *ExponentialHistogram `protobuf:"bytes,10,opt,name=exponential_histogram,json=exponentialHistogram,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Metric_Summary struct {
|
||||
Summary *Summary `protobuf:"bytes,11,opt,name=summary,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*Metric_Gauge) isMetric_Data() {}
|
||||
|
||||
func (*Metric_Sum) isMetric_Data() {}
|
||||
|
||||
func (*Metric_Histogram) isMetric_Data() {}
|
||||
|
||||
func (*Metric_ExponentialHistogram) isMetric_Data() {}
|
||||
|
||||
func (*Metric_Summary) isMetric_Data() {}
|
||||
|
||||
// Gauge represents the type of a scalar metric that always exports the
|
||||
// "current value" for every data point. It should be used for an "unknown"
|
||||
// aggregation.
|
||||
//
|
||||
// A Gauge does not support different aggregation temporalities. Given the
|
||||
// aggregation is unknown, points cannot be combined using the same
|
||||
// aggregation, regardless of aggregation temporalities. Therefore,
|
||||
// AggregationTemporality is not included. Consequently, this also means
|
||||
// "StartTimeUnixNano" is ignored for all data points.
|
||||
type Gauge struct {
|
||||
unknownFields []byte
|
||||
|
||||
DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
|
||||
}
|
||||
|
||||
// Sum represents the type of a scalar metric that is calculated as a sum of all
|
||||
// reported measurements over a time interval.
|
||||
type Sum struct {
|
||||
unknownFields []byte
|
||||
|
||||
DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
|
||||
// aggregation_temporality describes if the aggregator reports delta changes
|
||||
// since last report time, or cumulative changes since a fixed start time.
|
||||
AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.AggregationTemporality" json:"aggregation_temporality,omitempty"`
|
||||
// If "true" means that the sum is monotonic.
|
||||
IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"`
|
||||
}
|
||||
|
||||
// Histogram represents the type of a metric that is calculated by aggregating
|
||||
// as a Histogram of all reported measurements over a time interval.
|
||||
type Histogram struct {
|
||||
unknownFields []byte
|
||||
|
||||
DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
|
||||
// aggregation_temporality describes if the aggregator reports delta changes
|
||||
// since last report time, or cumulative changes since a fixed start time.
|
||||
AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.AggregationTemporality" json:"aggregation_temporality,omitempty"`
|
||||
}
|
||||
|
||||
// ExponentialHistogram represents the type of a metric that is calculated by aggregating
|
||||
// as a ExponentialHistogram of all reported double measurements over a time interval.
|
||||
type ExponentialHistogram struct {
|
||||
unknownFields []byte
|
||||
|
||||
DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
|
||||
// aggregation_temporality describes if the aggregator reports delta changes
|
||||
// since last report time, or cumulative changes since a fixed start time.
|
||||
AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.AggregationTemporality" json:"aggregation_temporality,omitempty"`
|
||||
}
|
||||
|
||||
// Summary metric data are used to convey quantile summaries,
|
||||
// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
|
||||
// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
|
||||
// data type. These data points cannot always be merged in a meaningful way.
|
||||
// While they can be useful in some applications, histogram data points are
|
||||
// recommended for new applications.
|
||||
type Summary struct {
|
||||
unknownFields []byte
|
||||
|
||||
DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
|
||||
}
|
||||
|
||||
// NumberDataPoint is a single data point in a timeseries that describes the
|
||||
// time-varying scalar value of a metric.
|
||||
type NumberDataPoint struct {
|
||||
unknownFields []byte
|
||||
|
||||
// The set of key/value pairs that uniquely identify the timeseries from
|
||||
// where this point belongs. The list may be empty (may contain 0 elements).
|
||||
// Attribute keys MUST be unique (it is not allowed to have more than one
|
||||
// attribute with the same key).
|
||||
Attributes []*KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
|
||||
// StartTimeUnixNano is optional but strongly encouraged, see the
|
||||
// the detailed comments above Metric.
|
||||
//
|
||||
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
|
||||
// 1970.
|
||||
StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
|
||||
// TimeUnixNano is required, see the detailed comments above Metric.
|
||||
//
|
||||
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
|
||||
// 1970.
|
||||
TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
|
||||
// The value itself. A point is considered invalid when one of the recognized
|
||||
// value fields is not present inside this oneof.
|
||||
//
|
||||
// Types that are assignable to Value:
|
||||
//
|
||||
// *NumberDataPoint_AsDouble
|
||||
// *NumberDataPoint_AsInt
|
||||
Value isNumberDataPoint_Value `protobuf_oneof:"value"`
|
||||
// (Optional) List of exemplars collected from
|
||||
// measurements that were used to form the data point
|
||||
Exemplars []*Exemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
|
||||
// Flags that apply to this specific data point. See DataPointFlags
|
||||
// for the available flags and their meaning.
|
||||
Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
|
||||
}
|
||||
|
||||
type isNumberDataPoint_Value interface {
|
||||
isNumberDataPoint_Value()
|
||||
}
|
||||
|
||||
type NumberDataPoint_AsDouble struct {
|
||||
AsDouble float64 `protobuf:"fixed64,4,opt,name=as_double,json=asDouble,proto3,oneof"`
|
||||
}
|
||||
|
||||
type NumberDataPoint_AsInt struct {
|
||||
AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*NumberDataPoint_AsDouble) isNumberDataPoint_Value() {}
|
||||
|
||||
func (*NumberDataPoint_AsInt) isNumberDataPoint_Value() {}
|
||||
|
||||
// HistogramDataPoint is a single data point in a timeseries that describes the
|
||||
// time-varying values of a Histogram. A Histogram contains summary statistics
|
||||
// for a population of values, it may optionally contain the distribution of
|
||||
// those values across a set of buckets.
|
||||
//
|
||||
// If the histogram contains the distribution of values, then both
|
||||
// "explicit_bounds" and "bucket counts" fields must be defined.
|
||||
// If the histogram does not contain the distribution of values, then both
|
||||
// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
|
||||
// "sum" are known.
|
||||
type HistogramDataPoint struct {
|
||||
unknownFields []byte
|
||||
|
||||
// The set of key/value pairs that uniquely identify the timeseries from
|
||||
// where this point belongs. The list may be empty (may contain 0 elements).
|
||||
// Attribute keys MUST be unique (it is not allowed to have more than one
|
||||
// attribute with the same key).
|
||||
Attributes []*KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"`
|
||||
// StartTimeUnixNano is optional but strongly encouraged, see the
|
||||
// the detailed comments above Metric.
|
||||
//
|
||||
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
|
||||
// 1970.
|
||||
StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
|
||||
// TimeUnixNano is required, see the detailed comments above Metric.
|
||||
//
|
||||
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
|
||||
// 1970.
|
||||
TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
|
||||
// count is the number of values in the population. Must be non-negative. This
|
||||
// value must be equal to the sum of the "count" fields in buckets if a
|
||||
// histogram is provided.
|
||||
Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
|
||||
// sum of the values in the population. If count is zero then this field
|
||||
// must be zero.
|
||||
//
|
||||
// Note: Sum should only be filled out when measuring non-negative discrete
|
||||
// events, and is assumed to be monotonic over the values of these events.
|
||||
// Negative events *can* be recorded, but sum should not be filled out when
|
||||
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
|
||||
// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
|
||||
Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
|
||||
// bucket_counts is an optional field that contains the count values of the histogram
|
||||
// for each bucket.
|
||||
//
|
||||
// The sum of the bucket_counts must equal the value in the count field.
|
||||
//
|
||||
// The number of elements in the bucket_counts array must be one greater than
|
||||
// the number of elements in explicit_bounds array.
|
||||
BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
|
||||
// explicit_bounds specifies buckets with explicitly defined bounds for values.
|
||||
//
|
||||
// The boundaries for bucket at index i are:
|
||||
//
|
||||
// (-infinity, explicit_bounds[i]] for i == 0
|
||||
// (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
|
||||
// (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
|
||||
//
|
||||
// The values in the explicit_bounds array must be strictly increasing.
|
||||
//
|
||||
// Histogram buckets are inclusive of their upper boundary, except the last
|
||||
// bucket where the boundary is at infinity. This format is intentionally
|
||||
// compatible with the OpenMetrics histogram definition.
|
||||
ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"`
|
||||
// (Optional) List of exemplars collected from
|
||||
// measurements that were used to form the data point
|
||||
Exemplars []*Exemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
|
||||
// Flags that apply to this specific data point. See DataPointFlags
|
||||
// for the available flags and their meaning.
|
||||
Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
|
||||
// min is the minimum value over (start_time, end_time].
|
||||
Min *float64 `protobuf:"fixed64,11,opt,name=min,proto3,oneof" json:"min,omitempty"`
|
||||
// max is the maximum value over (start_time, end_time].
|
||||
Max *float64 `protobuf:"fixed64,12,opt,name=max,proto3,oneof" json:"max,omitempty"`
|
||||
}
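To make the bucket-boundary rules quoted in the comments above concrete, here is a small self-contained sketch (with made-up values) that prints the range each bucket_counts entry covers:

```go
package main

import "fmt"

func main() {
	// Illustrative values only; len(counts) must be len(bounds)+1.
	bounds := []float64{0.1, 1, 10} // explicit_bounds, strictly increasing
	counts := []uint64{5, 20, 7, 1} // bucket_counts

	for i, c := range counts {
		switch {
		case i == 0:
			fmt.Printf("(-inf, %g]: %d\n", bounds[0], c)
		case i == len(bounds):
			fmt.Printf("(%g, +inf): %d\n", bounds[i-1], c)
		default:
			fmt.Printf("(%g, %g]: %d\n", bounds[i-1], bounds[i], c)
		}
	}
}
```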
|
||||
|
||||
// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
|
||||
// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
|
||||
// summary statistics for a population of values; it may optionally contain the
|
||||
// distribution of those values across a set of buckets.
|
||||
type ExponentialHistogramDataPoint struct {
|
||||
unknownFields []byte
|
||||
|
||||
// The set of key/value pairs that uniquely identify the timeseries from
|
||||
// where this point belongs. The list may be empty (may contain 0 elements).
|
||||
// Attribute keys MUST be unique (it is not allowed to have more than one
|
||||
// attribute with the same key).
|
||||
Attributes []*KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
|
||||
// StartTimeUnixNano is optional but strongly encouraged, see the
|
||||
// detailed comments above Metric.
|
||||
//
|
||||
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
|
||||
// 1970.
|
||||
StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
|
||||
// TimeUnixNano is required, see the detailed comments above Metric.
|
||||
//
|
||||
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
|
||||
// 1970.
|
||||
TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
|
||||
// count is the number of values in the population. Must be
|
||||
// non-negative. This value must be equal to the sum of the "bucket_counts"
|
||||
// values in the positive and negative Buckets plus the "zero_count" field.
|
||||
Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
|
||||
// sum of the values in the population. If count is zero then this field
|
||||
// must be zero.
|
||||
//
|
||||
// Note: Sum should only be filled out when measuring non-negative discrete
|
||||
// events, and is assumed to be monotonic over the values of these events.
|
||||
// Negative events *can* be recorded, but sum should not be filled out when
|
||||
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
|
||||
// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
|
||||
Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
|
||||
// scale describes the resolution of the histogram. Boundaries are
|
||||
// located at powers of the base, where:
|
||||
//
|
||||
// base = (2^(2^-scale))
|
||||
//
|
||||
// The histogram bucket identified by `index`, a signed integer,
|
||||
// contains values that are greater than (base^index) and
|
||||
// less than or equal to (base^(index+1)).
|
||||
//
|
||||
// The positive and negative ranges of the histogram are expressed
|
||||
// separately. Negative values are mapped by their absolute value
|
||||
// into the negative range using the same scale as the positive range.
|
||||
//
|
||||
// scale is not restricted by the protocol, as the permissible
|
||||
// values depend on the range of the data.
|
||||
Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"`
|
||||
// zero_count is the count of values that are either exactly zero or
|
||||
// within the region considered zero by the instrumentation at the
|
||||
// tolerated degree of precision. This bucket stores values that
|
||||
// cannot be expressed using the standard exponential formula as
|
||||
// well as values that have been rounded to zero.
|
||||
//
|
||||
// Implementations MAY consider the zero bucket to have probability
|
||||
// mass equal to (zero_count / count).
|
||||
ZeroCount uint64 `protobuf:"fixed64,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"`
|
||||
// positive carries the positive range of exponential bucket counts.
|
||||
Positive *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,8,opt,name=positive,proto3" json:"positive,omitempty"`
|
||||
// negative carries the negative range of exponential bucket counts.
|
||||
Negative *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,9,opt,name=negative,proto3" json:"negative,omitempty"`
|
||||
// Flags that apply to this specific data point. See DataPointFlags
|
||||
// for the available flags and their meaning.
|
||||
Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
|
||||
// (Optional) List of exemplars collected from
|
||||
// measurements that were used to form the data point
|
||||
Exemplars []*Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
|
||||
// min is the minimum value over (start_time, end_time].
|
||||
Min *float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"`
|
||||
// max is the maximum value over (start_time, end_time].
|
||||
Max *float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"`
|
||||
}
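The scale/base relationship documented above can be checked with a short sketch; `bucketBounds` is a hypothetical helper, not part of this package:

```go
package main

import (
	"fmt"
	"math"
)

// bucketBounds returns the (lower, upper] range of the exponential-histogram
// bucket at `index` for the given `scale`, per the field comments above:
// base = 2^(2^-scale), and bucket `index` covers (base^index, base^(index+1)].
func bucketBounds(scale, index int32) (lower, upper float64) {
	base := math.Pow(2, math.Pow(2, -float64(scale)))
	return math.Pow(base, float64(index)), math.Pow(base, float64(index+1))
}

func main() {
	lo, hi := bucketBounds(3, 5) // scale 3 => base = 2^(1/8) ≈ 1.0905
	fmt.Printf("bucket 5 at scale 3 covers (%g, %g]\n", lo, hi)
}
```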
|
||||
|
||||
// SummaryDataPoint is a single data point in a timeseries that describes the
|
||||
// time-varying values of a Summary metric.
|
||||
type SummaryDataPoint struct {
|
||||
unknownFields []byte
|
||||
|
||||
// The set of key/value pairs that uniquely identify the timeseries from
|
||||
// where this point belongs. The list may be empty (may contain 0 elements).
|
||||
// Attribute keys MUST be unique (it is not allowed to have more than one
|
||||
// attribute with the same key).
|
||||
Attributes []*KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
|
||||
// StartTimeUnixNano is optional but strongly encouraged, see the
|
||||
// detailed comments above Metric.
|
||||
//
|
||||
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
|
||||
// 1970.
|
||||
StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
|
||||
// TimeUnixNano is required, see the detailed comments above Metric.
|
||||
//
|
||||
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
|
||||
// 1970.
|
||||
TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
|
||||
// count is the number of values in the population. Must be non-negative.
|
||||
Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
|
||||
// sum of the values in the population. If count is zero then this field
|
||||
// must be zero.
|
||||
//
|
||||
// Note: Sum should only be filled out when measuring non-negative discrete
|
||||
// events, and is assumed to be monotonic over the values of these events.
|
||||
// Negative events *can* be recorded, but sum should not be filled out when
|
||||
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
|
||||
// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
|
||||
Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"`
|
||||
// (Optional) list of values at different quantiles of the distribution calculated
|
||||
// from the current snapshot. The quantiles must be strictly increasing.
|
||||
QuantileValues []*SummaryDataPoint_ValueAtQuantile `protobuf:"bytes,6,rep,name=quantile_values,json=quantileValues,proto3" json:"quantile_values,omitempty"`
|
||||
// Flags that apply to this specific data point. See DataPointFlags
|
||||
// for the available flags and their meaning.
|
||||
Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
|
||||
}
|
||||
|
||||
// A representation of an exemplar, which is a sample input measurement.
|
||||
// Exemplars also hold information about the environment when the measurement
|
||||
// was recorded, for example the span and trace ID of the active span when the
|
||||
// exemplar was recorded.
|
||||
type Exemplar struct {
|
||||
unknownFields []byte
|
||||
|
||||
// The set of key/value pairs that were filtered out by the aggregator, but
|
||||
// recorded alongside the original measurement. Only key/value pairs that were
|
||||
// filtered out by the aggregator should be included
|
||||
FilteredAttributes []*KeyValue `protobuf:"bytes,7,rep,name=filtered_attributes,json=filteredAttributes,proto3" json:"filtered_attributes,omitempty"`
|
||||
// time_unix_nano is the exact time when this exemplar was recorded
|
||||
//
|
||||
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
|
||||
// 1970.
|
||||
TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
|
||||
// The value of the measurement that was recorded. An exemplar is
|
||||
// considered invalid when one of the recognized value fields is not present
|
||||
// inside this oneof.
|
||||
//
|
||||
// Types that are assignable to Value:
|
||||
//
|
||||
// *Exemplar_AsDouble
|
||||
// *Exemplar_AsInt
|
||||
Value isExemplar_Value `protobuf_oneof:"value"`
|
||||
// (Optional) Span ID of the exemplar trace.
|
||||
// span_id may be missing if the measurement is not recorded inside a trace
|
||||
// or if the trace is not sampled.
|
||||
SpanId []byte `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
|
||||
// (Optional) Trace ID of the exemplar trace.
|
||||
// trace_id may be missing if the measurement is not recorded inside a trace
|
||||
// or if the trace is not sampled.
|
||||
TraceId []byte `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
|
||||
}
|
||||
|
||||
type isExemplar_Value interface {
|
||||
isExemplar_Value()
|
||||
}
|
||||
|
||||
type Exemplar_AsDouble struct {
|
||||
AsDouble float64 `protobuf:"fixed64,3,opt,name=as_double,json=asDouble,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Exemplar_AsInt struct {
|
||||
AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*Exemplar_AsDouble) isExemplar_Value() {}
|
||||
|
||||
func (*Exemplar_AsInt) isExemplar_Value() {}
|
||||
|
||||
// Buckets are a set of bucket counts, encoded in a contiguous array
|
||||
// of counts.
|
||||
type ExponentialHistogramDataPoint_Buckets struct {
|
||||
unknownFields []byte
|
||||
|
||||
// Offset is the bucket index of the first entry in the bucket_counts array.
|
||||
//
|
||||
// Note: This uses a varint encoding as a simple form of compression.
|
||||
Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
|
||||
// Count is an array of counts, where count[i] carries the count
|
||||
// of the bucket at index (offset+i). count[i] is the count of
|
||||
// values greater than base^(offset+i) and less than or equal to
|
||||
// base^(offset+i+1).
|
||||
//
|
||||
// Note: By contrast, the explicit HistogramDataPoint uses
|
||||
// fixed64. This field is expected to have many buckets,
|
||||
// especially zeros, so uint64 has been selected to ensure
|
||||
// varint encoding.
|
||||
BucketCounts []uint64 `protobuf:"varint,2,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
|
||||
}
|
||||
|
||||
// Represents the value at a given quantile of a distribution.
|
||||
//
|
||||
// To record Min and Max values, the following conventions are used:
|
||||
// - The 1.0 quantile is equivalent to the maximum value observed.
|
||||
// - The 0.0 quantile is equivalent to the minimum value observed.
|
||||
//
|
||||
// See the following issue for more context:
|
||||
// https://github.com/open-telemetry/opentelemetry-proto/issues/125
|
||||
type SummaryDataPoint_ValueAtQuantile struct {
|
||||
unknownFields []byte
|
||||
|
||||
// The quantile of a distribution. Must be in the interval
|
||||
// [0.0, 1.0].
|
||||
Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"`
|
||||
// The value at the given quantile of a distribution.
|
||||
//
|
||||
// Quantile values must NOT be negative.
|
||||
Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
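Given the 0.0/1.0 quantile convention described above, a consumer can recover min and max from a summary's quantile list; `minMaxFromQuantiles` below is a hypothetical sketch, not generated code:

```go
// minMaxFromQuantiles returns the conventional min/max carried by the 0.0 and
// 1.0 quantiles, with ok=false when either quantile is absent.
func minMaxFromQuantiles(qs []*SummaryDataPoint_ValueAtQuantile) (min, max float64, ok bool) {
	var haveMin, haveMax bool
	for _, q := range qs {
		switch q.Quantile {
		case 0.0:
			min, haveMin = q.Value, true
		case 1.0:
			max, haveMax = q.Value, true
		}
	}
	return min, max, haveMin && haveMax
}
```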
|
32
lib/protoparser/opentelemetry/pb/metrics_service.pb.go
Normal file
|
@ -0,0 +1,32 @@
|
|||
// Copyright 2019, OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.21.12
|
||||
// source: lib/protoparser/opentelemetry/proto/metrics_service.proto
|
||||
|
||||
package pb
|
||||
|
||||
type ExportMetricsServiceRequest struct {
|
||||
unknownFields []byte
|
||||
|
||||
// An array of ResourceMetrics.
|
||||
// For data coming from a single resource this array will typically contain one
|
||||
// element. Intermediary nodes (such as OpenTelemetry Collector) that receive
|
||||
// data from multiple origins typically batch the data before forwarding further and
|
||||
// in that case this array will contain multiple elements.
|
||||
ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
|
||||
}
|
157
lib/protoparser/opentelemetry/pb/metrics_service_vtproto.pb.go
Normal file
|
@ -0,0 +1,157 @@
|
|||
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
|
||||
// protoc-gen-go-vtproto version: v0.4.0
|
||||
// source: lib/protoparser/opentelemetry/proto/metrics_service.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
io "io"
|
||||
)
|
||||
|
||||
func (m *ExportMetricsServiceRequest) MarshalVT() (dAtA []byte, err error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
size := m.SizeVT()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBufferVT(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ExportMetricsServiceRequest) MarshalToVT(dAtA []byte) (int, error) {
|
||||
size := m.SizeVT()
|
||||
return m.MarshalToSizedBufferVT(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ExportMetricsServiceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
|
||||
if m == nil {
|
||||
return 0, nil
|
||||
}
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.unknownFields != nil {
|
||||
i -= len(m.unknownFields)
|
||||
copy(dAtA[i:], m.unknownFields)
|
||||
}
|
||||
if len(m.ResourceMetrics) > 0 {
|
||||
for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- {
|
||||
size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarint(dAtA, i, uint64(size))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *ExportMetricsServiceRequest) SizeVT() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.ResourceMetrics) > 0 {
|
||||
for _, e := range m.ResourceMetrics {
|
||||
l = e.SizeVT()
|
||||
n += 1 + l + sov(uint64(l))
|
||||
}
|
||||
}
|
||||
n += len(m.unknownFields)
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ExportMetricsServiceRequest) UnmarshalVT(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflow
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ExportMetricsServiceRequest: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ExportMetricsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflow
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLength
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLength
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.ResourceMetrics = append(m.ResourceMetrics, &ResourceMetrics{})
|
||||
if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skip(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLength
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
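A short usage sketch of the vtproto methods above, round-tripping a request through MarshalVT/UnmarshalVT (assumes a caller that imports this package as `pb`; error handling reduced to comments):

```go
req := &pb.ExportMetricsServiceRequest{
	ResourceMetrics: []*pb.ResourceMetrics{{}},
}
data, err := req.MarshalVT()
if err != nil {
	// handle marshal error
}
var decoded pb.ExportMetricsServiceRequest
if err := decoded.UnmarshalVT(data); err != nil {
	// handle unmarshal error
}
// decoded now holds the same single (empty) ResourceMetrics entry.
```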
|
4331
lib/protoparser/opentelemetry/pb/metrics_vtproto.pb.go
Normal file
File diff suppressed because it is too large.
48
lib/protoparser/opentelemetry/pb/resource.pb.go
Normal file
|
@ -0,0 +1,48 @@
|
|||
// Copyright 2019, OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.21.12
|
||||
// source: lib/protoparser/opentelemetry/proto/resource.proto
|
||||
|
||||
package pb
|
||||
|
||||
// Resource information.
|
||||
type Resource struct {
|
||||
unknownFields []byte
|
||||
|
||||
// Set of attributes that describe the resource.
|
||||
// Attribute keys MUST be unique (it is not allowed to have more than one
|
||||
// attribute with the same key).
|
||||
Attributes []*KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
|
||||
// dropped_attributes_count is the number of dropped attributes. If the value is 0, then
|
||||
// no attributes were dropped.
|
||||
DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Resource) GetAttributes() []*KeyValue {
|
||||
if x != nil {
|
||||
return x.Attributes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Resource) GetDroppedAttributesCount() uint32 {
|
||||
if x != nil {
|
||||
return x.DroppedAttributesCount
|
||||
}
|
||||
return 0
|
||||
}
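The generated getters above are nil-safe, so callers may use them without first checking the receiver; a minimal sketch:

```go
var r *Resource                        // nil receiver
attrs := r.GetAttributes()             // returns nil, no panic
count := r.GetDroppedAttributesCount() // returns 0, no panic
_, _ = attrs, count
```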
|
184
lib/protoparser/opentelemetry/pb/resource_vtproto.pb.go
Normal file
|
@ -0,0 +1,184 @@
|
|||
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
|
||||
// protoc-gen-go-vtproto version: v0.4.0
|
||||
// source: lib/protoparser/opentelemetry/proto/resource.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
io "io"
|
||||
)
|
||||
|
||||
func (m *Resource) MarshalVT() (dAtA []byte, err error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
size := m.SizeVT()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBufferVT(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Resource) MarshalToVT(dAtA []byte) (int, error) {
|
||||
size := m.SizeVT()
|
||||
return m.MarshalToSizedBufferVT(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *Resource) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
|
||||
if m == nil {
|
||||
return 0, nil
|
||||
}
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.unknownFields != nil {
|
||||
i -= len(m.unknownFields)
|
||||
copy(dAtA[i:], m.unknownFields)
|
||||
}
|
||||
if m.DroppedAttributesCount != 0 {
|
||||
i = encodeVarint(dAtA, i, uint64(m.DroppedAttributesCount))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if len(m.Attributes) > 0 {
|
||||
for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
|
||||
size, err := m.Attributes[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarint(dAtA, i, uint64(size))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *Resource) SizeVT() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Attributes) > 0 {
|
||||
for _, e := range m.Attributes {
|
||||
l = e.SizeVT()
|
||||
n += 1 + l + sov(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.DroppedAttributesCount != 0 {
|
||||
n += 1 + sov(uint64(m.DroppedAttributesCount))
|
||||
}
|
||||
n += len(m.unknownFields)
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *Resource) UnmarshalVT(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflow
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Resource: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflow
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLength
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLength
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Attributes = append(m.Attributes, &KeyValue{})
|
||||
if err := m.Attributes[len(m.Attributes)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
|
||||
}
|
||||
m.DroppedAttributesCount = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflow
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.DroppedAttributesCount |= uint32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skip(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLength
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
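The helpers referenced throughout these generated files (`sov`, `encodeVarint`, `skip`, `ErrIntOverflow`, `ErrInvalidLength`) live in the package's shared vtproto helper file, which this diff excerpt does not show. A minimal sketch of the two arithmetic helpers, assuming the stock vtprotobuf definitions:

```go
import "math/bits"

// sov returns the number of bytes needed to varint-encode x.
func sov(x uint64) (n int) {
	return (bits.Len64(x|1) + 6) / 7
}

// encodeVarint writes v so that it ends just before offset and returns the
// new (smaller) offset; the generated marshalers fill buffers back to front.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}
```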
|
32
lib/protoparser/opentelemetry/proto/README.md
Normal file
|
@ -0,0 +1,32 @@
|
|||
# OpenTelemetry proto files
|
||||
|
||||
Content copied from https://github.com/open-telemetry/opentelemetry-proto/tree/main/opentelemetry/proto
|
||||
|
||||
## Requirements
|
||||
- protoc binary [link](http://google.github.io/proto-lens/installing-protoc.html)
|
||||
- golang-proto-gen [link](https://developers.google.com/protocol-buffers/docs/reference/go-generated)
|
||||
- custom marshaller [link](https://github.com/planetscale/vtprotobuf)
|
||||
|
||||
## Modifications
|
||||
|
||||
Original proto files were modified:
|
||||
1) changed the package name to `package opentelemetry`.
|
||||
2) changed the import paths and directory names.
|
||||
3) changed `go_package` to `opentelemetry/pb`.
|
||||
|
||||
|
||||
## How to generate pbs
|
||||
|
||||
Run the following command:
|
||||
```bash
|
||||
export GOBIN=~/go/bin
|
||||
protoc -I=. --go_out=./lib/protoparser/opentelemetry --go-vtproto_out=./lib/protoparser/opentelemetry --plugin protoc-gen-go-vtproto="$GOBIN/protoc-gen-go-vtproto" --go-vtproto_opt=features=marshal+unmarshal+size lib/protoparser/opentelemetry/proto/*.proto
|
||||
```
|
||||
|
||||
Generated code will be at `lib/protoparser/opentelemetry/opentelemetry/`
|
||||
|
||||
Then manually edit it:
|
||||
|
||||
1) remove all external imports
|
||||
2) remove all unneeded methods
|
||||
3) replace `unknownFields` with `unknownFields []byte`
|
67
lib/protoparser/opentelemetry/proto/common.proto
Normal file
|
@ -0,0 +1,67 @@
|
|||
// Copyright 2019, OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package opentelemetry;
|
||||
|
||||
option csharp_namespace = "OpenTelemetry.Proto.Common.V1";
|
||||
option java_multiple_files = true;
|
||||
option java_package = "io.opentelemetry.proto.common.v1";
|
||||
option java_outer_classname = "CommonProto";
|
||||
option go_package = "opentelemetry/pb";
|
||||
|
||||
// AnyValue is used to represent any type of attribute value. AnyValue may contain a
|
||||
// primitive value such as a string or integer or it may contain an arbitrary nested
|
||||
// object containing arrays, key-value lists and primitives.
|
||||
message AnyValue {
|
||||
// The value is one of the listed fields. It is valid for all values to be unspecified
|
||||
// in which case this AnyValue is considered to be "empty".
|
||||
oneof value {
|
||||
string string_value = 1;
|
||||
bool bool_value = 2;
|
||||
int64 int_value = 3;
|
||||
double double_value = 4;
|
||||
ArrayValue array_value = 5;
|
||||
KeyValueList kvlist_value = 6;
|
||||
bytes bytes_value = 7;
|
||||
}
|
||||
}
|
||||
|
||||
// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
|
||||
// since oneof in AnyValue does not allow repeated fields.
|
||||
message ArrayValue {
|
||||
// Array of values. The array may be empty (contain 0 elements).
|
||||
repeated AnyValue values = 1;
|
||||
}
|
||||
|
||||
// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
|
||||
// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
|
||||
// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
|
||||
// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
|
||||
// are semantically equivalent.
|
||||
message KeyValueList {
|
||||
// A collection of key/value pairs. The list may be empty (may
|
||||
// contain 0 elements).
|
||||
// The keys MUST be unique (it is not allowed to have more than one
|
||||
// value with the same key).
|
||||
repeated KeyValue values = 1;
|
||||
}
|
||||
|
||||
// KeyValue is a key-value pair that is used to store Span attributes, Link
|
||||
// attributes, etc.
|
||||
message KeyValue {
|
||||
string key = 1;
|
||||
AnyValue value = 2;
|
||||
}
|
661
lib/protoparser/opentelemetry/proto/metrics.proto
Normal file
|
@ -0,0 +1,661 @@
|
|||
// Copyright 2019, OpenTelemetry Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package opentelemetry;
|
||||
|
||||
import "lib/protoparser/opentelemetry/proto/common.proto";
|
||||
import "lib/protoparser/opentelemetry/proto/resource.proto";
|
||||
|
||||
option csharp_namespace = "OpenTelemetry.Proto.Metrics.V1";
|
||||
option java_multiple_files = true;
|
||||
option java_package = "io.opentelemetry.proto.metrics.v1";
|
||||
option java_outer_classname = "MetricsProto";
|
||||
option go_package = "opentelemetry/pb";
|
||||
|
||||
// MetricsData represents the metrics data that can be stored in a persistent
|
||||
// storage, OR can be embedded by other protocols that transfer OTLP metrics
|
||||
// data but do not implement the OTLP protocol.
|
||||
//
|
||||
// The main difference between this message and collector protocol is that
|
||||
// in this message there will not be any "control" or "metadata" specific to
|
||||
// OTLP protocol.
|
||||
//
|
||||
// When new fields are added into this message, the OTLP request MUST be updated
|
||||
// as well.
|
||||
message MetricsData {
|
||||
// An array of ResourceMetrics.
|
||||
// For data coming from a single resource this array will typically contain
|
||||
// one element. Intermediary nodes that receive data from multiple origins
|
||||
// typically batch the data before forwarding further and in that case this
|
||||
// array will contain multiple elements.
|
||||
repeated ResourceMetrics resource_metrics = 1;
|
||||
}
|
||||
|
||||
// A collection of ScopeMetrics from a Resource.
|
||||
message ResourceMetrics {
|
||||
reserved 1000;
|
||||
|
||||
// The resource for the metrics in this message.
|
||||
// If this field is not set then no resource info is known.
|
||||
Resource resource = 1;
|
||||
|
||||
// A list of metrics that originate from a resource.
|
||||
repeated ScopeMetrics scope_metrics = 2;
|
||||
|
||||
// This schema_url applies to the data in the "resource" field. It does not apply
|
||||
// to the data in the "scope_metrics" field which have their own schema_url field.
|
||||
string schema_url = 3;
|
||||
}
|
||||
|
||||
// A collection of Metrics produced by a Scope.
|
||||
message ScopeMetrics {
|
||||
// A list of metrics that originate from an instrumentation library.
|
||||
repeated Metric metrics = 2;
|
||||
|
||||
// This schema_url applies to all metrics in the "metrics" field.
|
||||
string schema_url = 3;
|
||||
}
|
||||
|
||||
// Defines a Metric which has one or more timeseries. The following is a
|
||||
// brief summary of the Metric data model. For more details, see:
|
||||
//
|
||||
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
|
||||
//
|
||||
//
|
||||
// The data model and relation between entities is shown in the
|
||||
// diagram below. Here, "DataPoint" is the term used to refer to any
|
||||
// one of the specific data point value types, and "points" is the term used
|
||||
// to refer to any one of the lists of points contained in the Metric.
|
||||
//
|
||||
// - Metric is composed of a metadata and data.
|
||||
// - Metadata part contains a name, description, unit.
|
||||
// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
|
||||
// - DataPoint contains timestamps, attributes, and one of the possible value type
|
||||
// fields.
|
||||
//
|
||||
// Metric
|
||||
// +------------+
|
||||
// |name |
|
||||
// |description |
|
||||
// |unit | +------------------------------------+
|
||||
// |data |---> |Gauge, Sum, Histogram, Summary, ... |
|
||||
// +------------+ +------------------------------------+
|
||||
//
|
||||
// Data [One of Gauge, Sum, Histogram, Summary, ...]
|
||||
// +-----------+
|
||||
// |... | // Metadata about the Data.
|
||||
// |points |--+
|
||||
// +-----------+ |
|
||||
// | +---------------------------+
|
||||
// | |DataPoint 1 |
|
||||
// v |+------+------+ +------+ |
|
||||
// +-----+ ||label |label |...|label | |
|
||||
// | 1 |-->||value1|value2|...|valueN| |
|
||||
// +-----+ |+------+------+ +------+ |
|
||||
// | . | |+-----+ |
|
||||
// | . | ||value| |
|
||||
// | . | |+-----+ |
|
||||
// | . | +---------------------------+
|
||||
// | . | .
|
||||
// | . | .
|
||||
// | . | .
|
||||
// | . | +---------------------------+
|
||||
// | . | |DataPoint M |
|
||||
// +-----+ |+------+------+ +------+ |
|
||||
// | M |-->||label |label |...|label | |
|
||||
// +-----+ ||value1|value2|...|valueN| |
|
||||
// |+------+------+ +------+ |
|
||||
// |+-----+ |
|
||||
// ||value| |
|
||||
// |+-----+ |
|
||||
// +---------------------------+
|
||||
//
|
||||
// Each distinct type of DataPoint represents the output of a specific
|
||||
// aggregation function, the result of applying the DataPoint's
|
||||
// associated function to one or more measurements.
|
||||
//
|
||||
// All DataPoint types have three common fields:
|
||||
// - Attributes includes key-value pairs associated with the data point
|
||||
// - TimeUnixNano is required, set to the end time of the aggregation
|
||||
// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
|
||||
// having an AggregationTemporality field, as discussed below.
|
||||
//
|
||||
// Both TimeUnixNano and StartTimeUnixNano values are expressed as
|
||||
// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
|
||||
//
|
||||
// # TimeUnixNano
|
||||
//
|
||||
// This field is required, having consistent interpretation across
|
||||
// DataPoint types. TimeUnixNano is the moment corresponding to when
|
||||
// the data point's aggregate value was captured.
|
||||
//
|
||||
// Data points with the 0 value for TimeUnixNano SHOULD be rejected
|
||||
// by consumers.
|
||||
//
|
||||
// # StartTimeUnixNano
|
||||
//
|
||||
// StartTimeUnixNano in general allows detecting when a sequence of
|
||||
// observations is unbroken. This field indicates to consumers the
|
||||
// start time for points with cumulative and delta
|
||||
// AggregationTemporality, and it should be included whenever possible
|
||||
// to support correct rate calculation. Although it may be omitted
|
||||
// when the start time is truly unknown, setting StartTimeUnixNano is
|
||||
// strongly encouraged.
|
||||
message Metric {
|
||||
reserved 4, 6, 8;
|
||||
|
||||
// name of the metric, including its DNS name prefix. It must be unique.
|
||||
string name = 1;
|
||||
|
||||
// description of the metric, which can be used in documentation.
|
||||
string description = 2;
|
||||
|
||||
// unit in which the metric value is reported. Follows the format
|
||||
// described by http://unitsofmeasure.org/ucum.html.
|
||||
string unit = 3;
|
||||
|
||||
// Data determines the aggregation type (if any) of the metric, what is the
|
||||
// reported value type for the data points, as well as the relationship to
|
||||
// the time interval over which they are reported.
|
||||
oneof data {
|
||||
Gauge gauge = 5;
|
||||
Sum sum = 7;
|
||||
Histogram histogram = 9;
|
||||
ExponentialHistogram exponential_histogram = 10;
|
||||
Summary summary = 11;
|
||||
}
|
||||
}
|
||||
|
||||
// Gauge represents the type of a scalar metric that always exports the
|
||||
// "current value" for every data point. It should be used for an "unknown"
|
||||
// aggregation.
|
||||
//
|
||||
// A Gauge does not support different aggregation temporalities. Given the
|
||||
// aggregation is unknown, points cannot be combined using the same
|
||||
// aggregation, regardless of aggregation temporalities. Therefore,
|
||||
// AggregationTemporality is not included. Consequently, this also means
|
||||
// "StartTimeUnixNano" is ignored for all data points.
|
||||
message Gauge {
|
||||
repeated NumberDataPoint data_points = 1;
|
||||
}
|
||||
|
||||
// Sum represents the type of a scalar metric that is calculated as a sum of all
|
||||
// reported measurements over a time interval.
|
||||
message Sum {
|
||||
repeated NumberDataPoint data_points = 1;
|
||||
|
||||
// aggregation_temporality describes if the aggregator reports delta changes
|
||||
// since last report time, or cumulative changes since a fixed start time.
|
||||
AggregationTemporality aggregation_temporality = 2;
|
||||
|
||||
// If "true" means that the sum is monotonic.
|
||||
bool is_monotonic = 3;
|
||||
}
|
||||
|
||||
// Histogram represents the type of a metric that is calculated by aggregating
|
||||
// as a Histogram of all reported measurements over a time interval.
|
||||
message Histogram {
|
||||
repeated HistogramDataPoint data_points = 1;
|
||||
|
||||
// aggregation_temporality describes if the aggregator reports delta changes
|
||||
// since last report time, or cumulative changes since a fixed start time.
|
||||
AggregationTemporality aggregation_temporality = 2;
|
||||
}
|
||||
|
||||
// ExponentialHistogram represents the type of a metric that is calculated by aggregating
|
||||
// as an ExponentialHistogram of all reported double measurements over a time interval.
|
||||
message ExponentialHistogram {
|
||||
repeated ExponentialHistogramDataPoint data_points = 1;
|
||||
|
||||
// aggregation_temporality describes if the aggregator reports delta changes
|
||||
// since last report time, or cumulative changes since a fixed start time.
|
||||
AggregationTemporality aggregation_temporality = 2;
|
||||
}
|
||||
|
||||
// Summary metric data are used to convey quantile summaries,
|
||||
// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
|
||||
// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
|
||||
// data type. These data points cannot always be merged in a meaningful way.
|
||||
// While they can be useful in some applications, histogram data points are
|
||||
// recommended for new applications.
|
||||
message Summary {
|
||||
repeated SummaryDataPoint data_points = 1;
|
||||
}
|
||||
|
||||
// AggregationTemporality defines how a metric aggregator reports aggregated
|
||||
// values. It describes how those values relate to the time interval over
|
||||
// which they are aggregated.
|
||||
enum AggregationTemporality {
|
||||
// UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
|
||||
AGGREGATION_TEMPORALITY_UNSPECIFIED = 0;
|
||||
|
||||
// DELTA is an AggregationTemporality for a metric aggregator which reports
|
||||
// changes since last report time. Successive metrics contain aggregation of
|
||||
// values from continuous and non-overlapping intervals.
|
||||
//
|
||||
// The values for a DELTA metric are based only on the time interval
|
||||
// associated with one measurement cycle. There is no dependency on
|
||||
// previous measurements like is the case for CUMULATIVE metrics.
|
||||
//
|
||||
// For example, consider a system measuring the number of requests that
|
||||
// it receives and reports the sum of these requests every second as a
|
||||
// DELTA metric:
|
||||
//
|
||||
// 1. The system starts receiving at time=t_0.
|
||||
// 2. A request is received, the system measures 1 request.
|
||||
// 3. A request is received, the system measures 1 request.
|
||||
// 4. A request is received, the system measures 1 request.
|
||||
// 5. The 1 second collection cycle ends. A metric is exported for the
|
||||
// number of requests received over the interval of time t_0 to
|
||||
// t_0+1 with a value of 3.
|
||||
// 6. A request is received, the system measures 1 request.
|
||||
// 7. A request is received, the system measures 1 request.
|
||||
// 8. The 1 second collection cycle ends. A metric is exported for the
|
||||
// number of requests received over the interval of time t_0+1 to
|
||||
// t_0+2 with a value of 2.
|
||||
AGGREGATION_TEMPORALITY_DELTA = 1;
|
||||
|
||||
// CUMULATIVE is an AggregationTemporality for a metric aggregator which
|
||||
// reports changes since a fixed start time. This means that current values
|
||||
// of a CUMULATIVE metric depend on all previous measurements since the
|
||||
// start time. Because of this, the sender is required to retain this state
|
||||
// in some form. If this state is lost or invalidated, the CUMULATIVE metric
|
||||
// values MUST be reset and a new fixed start time following the last
|
||||
// reported measurement time sent MUST be used.
|
||||
//
|
||||
// For example, consider a system measuring the number of requests that
|
||||
// it receives and reports the sum of these requests every second as a
|
||||
// CUMULATIVE metric:
|
||||
//
|
||||
// 1. The system starts receiving at time=t_0.
|
||||
// 2. A request is received, the system measures 1 request.
|
||||
// 3. A request is received, the system measures 1 request.
|
||||
// 4. A request is received, the system measures 1 request.
|
||||
// 5. The 1 second collection cycle ends. A metric is exported for the
|
||||
// number of requests received over the interval of time t_0 to
|
||||
// t_0+1 with a value of 3.
|
||||
// 6. A request is received, the system measures 1 request.
|
||||
// 7. A request is received, the system measures 1 request.
|
||||
// 8. The 1 second collection cycle ends. A metric is exported for the
|
||||
// number of requests received over the interval of time t_0 to
|
||||
// t_0+2 with a value of 5.
|
||||
// 9. The system experiences a fault and loses state.
|
||||
// 10. The system recovers and resumes receiving at time=t_1.
|
||||
// 11. A request is received, the system measures 1 request.
|
||||
// 12. The 1 second collection cycle ends. A metric is exported for the
|
||||
// number of requests received over the interval of time t_1 to
|
||||
// t_0+1 with a value of 1.
|
||||
//
|
||||
// Note: Even though reporting changes since the last report time using
|
||||
// CUMULATIVE is valid, it is not recommended. This may cause problems for
|
||||
// systems that do not use start_time to determine when the aggregation
|
||||
// value was reset (e.g. Prometheus).
|
||||
AGGREGATION_TEMPORALITY_CUMULATIVE = 2;
|
||||
}
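As the CUMULATIVE notes above imply, consumers usually derive per-second rates from two successive cumulative points; the simplified sketch below assumes no reset happened between them (a real consumer must also compare StartTimeUnixNano values to detect resets):

```go
// ratePerSecond computes the per-second rate between two cumulative samples.
func ratePerSecond(prevValue, currValue float64, prevTimeNano, currTimeNano uint64) float64 {
	if currTimeNano <= prevTimeNano {
		return 0 // out-of-order or duplicate sample
	}
	dt := float64(currTimeNano-prevTimeNano) / 1e9
	return (currValue - prevValue) / dt
}
```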
|
||||
|
||||
// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
|
||||
// bit-field representing 32 distinct boolean flags. Each flag defined in this
|
||||
// enum is a bit-mask. To test the presence of a single flag in the flags of
|
||||
// a data point, for example, use an expression like:
|
||||
//
|
||||
// (point.flags & FLAG_NO_RECORDED_VALUE) == FLAG_NO_RECORDED_VALUE
|
||||
//
|
||||
enum DataPointFlags {
|
||||
FLAG_NONE = 0;
|
||||
|
||||
// This DataPoint is valid but has no recorded value. This value
|
||||
// SHOULD be used to reflect explicitly missing data in a series, as
|
||||
// for an equivalent to the Prometheus "staleness marker".
|
||||
FLAG_NO_RECORDED_VALUE = 1;
|
||||
|
||||
// Bits 2-31 are reserved for future use.
|
||||
}
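The bit-mask test quoted in the comment above translates directly to Go; the constant mirrors FLAG_NO_RECORDED_VALUE = 1 from the enum:

```go
const flagNoRecordedValue uint32 = 1 // FLAG_NO_RECORDED_VALUE

// hasNoRecordedValue reports whether a data point carries the staleness flag.
func hasNoRecordedValue(flags uint32) bool {
	return flags&flagNoRecordedValue == flagNoRecordedValue
}
```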
|
||||
|
||||
// NumberDataPoint is a single data point in a timeseries that describes the
|
||||
// time-varying scalar value of a metric.
|
||||
message NumberDataPoint {
|
||||
reserved 1;
|
||||
|
||||
// The set of key/value pairs that uniquely identify the timeseries from
|
||||
// where this point belongs. The list may be empty (may contain 0 elements).
|
||||
// Attribute keys MUST be unique (it is not allowed to have more than one
|
||||
// attribute with the same key).
|
||||
repeated KeyValue attributes = 7;
|
||||
|
||||
// StartTimeUnixNano is optional but strongly encouraged, see the
|
||||
// detailed comments above Metric.
|
||||
//
|
||||
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
|
||||
// 1970.
|
||||
fixed64 start_time_unix_nano = 2;
|
||||
|
||||
// TimeUnixNano is required, see the detailed comments above Metric.
|
||||
//
|
||||
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
|
||||
// 1970.
|
||||
fixed64 time_unix_nano = 3;
|
||||
|
||||
// The value itself. A point is considered invalid when one of the recognized
|
||||
// value fields is not present inside this oneof.
|
||||
oneof value {
|
||||
double as_double = 4;
|
||||
sfixed64 as_int = 6;
|
||||
}
|
||||
|
||||
// (Optional) List of exemplars collected from
|
||||
// measurements that were used to form the data point
|
||||
repeated Exemplar exemplars = 5;
|
||||
|
||||
// Flags that apply to this specific data point. See DataPointFlags
|
||||
// for the available flags and their meaning.
|
||||
uint32 flags = 8;
|
||||
}
|
||||
|
||||
// HistogramDataPoint is a single data point in a timeseries that describes the
|
||||
// time-varying values of a Histogram. A Histogram contains summary statistics
|
||||
// for a population of values; it may optionally contain the distribution of
|
||||
// those values across a set of buckets.
|
||||
//
|
||||
// If the histogram contains the distribution of values, then both
|
||||
// "explicit_bounds" and "bucket counts" fields must be defined.
|
||||
// If the histogram does not contain the distribution of values, then both
|
||||
// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
|
||||
// "sum" are known.
|
||||
message HistogramDataPoint {
|
||||
reserved 1;
|
||||
|
||||
// The set of key/value pairs that uniquely identify the timeseries from
|
||||
// where this point belongs. The list may be empty (may contain 0 elements).
|
||||
// Attribute keys MUST be unique (it is not allowed to have more than one
|
||||
// attribute with the same key).
|
||||
repeated KeyValue attributes = 9;
|
||||
|
||||
// StartTimeUnixNano is optional but strongly encouraged, see the
|
||||
// detailed comments above Metric.
|
||||
//
|
||||
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
|
||||
// 1970.
|
||||
fixed64 start_time_unix_nano = 2;
|
||||
|
||||
// TimeUnixNano is required, see the detailed comments above Metric.
|
||||
//
|
||||
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
|
||||
// 1970.
|
||||
fixed64 time_unix_nano = 3;
|
||||
|
||||
// count is the number of values in the population. Must be non-negative. This
|
||||
// value must be equal to the sum of the "count" fields in buckets if a
|
||||
// histogram is provided.
|
||||
fixed64 count = 4;
|
||||
|
||||
// sum of the values in the population. If count is zero then this field
|
||||
// must be zero.
|
||||
//
|
||||
// Note: Sum should only be filled out when measuring non-negative discrete
|
||||
// events, and is assumed to be monotonic over the values of these events.
|
||||
// Negative events *can* be recorded, but sum should not be filled out when
|
||||
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
|
||||
// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
|
||||
optional double sum = 5;
|
||||
|
||||
// bucket_counts is an optional field that contains the count values of the histogram
|
||||
// for each bucket.
|
||||
//
|
||||
// The sum of the bucket_counts must equal the value in the count field.
|
||||
//
|
||||
// The number of elements in the bucket_counts array must be one greater than
|
||||
// the number of elements in explicit_bounds array.
|
||||
repeated fixed64 bucket_counts = 6;
|
||||
|
||||
// explicit_bounds specifies buckets with explicitly defined bounds for values.
|
||||
//
|
||||
// The boundaries for bucket at index i are:
|
||||
//
|
||||
// (-infinity, explicit_bounds[i]] for i == 0
|
||||
// (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
|
||||
// (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
|
||||
//
|
||||
// The values in the explicit_bounds array must be strictly increasing.
|
||||
//
|
||||
// Histogram buckets are inclusive of their upper boundary, except the last
|
||||
// bucket where the boundary is at infinity. This format is intentionally
|
||||
// compatible with the OpenMetrics histogram definition.
|
||||
repeated double explicit_bounds = 7;
|
||||
|
||||
// (Optional) List of exemplars collected from
|
||||
// measurements that were used to form the data point
|
||||
repeated Exemplar exemplars = 8;
|
||||
|
||||
// Flags that apply to this specific data point. See DataPointFlags
|
||||
// for the available flags and their meaning.
|
||||
uint32 flags = 10;
|
||||
|
||||
// min is the minimum value over (start_time, end_time].
|
||||
optional double min = 11;
|
||||
|
||||
// max is the maximum value over (start_time, end_time].
|
||||
optional double max = 12;
|
||||
}
|
||||
|
||||
// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
|
||||
// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
|
||||
// summary statistics for a population of values; it may optionally contain the
|
||||
// distribution of those values across a set of buckets.
|
||||
|
||||
message ExponentialHistogramDataPoint {
|
||||
// The set of key/value pairs that uniquely identify the timeseries from
|
||||
// where this point belongs. The list may be empty (may contain 0 elements).
|
||||
// Attribute keys MUST be unique (it is not allowed to have more than one
|
||||
// attribute with the same key).
|
||||
repeated KeyValue attributes = 1;
|
||||
|
||||
// StartTimeUnixNano is optional but strongly encouraged, see the
|
||||
// detailed comments above Metric.
|
||||
//
|
||||
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
|
||||
// 1970.
|
||||
fixed64 start_time_unix_nano = 2;
|
||||
|
||||
// TimeUnixNano is required, see the detailed comments above Metric.
|
||||
//
|
||||
// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
|
||||
// 1970.
|
||||
fixed64 time_unix_nano = 3;
|
||||
|
||||
// count is the number of values in the population. Must be
|
||||
// non-negative. This value must be equal to the sum of the "bucket_counts"
|
||||
// values in the positive and negative Buckets plus the "zero_count" field.
|
||||
fixed64 count = 4;
|
||||
|
||||
// sum of the values in the population. If count is zero then this field
|
||||
// must be zero.
|
||||
//
|
||||
// Note: Sum should only be filled out when measuring non-negative discrete
|
||||
// events, and is assumed to be monotonic over the values of these events.
|
||||
// Negative events *can* be recorded, but sum should not be filled out when
|
||||
// doing so. This is specifically to enforce compatibility w/ OpenMetrics,
|
||||
// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
|
||||
optional double sum = 5;
|
||||
|
||||
// scale describes the resolution of the histogram. Boundaries are
|
||||
// located at powers of the base, where:
|
||||
//
|
||||
// base = (2^(2^-scale))
|
||||
//
|
||||
// The histogram bucket identified by `index`, a signed integer,
|
||||
// contains values that are greater than (base^index) and
|
||||
// less than or equal to (base^(index+1)).
|
||||
//
|
||||
// The positive and negative ranges of the histogram are expressed
|
||||
// separately. Negative values are mapped by their absolute value
|
||||
// into the negative range using the same scale as the positive range.
|
||||
//
|
||||
// scale is not restricted by the protocol, as the permissible
|
||||
// values depend on the range of the data.
|
||||
sint32 scale = 6;
|
||||
|
||||
// zero_count is the count of values that are either exactly zero or
|
||||
// within the region considered zero by the instrumentation at the
|
||||
// tolerated degree of precision. This bucket stores values that
|
||||
// cannot be expressed using the standard exponential formula as
|
||||
// well as values that have been rounded to zero.
|
||||
//
|
||||
// Implementations MAY consider the zero bucket to have probability
|
||||
// mass equal to (zero_count / count).
|
||||
fixed64 zero_count = 7;
|
||||
|
||||
// positive carries the positive range of exponential bucket counts.
|
||||
Buckets positive = 8;
|
||||
|
||||
// negative carries the negative range of exponential bucket counts.
|
||||
Buckets negative = 9;
|
||||
|
||||
// Buckets are a set of bucket counts, encoded in a contiguous array
|
||||
// of counts.
|
||||
message Buckets {
|
||||
// Offset is the bucket index of the first entry in the bucket_counts array.
|
||||
//
|
||||
// Note: This uses a varint encoding as a simple form of compression.
|
||||
sint32 offset = 1;
|
||||
|
||||
// Count is an array of counts, where count[i] carries the count
|
||||
// of the bucket at index (offset+i). count[i] is the count of
|
||||
// values greater than base^(offset+i) and less or equal to than
|
||||
// base^(offset+i+1).
|
||||
//
|
||||
// Note: By contrast, the explicit HistogramDataPoint uses
|
||||
// fixed64. This field is expected to have many buckets,
|
||||
// especially zeros, so uint64 has been selected to ensure
|
||||
// varint encoding.
|
||||
repeated uint64 bucket_counts = 2;
|
||||
}
|
||||
|
||||
// Flags that apply to this specific data point. See DataPointFlags
|
||||
// for the available flags and their meaning.
|
||||
uint32 flags = 10;
|
||||
|
||||
// (Optional) List of exemplars collected from
|
||||
// measurements that were used to form the data point
|
||||
repeated Exemplar exemplars = 11;
|
||||
|
||||
// min is the minimum value over (start_time, end_time].
|
||||
optional double min = 12;
|
||||
|
||||
// max is the maximum value over (start_time, end_time].
|
||||
optional double max = 13;
|
||||
}
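
The scale/index arithmetic above is easy to get wrong, so here is a minimal Go sketch (illustrative only, not part of this change) that evaluates the bucket-boundary formula base = 2^(2^-scale):

package main

import (
    "fmt"
    "math"
)

// bucketUpperBound returns base^(index+1), the inclusive upper bound of the
// exponential bucket identified by index, where base = 2^(2^-scale).
// It relies on the identity base^k = 2^(k * 2^-scale).
func bucketUpperBound(scale, index int32) float64 {
    return math.Exp2(float64(index+1) * math.Exp2(-float64(scale)))
}

func main() {
    // scale 0 gives base 2: bucket 3 covers (8, 16].
    fmt.Println(bucketUpperBound(0, 3)) // 16
    // scale 2 gives base 2^(1/4) ≈ 1.19: bucket 3 covers (~1.68, 2].
    fmt.Println(bucketUpperBound(2, 3)) // 2
}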

// SummaryDataPoint is a single data point in a timeseries that describes the
// time-varying values of a Summary metric.
message SummaryDataPoint {
  reserved 1;

  // The set of key/value pairs that uniquely identify the timeseries from
  // where this point belongs. The list may be empty (may contain 0 elements).
  // Attribute keys MUST be unique (it is not allowed to have more than one
  // attribute with the same key).
  repeated KeyValue attributes = 7;

  // StartTimeUnixNano is optional but strongly encouraged, see the
  // detailed comments above Metric.
  //
  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  // 1970.
  fixed64 start_time_unix_nano = 2;

  // TimeUnixNano is required, see the detailed comments above Metric.
  //
  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  // 1970.
  fixed64 time_unix_nano = 3;

  // count is the number of values in the population. Must be non-negative.
  fixed64 count = 4;

  // sum of the values in the population. If count is zero then this field
  // must be zero.
  //
  // Note: Sum should only be filled out when measuring non-negative discrete
  // events, and is assumed to be monotonic over the values of these events.
  // Negative events *can* be recorded, but sum should not be filled out when
  // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
  // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
  double sum = 5;

  // Represents the value at a given quantile of a distribution.
  //
  // To record Min and Max values the following conventions are used:
  // - The 1.0 quantile is equivalent to the maximum value observed.
  // - The 0.0 quantile is equivalent to the minimum value observed.
  //
  // See the following issue for more context:
  // https://github.com/open-telemetry/opentelemetry-proto/issues/125
  message ValueAtQuantile {
    // The quantile of a distribution. Must be in the interval
    // [0.0, 1.0].
    double quantile = 1;

    // The value at the given quantile of a distribution.
    //
    // Quantile values must NOT be negative.
    double value = 2;
  }

  // (Optional) list of values at different quantiles of the distribution calculated
  // from the current snapshot. The quantiles must be strictly increasing.
  repeated ValueAtQuantile quantile_values = 6;

  // Flags that apply to this specific data point. See DataPointFlags
  // for the available flags and their meaning.
  uint32 flags = 8;
}

// A representation of an exemplar, which is a sample input measurement.
// Exemplars also hold information about the environment when the measurement
// was recorded, for example the span and trace ID of the active span when the
// exemplar was recorded.
message Exemplar {
  reserved 1;

  // The set of key/value pairs that were filtered out by the aggregator, but
  // recorded alongside the original measurement. Only key/value pairs that were
  // filtered out by the aggregator should be included.
  repeated KeyValue filtered_attributes = 7;

  // time_unix_nano is the exact time when this exemplar was recorded.
  //
  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
  // 1970.
  fixed64 time_unix_nano = 2;

  // The value of the measurement that was recorded. An exemplar is
  // considered invalid when one of the recognized value fields is not present
  // inside this oneof.
  oneof value {
    double as_double = 3;
    sfixed64 as_int = 6;
  }

  // (Optional) Span ID of the exemplar trace.
  // span_id may be missing if the measurement is not recorded inside a trace
  // or if the trace is not sampled.
  bytes span_id = 4;

  // (Optional) Trace ID of the exemplar trace.
  // trace_id may be missing if the measurement is not recorded inside a trace
  // or if the trace is not sampled.
  bytes trace_id = 5;
}
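
In the generated Go code the value oneof becomes a pair of wrapper types that are read with a type switch, mirroring how the parser below handles NumberDataPoint. A hedged sketch — the Exemplar_AsDouble/Exemplar_AsInt names follow standard protobuf-Go naming and are assumptions here, since this change does not parse exemplars:

// exemplarValue extracts the recorded measurement from an exemplar, reporting
// whether one of the recognized oneof fields was present.
func exemplarValue(e *pb.Exemplar) (float64, bool) {
    switch t := e.Value.(type) {
    case *pb.Exemplar_AsDouble: // assumed generated wrapper type
        return t.AsDouble, true
    case *pb.Exemplar_AsInt: // assumed generated wrapper type
        return float64(t.AsInt), true
    default:
        return 0, false // invalid exemplar per the comment above
    }
}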

30 lib/protoparser/opentelemetry/proto/metrics_service.proto Normal file
@ -0,0 +1,30 @@

// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package opentelemetry;

import "lib/protoparser/opentelemetry/proto/metrics.proto";

option go_package = "opentelemetry/pb";

message ExportMetricsServiceRequest {
  // An array of ResourceMetrics.
  // For data coming from a single resource this array will typically contain one
  // element. Intermediary nodes (such as OpenTelemetry Collector) that receive
  // data from multiple origins typically batch the data before forwarding further and
  // in that case this array will contain multiple elements.
  repeated ResourceMetrics resource_metrics = 1;
}
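
The tests further down build such requests with the generated pb types; a condensed sketch of the construction and marshaling step (attributesFromKV is the test helper defined below):

req := &pb.ExportMetricsServiceRequest{
    ResourceMetrics: []*pb.ResourceMetrics{
        {
            // Resource attributes become base labels on every sample,
            // e.g. job="vm" in the tests below.
            Resource: &pb.Resource{Attributes: attributesFromKV("job", "vm")},
        },
    },
}
data, err := req.MarshalVT() // protobuf payload for the ingestion endpoint
if err != nil {
    // handle the marshaling error
}
_ = data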

37 lib/protoparser/opentelemetry/proto/resource.proto Normal file
@ -0,0 +1,37 @@

// Copyright 2019, OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package opentelemetry;

import "lib/protoparser/opentelemetry/proto/common.proto";

option csharp_namespace = "OpenTelemetry.Proto.Resource.V1";
option java_multiple_files = true;
option java_package = "io.opentelemetry.proto.resource.v1";
option java_outer_classname = "ResourceProto";
option go_package = "opentelemetry/pb";

// Resource information.
message Resource {
  // Set of attributes that describe the resource.
  // Attribute keys MUST be unique (it is not allowed to have more than one
  // attribute with the same key).
  repeated KeyValue attributes = 1;

  // dropped_attributes_count is the number of dropped attributes. If the value is 0, then
  // no attributes were dropped.
  uint32 dropped_attributes_count = 2;
}

298 lib/protoparser/opentelemetry/stream/streamparser.go Normal file
@ -0,0 +1,298 @@

package stream

import (
    "fmt"
    "io"
    "strconv"
    "sync"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
    "github.com/VictoriaMetrics/metrics"
)

// ParseStream parses OpenTelemetry protobuf data from r and calls callback for the parsed rows.
//
// callback shouldn't hold tss items after returning.
func ParseStream(r io.Reader, isGzipped bool, callback func(tss []prompbmarshal.TimeSeries) error) error {
    wcr := writeconcurrencylimiter.GetReader(r)
    defer writeconcurrencylimiter.PutReader(wcr)
    r = wcr

    if isGzipped {
        zr, err := common.GetGzipReader(r)
        if err != nil {
            return fmt.Errorf("cannot read gzip-compressed OpenTelemetry protocol data: %w", err)
        }
        defer common.PutGzipReader(zr)
        r = zr
    }

    wr := getWriteContext()
    defer putWriteContext(wr)
    req, err := wr.readAndUnpackRequest(r)
    if err != nil {
        return fmt.Errorf("cannot unpack OpenTelemetry metrics: %w", err)
    }
    wr.parseRequestToTss(req)

    if err := callback(wr.tss); err != nil {
        return fmt.Errorf("error when processing OpenTelemetry samples: %w", err)
    }

    return nil
}
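
A minimal usage sketch (the HTTP wiring and the push destination are assumptions here; only ParseStream and its callback contract come from this change):

// body is the request body with protobuf-encoded metrics;
// gzipped mirrors the Content-Encoding: gzip request header.
err := ParseStream(body, gzipped, func(tss []prompbmarshal.TimeSeries) error {
    // tss must not be retained after the callback returns,
    // since its underlying buffers are pooled and reused.
    return pushToStorage(tss) // hypothetical sink
})
if err != nil {
    // respond with an ingestion error
}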

func (wr *writeContext) appendSamplesFromScopeMetrics(sc *pb.ScopeMetrics) {
    for _, m := range sc.Metrics {
        if len(m.Name) == 0 {
            // skip metrics without names
            continue
        }
        switch t := m.Data.(type) {
        case *pb.Metric_Gauge:
            for _, p := range t.Gauge.DataPoints {
                wr.appendSampleFromNumericPoint(m.Name, p)
            }
        case *pb.Metric_Sum:
            if t.Sum.AggregationTemporality != pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE {
                rowsDroppedUnsupportedSum.Inc()
                continue
            }
            for _, p := range t.Sum.DataPoints {
                wr.appendSampleFromNumericPoint(m.Name, p)
            }
        case *pb.Metric_Summary:
            for _, p := range t.Summary.DataPoints {
                wr.appendSamplesFromSummary(m.Name, p)
            }
        case *pb.Metric_Histogram:
            if t.Histogram.AggregationTemporality != pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE {
                rowsDroppedUnsupportedHistogram.Inc()
                continue
            }
            for _, p := range t.Histogram.DataPoints {
                wr.appendSamplesFromHistogram(m.Name, p)
            }
        default:
            rowsDroppedUnsupportedMetricType.Inc()
            logger.Warnf("unsupported type %T for metric %q", t, m.Name)
        }
    }
}

// appendSampleFromNumericPoint appends p to wr.tss
func (wr *writeContext) appendSampleFromNumericPoint(metricName string, p *pb.NumberDataPoint) {
    var v float64
    switch t := p.Value.(type) {
    case *pb.NumberDataPoint_AsInt:
        v = float64(t.AsInt)
    case *pb.NumberDataPoint_AsDouble:
        v = t.AsDouble
    }

    t := int64(p.TimeUnixNano / 1e6)
    isStale := (p.Flags)&uint32(1) != 0
    wr.pointLabels = appendAttributesToPromLabels(wr.pointLabels[:0], p.Attributes)

    wr.appendSample(metricName, t, v, isStale)
}

// appendSamplesFromSummary appends summary p to wr.tss
func (wr *writeContext) appendSamplesFromSummary(metricName string, p *pb.SummaryDataPoint) {
    t := int64(p.TimeUnixNano / 1e6)
    isStale := (p.Flags)&uint32(1) != 0
    wr.pointLabels = appendAttributesToPromLabels(wr.pointLabels[:0], p.Attributes)

    wr.appendSample(metricName+"_sum", t, p.Sum, isStale)
    wr.appendSample(metricName+"_count", t, float64(p.Count), isStale)
    for _, q := range p.QuantileValues {
        qValue := strconv.FormatFloat(q.Quantile, 'f', -1, 64)
        wr.appendSampleWithExtraLabel(metricName, "quantile", qValue, t, q.Value, isStale)
    }
}
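
So a summary my-summary with Sum=32.5, Count=5 and quantiles {0.1: 7.5, 0.5: 10, 1: 15} (the fixture used in the tests below) is flattened into the usual Prometheus-style series:

my-summary_sum 32.5
my-summary_count 5
my-summary{quantile="0.1"} 7.5
my-summary{quantile="0.5"} 10
my-summary{quantile="1"} 15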

// appendSamplesFromHistogram appends histogram p to wr.tss
func (wr *writeContext) appendSamplesFromHistogram(metricName string, p *pb.HistogramDataPoint) {
    if len(p.BucketCounts) == 0 {
        // nothing to append
        return
    }
    if len(p.BucketCounts) != len(p.ExplicitBounds)+1 {
        // broken data format
        logger.Warnf("opentelemetry bad histogram format: %q, size of buckets: %d, size of bounds: %d", metricName, len(p.BucketCounts), len(p.ExplicitBounds))
        return
    }

    t := int64(p.TimeUnixNano / 1e6)
    isStale := (p.Flags)&uint32(1) != 0
    wr.pointLabels = appendAttributesToPromLabels(wr.pointLabels[:0], p.Attributes)

    wr.appendSample(metricName+"_sum", t, *p.Sum, isStale)
    wr.appendSample(metricName+"_count", t, float64(p.Count), isStale)

    var cumulative uint64
    for index, bound := range p.ExplicitBounds {
        cumulative += p.BucketCounts[index]
        boundLabelValue := strconv.FormatFloat(bound, 'f', -1, 64)
        wr.appendSampleWithExtraLabel(metricName+"_bucket", "le", boundLabelValue, t, float64(cumulative), isStale)
    }
    cumulative += p.BucketCounts[len(p.BucketCounts)-1]
    wr.appendSampleWithExtraLabel(metricName+"_bucket", "le", "+Inf", t, float64(cumulative), isStale)
}
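
Worked example, using the histogram fixture from the tests below: with ExplicitBounds [0.1, 0.5, 1, 5] and BucketCounts [0, 5, 10, 0, 0], the running total yields le-labelled bucket samples 0, 5, 15, 15, and finally 15 for le="+Inf", which equals Count=15 as the cumulative OpenMetrics convention requires.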

// appendSample appends sample with the given metricName to wr.tss
func (wr *writeContext) appendSample(metricName string, t int64, v float64, isStale bool) {
    wr.appendSampleWithExtraLabel(metricName, "", "", t, v, isStale)
}

// appendSampleWithExtraLabel appends sample with the given metricName and the given (labelName=labelValue) extra label to wr.tss
func (wr *writeContext) appendSampleWithExtraLabel(metricName, labelName, labelValue string, t int64, v float64, isStale bool) {
    if isStale {
        v = decimal.StaleNaN
    }
    if t <= 0 {
        // Set the current timestamp if t isn't set.
        t = int64(fasttime.UnixTimestamp()) * 1000
    }

    labelsPool := wr.labelsPool
    labelsLen := len(labelsPool)
    labelsPool = append(labelsPool, prompbmarshal.Label{
        Name:  "__name__",
        Value: metricName,
    })
    labelsPool = append(labelsPool, wr.baseLabels...)
    labelsPool = append(labelsPool, wr.pointLabels...)
    if labelName != "" && labelValue != "" {
        labelsPool = append(labelsPool, prompbmarshal.Label{
            Name:  labelName,
            Value: labelValue,
        })
    }

    samplesPool := wr.samplesPool
    samplesLen := len(samplesPool)
    samplesPool = append(samplesPool, prompbmarshal.Sample{
        Timestamp: t,
        Value:     v,
    })

    wr.tss = append(wr.tss, prompbmarshal.TimeSeries{
        Labels:  labelsPool[labelsLen:],
        Samples: samplesPool[samplesLen:],
    })

    wr.labelsPool = labelsPool
    wr.samplesPool = samplesPool

    rowsRead.Inc()
}

// appendAttributesToPromLabels appends attributes to dst and returns the result.
func appendAttributesToPromLabels(dst []prompbmarshal.Label, attributes []*pb.KeyValue) []prompbmarshal.Label {
    for _, at := range attributes {
        dst = append(dst, prompbmarshal.Label{
            Name:  at.Key,
            Value: at.Value.FormatString(),
        })
    }
    return dst
}

type writeContext struct {
    // bb holds the original protobuf data, which must be parsed.
    bb bytesutil.ByteBuffer

    // tss holds parsed time series
    tss []prompbmarshal.TimeSeries

    // baseLabels are labels, which must be added to all the ingested samples
    baseLabels []prompbmarshal.Label

    // pointLabels are labels, which must be added to the ingested OpenTelemetry points
    pointLabels []prompbmarshal.Label

    // pools are used for reducing memory allocations when parsing time series
    labelsPool  []prompbmarshal.Label
    samplesPool []prompbmarshal.Sample
}

func (wr *writeContext) reset() {
    wr.bb.Reset()

    tss := wr.tss
    for i := range tss {
        ts := &tss[i]
        ts.Labels = nil
        ts.Samples = nil
    }
    wr.tss = tss[:0]

    wr.baseLabels = resetLabels(wr.baseLabels)
    wr.pointLabels = resetLabels(wr.pointLabels)

    wr.labelsPool = resetLabels(wr.labelsPool)
    wr.samplesPool = wr.samplesPool[:0]
}

func resetLabels(labels []prompbmarshal.Label) []prompbmarshal.Label {
    for i := range labels {
        label := &labels[i]
        label.Name = ""
        label.Value = ""
    }
    return labels[:0]
}

func (wr *writeContext) readAndUnpackRequest(r io.Reader) (*pb.ExportMetricsServiceRequest, error) {
    if _, err := wr.bb.ReadFrom(r); err != nil {
        return nil, fmt.Errorf("cannot read request: %w", err)
    }
    var req pb.ExportMetricsServiceRequest
    if err := req.UnmarshalVT(wr.bb.B); err != nil {
        return nil, fmt.Errorf("cannot unmarshal request from %d bytes: %w", len(wr.bb.B), err)
    }
    return &req, nil
}

func (wr *writeContext) parseRequestToTss(req *pb.ExportMetricsServiceRequest) {
    for _, rm := range req.ResourceMetrics {
        if rm.Resource == nil {
            // skip metrics without resource part.
            continue
        }
        wr.baseLabels = appendAttributesToPromLabels(wr.baseLabels[:0], rm.Resource.Attributes)
        for _, sc := range rm.ScopeMetrics {
            wr.appendSamplesFromScopeMetrics(sc)
        }
    }
}

var wrPool sync.Pool

func getWriteContext() *writeContext {
    v := wrPool.Get()
    if v == nil {
        return &writeContext{}
    }
    return v.(*writeContext)
}

func putWriteContext(wr *writeContext) {
    wr.reset()
    wrPool.Put(wr)
}

var (
    rowsRead                         = metrics.NewCounter(`vm_protoparser_rows_read_total{type="opentelemetry"}`)
    rowsDroppedUnsupportedHistogram  = metrics.NewCounter(`vm_protoparser_rows_dropped_total{type="opentelemetry",reason="unsupported_histogram_aggregation"}`)
    rowsDroppedUnsupportedSum        = metrics.NewCounter(`vm_protoparser_rows_dropped_total{type="opentelemetry",reason="unsupported_sum_aggregation"}`)
    rowsDroppedUnsupportedMetricType = metrics.NewCounter(`vm_protoparser_rows_dropped_total{type="opentelemetry",reason="unsupported_metric_type"}`)
)

317 lib/protoparser/opentelemetry/stream/streamparser_test.go Normal file
@ -0,0 +1,317 @@

package stream

import (
    "bytes"
    "compress/gzip"
    "fmt"
    "reflect"
    "sort"
    "testing"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
)

func TestParseStream(t *testing.T) {
    f := func(samples []*pb.Metric, tssExpected []prompbmarshal.TimeSeries) {
        t.Helper()

        checkSeries := func(tss []prompbmarshal.TimeSeries) error {
            if len(tss) != len(tssExpected) {
                return fmt.Errorf("not expected tss count, got: %d, want: %d", len(tss), len(tssExpected))
            }
            sortByMetricName(tss)
            sortByMetricName(tssExpected)
            for i := 0; i < len(tss); i++ {
                ts := tss[i]
                tsExpected := tssExpected[i]
                if len(ts.Labels) != len(tsExpected.Labels) {
                    return fmt.Errorf("idx: %d, not expected labels count, got: %d, want: %d", i, len(ts.Labels), len(tsExpected.Labels))
                }
                sortLabels(ts.Labels)
                sortLabels(tsExpected.Labels)
                for j, label := range ts.Labels {
                    labelExpected := tsExpected.Labels[j]
                    if !reflect.DeepEqual(label, labelExpected) {
                        return fmt.Errorf("idx: %d, label idx: %d, not equal label pairs, \ngot: \n%s, \nwant: \n%s",
                            i, j, prettifyLabel(label), prettifyLabel(labelExpected))
                    }
                }
                if len(ts.Samples) != len(tsExpected.Samples) {
                    return fmt.Errorf("idx: %d, not expected samples count, got: %d, want: %d", i, len(ts.Samples), len(tsExpected.Samples))
                }
                for j, sample := range ts.Samples {
                    sampleExpected := tsExpected.Samples[j]
                    if !reflect.DeepEqual(sample, sampleExpected) {
                        return fmt.Errorf("idx: %d, sample idx: %d, not equal sample pairs, \ngot: \n%s,\nwant: \n%s",
                            i, j, prettifySample(sample), prettifySample(sampleExpected))
                    }
                }
            }
            return nil
        }

        req := &pb.ExportMetricsServiceRequest{
            ResourceMetrics: []*pb.ResourceMetrics{
                generateOTLPSamples(samples),
            },
        }

        // Verify protobuf parsing
        pbData, err := req.MarshalVT()
        if err != nil {
            t.Fatalf("cannot marshal to protobuf: %s", err)
        }
        if err := checkParseStream(pbData, checkSeries); err != nil {
            t.Fatalf("cannot parse protobuf: %s", err)
        }
    }

    jobLabelValue := prompbmarshal.Label{
        Name:  "job",
        Value: "vm",
    }
    leLabel := func(value string) prompbmarshal.Label {
        return prompbmarshal.Label{
            Name:  "le",
            Value: value,
        }
    }
    kvLabel := func(k, v string) prompbmarshal.Label {
        return prompbmarshal.Label{
            Name:  k,
            Value: v,
        }
    }

    // Test all metric types
    f(
        []*pb.Metric{
            generateGauge("my-gauge"),
            generateHistogram("my-histogram"),
            generateSum("my-sum"),
            generateSummary("my-summary"),
        },
        []prompbmarshal.TimeSeries{
            newPromPBTs("my-gauge", 15000, 15.0, jobLabelValue, kvLabel("label1", "value1")),
            newPromPBTs("my-histogram_count", 30000, 15.0, jobLabelValue, kvLabel("label2", "value2")),
            newPromPBTs("my-histogram_sum", 30000, 30.0, jobLabelValue, kvLabel("label2", "value2")),
            newPromPBTs("my-histogram_bucket", 30000, 0.0, jobLabelValue, kvLabel("label2", "value2"), leLabel("0.1")),
            newPromPBTs("my-histogram_bucket", 30000, 5.0, jobLabelValue, kvLabel("label2", "value2"), leLabel("0.5")),
            newPromPBTs("my-histogram_bucket", 30000, 15.0, jobLabelValue, kvLabel("label2", "value2"), leLabel("1")),
            newPromPBTs("my-histogram_bucket", 30000, 15.0, jobLabelValue, kvLabel("label2", "value2"), leLabel("5")),
            newPromPBTs("my-histogram_bucket", 30000, 15.0, jobLabelValue, kvLabel("label2", "value2"), leLabel("+Inf")),
            newPromPBTs("my-sum", 150000, 15.5, jobLabelValue, kvLabel("label5", "value5")),
            newPromPBTs("my-summary_sum", 35000, 32.5, jobLabelValue, kvLabel("label6", "value6")),
            newPromPBTs("my-summary_count", 35000, 5.0, jobLabelValue, kvLabel("label6", "value6")),
            newPromPBTs("my-summary", 35000, 7.5, jobLabelValue, kvLabel("label6", "value6"), kvLabel("quantile", "0.1")),
            newPromPBTs("my-summary", 35000, 10.0, jobLabelValue, kvLabel("label6", "value6"), kvLabel("quantile", "0.5")),
            newPromPBTs("my-summary", 35000, 15.0, jobLabelValue, kvLabel("label6", "value6"), kvLabel("quantile", "1")),
        })

    // Test gauge
    f(
        []*pb.Metric{
            generateGauge("my-gauge"),
        },
        []prompbmarshal.TimeSeries{
            newPromPBTs("my-gauge", 15000, 15.0, jobLabelValue, kvLabel("label1", "value1")),
        },
    )
}

func checkParseStream(data []byte, checkSeries func(tss []prompbmarshal.TimeSeries) error) error {
    // Verify parsing without compression
    if err := ParseStream(bytes.NewBuffer(data), false, checkSeries); err != nil {
        return fmt.Errorf("error when parsing data: %w", err)
    }

    // Verify parsing with compression
    var bb bytes.Buffer
    zw := gzip.NewWriter(&bb)
    if _, err := zw.Write(data); err != nil {
        return fmt.Errorf("cannot compress data: %s", err)
    }
    if err := zw.Close(); err != nil {
        return fmt.Errorf("cannot close gzip writer: %s", err)
    }
    if err := ParseStream(&bb, true, checkSeries); err != nil {
        return fmt.Errorf("error when parsing compressed data: %w", err)
    }

    return nil
}

func attributesFromKV(k, v string) []*pb.KeyValue {
    return []*pb.KeyValue{
        {
            Key: k,
            Value: &pb.AnyValue{
                Value: &pb.AnyValue_StringValue{
                    StringValue: v,
                },
            },
        },
    }
}

func generateGauge(name string) *pb.Metric {
    points := []*pb.NumberDataPoint{
        {
            Attributes:   attributesFromKV("label1", "value1"),
            Value:        &pb.NumberDataPoint_AsInt{AsInt: 15},
            TimeUnixNano: uint64(15 * time.Second),
        },
    }
    return &pb.Metric{
        Name: name,
        Data: &pb.Metric_Gauge{
            Gauge: &pb.Gauge{
                DataPoints: points,
            },
        },
    }
}

func generateHistogram(name string) *pb.Metric {
    points := []*pb.HistogramDataPoint{
        {
            Attributes:     attributesFromKV("label2", "value2"),
            Count:          15,
            Sum:            func() *float64 { v := 30.0; return &v }(),
            ExplicitBounds: []float64{0.1, 0.5, 1.0, 5.0},
            BucketCounts:   []uint64{0, 5, 10, 0, 0},
            TimeUnixNano:   uint64(30 * time.Second),
        },
    }
    return &pb.Metric{
        Name: name,
        Data: &pb.Metric_Histogram{
            Histogram: &pb.Histogram{
                AggregationTemporality: pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
                DataPoints:             points,
            },
        },
    }
}

func generateSum(name string) *pb.Metric {
    points := []*pb.NumberDataPoint{
        {
            Attributes:   attributesFromKV("label5", "value5"),
            Value:        &pb.NumberDataPoint_AsDouble{AsDouble: 15.5},
            TimeUnixNano: uint64(150 * time.Second),
        },
    }
    return &pb.Metric{
        Name: name,
        Data: &pb.Metric_Sum{
            Sum: &pb.Sum{
                AggregationTemporality: pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
                DataPoints:             points,
            },
        },
    }
}

func generateSummary(name string) *pb.Metric {
    points := []*pb.SummaryDataPoint{
        {
            Attributes:   attributesFromKV("label6", "value6"),
            TimeUnixNano: uint64(35 * time.Second),
            Sum:          32.5,
            Count:        5,
            QuantileValues: []*pb.SummaryDataPoint_ValueAtQuantile{
                {
                    Quantile: 0.1,
                    Value:    7.5,
                },
                {
                    Quantile: 0.5,
                    Value:    10.0,
                },
                {
                    Quantile: 1.0,
                    Value:    15.0,
                },
            },
        },
    }
    return &pb.Metric{
        Name: name,
        Data: &pb.Metric_Summary{
            Summary: &pb.Summary{
                DataPoints: points,
            },
        },
    }
}

func generateOTLPSamples(srcs []*pb.Metric) *pb.ResourceMetrics {
    otlpMetrics := &pb.ResourceMetrics{
        Resource: &pb.Resource{
            Attributes: attributesFromKV("job", "vm"),
        },
    }
    otlpMetrics.ScopeMetrics = []*pb.ScopeMetrics{
        {
            Metrics: append([]*pb.Metric{}, srcs...),
        },
    }
    return otlpMetrics
}

func newPromPBTs(metricName string, t int64, v float64, extraLabels ...prompbmarshal.Label) prompbmarshal.TimeSeries {
    if t <= 0 {
        // Set the current timestamp if t isn't set.
        t = int64(fasttime.UnixTimestamp()) * 1000
    }
    ts := prompbmarshal.TimeSeries{
        Labels: []prompbmarshal.Label{
            {
                Name:  "__name__",
                Value: metricName,
            },
        },
        Samples: []prompbmarshal.Sample{
            {
                Value:     v,
                Timestamp: t,
            },
        },
    }
    ts.Labels = append(ts.Labels, extraLabels...)
    return ts
}

func prettifyLabel(label prompbmarshal.Label) string {
    return fmt.Sprintf("name=%q value=%q", label.Name, label.Value)
}

func prettifySample(sample prompbmarshal.Sample) string {
    return fmt.Sprintf("sample=%f timestamp: %d", sample.Value, sample.Timestamp)
}

func sortByMetricName(tss []prompbmarshal.TimeSeries) {
    sort.Slice(tss, func(i, j int) bool {
        return getMetricName(tss[i].Labels) < getMetricName(tss[j].Labels)
    })
}

func getMetricName(labels []prompbmarshal.Label) string {
    for _, l := range labels {
        if l.Name == "__name__" {
            return l.Value
        }
    }
    return ""
}

func sortLabels(labels []prompbmarshal.Label) {
    sort.Slice(labels, func(i, j int) bool {
        return labels[i].Name < labels[j].Name
    })
}

@ -0,0 +1,39 @@

package stream

import (
    "bytes"
    "testing"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
)

func BenchmarkParseStream(b *testing.B) {
    samples := []*pb.Metric{
        generateGauge("my-gauge"),
        generateHistogram("my-histogram"),
        generateSum("my-sum"),
        generateSummary("my-summary"),
    }
    b.SetBytes(1)
    b.ReportAllocs()
    b.RunParallel(func(p *testing.PB) {
        pbRequest := pb.ExportMetricsServiceRequest{
            ResourceMetrics: []*pb.ResourceMetrics{generateOTLPSamples(samples)},
        }
        data, err := pbRequest.MarshalVT()
        if err != nil {
            b.Fatalf("cannot marshal data: %s", err)
        }

        for p.Next() {
            err := ParseStream(bytes.NewBuffer(data), false, func(tss []prompbmarshal.TimeSeries) error {
                return nil
            })
            if err != nil {
                b.Fatalf("cannot parse stream: %s", err)
            }
        }
    })
}