diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 70347e365e..6520ee9b88 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -6,12 +6,18 @@ updates:
       interval: "daily"
   - package-ecosystem: "gomod"
     directory: "/"
+    schedule:
+      interval: "weekly"
     open-pull-requests-limit: 0
   - package-ecosystem: "bundler"
     directory: "/docs"
+    schedule:
+      interval: "weekly"
     open-pull-requests-limit: 0
   - package-ecosystem: "gomod"
     directory: "/app/vmui/packages/vmui/web"
+    schedule:
+      interval: "weekly"
     open-pull-requests-limit: 0
   - package-ecosystem: "docker"
     directory: "/"
@@ -19,4 +25,6 @@ updates:
       interval: "daily"
   - package-ecosystem: "npm"
     directory: "/app/vmui/packages/vmui"
+    schedule:
+      interval: "weekly"
     open-pull-requests-limit: 0
diff --git a/README.md b/README.md
index e4ba31a4cc..63eb1e0713 100644
--- a/README.md
+++ b/README.md
@@ -86,6 +86,7 @@ VictoriaMetrics has the following prominent features:
   * [Arbitrary CSV data](#how-to-import-csv-data).
   * [Native binary format](#how-to-import-data-in-native-format).
   * [DataDog agent or DogStatsD](#how-to-send-data-from-datadog-agent).
+  * [OpenTelemetry metrics format](#sending-data-via-opentelemetry).
 * It supports powerful [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html), which can be used as a [statsd](https://github.com/statsd/statsd) alternative.
 * It supports metrics [relabeling](#relabeling).
 * It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and
@@ -1173,6 +1174,7 @@ Additionally, VictoriaMetrics can accept metrics via the following popular data
 * DataDog `submit metrics` API. See [these docs](#how-to-send-data-from-datadog-agent) for details.
 * InfluxDB line protocol. See [these docs](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details.
 * Graphite plaintext protocol. See [these docs](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) for details.
+* OpenTelemetry http API. See [these docs](#sending-data-via-opentelemetry) for details.
 * OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details.
 * OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details.
 * `/api/v1/import` for importing data obtained from [/api/v1/export](#how-to-export-data-in-json-line-format).
@@ -1356,6 +1358,13 @@ Note that it could be required to flush response cache after importing historica
 
 VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter).
 
+## Sending data via OpenTelemetry
+
+VictoriaMetrics supports data ingestion via the [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md) at the `/opentelemetry/api/v1/push` path.
+
+VictoriaMetrics expects `protobuf`-encoded requests at `/opentelemetry/api/v1/push`.
+Set the `Content-Encoding: gzip` HTTP request header when sending gzip-compressed data to `/opentelemetry/api/v1/push`.
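+
+The following is a minimal sketch of pushing metrics to this endpoint with the OpenTelemetry Go SDK.
+It assumes a single-node VictoriaMetrics at `localhost:8428`; the `otlpmetrichttp` exporter options
+reflect the SDK at the time of writing and may change between SDK versions:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	ctx := context.Background()
+	// The exporter sends protobuf-encoded OTLP metrics over HTTP and
+	// gzip-compresses request bodies (Content-Encoding: gzip).
+	exporter, err := otlpmetrichttp.New(ctx,
+		otlpmetrichttp.WithEndpoint("localhost:8428"),
+		otlpmetrichttp.WithURLPath("/opentelemetry/api/v1/push"),
+		otlpmetrichttp.WithInsecure(),
+		otlpmetrichttp.WithCompression(otlpmetrichttp.GzipCompression),
+	)
+	if err != nil {
+		log.Fatalf("cannot create OTLP exporter: %s", err)
+	}
+	mp := sdkmetric.NewMeterProvider(
+		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exporter)),
+	)
+	// Shutdown flushes buffered metrics to VictoriaMetrics.
+	defer func() {
+		if err := mp.Shutdown(ctx); err != nil {
+			log.Fatalf("cannot shutdown meter provider: %s", err)
+		}
+	}()
+
+	counter, err := mp.Meter("example").Int64Counter("example_requests_total")
+	if err != nil {
+		log.Fatalf("cannot create counter: %s", err)
+	}
+	counter.Add(ctx, 1)
+}
+```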
+
 ## Relabeling
 
 VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points
diff --git a/app/vmagent/README.md b/app/vmagent/README.md
index 7f2498ce40..4009082050 100644
--- a/app/vmagent/README.md
+++ b/app/vmagent/README.md
@@ -93,6 +93,7 @@ additionally to pull-based Prometheus-compatible targets' scraping:
 * DataDog "submit metrics" API. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent).
 * InfluxDB line protocol via `http://<vmagent>:8429/write`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf).
 * Graphite plaintext protocol if `-graphiteListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-graphite-compatible-agents-such-as-statsd).
+* OpenTelemetry http API. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-data-via-opentelemetry).
 * OpenTSDB telnet and http protocols if `-opentsdbListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-opentsdb-compatible-agents).
 * Prometheus remote write protocol via `http://<vmagent>:8429/api/v1/write`.
 * JSON lines import protocol via `http://<vmagent>:8429/api/v1/import`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-json-line-format).
diff --git a/app/vmagent/main.go b/app/vmagent/main.go
index a0db615b89..51fc5cf6b4 100644
--- a/app/vmagent/main.go
+++ b/app/vmagent/main.go
@@ -16,6 +16,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/graphite"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/influx"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/native"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/opentelemetry"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/opentsdb"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/opentsdbhttp"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/prometheusimport"
@@ -308,6 +309,15 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
 		influxQueryRequests.Inc()
 		influxutils.WriteDatabaseNames(w)
 		return true
+	case "/opentelemetry/api/v1/push":
+		opentelemetryPushRequests.Inc()
+		if err := opentelemetry.InsertHandler(nil, r); err != nil {
+			opentelemetryPushErrors.Inc()
+			httpserver.Errorf(w, r, "%s", err)
+			return true
+		}
+		w.WriteHeader(http.StatusOK)
+		return true
 	case "/datadog/api/v1/series":
 		datadogWriteRequests.Inc()
 		if err := datadog.InsertHandlerForHTTP(nil, r); err != nil {
@@ -499,6 +509,15 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri
 		influxQueryRequests.Inc()
 		influxutils.WriteDatabaseNames(w)
 		return true
+	case "opentelemetry/api/v1/push":
+		opentelemetryPushRequests.Inc()
+		if err := opentelemetry.InsertHandler(at, r); err != nil {
+			opentelemetryPushErrors.Inc()
+			httpserver.Errorf(w, r, "%s", err)
+			return true
+		}
+		w.WriteHeader(http.StatusOK)
+		return true
 	case "datadog/api/v1/series":
 		datadogWriteRequests.Inc()
 		if err := datadog.InsertHandlerForHTTP(at, r); err != nil {
@@ -568,6 +587,9 @@ var (
 	datadogIntakeRequests   = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/intake", protocol="datadog"}`)
 	datadogMetadataRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/v1/metadata", protocol="datadog"}`)
 
+	opentelemetryPushRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)
+	opentelemetryPushErrors   = metrics.NewCounter(`vmagent_http_request_errors_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)
+
 	promscrapeTargetsRequests          = metrics.NewCounter(`vmagent_http_requests_total{path="/targets"}`)
 	promscrapeServiceDiscoveryRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/service-discovery"}`)
 
diff --git a/app/vmagent/opentelemetry/request_handler.go b/app/vmagent/opentelemetry/request_handler.go
new file mode 100644
index 0000000000..047978149e
--- /dev/null
+++ b/app/vmagent/opentelemetry/request_handler.go
@@ -0,0 +1,65 @@
+package opentelemetry
+
+import (
+	"net/http"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/stream"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
+	"github.com/VictoriaMetrics/metrics"
+)
+
+var (
+	rowsInserted       = metrics.NewCounter(`vmagent_rows_inserted_total{type="opentelemetry"}`)
+	rowsTenantInserted = tenantmetrics.NewCounterMap(`vmagent_tenant_inserted_rows_total{type="opentelemetry"}`)
+	rowsPerInsert      = metrics.NewHistogram(`vmagent_rows_per_insert{type="opentelemetry"}`)
+)
+
+// InsertHandler processes OpenTelemetry metrics.
+func InsertHandler(at *auth.Token, req *http.Request) error {
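+	// optional labels from the `extra_label` query args are applied to all the ingested series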
+	extraLabels, err := parserCommon.GetExtraLabels(req)
+	if err != nil {
+		return err
+	}
+	isGzipped := req.Header.Get("Content-Encoding") == "gzip"
+	return stream.ParseStream(req.Body, isGzipped, func(tss []prompbmarshal.TimeSeries) error {
+		return insertRows(at, tss, extraLabels)
+	})
+}
+
+func insertRows(at *auth.Token, tss []prompbmarshal.TimeSeries, extraLabels []prompbmarshal.Label) error {
+	ctx := common.GetPushCtx()
+	defer common.PutPushCtx(ctx)
+
+	rowsTotal := 0
+	tssDst := ctx.WriteRequest.Timeseries[:0]
+	labels := ctx.Labels[:0]
+	samples := ctx.Samples[:0]
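+	// The ctx buffers are reused between requests. Each destination time series
+	// below references sub-slices of the shared labels and samples buffers
+	// in order to reduce per-series allocations.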
+	for i := range tss {
+		ts := &tss[i]
+		rowsTotal += len(ts.Samples)
+		labelsLen := len(labels)
+		labels = append(labels, ts.Labels...)
+		labels = append(labels, extraLabels...)
+		samplesLen := len(samples)
+		samples = append(samples, ts.Samples...)
+		tssDst = append(tssDst, prompbmarshal.TimeSeries{
+			Labels:  labels[labelsLen:],
+			Samples: samples[samplesLen:],
+		})
+	}
+	ctx.WriteRequest.Timeseries = tssDst
+	ctx.Labels = labels
+	ctx.Samples = samples
+	remotewrite.Push(at, &ctx.WriteRequest)
+	rowsInserted.Add(rowsTotal)
+	if at != nil {
+		rowsTenantInserted.Get(at).Add(rowsTotal)
+	}
+	rowsPerInsert.Update(float64(rowsTotal))
+	return nil
+}
diff --git a/app/vmalert/Makefile b/app/vmalert/Makefile
index 4a27eac1c7..37787e5c17 100644
--- a/app/vmalert/Makefile
+++ b/app/vmalert/Makefile
@@ -78,8 +78,9 @@ test-vmalert:
 
 run-vmalert: vmalert
 	./bin/vmalert -rule=app/vmalert/config/testdata/rules/rules2-good.rules \
-		-datasource.url=http://demo.robustperception.io:9090 \
-		-notifier.blackhole \
+		-datasource.url=http://localhost:8428 \
+		-notifier.url=http://localhost:9093 \
+		-notifier.url=http://127.0.0.1:9093 \
 		-remoteWrite.url=http://localhost:8428 \
 		-remoteRead.url=http://localhost:8428 \
 		-external.label=cluster=east-1 \
diff --git a/app/vmalert/README.md b/app/vmalert/README.md
index 43332d03d5..68c375cf6b 100644
--- a/app/vmalert/README.md
+++ b/app/vmalert/README.md
@@ -203,6 +203,10 @@ expr: <string>
 # as firing once they return.
 [ for: <duration> | default = 0s ]
 
+# Alert will continue firing for this long even when the alerting expression no longer has results.
+# This allows you to delay alert resolution.
+[ keep_firing_for: <duration> | default = 0s ]
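+#
+# For example, the following rule (alert name and expression are illustrative only)
+# stays firing for 10 minutes after its expression stops returning results:
+#
+#   alert: ManyFailedRequests
+#   expr: rate(failed_requests_total[5m]) > 0.1
+#   for: 5m
+#   keep_firing_for: 10m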
+
 # Whether to print debug information into logs.
 # Information includes alerts state changes and requests sent to the datasource.
 # Please note, that if rule's query params contain sensitive
@@ -357,19 +361,24 @@ For recording rules to work `-remoteWrite.url` must be specified.
 
 ### Alerts state on restarts
 
-`vmalert` has no local storage, so alerts state is stored in the process memory. Hence, after restart of `vmalert`
-the process alerts state will be lost. To avoid this situation, `vmalert` should be configured via the following flags:
+`vmalert` is stateless; it holds the alerts state in process memory. Restarting the `vmalert` process
+resets the alerts state in memory. To prevent `vmalert` from losing the alerts state, it should be configured
+to persist the state to a remote destination via the following flags:
 
 * `-remoteWrite.url` - URL to VictoriaMetrics (Single) or vminsert (Cluster). `vmalert` will persist alerts state
-  into the configured address in the form of time series named `ALERTS` and `ALERTS_FOR_STATE` via remote-write protocol.
-  These are regular time series and maybe queried from VM just as any other time series.
-  The state is stored to the configured address on every rule evaluation.
+  to the configured address in the form of [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series)
+  `ALERTS` and `ALERTS_FOR_STATE` via remote-write protocol.
+  These time series can be queried from VictoriaMetrics just as any other time series.
+  The state will be persisted to the configured address on each evaluation.
 * `-remoteRead.url` - URL to VictoriaMetrics (Single) or vmselect (Cluster). `vmalert` will try to restore alerts state
-  from configured address by querying time series with name `ALERTS_FOR_STATE`.
+  from the configured address by querying the `ALERTS_FOR_STATE` time series. The restore happens only once,
+  when the `vmalert` process starts, and only for the configured rules. Config [hot reload](#hot-config-reload)
+  doesn't trigger state restore.
 
 Both flags are required for proper state restoration. Restore process may fail if time series are missing
 in configured `-remoteRead.url`, weren't updated in the last `1h` (controlled by `-remoteRead.lookback`)
-or received state doesn't match current `vmalert` rules configuration.
+or the received state doesn't match the current `vmalert` rules configuration. `vmalert` marks successfully
+restored rules with the `restored` label in the [web UI](#WEB).
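+
+For example, the following command starts `vmalert` with both state persistence and state restore enabled
+(the rule file name is illustrative; a single-node VictoriaMetrics at `localhost:8428` is assumed):
+
+```console
+./bin/vmalert -rule=alerts.rules \
+    -datasource.url=http://localhost:8428 \
+    -remoteWrite.url=http://localhost:8428 \
+    -remoteRead.url=http://localhost:8428
+```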
 
 ### Multitenancy
 
@@ -731,6 +740,7 @@ See full description for these flags in `./vmalert -help`.
 * Graphite engine isn't supported yet;
 * `query` template function is disabled for performance reasons (might be changed in future);
 * `limit` group's param has no effect during replay (might be changed in future);
+* `keep_firing_for` alerting rule param has no effect during replay (might be changed in future).
 
 ## Unit Testing for Rules
 
diff --git a/app/vmalert/alerting.go b/app/vmalert/alerting.go
index e8ab1816b5..779423b6f7 100644
--- a/app/vmalert/alerting.go
+++ b/app/vmalert/alerting.go
@@ -21,17 +21,18 @@ import (
 
 // AlertingRule is basic alert entity
 type AlertingRule struct {
-	Type         config.Type
-	RuleID       uint64
-	Name         string
-	Expr         string
-	For          time.Duration
-	Labels       map[string]string
-	Annotations  map[string]string
-	GroupID      uint64
-	GroupName    string
-	EvalInterval time.Duration
-	Debug        bool
+	Type          config.Type
+	RuleID        uint64
+	Name          string
+	Expr          string
+	For           time.Duration
+	KeepFiringFor time.Duration
+	Labels        map[string]string
+	Annotations   map[string]string
+	GroupID       uint64
+	GroupName     string
+	EvalInterval  time.Duration
+	Debug         bool
 
 	q datasource.Querier
 
@@ -56,17 +57,18 @@ type alertingRuleMetrics struct {
 
 func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule) *AlertingRule {
 	ar := &AlertingRule{
-		Type:         group.Type,
-		RuleID:       cfg.ID,
-		Name:         cfg.Alert,
-		Expr:         cfg.Expr,
-		For:          cfg.For.Duration(),
-		Labels:       cfg.Labels,
-		Annotations:  cfg.Annotations,
-		GroupID:      group.ID(),
-		GroupName:    group.Name,
-		EvalInterval: group.Interval,
-		Debug:        cfg.Debug,
+		Type:          group.Type,
+		RuleID:        cfg.ID,
+		Name:          cfg.Alert,
+		Expr:          cfg.Expr,
+		For:           cfg.For.Duration(),
+		KeepFiringFor: cfg.KeepFiringFor.Duration(),
+		Labels:        cfg.Labels,
+		Annotations:   cfg.Annotations,
+		GroupID:       group.ID(),
+		GroupName:     group.Name,
+		EvalInterval:  group.Interval,
+		Debug:         cfg.Debug,
 		q: qb.BuildWithParams(datasource.QuerierParams{
 			DataSourceType:     group.Type.String(),
 			EvaluationInterval: group.Interval,
@@ -366,6 +368,7 @@ func (ar *AlertingRule) Exec(ctx context.Context, ts time.Time, limit int) ([]pr
 			if err != nil {
 				return nil, err
 			}
+			a.KeepFiringSince = time.Time{}
 			continue
 		}
 		a, err := ar.newAlert(m, ls, start, qFn)
@@ -391,12 +394,24 @@ func (ar *AlertingRule) Exec(ctx context.Context, ts time.Time, limit int) ([]pr
 				ar.logDebugf(ts, a, "PENDING => DELETED: is absent in current evaluation round")
 				continue
 			}
+			// check whether the alert should keep StateFiring
+			// if the rule has `keep_firing_for` configured
 			if a.State == notifier.StateFiring {
-				a.State = notifier.StateInactive
-				a.ResolvedAt = ts
-				ar.logDebugf(ts, a, "FIRING => INACTIVE: is absent in current evaluation round")
+				if ar.KeepFiringFor > 0 {
+					if a.KeepFiringSince.IsZero() {
+						a.KeepFiringSince = ts
+					}
+				}
+				// alerts with ar.KeepFiringFor>0 may remain FIRING
+				// even if their expression isn't true anymore
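+				// when ar.KeepFiringFor is 0, a.KeepFiringSince remains zero,
+				// so the condition below is true and the alert resolves immediately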
+				if ts.Sub(a.KeepFiringSince) > ar.KeepFiringFor {
+					a.State = notifier.StateInactive
+					a.ResolvedAt = ts
+					ar.logDebugf(ts, a, "FIRING => INACTIVE: is absent in current evaluation round")
+					continue
+				}
+				ar.logDebugf(ts, a, "KEEP_FIRING: will keep firing for %fs since %v", ar.KeepFiringFor.Seconds(), a.KeepFiringSince)
 			}
-			continue
 		}
 		numActivePending++
 		if a.State == notifier.StatePending && ts.Sub(a.ActiveAt) >= ar.For {
@@ -436,6 +451,7 @@ func (ar *AlertingRule) UpdateWith(r Rule) error {
 	}
 	ar.Expr = nr.Expr
 	ar.For = nr.For
+	ar.KeepFiringFor = nr.KeepFiringFor
 	ar.Labels = nr.Labels
 	ar.Annotations = nr.Annotations
 	ar.EvalInterval = nr.EvalInterval
@@ -508,6 +524,7 @@ func (ar *AlertingRule) ToAPI() APIRule {
 		Name:              ar.Name,
 		Query:             ar.Expr,
 		Duration:          ar.For.Seconds(),
+		KeepFiringFor:     ar.KeepFiringFor.Seconds(),
 		Labels:            ar.Labels,
 		Annotations:       ar.Annotations,
 		LastEvaluation:    lastState.time,
@@ -576,6 +593,9 @@ func (ar *AlertingRule) newAlertAPI(a notifier.Alert) *APIAlert {
 	if alertURLGeneratorFn != nil {
 		aa.SourceLink = alertURLGeneratorFn(a)
 	}
+	if a.State == notifier.StateFiring && !a.KeepFiringSince.IsZero() {
+		aa.Stabilizing = true
+	}
 	return aa
 }
 
diff --git a/app/vmalert/alerting_test.go b/app/vmalert/alerting_test.go
index d05305c90d..5f8f725588 100644
--- a/app/vmalert/alerting_test.go
+++ b/app/vmalert/alerting_test.go
@@ -113,7 +113,7 @@ func TestAlertingRule_Exec(t *testing.T) {
 	testCases := []struct {
 		rule      *AlertingRule
 		steps     [][]datasource.Metric
-		expAlerts []testAlert
+		expAlerts map[int][]testAlert
 	}{
 		{
 			newTestAlertingRule("empty", 0),
@@ -125,50 +125,8 @@ func TestAlertingRule_Exec(t *testing.T) {
 			[][]datasource.Metric{
 				{datasource.Metric{Values: []float64{1}, Timestamps: []int64{1}}},
 			},
-			[]testAlert{
-				{alert: &notifier.Alert{State: notifier.StateFiring}},
-			},
-		},
-		{
-			newTestAlertingRule("single-firing", 0),
-			[][]datasource.Metric{
-				{metricWithLabels(t, "name", "foo")},
-			},
-			[]testAlert{
-				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
-			},
-		},
-		{
-			newTestAlertingRule("single-firing=>inactive", 0),
-			[][]datasource.Metric{
-				{metricWithLabels(t, "name", "foo")},
-				{},
-			},
-			[]testAlert{
-				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
-			},
-		},
-		{
-			newTestAlertingRule("single-firing=>inactive=>firing", 0),
-			[][]datasource.Metric{
-				{metricWithLabels(t, "name", "foo")},
-				{},
-				{metricWithLabels(t, "name", "foo")},
-			},
-			[]testAlert{
-				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
-			},
-		},
-		{
-			newTestAlertingRule("single-firing=>inactive=>firing=>inactive", 0),
-			[][]datasource.Metric{
-				{metricWithLabels(t, "name", "foo")},
-				{},
-				{metricWithLabels(t, "name", "foo")},
-				{},
-			},
-			[]testAlert{
-				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
+			map[int][]testAlert{
+				0: {{alert: &notifier.Alert{State: notifier.StateFiring}}},
 			},
 		},
 		{
@@ -180,12 +138,16 @@ func TestAlertingRule_Exec(t *testing.T) {
 				{},
 				{},
 			},
-			[]testAlert{
-				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
+			map[int][]testAlert{
+				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
+				1: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
+				2: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
+				3: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
+				4: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
 			},
 		},
 		{
-			newTestAlertingRule("single-firing=>inactive=>firing=>inactive=>empty=>firing", 0),
+			newTestAlertingRule("single-firing=>inactive=>firing=>inactive=>inactive=>firing", 0),
 			[][]datasource.Metric{
 				{metricWithLabels(t, "name", "foo")},
 				{},
@@ -194,8 +156,13 @@ func TestAlertingRule_Exec(t *testing.T) {
 				{},
 				{metricWithLabels(t, "name", "foo")},
 			},
-			[]testAlert{
-				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
+			map[int][]testAlert{
+				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
+				1: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
+				2: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
+				3: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
+				4: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
+				5: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
 			},
 		},
 		{
@@ -207,10 +174,12 @@ func TestAlertingRule_Exec(t *testing.T) {
 					metricWithLabels(t, "name", "foo2"),
 				},
 			},
-			[]testAlert{
-				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
-				{labels: []string{"name", "foo1"}, alert: &notifier.Alert{State: notifier.StateFiring}},
-				{labels: []string{"name", "foo2"}, alert: &notifier.Alert{State: notifier.StateFiring}},
+			map[int][]testAlert{
+				0: {
+					{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
+					{labels: []string{"name", "foo1"}, alert: &notifier.Alert{State: notifier.StateFiring}},
+					{labels: []string{"name", "foo2"}, alert: &notifier.Alert{State: notifier.StateFiring}},
+				},
 			},
 		},
 		{
@@ -223,10 +192,19 @@ func TestAlertingRule_Exec(t *testing.T) {
 			// 1: fire first alert
 			// 2: fire second alert, set first inactive
 			// 3: fire third alert, set second inactive
-			[]testAlert{
-				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
-				{labels: []string{"name", "foo1"}, alert: &notifier.Alert{State: notifier.StateInactive}},
-				{labels: []string{"name", "foo2"}, alert: &notifier.Alert{State: notifier.StateFiring}},
+			map[int][]testAlert{
+				0: {
+					{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
+				},
+				1: {
+					{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
+					{labels: []string{"name", "foo1"}, alert: &notifier.Alert{State: notifier.StateFiring}},
+				},
+				2: {
+					{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
+					{labels: []string{"name", "foo1"}, alert: &notifier.Alert{State: notifier.StateInactive}},
+					{labels: []string{"name", "foo2"}, alert: &notifier.Alert{State: notifier.StateFiring}},
+				},
 			},
 		},
 		{
@@ -234,8 +212,8 @@ func TestAlertingRule_Exec(t *testing.T) {
 			[][]datasource.Metric{
 				{metricWithLabels(t, "name", "foo")},
 			},
-			[]testAlert{
-				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}},
+			map[int][]testAlert{
+				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
 			},
 		},
 		{
@@ -244,8 +222,9 @@ func TestAlertingRule_Exec(t *testing.T) {
 				{metricWithLabels(t, "name", "foo")},
 				{metricWithLabels(t, "name", "foo")},
 			},
-			[]testAlert{
-				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
+			map[int][]testAlert{
+				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
+				1: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
 			},
 		},
 		{
@@ -253,34 +232,13 @@ func TestAlertingRule_Exec(t *testing.T) {
 			[][]datasource.Metric{
 				{metricWithLabels(t, "name", "foo")},
 				{metricWithLabels(t, "name", "foo")},
-				// empty step to reset and delete pending alerts
+				// empty step to delete pending alerts
 				{},
 			},
-			nil,
-		},
-		{
-			newTestAlertingRule("for-pending=>firing=>inactive", defaultStep),
-			[][]datasource.Metric{
-				{metricWithLabels(t, "name", "foo")},
-				{metricWithLabels(t, "name", "foo")},
-				// empty step to reset pending alerts
-				{},
-			},
-			[]testAlert{
-				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}},
-			},
-		},
-		{
-			newTestAlertingRule("for-pending=>firing=>inactive=>pending", defaultStep),
-			[][]datasource.Metric{
-				{metricWithLabels(t, "name", "foo")},
-				{metricWithLabels(t, "name", "foo")},
-				// empty step to reset pending alerts
-				{},
-				{metricWithLabels(t, "name", "foo")},
-			},
-			[]testAlert{
-				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}},
+			map[int][]testAlert{
+				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
+				1: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
+				2: {},
 			},
 		},
 		{
@@ -288,13 +246,57 @@ func TestAlertingRule_Exec(t *testing.T) {
 			[][]datasource.Metric{
 				{metricWithLabels(t, "name", "foo")},
 				{metricWithLabels(t, "name", "foo")},
-				// empty step to reset pending alerts
+				// empty step to set alert inactive
 				{},
 				{metricWithLabels(t, "name", "foo")},
 				{metricWithLabels(t, "name", "foo")},
 			},
-			[]testAlert{
-				{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}},
+			map[int][]testAlert{
+				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
+				1: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
+				2: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
+				3: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
+				4: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
+			},
+		},
+		{
+			newTestAlertingRuleWithKeepFiring("for-pending=>firing=>keepfiring=>firing", defaultStep, defaultStep),
+			[][]datasource.Metric{
+				{metricWithLabels(t, "name", "foo")},
+				{metricWithLabels(t, "name", "foo")},
+				// empty step to keep firing
+				{},
+				{metricWithLabels(t, "name", "foo")},
+			},
+			map[int][]testAlert{
+				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
+				1: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
+				2: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
+				3: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
+			},
+		},
+		{
+			newTestAlertingRuleWithKeepFiring("for-pending=>firing=>keepfiring=>keepfiring=>inactive=>pending=>firing", defaultStep, 2*defaultStep),
+			[][]datasource.Metric{
+				{metricWithLabels(t, "name", "foo")},
+				{metricWithLabels(t, "name", "foo")},
+				// empty step to keep firing
+				{},
+				// another empty step to keep firing
+				{},
+				// empty step to set alert inactive
+				{},
+				{metricWithLabels(t, "name", "foo")},
+				{metricWithLabels(t, "name", "foo")},
+			},
+			map[int][]testAlert{
+				0: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
+				1: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
+				2: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
+				3: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
+				4: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateInactive}}},
+				5: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StatePending}}},
+				6: {{labels: []string{"name", "foo"}, alert: &notifier.Alert{State: notifier.StateFiring}}},
 			},
 		},
 	}
@@ -304,7 +306,7 @@ func TestAlertingRule_Exec(t *testing.T) {
 			fq := &fakeQuerier{}
 			tc.rule.q = fq
 			tc.rule.GroupID = fakeGroup.ID()
-			for _, step := range tc.steps {
+			for i, step := range tc.steps {
 				fq.reset()
 				fq.add(step...)
 				if _, err := tc.rule.Exec(context.TODO(), time.Now(), 0); err != nil {
@@ -312,28 +314,31 @@ func TestAlertingRule_Exec(t *testing.T) {
 				}
 				// artificial delay between applying steps
 				time.Sleep(defaultStep)
-			}
-			if len(tc.rule.alerts) != len(tc.expAlerts) {
-				t.Fatalf("expected %d alerts; got %d", len(tc.expAlerts), len(tc.rule.alerts))
-			}
-			expAlerts := make(map[uint64]*notifier.Alert)
-			for _, ta := range tc.expAlerts {
-				labels := make(map[string]string)
-				for i := 0; i < len(ta.labels); i += 2 {
-					k, v := ta.labels[i], ta.labels[i+1]
-					labels[k] = v
+				if _, ok := tc.expAlerts[i]; !ok {
+					continue
 				}
-				labels[alertNameLabel] = tc.rule.Name
-				h := hash(labels)
-				expAlerts[h] = ta.alert
-			}
-			for key, exp := range expAlerts {
-				got, ok := tc.rule.alerts[key]
-				if !ok {
-					t.Fatalf("expected to have key %d", key)
+				if len(tc.rule.alerts) != len(tc.expAlerts[i]) {
+					t.Fatalf("evalIndex %d: expected %d alerts; got %d", i, len(tc.expAlerts[i]), len(tc.rule.alerts))
 				}
-				if got.State != exp.State {
-					t.Fatalf("expected state %d; got %d", exp.State, got.State)
+				expAlerts := make(map[uint64]*notifier.Alert)
+				for _, ta := range tc.expAlerts[i] {
+					labels := make(map[string]string)
+					for i := 0; i < len(ta.labels); i += 2 {
+						k, v := ta.labels[i], ta.labels[i+1]
+						labels[k] = v
+					}
+					labels[alertNameLabel] = tc.rule.Name
+					h := hash(labels)
+					expAlerts[h] = ta.alert
+				}
+				for key, exp := range expAlerts {
+					got, ok := tc.rule.alerts[key]
+					if !ok {
+						t.Fatalf("evalIndex %d: expected to have key %d", i, key)
+					}
+					if got.State != exp.State {
+						t.Fatalf("evalIndex %d: expected state %d; got %d", i, exp.State, got.State)
+					}
 				}
 			}
 		})
@@ -970,11 +975,18 @@ func newTestRuleWithLabels(name string, labels ...string) *AlertingRule {
 }
 
 func newTestAlertingRule(name string, waitFor time.Duration) *AlertingRule {
-	return &AlertingRule{
+	rule := AlertingRule{
 		Name:         name,
 		For:          waitFor,
 		EvalInterval: waitFor,
 		alerts:       make(map[uint64]*notifier.Alert),
 		state:        newRuleState(10),
 	}
+	return &rule
+}
+
+func newTestAlertingRuleWithKeepFiring(name string, waitFor, keepFiringFor time.Duration) *AlertingRule {
+	rule := newTestAlertingRule(name, waitFor)
+	rule.KeepFiringFor = keepFiringFor
+	return rule
 }
diff --git a/app/vmalert/config/config.go b/app/vmalert/config/config.go
index 0fc201ef90..7719974685 100644
--- a/app/vmalert/config/config.go
+++ b/app/vmalert/config/config.go
@@ -105,14 +105,16 @@ func (g *Group) Validate(validateTplFn ValidateTplFn, validateExpressions bool)
 // Rule describes entity that represent either
 // recording rule or alerting rule.
 type Rule struct {
-	ID          uint64
-	Record      string              `yaml:"record,omitempty"`
-	Alert       string              `yaml:"alert,omitempty"`
-	Expr        string              `yaml:"expr"`
-	For         *promutils.Duration `yaml:"for,omitempty"`
-	Labels      map[string]string   `yaml:"labels,omitempty"`
-	Annotations map[string]string   `yaml:"annotations,omitempty"`
-	Debug       bool                `yaml:"debug,omitempty"`
+	ID     uint64
+	Record string              `yaml:"record,omitempty"`
+	Alert  string              `yaml:"alert,omitempty"`
+	Expr   string              `yaml:"expr"`
+	For    *promutils.Duration `yaml:"for,omitempty"`
+	// Alert will continue firing for this long even when the alerting expression no longer has results.
+	KeepFiringFor *promutils.Duration `yaml:"keep_firing_for,omitempty"`
+	Labels        map[string]string   `yaml:"labels,omitempty"`
+	Annotations   map[string]string   `yaml:"annotations,omitempty"`
+	Debug         bool                `yaml:"debug,omitempty"`
 	// UpdateEntriesLimit defines max number of rule's state updates stored in memory.
 	// Overrides `-rule.updateEntriesLimit`.
 	UpdateEntriesLimit *int `yaml:"update_entries_limit,omitempty"`
diff --git a/app/vmalert/config/config_test.go b/app/vmalert/config/config_test.go
index 90df89d8b4..b548f55f13 100644
--- a/app/vmalert/config/config_test.go
+++ b/app/vmalert/config/config_test.go
@@ -404,7 +404,7 @@ func TestHashRule(t *testing.T) {
 			true,
 		},
 		{
-			Rule{Alert: "alert", Expr: "up == 1", For: promutils.NewDuration(time.Minute)},
+			Rule{Alert: "alert", Expr: "up == 1", For: promutils.NewDuration(time.Minute), KeepFiringFor: promutils.NewDuration(time.Minute)},
 			Rule{Alert: "alert", Expr: "up == 1"},
 			true,
 		},
diff --git a/app/vmalert/group_test.go b/app/vmalert/group_test.go
index 29199a4bd8..c166b61b49 100644
--- a/app/vmalert/group_test.go
+++ b/app/vmalert/group_test.go
@@ -46,18 +46,36 @@ func TestUpdateWith(t *testing.T) {
 					"summary":     "{{ $value|humanize }}",
 					"description": "{{$labels}}",
 				},
-			}},
-			[]config.Rule{{
-				Alert: "foo",
-				Expr:  "up > 10",
-				For:   promutils.NewDuration(time.Second),
-				Labels: map[string]string{
-					"baz": "bar",
+			},
+				{
+					Alert: "bar",
+					Expr:  "up > 0",
+					For:   promutils.NewDuration(time.Second),
+					Labels: map[string]string{
+						"bar": "baz",
+					},
+				}},
+			[]config.Rule{
+				{
+					Alert: "foo",
+					Expr:  "up > 10",
+					For:   promutils.NewDuration(time.Second),
+					Labels: map[string]string{
+						"baz": "bar",
+					},
+					Annotations: map[string]string{
+						"summary": "none",
+					},
 				},
-				Annotations: map[string]string{
-					"summary": "none",
-				},
-			}},
+				{
+					Alert:         "bar",
+					Expr:          "up > 0",
+					For:           promutils.NewDuration(2 * time.Second),
+					KeepFiringFor: promutils.NewDuration(time.Minute),
+					Labels: map[string]string{
+						"bar": "baz",
+					},
+				}},
 		},
 		{
 			"update recording rule",
diff --git a/app/vmalert/helpers_test.go b/app/vmalert/helpers_test.go
index de0db34620..95e7c9f444 100644
--- a/app/vmalert/helpers_test.go
+++ b/app/vmalert/helpers_test.go
@@ -272,6 +272,9 @@ func compareAlertingRules(t *testing.T, a, b *AlertingRule) error {
 	if a.For != b.For {
 		return fmt.Errorf("expected to have for %q; got %q", a.For, b.For)
 	}
+	if a.KeepFiringFor != b.KeepFiringFor {
+		return fmt.Errorf("expected to have KeepFiringFor %q; got %q", a.KeepFiringFor, b.KeepFiringFor)
+	}
 	if !reflect.DeepEqual(a.Annotations, b.Annotations) {
 		return fmt.Errorf("expected to have annotations %#v; got %#v", a.Annotations, b.Annotations)
 	}
diff --git a/app/vmalert/notifier/alert.go b/app/vmalert/notifier/alert.go
index 9be76aeedd..3bdd3a8f85 100644
--- a/app/vmalert/notifier/alert.go
+++ b/app/vmalert/notifier/alert.go
@@ -39,6 +39,8 @@ type Alert struct {
 	ResolvedAt time.Time
 	// LastSent defines the moment when Alert was sent last time
 	LastSent time.Time
+	// KeepFiringSince defines the moment when the alert's StateFiring was retained
+	// because of `keep_firing_for` rather than by a matching evaluation result
+	KeepFiringSince time.Time
 	// Value stores the value returned from evaluating expression from Expr field
 	Value float64
 	// ID is the unique identifier for the Alert
diff --git a/app/vmalert/web.qtpl b/app/vmalert/web.qtpl
index 1b412c7c30..2cbf0a2ce7 100644
--- a/app/vmalert/web.qtpl
+++ b/app/vmalert/web.qtpl
@@ -116,7 +116,11 @@ btn-primary
                                     <div class="row">
                                         <div class="col-12 mb-2">
                                             {% if r.Type == "alerting" %}
+                                            {% if r.KeepFiringFor > 0 %}
+                                            <b>alert:</b> {%s r.Name %} (for: {%v r.Duration %} seconds, keep_firing_for: {%v r.KeepFiringFor %} seconds)
+                                            {% else %}
                                             <b>alert:</b> {%s r.Name %} (for: {%v r.Duration %} seconds)
+                                            {% endif %}
                                             {% else %}
                                             <b>record:</b> {%s r.Name %}
                                             {% endif %}
@@ -225,6 +229,7 @@ btn-primary
                                 <td>
                                     {%s ar.ActiveAt.Format("2006-01-02T15:04:05Z07:00") %}
                                     {% if ar.Restored %}{%= badgeRestored() %}{% endif %}
+                                    {% if ar.Stabilizing %}{%= badgeStabilizing() %}{% endif %}
                                 </td>
                                 <td>{%s ar.Value %}</td>
                                 <td>
@@ -442,6 +447,18 @@ btn-primary
         </div>
       </div>
     </div>
+    {% if rule.KeepFiringFor > 0 %}
+    <div class="container border-bottom p-2">
+      <div class="row">
+        <div class="col-2">
+          Keep firing for
+        </div>
+        <div class="col">
+         {%v rule.KeepFiringFor %} seconds
+        </div>
+      </div>
+    </div>
+    {% endif %}
     {% endif %}
     <div class="container border-bottom p-2">
       <div class="row">
@@ -561,6 +578,10 @@ btn-primary
 <span class="badge bg-warning text-dark" title="Alert state was restored after the service restart from remote storage">restored</span>
 {% endfunc %}
 
+{% func badgeStabilizing() %}
+<span class="badge bg-warning text-dark" title="This firing state is kept because of `keep_firing_for`">stabilizing</span>
+{% endfunc %}
+
 {% func seriesFetchedWarn(r APIRule) %}
 {% if isNoMatch(r) %}
 <svg xmlns="http://www.w3.org/2000/svg"
diff --git a/app/vmalert/web.qtpl.go b/app/vmalert/web.qtpl.go
index 97999a74f8..0fa83c873c 100644
--- a/app/vmalert/web.qtpl.go
+++ b/app/vmalert/web.qtpl.go
@@ -438,251 +438,279 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, originGroups [
 				if r.Type == "alerting" {
 //line app/vmalert/web.qtpl:118
 					qw422016.N().S(`
-                                            <b>alert:</b> `)
-//line app/vmalert/web.qtpl:119
-					qw422016.E().S(r.Name)
-//line app/vmalert/web.qtpl:119
-					qw422016.N().S(` (for: `)
-//line app/vmalert/web.qtpl:119
-					qw422016.E().V(r.Duration)
-//line app/vmalert/web.qtpl:119
-					qw422016.N().S(` seconds)
                                             `)
+//line app/vmalert/web.qtpl:119
+					if r.KeepFiringFor > 0 {
+//line app/vmalert/web.qtpl:119
+						qw422016.N().S(`
+                                            <b>alert:</b> `)
 //line app/vmalert/web.qtpl:120
+						qw422016.E().S(r.Name)
+//line app/vmalert/web.qtpl:120
+						qw422016.N().S(` (for: `)
+//line app/vmalert/web.qtpl:120
+						qw422016.E().V(r.Duration)
+//line app/vmalert/web.qtpl:120
+						qw422016.N().S(` seconds, keep_firing_for: `)
+//line app/vmalert/web.qtpl:120
+						qw422016.E().V(r.KeepFiringFor)
+//line app/vmalert/web.qtpl:120
+						qw422016.N().S(` seconds)
+                                            `)
+//line app/vmalert/web.qtpl:121
+					} else {
+//line app/vmalert/web.qtpl:121
+						qw422016.N().S(`
+                                            <b>alert:</b> `)
+//line app/vmalert/web.qtpl:122
+						qw422016.E().S(r.Name)
+//line app/vmalert/web.qtpl:122
+						qw422016.N().S(` (for: `)
+//line app/vmalert/web.qtpl:122
+						qw422016.E().V(r.Duration)
+//line app/vmalert/web.qtpl:122
+						qw422016.N().S(` seconds)
+                                            `)
+//line app/vmalert/web.qtpl:123
+					}
+//line app/vmalert/web.qtpl:123
+					qw422016.N().S(`
+                                            `)
+//line app/vmalert/web.qtpl:124
 				} else {
-//line app/vmalert/web.qtpl:120
+//line app/vmalert/web.qtpl:124
 					qw422016.N().S(`
                                             <b>record:</b> `)
-//line app/vmalert/web.qtpl:121
+//line app/vmalert/web.qtpl:125
 					qw422016.E().S(r.Name)
-//line app/vmalert/web.qtpl:121
+//line app/vmalert/web.qtpl:125
 					qw422016.N().S(`
                                             `)
-//line app/vmalert/web.qtpl:122
+//line app/vmalert/web.qtpl:126
 				}
-//line app/vmalert/web.qtpl:122
+//line app/vmalert/web.qtpl:126
 				qw422016.N().S(`
                                             |
                                             `)
-//line app/vmalert/web.qtpl:124
+//line app/vmalert/web.qtpl:128
 				streamseriesFetchedWarn(qw422016, r)
-//line app/vmalert/web.qtpl:124
+//line app/vmalert/web.qtpl:128
 				qw422016.N().S(`
                                             <span><a target="_blank" href="`)
-//line app/vmalert/web.qtpl:125
+//line app/vmalert/web.qtpl:129
 				qw422016.E().S(prefix + r.WebLink())
-//line app/vmalert/web.qtpl:125
+//line app/vmalert/web.qtpl:129
 				qw422016.N().S(`">Details</a></span>
                                         </div>
                                         <div class="col-12">
                                             <code><pre>`)
-//line app/vmalert/web.qtpl:128
+//line app/vmalert/web.qtpl:132
 				qw422016.E().S(r.Query)
-//line app/vmalert/web.qtpl:128
+//line app/vmalert/web.qtpl:132
 				qw422016.N().S(`</pre></code>
                                         </div>
                                         <div class="col-12 mb-2">
                                             `)
-//line app/vmalert/web.qtpl:131
+//line app/vmalert/web.qtpl:135
 				if len(r.Labels) > 0 {
-//line app/vmalert/web.qtpl:131
+//line app/vmalert/web.qtpl:135
 					qw422016.N().S(` <b>Labels:</b>`)
-//line app/vmalert/web.qtpl:131
+//line app/vmalert/web.qtpl:135
 				}
-//line app/vmalert/web.qtpl:131
+//line app/vmalert/web.qtpl:135
 				qw422016.N().S(`
                                             `)
-//line app/vmalert/web.qtpl:132
+//line app/vmalert/web.qtpl:136
 				for k, v := range r.Labels {
-//line app/vmalert/web.qtpl:132
+//line app/vmalert/web.qtpl:136
 					qw422016.N().S(`
                                                     <span class="ms-1 badge bg-primary">`)
-//line app/vmalert/web.qtpl:133
+//line app/vmalert/web.qtpl:137
 					qw422016.E().S(k)
-//line app/vmalert/web.qtpl:133
+//line app/vmalert/web.qtpl:137
 					qw422016.N().S(`=`)
-//line app/vmalert/web.qtpl:133
+//line app/vmalert/web.qtpl:137
 					qw422016.E().S(v)
-//line app/vmalert/web.qtpl:133
+//line app/vmalert/web.qtpl:137
 					qw422016.N().S(`</span>
                                             `)
-//line app/vmalert/web.qtpl:134
+//line app/vmalert/web.qtpl:138
 				}
-//line app/vmalert/web.qtpl:134
+//line app/vmalert/web.qtpl:138
 				qw422016.N().S(`
                                         </div>
                                         `)
-//line app/vmalert/web.qtpl:136
+//line app/vmalert/web.qtpl:140
 				if r.LastError != "" {
-//line app/vmalert/web.qtpl:136
+//line app/vmalert/web.qtpl:140
 					qw422016.N().S(`
                                         <div class="col-12">
                                             <b>Error:</b>
                                             <div class="error-cell">
                                             `)
-//line app/vmalert/web.qtpl:140
+//line app/vmalert/web.qtpl:144
 					qw422016.E().S(r.LastError)
-//line app/vmalert/web.qtpl:140
+//line app/vmalert/web.qtpl:144
 					qw422016.N().S(`
                                             </div>
                                         </div>
                                         `)
-//line app/vmalert/web.qtpl:143
+//line app/vmalert/web.qtpl:147
 				}
-//line app/vmalert/web.qtpl:143
+//line app/vmalert/web.qtpl:147
 				qw422016.N().S(`
                                     </div>
                                 </td>
                                 <td class="text-center">`)
-//line app/vmalert/web.qtpl:146
+//line app/vmalert/web.qtpl:150
 				qw422016.N().D(r.LastSamples)
-//line app/vmalert/web.qtpl:146
+//line app/vmalert/web.qtpl:150
 				qw422016.N().S(`</td>
                                 <td class="text-center">`)
-//line app/vmalert/web.qtpl:147
+//line app/vmalert/web.qtpl:151
 				qw422016.N().FPrec(time.Since(r.LastEvaluation).Seconds(), 3)
-//line app/vmalert/web.qtpl:147
+//line app/vmalert/web.qtpl:151
 				qw422016.N().S(`s ago</td>
                             </tr>
                         `)
-//line app/vmalert/web.qtpl:149
+//line app/vmalert/web.qtpl:153
 			}
-//line app/vmalert/web.qtpl:149
+//line app/vmalert/web.qtpl:153
 			qw422016.N().S(`
                      </tbody>
                     </table>
                 </div>
             `)
-//line app/vmalert/web.qtpl:153
+//line app/vmalert/web.qtpl:157
 		}
-//line app/vmalert/web.qtpl:153
+//line app/vmalert/web.qtpl:157
 		qw422016.N().S(`
         `)
-//line app/vmalert/web.qtpl:154
+//line app/vmalert/web.qtpl:158
 	} else {
-//line app/vmalert/web.qtpl:154
+//line app/vmalert/web.qtpl:158
 		qw422016.N().S(`
             <div>
                 <p>No groups...</p>
             </div>
         `)
-//line app/vmalert/web.qtpl:158
+//line app/vmalert/web.qtpl:162
 	}
-//line app/vmalert/web.qtpl:158
+//line app/vmalert/web.qtpl:162
 	qw422016.N().S(`
 
     `)
-//line app/vmalert/web.qtpl:160
+//line app/vmalert/web.qtpl:164
 	tpl.StreamFooter(qw422016, r)
-//line app/vmalert/web.qtpl:160
+//line app/vmalert/web.qtpl:164
 	qw422016.N().S(`
 
 `)
-//line app/vmalert/web.qtpl:162
+//line app/vmalert/web.qtpl:166
 }
 
-//line app/vmalert/web.qtpl:162
+//line app/vmalert/web.qtpl:166
 func WriteListGroups(qq422016 qtio422016.Writer, r *http.Request, originGroups []APIGroup) {
-//line app/vmalert/web.qtpl:162
+//line app/vmalert/web.qtpl:166
 	qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmalert/web.qtpl:162
+//line app/vmalert/web.qtpl:166
 	StreamListGroups(qw422016, r, originGroups)
-//line app/vmalert/web.qtpl:162
+//line app/vmalert/web.qtpl:166
 	qt422016.ReleaseWriter(qw422016)
-//line app/vmalert/web.qtpl:162
+//line app/vmalert/web.qtpl:166
 }
 
-//line app/vmalert/web.qtpl:162
+//line app/vmalert/web.qtpl:166
 func ListGroups(r *http.Request, originGroups []APIGroup) string {
-//line app/vmalert/web.qtpl:162
+//line app/vmalert/web.qtpl:166
 	qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmalert/web.qtpl:162
+//line app/vmalert/web.qtpl:166
 	WriteListGroups(qb422016, r, originGroups)
-//line app/vmalert/web.qtpl:162
+//line app/vmalert/web.qtpl:166
 	qs422016 := string(qb422016.B)
-//line app/vmalert/web.qtpl:162
+//line app/vmalert/web.qtpl:166
 	qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmalert/web.qtpl:162
+//line app/vmalert/web.qtpl:166
 	return qs422016
-//line app/vmalert/web.qtpl:162
+//line app/vmalert/web.qtpl:166
 }
 
-//line app/vmalert/web.qtpl:165
+//line app/vmalert/web.qtpl:169
 func StreamListAlerts(qw422016 *qt422016.Writer, r *http.Request, groupAlerts []GroupAlerts) {
-//line app/vmalert/web.qtpl:165
+//line app/vmalert/web.qtpl:169
 	qw422016.N().S(`
     `)
-//line app/vmalert/web.qtpl:166
+//line app/vmalert/web.qtpl:170
 	prefix := utils.Prefix(r.URL.Path)
 
-//line app/vmalert/web.qtpl:166
+//line app/vmalert/web.qtpl:170
 	qw422016.N().S(`
     `)
-//line app/vmalert/web.qtpl:167
+//line app/vmalert/web.qtpl:171
 	tpl.StreamHeader(qw422016, r, navItems, "Alerts", configError())
-//line app/vmalert/web.qtpl:167
+//line app/vmalert/web.qtpl:171
 	qw422016.N().S(`
     `)
-//line app/vmalert/web.qtpl:168
+//line app/vmalert/web.qtpl:172
 	if len(groupAlerts) > 0 {
-//line app/vmalert/web.qtpl:168
+//line app/vmalert/web.qtpl:172
 		qw422016.N().S(`
          <a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
          <a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
          `)
-//line app/vmalert/web.qtpl:171
+//line app/vmalert/web.qtpl:175
 		for _, ga := range groupAlerts {
-//line app/vmalert/web.qtpl:171
+//line app/vmalert/web.qtpl:175
 			qw422016.N().S(`
             `)
-//line app/vmalert/web.qtpl:172
+//line app/vmalert/web.qtpl:176
 			g := ga.Group
 
-//line app/vmalert/web.qtpl:172
+//line app/vmalert/web.qtpl:176
 			qw422016.N().S(`
             <div class="group-heading alert-danger" data-bs-target="rules-`)
-//line app/vmalert/web.qtpl:173
+//line app/vmalert/web.qtpl:177
 			qw422016.E().S(g.ID)
-//line app/vmalert/web.qtpl:173
+//line app/vmalert/web.qtpl:177
 			qw422016.N().S(`">
                 <span class="anchor" id="group-`)
-//line app/vmalert/web.qtpl:174
+//line app/vmalert/web.qtpl:178
 			qw422016.E().S(g.ID)
-//line app/vmalert/web.qtpl:174
+//line app/vmalert/web.qtpl:178
 			qw422016.N().S(`"></span>
                 <a href="#group-`)
-//line app/vmalert/web.qtpl:175
+//line app/vmalert/web.qtpl:179
 			qw422016.E().S(g.ID)
-//line app/vmalert/web.qtpl:175
+//line app/vmalert/web.qtpl:179
 			qw422016.N().S(`">`)
-//line app/vmalert/web.qtpl:175
+//line app/vmalert/web.qtpl:179
 			qw422016.E().S(g.Name)
-//line app/vmalert/web.qtpl:175
+//line app/vmalert/web.qtpl:179
 			if g.Type != "prometheus" {
-//line app/vmalert/web.qtpl:175
+//line app/vmalert/web.qtpl:179
 				qw422016.N().S(` (`)
-//line app/vmalert/web.qtpl:175
+//line app/vmalert/web.qtpl:179
 				qw422016.E().S(g.Type)
-//line app/vmalert/web.qtpl:175
+//line app/vmalert/web.qtpl:179
 				qw422016.N().S(`)`)
-//line app/vmalert/web.qtpl:175
+//line app/vmalert/web.qtpl:179
 			}
-//line app/vmalert/web.qtpl:175
+//line app/vmalert/web.qtpl:179
 			qw422016.N().S(`</a>
                 <span class="badge bg-danger" title="Number of active alerts">`)
-//line app/vmalert/web.qtpl:176
+//line app/vmalert/web.qtpl:180
 			qw422016.N().D(len(ga.Alerts))
-//line app/vmalert/web.qtpl:176
+//line app/vmalert/web.qtpl:180
 			qw422016.N().S(`</span>
                 <br>
                 <p class="fs-6 fw-lighter">`)
-//line app/vmalert/web.qtpl:178
+//line app/vmalert/web.qtpl:182
 			qw422016.E().S(g.File)
-//line app/vmalert/web.qtpl:178
+//line app/vmalert/web.qtpl:182
 			qw422016.N().S(`</p>
             </div>
             `)
-//line app/vmalert/web.qtpl:181
+//line app/vmalert/web.qtpl:185
 			var keys []string
 			alertsByRule := make(map[string][]*APIAlert)
 			for _, alert := range ga.Alerts {
@@ -693,20 +721,20 @@ func StreamListAlerts(qw422016 *qt422016.Writer, r *http.Request, groupAlerts []
 			}
 			sort.Strings(keys)
 
-//line app/vmalert/web.qtpl:190
+//line app/vmalert/web.qtpl:194
 			qw422016.N().S(`
             <div class="collapse" id="rules-`)
-//line app/vmalert/web.qtpl:191
+//line app/vmalert/web.qtpl:195
 			qw422016.E().S(g.ID)
-//line app/vmalert/web.qtpl:191
+//line app/vmalert/web.qtpl:195
 			qw422016.N().S(`">
                 `)
-//line app/vmalert/web.qtpl:192
+//line app/vmalert/web.qtpl:196
 			for _, ruleID := range keys {
-//line app/vmalert/web.qtpl:192
+//line app/vmalert/web.qtpl:196
 				qw422016.N().S(`
                     `)
-//line app/vmalert/web.qtpl:194
+//line app/vmalert/web.qtpl:198
 				defaultAR := alertsByRule[ruleID][0]
 				var labelKeys []string
 				for k := range defaultAR.Labels {
@@ -714,28 +742,28 @@ func StreamListAlerts(qw422016 *qt422016.Writer, r *http.Request, groupAlerts []
 				}
 				sort.Strings(labelKeys)
 
-//line app/vmalert/web.qtpl:200
+//line app/vmalert/web.qtpl:204
 				qw422016.N().S(`
                     <br>
                     <b>alert:</b> `)
-//line app/vmalert/web.qtpl:202
+//line app/vmalert/web.qtpl:206
 				qw422016.E().S(defaultAR.Name)
-//line app/vmalert/web.qtpl:202
+//line app/vmalert/web.qtpl:206
 				qw422016.N().S(` (`)
-//line app/vmalert/web.qtpl:202
+//line app/vmalert/web.qtpl:206
 				qw422016.N().D(len(alertsByRule[ruleID]))
-//line app/vmalert/web.qtpl:202
+//line app/vmalert/web.qtpl:206
 				qw422016.N().S(`)
                      | <span><a target="_blank" href="`)
-//line app/vmalert/web.qtpl:203
+//line app/vmalert/web.qtpl:207
 				qw422016.E().S(defaultAR.SourceLink)
-//line app/vmalert/web.qtpl:203
+//line app/vmalert/web.qtpl:207
 				qw422016.N().S(`">Source</a></span>
                     <br>
                     <b>expr:</b><code><pre>`)
-//line app/vmalert/web.qtpl:205
+//line app/vmalert/web.qtpl:209
 				qw422016.E().S(defaultAR.Expression)
-//line app/vmalert/web.qtpl:205
+//line app/vmalert/web.qtpl:209
 				qw422016.N().S(`</pre></code>
                     <table class="table table-striped table-hover table-sm">
                         <thead>
@@ -749,204 +777,213 @@ func StreamListAlerts(qw422016 *qt422016.Writer, r *http.Request, groupAlerts []
                         </thead>
                         <tbody>
                         `)
-//line app/vmalert/web.qtpl:217
+//line app/vmalert/web.qtpl:221
 				for _, ar := range alertsByRule[ruleID] {
-//line app/vmalert/web.qtpl:217
+//line app/vmalert/web.qtpl:221
 					qw422016.N().S(`
                             <tr>
                                 <td>
                                     `)
-//line app/vmalert/web.qtpl:220
+//line app/vmalert/web.qtpl:224
 					for _, k := range labelKeys {
-//line app/vmalert/web.qtpl:220
+//line app/vmalert/web.qtpl:224
 						qw422016.N().S(`
                                         <span class="ms-1 badge bg-primary">`)
-//line app/vmalert/web.qtpl:221
+//line app/vmalert/web.qtpl:225
 						qw422016.E().S(k)
-//line app/vmalert/web.qtpl:221
+//line app/vmalert/web.qtpl:225
 						qw422016.N().S(`=`)
-//line app/vmalert/web.qtpl:221
+//line app/vmalert/web.qtpl:225
 						qw422016.E().S(ar.Labels[k])
-//line app/vmalert/web.qtpl:221
+//line app/vmalert/web.qtpl:225
 						qw422016.N().S(`</span>
                                     `)
-//line app/vmalert/web.qtpl:222
+//line app/vmalert/web.qtpl:226
 					}
-//line app/vmalert/web.qtpl:222
+//line app/vmalert/web.qtpl:226
 					qw422016.N().S(`
                                 </td>
                                 <td>`)
-//line app/vmalert/web.qtpl:224
+//line app/vmalert/web.qtpl:228
 					streambadgeState(qw422016, ar.State)
-//line app/vmalert/web.qtpl:224
+//line app/vmalert/web.qtpl:228
 					qw422016.N().S(`</td>
                                 <td>
                                     `)
-//line app/vmalert/web.qtpl:226
+//line app/vmalert/web.qtpl:230
 					qw422016.E().S(ar.ActiveAt.Format("2006-01-02T15:04:05Z07:00"))
-//line app/vmalert/web.qtpl:226
+//line app/vmalert/web.qtpl:230
 					qw422016.N().S(`
                                     `)
-//line app/vmalert/web.qtpl:227
+//line app/vmalert/web.qtpl:231
 					if ar.Restored {
-//line app/vmalert/web.qtpl:227
+//line app/vmalert/web.qtpl:231
 						streambadgeRestored(qw422016)
-//line app/vmalert/web.qtpl:227
+//line app/vmalert/web.qtpl:231
 					}
-//line app/vmalert/web.qtpl:227
+//line app/vmalert/web.qtpl:231
+					qw422016.N().S(`
+                                    `)
+//line app/vmalert/web.qtpl:232
+					if ar.Stabilizing {
+//line app/vmalert/web.qtpl:232
+						streambadgeStabilizing(qw422016)
+//line app/vmalert/web.qtpl:232
+					}
+//line app/vmalert/web.qtpl:232
 					qw422016.N().S(`
                                 </td>
                                 <td>`)
-//line app/vmalert/web.qtpl:229
+//line app/vmalert/web.qtpl:234
 					qw422016.E().S(ar.Value)
-//line app/vmalert/web.qtpl:229
+//line app/vmalert/web.qtpl:234
 					qw422016.N().S(`</td>
                                 <td>
                                     <a href="`)
-//line app/vmalert/web.qtpl:231
+//line app/vmalert/web.qtpl:236
 					qw422016.E().S(prefix + ar.WebLink())
-//line app/vmalert/web.qtpl:231
+//line app/vmalert/web.qtpl:236
 					qw422016.N().S(`">Details</a>
                                 </td>
                             </tr>
                         `)
-//line app/vmalert/web.qtpl:234
+//line app/vmalert/web.qtpl:239
 				}
-//line app/vmalert/web.qtpl:234
+//line app/vmalert/web.qtpl:239
 				qw422016.N().S(`
                      </tbody>
                     </table>
                 `)
-//line app/vmalert/web.qtpl:237
+//line app/vmalert/web.qtpl:242
 			}
-//line app/vmalert/web.qtpl:237
+//line app/vmalert/web.qtpl:242
 			qw422016.N().S(`
             </div>
             <br>
         `)
-//line app/vmalert/web.qtpl:240
+//line app/vmalert/web.qtpl:245
 		}
-//line app/vmalert/web.qtpl:240
+//line app/vmalert/web.qtpl:245
 		qw422016.N().S(`
 
     `)
-//line app/vmalert/web.qtpl:242
+//line app/vmalert/web.qtpl:247
 	} else {
-//line app/vmalert/web.qtpl:242
+//line app/vmalert/web.qtpl:247
 		qw422016.N().S(`
         <div>
             <p>No active alerts...</p>
         </div>
     `)
-//line app/vmalert/web.qtpl:246
+//line app/vmalert/web.qtpl:251
 	}
-//line app/vmalert/web.qtpl:246
+//line app/vmalert/web.qtpl:251
 	qw422016.N().S(`
 
     `)
-//line app/vmalert/web.qtpl:248
+//line app/vmalert/web.qtpl:253
 	tpl.StreamFooter(qw422016, r)
-//line app/vmalert/web.qtpl:248
+//line app/vmalert/web.qtpl:253
 	qw422016.N().S(`
 
 `)
-//line app/vmalert/web.qtpl:250
+//line app/vmalert/web.qtpl:255
 }
 
-//line app/vmalert/web.qtpl:250
+//line app/vmalert/web.qtpl:255
 func WriteListAlerts(qq422016 qtio422016.Writer, r *http.Request, groupAlerts []GroupAlerts) {
-//line app/vmalert/web.qtpl:250
+//line app/vmalert/web.qtpl:255
 	qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmalert/web.qtpl:250
+//line app/vmalert/web.qtpl:255
 	StreamListAlerts(qw422016, r, groupAlerts)
-//line app/vmalert/web.qtpl:250
+//line app/vmalert/web.qtpl:255
 	qt422016.ReleaseWriter(qw422016)
-//line app/vmalert/web.qtpl:250
+//line app/vmalert/web.qtpl:255
 }
 
-//line app/vmalert/web.qtpl:250
+//line app/vmalert/web.qtpl:255
 func ListAlerts(r *http.Request, groupAlerts []GroupAlerts) string {
-//line app/vmalert/web.qtpl:250
+//line app/vmalert/web.qtpl:255
 	qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmalert/web.qtpl:250
+//line app/vmalert/web.qtpl:255
 	WriteListAlerts(qb422016, r, groupAlerts)
-//line app/vmalert/web.qtpl:250
+//line app/vmalert/web.qtpl:255
 	qs422016 := string(qb422016.B)
-//line app/vmalert/web.qtpl:250
+//line app/vmalert/web.qtpl:255
 	qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmalert/web.qtpl:250
+//line app/vmalert/web.qtpl:255
 	return qs422016
-//line app/vmalert/web.qtpl:250
+//line app/vmalert/web.qtpl:255
 }
 
-//line app/vmalert/web.qtpl:252
+//line app/vmalert/web.qtpl:257
 func StreamListTargets(qw422016 *qt422016.Writer, r *http.Request, targets map[notifier.TargetType][]notifier.Target) {
-//line app/vmalert/web.qtpl:252
+//line app/vmalert/web.qtpl:257
 	qw422016.N().S(`
     `)
-//line app/vmalert/web.qtpl:253
+//line app/vmalert/web.qtpl:258
 	tpl.StreamHeader(qw422016, r, navItems, "Notifiers", configError())
-//line app/vmalert/web.qtpl:253
+//line app/vmalert/web.qtpl:258
 	qw422016.N().S(`
     `)
-//line app/vmalert/web.qtpl:254
+//line app/vmalert/web.qtpl:259
 	if len(targets) > 0 {
-//line app/vmalert/web.qtpl:254
+//line app/vmalert/web.qtpl:259
 		qw422016.N().S(`
          <a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
          <a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
 
          `)
-//line app/vmalert/web.qtpl:259
+//line app/vmalert/web.qtpl:264
 		var keys []string
 		for key := range targets {
 			keys = append(keys, string(key))
 		}
 		sort.Strings(keys)
 
-//line app/vmalert/web.qtpl:264
+//line app/vmalert/web.qtpl:269
 		qw422016.N().S(`
 
          `)
-//line app/vmalert/web.qtpl:266
+//line app/vmalert/web.qtpl:271
 		for i := range keys {
-//line app/vmalert/web.qtpl:266
+//line app/vmalert/web.qtpl:271
 			qw422016.N().S(`
            `)
-//line app/vmalert/web.qtpl:267
+//line app/vmalert/web.qtpl:272
 			typeK, ns := keys[i], targets[notifier.TargetType(keys[i])]
 			count := len(ns)
 
-//line app/vmalert/web.qtpl:269
+//line app/vmalert/web.qtpl:274
 			qw422016.N().S(`
            <div class="group-heading" data-bs-target="notifiers-`)
-//line app/vmalert/web.qtpl:270
+//line app/vmalert/web.qtpl:275
 			qw422016.E().S(typeK)
-//line app/vmalert/web.qtpl:270
+//line app/vmalert/web.qtpl:275
 			qw422016.N().S(`">
              <span class="anchor" id="group-`)
-//line app/vmalert/web.qtpl:271
+//line app/vmalert/web.qtpl:276
 			qw422016.E().S(typeK)
-//line app/vmalert/web.qtpl:271
+//line app/vmalert/web.qtpl:276
 			qw422016.N().S(`"></span>
              <a href="#group-`)
-//line app/vmalert/web.qtpl:272
+//line app/vmalert/web.qtpl:277
 			qw422016.E().S(typeK)
-//line app/vmalert/web.qtpl:272
+//line app/vmalert/web.qtpl:277
 			qw422016.N().S(`">`)
-//line app/vmalert/web.qtpl:272
+//line app/vmalert/web.qtpl:277
 			qw422016.E().S(typeK)
-//line app/vmalert/web.qtpl:272
+//line app/vmalert/web.qtpl:277
 			qw422016.N().S(` (`)
-//line app/vmalert/web.qtpl:272
+//line app/vmalert/web.qtpl:277
 			qw422016.N().D(count)
-//line app/vmalert/web.qtpl:272
+//line app/vmalert/web.qtpl:277
 			qw422016.N().S(`)</a>
          </div>
          <div class="collapse show" id="notifiers-`)
-//line app/vmalert/web.qtpl:274
+//line app/vmalert/web.qtpl:279
 			qw422016.E().S(typeK)
-//line app/vmalert/web.qtpl:274
+//line app/vmalert/web.qtpl:279
 			qw422016.N().S(`">
              <table class="table table-striped table-hover table-sm">
                  <thead>
@@ -957,119 +994,119 @@ func StreamListTargets(qw422016 *qt422016.Writer, r *http.Request, targets map[n
                  </thead>
                  <tbody>
                  `)
-//line app/vmalert/web.qtpl:283
+//line app/vmalert/web.qtpl:288
 			for _, n := range ns {
-//line app/vmalert/web.qtpl:283
+//line app/vmalert/web.qtpl:288
 				qw422016.N().S(`
                      <tr>
                          <td>
                               `)
-//line app/vmalert/web.qtpl:286
+//line app/vmalert/web.qtpl:291
 				for _, l := range n.Labels.GetLabels() {
-//line app/vmalert/web.qtpl:286
+//line app/vmalert/web.qtpl:291
 					qw422016.N().S(`
                                       <span class="ms-1 badge bg-primary">`)
-//line app/vmalert/web.qtpl:287
+//line app/vmalert/web.qtpl:292
 					qw422016.E().S(l.Name)
-//line app/vmalert/web.qtpl:287
+//line app/vmalert/web.qtpl:292
 					qw422016.N().S(`=`)
-//line app/vmalert/web.qtpl:287
+//line app/vmalert/web.qtpl:292
 					qw422016.E().S(l.Value)
-//line app/vmalert/web.qtpl:287
+//line app/vmalert/web.qtpl:292
 					qw422016.N().S(`</span>
                               `)
-//line app/vmalert/web.qtpl:288
+//line app/vmalert/web.qtpl:293
 				}
-//line app/vmalert/web.qtpl:288
+//line app/vmalert/web.qtpl:293
 				qw422016.N().S(`
                           </td>
                          <td>`)
-//line app/vmalert/web.qtpl:290
+//line app/vmalert/web.qtpl:295
 				qw422016.E().S(n.Notifier.Addr())
-//line app/vmalert/web.qtpl:290
+//line app/vmalert/web.qtpl:295
 				qw422016.N().S(`</td>
                      </tr>
                  `)
-//line app/vmalert/web.qtpl:292
+//line app/vmalert/web.qtpl:297
 			}
-//line app/vmalert/web.qtpl:292
+//line app/vmalert/web.qtpl:297
 			qw422016.N().S(`
               </tbody>
              </table>
          </div>
      `)
-//line app/vmalert/web.qtpl:296
+//line app/vmalert/web.qtpl:301
 		}
-//line app/vmalert/web.qtpl:296
+//line app/vmalert/web.qtpl:301
 		qw422016.N().S(`
 
     `)
-//line app/vmalert/web.qtpl:298
+//line app/vmalert/web.qtpl:303
 	} else {
-//line app/vmalert/web.qtpl:298
+//line app/vmalert/web.qtpl:303
 		qw422016.N().S(`
         <div>
             <p>No targets...</p>
         </div>
     `)
-//line app/vmalert/web.qtpl:302
+//line app/vmalert/web.qtpl:307
 	}
-//line app/vmalert/web.qtpl:302
+//line app/vmalert/web.qtpl:307
 	qw422016.N().S(`
 
     `)
-//line app/vmalert/web.qtpl:304
+//line app/vmalert/web.qtpl:309
 	tpl.StreamFooter(qw422016, r)
-//line app/vmalert/web.qtpl:304
+//line app/vmalert/web.qtpl:309
 	qw422016.N().S(`
 
 `)
-//line app/vmalert/web.qtpl:306
+//line app/vmalert/web.qtpl:311
 }
 
-//line app/vmalert/web.qtpl:306
+//line app/vmalert/web.qtpl:311
 func WriteListTargets(qq422016 qtio422016.Writer, r *http.Request, targets map[notifier.TargetType][]notifier.Target) {
-//line app/vmalert/web.qtpl:306
+//line app/vmalert/web.qtpl:311
 	qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmalert/web.qtpl:306
+//line app/vmalert/web.qtpl:311
 	StreamListTargets(qw422016, r, targets)
-//line app/vmalert/web.qtpl:306
+//line app/vmalert/web.qtpl:311
 	qt422016.ReleaseWriter(qw422016)
-//line app/vmalert/web.qtpl:306
+//line app/vmalert/web.qtpl:311
 }
 
-//line app/vmalert/web.qtpl:306
+//line app/vmalert/web.qtpl:311
 func ListTargets(r *http.Request, targets map[notifier.TargetType][]notifier.Target) string {
-//line app/vmalert/web.qtpl:306
+//line app/vmalert/web.qtpl:311
 	qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmalert/web.qtpl:306
+//line app/vmalert/web.qtpl:311
 	WriteListTargets(qb422016, r, targets)
-//line app/vmalert/web.qtpl:306
+//line app/vmalert/web.qtpl:311
 	qs422016 := string(qb422016.B)
-//line app/vmalert/web.qtpl:306
+//line app/vmalert/web.qtpl:311
 	qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmalert/web.qtpl:306
+//line app/vmalert/web.qtpl:311
 	return qs422016
-//line app/vmalert/web.qtpl:306
+//line app/vmalert/web.qtpl:311
 }
 
-//line app/vmalert/web.qtpl:308
+//line app/vmalert/web.qtpl:313
 func StreamAlert(qw422016 *qt422016.Writer, r *http.Request, alert *APIAlert) {
-//line app/vmalert/web.qtpl:308
+//line app/vmalert/web.qtpl:313
 	qw422016.N().S(`
     `)
-//line app/vmalert/web.qtpl:309
+//line app/vmalert/web.qtpl:314
 	prefix := utils.Prefix(r.URL.Path)
 
-//line app/vmalert/web.qtpl:309
+//line app/vmalert/web.qtpl:314
 	qw422016.N().S(`
     `)
-//line app/vmalert/web.qtpl:310
+//line app/vmalert/web.qtpl:315
 	tpl.StreamHeader(qw422016, r, navItems, "", configError())
-//line app/vmalert/web.qtpl:310
+//line app/vmalert/web.qtpl:315
 	qw422016.N().S(`
     `)
-//line app/vmalert/web.qtpl:312
+//line app/vmalert/web.qtpl:317
 	var labelKeys []string
 	for k := range alert.Labels {
 		labelKeys = append(labelKeys, k)
@@ -1082,28 +1119,28 @@ func StreamAlert(qw422016 *qt422016.Writer, r *http.Request, alert *APIAlert) {
 	}
 	sort.Strings(annotationKeys)
 
-//line app/vmalert/web.qtpl:323
+//line app/vmalert/web.qtpl:328
 	qw422016.N().S(`
     <div class="display-6 pb-3 mb-3">Alert: `)
-//line app/vmalert/web.qtpl:324
+//line app/vmalert/web.qtpl:329
 	qw422016.E().S(alert.Name)
-//line app/vmalert/web.qtpl:324
+//line app/vmalert/web.qtpl:329
 	qw422016.N().S(`<span class="ms-2 badge `)
-//line app/vmalert/web.qtpl:324
+//line app/vmalert/web.qtpl:329
 	if alert.State == "firing" {
-//line app/vmalert/web.qtpl:324
+//line app/vmalert/web.qtpl:329
 		qw422016.N().S(`bg-danger`)
-//line app/vmalert/web.qtpl:324
+//line app/vmalert/web.qtpl:329
 	} else {
-//line app/vmalert/web.qtpl:324
+//line app/vmalert/web.qtpl:329
 		qw422016.N().S(` bg-warning text-dark`)
-//line app/vmalert/web.qtpl:324
+//line app/vmalert/web.qtpl:329
 	}
-//line app/vmalert/web.qtpl:324
+//line app/vmalert/web.qtpl:329
 	qw422016.N().S(`">`)
-//line app/vmalert/web.qtpl:324
+//line app/vmalert/web.qtpl:329
 	qw422016.E().S(alert.State)
-//line app/vmalert/web.qtpl:324
+//line app/vmalert/web.qtpl:329
 	qw422016.N().S(`</span></div>
     <div class="container border-bottom p-2">
       <div class="row">
@@ -1112,9 +1149,9 @@ func StreamAlert(qw422016 *qt422016.Writer, r *http.Request, alert *APIAlert) {
         </div>
         <div class="col">
           `)
-//line app/vmalert/web.qtpl:331
+//line app/vmalert/web.qtpl:336
 	qw422016.E().S(alert.ActiveAt.Format("2006-01-02T15:04:05Z07:00"))
-//line app/vmalert/web.qtpl:331
+//line app/vmalert/web.qtpl:336
 	qw422016.N().S(`
         </div>
       </div>
@@ -1126,9 +1163,9 @@ func StreamAlert(qw422016 *qt422016.Writer, r *http.Request, alert *APIAlert) {
         </div>
         <div class="col">
           <code><pre>`)
-//line app/vmalert/web.qtpl:341
+//line app/vmalert/web.qtpl:346
 	qw422016.E().S(alert.Expression)
-//line app/vmalert/web.qtpl:341
+//line app/vmalert/web.qtpl:346
 	qw422016.N().S(`</pre></code>
         </div>
       </div>
@@ -1140,23 +1177,23 @@ func StreamAlert(qw422016 *qt422016.Writer, r *http.Request, alert *APIAlert) {
         </div>
         <div class="col">
            `)
-//line app/vmalert/web.qtpl:351
+//line app/vmalert/web.qtpl:356
 	for _, k := range labelKeys {
-//line app/vmalert/web.qtpl:351
+//line app/vmalert/web.qtpl:356
 		qw422016.N().S(`
                 <span class="m-1 badge bg-primary">`)
-//line app/vmalert/web.qtpl:352
+//line app/vmalert/web.qtpl:357
 		qw422016.E().S(k)
-//line app/vmalert/web.qtpl:352
+//line app/vmalert/web.qtpl:357
 		qw422016.N().S(`=`)
-//line app/vmalert/web.qtpl:352
+//line app/vmalert/web.qtpl:357
 		qw422016.E().S(alert.Labels[k])
-//line app/vmalert/web.qtpl:352
+//line app/vmalert/web.qtpl:357
 		qw422016.N().S(`</span>
           `)
-//line app/vmalert/web.qtpl:353
+//line app/vmalert/web.qtpl:358
 	}
-//line app/vmalert/web.qtpl:353
+//line app/vmalert/web.qtpl:358
 	qw422016.N().S(`
         </div>
       </div>
@@ -1168,24 +1205,24 @@ func StreamAlert(qw422016 *qt422016.Writer, r *http.Request, alert *APIAlert) {
         </div>
         <div class="col">
            `)
-//line app/vmalert/web.qtpl:363
+//line app/vmalert/web.qtpl:368
 	for _, k := range annotationKeys {
-//line app/vmalert/web.qtpl:363
+//line app/vmalert/web.qtpl:368
 		qw422016.N().S(`
                 <b>`)
-//line app/vmalert/web.qtpl:364
+//line app/vmalert/web.qtpl:369
 		qw422016.E().S(k)
-//line app/vmalert/web.qtpl:364
+//line app/vmalert/web.qtpl:369
 		qw422016.N().S(`:</b><br>
                 <p>`)
-//line app/vmalert/web.qtpl:365
+//line app/vmalert/web.qtpl:370
 		qw422016.E().S(alert.Annotations[k])
-//line app/vmalert/web.qtpl:365
+//line app/vmalert/web.qtpl:370
 		qw422016.N().S(`</p>
           `)
-//line app/vmalert/web.qtpl:366
+//line app/vmalert/web.qtpl:371
 	}
-//line app/vmalert/web.qtpl:366
+//line app/vmalert/web.qtpl:371
 	qw422016.N().S(`
         </div>
       </div>
@@ -1197,17 +1234,17 @@ func StreamAlert(qw422016 *qt422016.Writer, r *http.Request, alert *APIAlert) {
         </div>
         <div class="col">
            <a target="_blank" href="`)
-//line app/vmalert/web.qtpl:376
+//line app/vmalert/web.qtpl:381
 	qw422016.E().S(prefix)
-//line app/vmalert/web.qtpl:376
+//line app/vmalert/web.qtpl:381
 	qw422016.N().S(`groups#group-`)
-//line app/vmalert/web.qtpl:376
+//line app/vmalert/web.qtpl:381
 	qw422016.E().S(alert.GroupID)
-//line app/vmalert/web.qtpl:376
+//line app/vmalert/web.qtpl:381
 	qw422016.N().S(`">`)
-//line app/vmalert/web.qtpl:376
+//line app/vmalert/web.qtpl:381
 	qw422016.E().S(alert.GroupID)
-//line app/vmalert/web.qtpl:376
+//line app/vmalert/web.qtpl:381
 	qw422016.N().S(`</a>
         </div>
       </div>
@@ -1219,66 +1256,66 @@ func StreamAlert(qw422016 *qt422016.Writer, r *http.Request, alert *APIAlert) {
         </div>
         <div class="col">
            <a target="_blank" href="`)
-//line app/vmalert/web.qtpl:386
+//line app/vmalert/web.qtpl:391
 	qw422016.E().S(alert.SourceLink)
-//line app/vmalert/web.qtpl:386
+//line app/vmalert/web.qtpl:391
 	qw422016.N().S(`">Link</a>
         </div>
       </div>
     </div>
     `)
-//line app/vmalert/web.qtpl:390
+//line app/vmalert/web.qtpl:395
 	tpl.StreamFooter(qw422016, r)
-//line app/vmalert/web.qtpl:390
+//line app/vmalert/web.qtpl:395
 	qw422016.N().S(`
 
 `)
-//line app/vmalert/web.qtpl:392
+//line app/vmalert/web.qtpl:397
 }
 
-//line app/vmalert/web.qtpl:392
+//line app/vmalert/web.qtpl:397
 func WriteAlert(qq422016 qtio422016.Writer, r *http.Request, alert *APIAlert) {
-//line app/vmalert/web.qtpl:392
+//line app/vmalert/web.qtpl:397
 	qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmalert/web.qtpl:392
+//line app/vmalert/web.qtpl:397
 	StreamAlert(qw422016, r, alert)
-//line app/vmalert/web.qtpl:392
+//line app/vmalert/web.qtpl:397
 	qt422016.ReleaseWriter(qw422016)
-//line app/vmalert/web.qtpl:392
+//line app/vmalert/web.qtpl:397
 }
 
-//line app/vmalert/web.qtpl:392
+//line app/vmalert/web.qtpl:397
 func Alert(r *http.Request, alert *APIAlert) string {
-//line app/vmalert/web.qtpl:392
+//line app/vmalert/web.qtpl:397
 	qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmalert/web.qtpl:392
+//line app/vmalert/web.qtpl:397
 	WriteAlert(qb422016, r, alert)
-//line app/vmalert/web.qtpl:392
+//line app/vmalert/web.qtpl:397
 	qs422016 := string(qb422016.B)
-//line app/vmalert/web.qtpl:392
+//line app/vmalert/web.qtpl:397
 	qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmalert/web.qtpl:392
+//line app/vmalert/web.qtpl:397
 	return qs422016
-//line app/vmalert/web.qtpl:392
+//line app/vmalert/web.qtpl:397
 }
 
-//line app/vmalert/web.qtpl:395
+//line app/vmalert/web.qtpl:400
 func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule APIRule) {
-//line app/vmalert/web.qtpl:395
+//line app/vmalert/web.qtpl:400
 	qw422016.N().S(`
     `)
-//line app/vmalert/web.qtpl:396
+//line app/vmalert/web.qtpl:401
 	prefix := utils.Prefix(r.URL.Path)
 
-//line app/vmalert/web.qtpl:396
+//line app/vmalert/web.qtpl:401
 	qw422016.N().S(`
     `)
-//line app/vmalert/web.qtpl:397
+//line app/vmalert/web.qtpl:402
 	tpl.StreamHeader(qw422016, r, navItems, "", configError())
-//line app/vmalert/web.qtpl:397
+//line app/vmalert/web.qtpl:402
 	qw422016.N().S(`
     `)
-//line app/vmalert/web.qtpl:399
+//line app/vmalert/web.qtpl:404
 	var labelKeys []string
 	for k := range rule.Labels {
 		labelKeys = append(labelKeys, k)
@@ -1302,28 +1339,28 @@ func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule APIRule)
 		}
 	}
 
-//line app/vmalert/web.qtpl:422
+//line app/vmalert/web.qtpl:427
 	qw422016.N().S(`
     <div class="display-6 pb-3 mb-3">Rule: `)
-//line app/vmalert/web.qtpl:423
+//line app/vmalert/web.qtpl:428
 	qw422016.E().S(rule.Name)
-//line app/vmalert/web.qtpl:423
+//line app/vmalert/web.qtpl:428
 	qw422016.N().S(`<span class="ms-2 badge `)
-//line app/vmalert/web.qtpl:423
+//line app/vmalert/web.qtpl:428
 	if rule.Health != "ok" {
-//line app/vmalert/web.qtpl:423
+//line app/vmalert/web.qtpl:428
 		qw422016.N().S(`bg-danger`)
-//line app/vmalert/web.qtpl:423
+//line app/vmalert/web.qtpl:428
 	} else {
-//line app/vmalert/web.qtpl:423
+//line app/vmalert/web.qtpl:428
 		qw422016.N().S(` bg-success text-dark`)
-//line app/vmalert/web.qtpl:423
+//line app/vmalert/web.qtpl:428
 	}
-//line app/vmalert/web.qtpl:423
+//line app/vmalert/web.qtpl:428
 	qw422016.N().S(`">`)
-//line app/vmalert/web.qtpl:423
+//line app/vmalert/web.qtpl:428
 	qw422016.E().S(rule.Health)
-//line app/vmalert/web.qtpl:423
+//line app/vmalert/web.qtpl:428
 	qw422016.N().S(`</span></div>
     <div class="container border-bottom p-2">
       <div class="row">
@@ -1332,17 +1369,17 @@ func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule APIRule)
         </div>
         <div class="col">
           <code><pre>`)
-//line app/vmalert/web.qtpl:430
+//line app/vmalert/web.qtpl:435
 	qw422016.E().S(rule.Query)
-//line app/vmalert/web.qtpl:430
+//line app/vmalert/web.qtpl:435
 	qw422016.N().S(`</pre></code>
         </div>
       </div>
     </div>
     `)
-//line app/vmalert/web.qtpl:434
+//line app/vmalert/web.qtpl:439
 	if rule.Type == "alerting" {
-//line app/vmalert/web.qtpl:434
+//line app/vmalert/web.qtpl:439
 		qw422016.N().S(`
     <div class="container border-bottom p-2">
       <div class="row">
@@ -1351,17 +1388,41 @@ func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule APIRule)
         </div>
         <div class="col">
          `)
-//line app/vmalert/web.qtpl:441
+//line app/vmalert/web.qtpl:446
 		qw422016.E().V(rule.Duration)
-//line app/vmalert/web.qtpl:441
+//line app/vmalert/web.qtpl:446
 		qw422016.N().S(` seconds
         </div>
       </div>
     </div>
     `)
-//line app/vmalert/web.qtpl:445
+//line app/vmalert/web.qtpl:450
+		if rule.KeepFiringFor > 0 {
+//line app/vmalert/web.qtpl:450
+			qw422016.N().S(`
+    <div class="container border-bottom p-2">
+      <div class="row">
+        <div class="col-2">
+          Keep firing for
+        </div>
+        <div class="col">
+         `)
+//line app/vmalert/web.qtpl:457
+			qw422016.E().V(rule.KeepFiringFor)
+//line app/vmalert/web.qtpl:457
+			qw422016.N().S(` seconds
+        </div>
+      </div>
+    </div>
+    `)
+//line app/vmalert/web.qtpl:461
+		}
+//line app/vmalert/web.qtpl:461
+		qw422016.N().S(`
+    `)
+//line app/vmalert/web.qtpl:462
 	}
-//line app/vmalert/web.qtpl:445
+//line app/vmalert/web.qtpl:462
 	qw422016.N().S(`
     <div class="container border-bottom p-2">
       <div class="row">
@@ -1370,31 +1431,31 @@ func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule APIRule)
         </div>
         <div class="col">
           `)
-//line app/vmalert/web.qtpl:452
+//line app/vmalert/web.qtpl:469
 	for _, k := range labelKeys {
-//line app/vmalert/web.qtpl:452
+//line app/vmalert/web.qtpl:469
 		qw422016.N().S(`
                 <span class="m-1 badge bg-primary">`)
-//line app/vmalert/web.qtpl:453
+//line app/vmalert/web.qtpl:470
 		qw422016.E().S(k)
-//line app/vmalert/web.qtpl:453
+//line app/vmalert/web.qtpl:470
 		qw422016.N().S(`=`)
-//line app/vmalert/web.qtpl:453
+//line app/vmalert/web.qtpl:470
 		qw422016.E().S(rule.Labels[k])
-//line app/vmalert/web.qtpl:453
+//line app/vmalert/web.qtpl:470
 		qw422016.N().S(`</span>
           `)
-//line app/vmalert/web.qtpl:454
+//line app/vmalert/web.qtpl:471
 	}
-//line app/vmalert/web.qtpl:454
+//line app/vmalert/web.qtpl:471
 	qw422016.N().S(`
         </div>
       </div>
     </div>
     `)
-//line app/vmalert/web.qtpl:458
+//line app/vmalert/web.qtpl:475
 	if rule.Type == "alerting" {
-//line app/vmalert/web.qtpl:458
+//line app/vmalert/web.qtpl:475
 		qw422016.N().S(`
     <div class="container border-bottom p-2">
       <div class="row">
@@ -1403,24 +1464,24 @@ func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule APIRule)
         </div>
         <div class="col">
           `)
-//line app/vmalert/web.qtpl:465
+//line app/vmalert/web.qtpl:482
 		for _, k := range annotationKeys {
-//line app/vmalert/web.qtpl:465
+//line app/vmalert/web.qtpl:482
 			qw422016.N().S(`
                 <b>`)
-//line app/vmalert/web.qtpl:466
+//line app/vmalert/web.qtpl:483
 			qw422016.E().S(k)
-//line app/vmalert/web.qtpl:466
+//line app/vmalert/web.qtpl:483
 			qw422016.N().S(`:</b><br>
                 <p>`)
-//line app/vmalert/web.qtpl:467
+//line app/vmalert/web.qtpl:484
 			qw422016.E().S(rule.Annotations[k])
-//line app/vmalert/web.qtpl:467
+//line app/vmalert/web.qtpl:484
 			qw422016.N().S(`</p>
           `)
-//line app/vmalert/web.qtpl:468
+//line app/vmalert/web.qtpl:485
 		}
-//line app/vmalert/web.qtpl:468
+//line app/vmalert/web.qtpl:485
 		qw422016.N().S(`
         </div>
       </div>
@@ -1432,17 +1493,17 @@ func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule APIRule)
         </div>
         <div class="col">
            `)
-//line app/vmalert/web.qtpl:478
+//line app/vmalert/web.qtpl:495
 		qw422016.E().V(rule.Debug)
-//line app/vmalert/web.qtpl:478
+//line app/vmalert/web.qtpl:495
 		qw422016.N().S(`
         </div>
       </div>
     </div>
     `)
-//line app/vmalert/web.qtpl:482
+//line app/vmalert/web.qtpl:499
 	}
-//line app/vmalert/web.qtpl:482
+//line app/vmalert/web.qtpl:499
 	qw422016.N().S(`
     <div class="container border-bottom p-2">
       <div class="row">
@@ -1451,17 +1512,17 @@ func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule APIRule)
         </div>
         <div class="col">
            <a target="_blank" href="`)
-//line app/vmalert/web.qtpl:489
+//line app/vmalert/web.qtpl:506
 	qw422016.E().S(prefix)
-//line app/vmalert/web.qtpl:489
+//line app/vmalert/web.qtpl:506
 	qw422016.N().S(`groups#group-`)
-//line app/vmalert/web.qtpl:489
+//line app/vmalert/web.qtpl:506
 	qw422016.E().S(rule.GroupID)
-//line app/vmalert/web.qtpl:489
+//line app/vmalert/web.qtpl:506
 	qw422016.N().S(`">`)
-//line app/vmalert/web.qtpl:489
+//line app/vmalert/web.qtpl:506
 	qw422016.E().S(rule.GroupID)
-//line app/vmalert/web.qtpl:489
+//line app/vmalert/web.qtpl:506
 	qw422016.N().S(`</a>
         </div>
       </div>
@@ -1469,9 +1530,9 @@ func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule APIRule)
 
     <br>
     `)
-//line app/vmalert/web.qtpl:495
+//line app/vmalert/web.qtpl:512
 	if seriesFetchedWarning {
-//line app/vmalert/web.qtpl:495
+//line app/vmalert/web.qtpl:512
 		qw422016.N().S(`
     <div class="alert alert-warning" role="alert">
        <strong>Warning:</strong> some of updates have "Series fetched" equal to 0.<br>
@@ -1485,18 +1546,18 @@ func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule APIRule)
        See more details about this detection <a target="_blank" href="https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4039">here</a>.
     </div>
     `)
-//line app/vmalert/web.qtpl:507
+//line app/vmalert/web.qtpl:524
 	}
-//line app/vmalert/web.qtpl:507
+//line app/vmalert/web.qtpl:524
 	qw422016.N().S(`
     <div class="display-6 pb-3">Last `)
-//line app/vmalert/web.qtpl:508
+//line app/vmalert/web.qtpl:525
 	qw422016.N().D(len(rule.Updates))
-//line app/vmalert/web.qtpl:508
+//line app/vmalert/web.qtpl:525
 	qw422016.N().S(`/`)
-//line app/vmalert/web.qtpl:508
+//line app/vmalert/web.qtpl:525
 	qw422016.N().D(rule.MaxUpdates)
-//line app/vmalert/web.qtpl:508
+//line app/vmalert/web.qtpl:525
 	qw422016.N().S(` updates</span>:</div>
         <table class="table table-striped table-hover table-sm">
             <thead>
@@ -1504,13 +1565,13 @@ func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule APIRule)
                     <th scope="col" title="The time when event was created">Updated at</th>
                     <th scope="col" style="width: 10%" class="text-center" title="How many samples were returned">Samples</th>
                     `)
-//line app/vmalert/web.qtpl:514
+//line app/vmalert/web.qtpl:531
 	if seriesFetchedEnabled {
-//line app/vmalert/web.qtpl:514
+//line app/vmalert/web.qtpl:531
 		qw422016.N().S(`<th scope="col" style="width: 10%" class="text-center" title="How many series were scanned by datasource during the evaluation">Series fetched</th>`)
-//line app/vmalert/web.qtpl:514
+//line app/vmalert/web.qtpl:531
 	}
-//line app/vmalert/web.qtpl:514
+//line app/vmalert/web.qtpl:531
 	qw422016.N().S(`
                     <th scope="col" style="width: 10%" class="text-center" title="How many seconds request took">Duration</th>
                     <th scope="col" class="text-center" title="Time used for rule execution">Executed at</th>
@@ -1520,242 +1581,285 @@ func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule APIRule)
             <tbody>
 
      `)
-//line app/vmalert/web.qtpl:522
+//line app/vmalert/web.qtpl:539
 	for _, u := range rule.Updates {
-//line app/vmalert/web.qtpl:522
+//line app/vmalert/web.qtpl:539
 		qw422016.N().S(`
              <tr`)
-//line app/vmalert/web.qtpl:523
+//line app/vmalert/web.qtpl:540
 		if u.err != nil {
-//line app/vmalert/web.qtpl:523
+//line app/vmalert/web.qtpl:540
 			qw422016.N().S(` class="alert-danger"`)
-//line app/vmalert/web.qtpl:523
+//line app/vmalert/web.qtpl:540
 		}
-//line app/vmalert/web.qtpl:523
+//line app/vmalert/web.qtpl:540
 		qw422016.N().S(`>
                  <td>
                     <span class="badge bg-primary rounded-pill me-3" title="Updated at">`)
-//line app/vmalert/web.qtpl:525
+//line app/vmalert/web.qtpl:542
 		qw422016.E().S(u.time.Format(time.RFC3339))
-//line app/vmalert/web.qtpl:525
+//line app/vmalert/web.qtpl:542
 		qw422016.N().S(`</span>
                  </td>
                  <td class="text-center">`)
-//line app/vmalert/web.qtpl:527
+//line app/vmalert/web.qtpl:544
 		qw422016.N().D(u.samples)
-//line app/vmalert/web.qtpl:527
+//line app/vmalert/web.qtpl:544
 		qw422016.N().S(`</td>
                  `)
-//line app/vmalert/web.qtpl:528
+//line app/vmalert/web.qtpl:545
 		if seriesFetchedEnabled {
-//line app/vmalert/web.qtpl:528
+//line app/vmalert/web.qtpl:545
 			qw422016.N().S(`<td class="text-center">`)
-//line app/vmalert/web.qtpl:528
+//line app/vmalert/web.qtpl:545
 			if u.seriesFetched != nil {
-//line app/vmalert/web.qtpl:528
+//line app/vmalert/web.qtpl:545
 				qw422016.N().D(*u.seriesFetched)
-//line app/vmalert/web.qtpl:528
+//line app/vmalert/web.qtpl:545
 			}
-//line app/vmalert/web.qtpl:528
+//line app/vmalert/web.qtpl:545
 			qw422016.N().S(`</td>`)
-//line app/vmalert/web.qtpl:528
+//line app/vmalert/web.qtpl:545
 		}
-//line app/vmalert/web.qtpl:528
+//line app/vmalert/web.qtpl:545
 		qw422016.N().S(`
                  <td class="text-center">`)
-//line app/vmalert/web.qtpl:529
+//line app/vmalert/web.qtpl:546
 		qw422016.N().FPrec(u.duration.Seconds(), 3)
-//line app/vmalert/web.qtpl:529
+//line app/vmalert/web.qtpl:546
 		qw422016.N().S(`s</td>
                  <td class="text-center">`)
-//line app/vmalert/web.qtpl:530
+//line app/vmalert/web.qtpl:547
 		qw422016.E().S(u.at.Format(time.RFC3339))
-//line app/vmalert/web.qtpl:530
+//line app/vmalert/web.qtpl:547
 		qw422016.N().S(`</td>
                  <td>
                     <textarea class="curl-area" rows="1" onclick="this.focus();this.select()">`)
-//line app/vmalert/web.qtpl:532
+//line app/vmalert/web.qtpl:549
 		qw422016.E().S(u.curl)
-//line app/vmalert/web.qtpl:532
+//line app/vmalert/web.qtpl:549
 		qw422016.N().S(`</textarea>
                 </td>
              </tr>
           </li>
           `)
-//line app/vmalert/web.qtpl:536
+//line app/vmalert/web.qtpl:553
 		if u.err != nil {
-//line app/vmalert/web.qtpl:536
+//line app/vmalert/web.qtpl:553
 			qw422016.N().S(`
              <tr`)
-//line app/vmalert/web.qtpl:537
+//line app/vmalert/web.qtpl:554
 			if u.err != nil {
-//line app/vmalert/web.qtpl:537
+//line app/vmalert/web.qtpl:554
 				qw422016.N().S(` class="alert-danger"`)
-//line app/vmalert/web.qtpl:537
+//line app/vmalert/web.qtpl:554
 			}
-//line app/vmalert/web.qtpl:537
+//line app/vmalert/web.qtpl:554
 			qw422016.N().S(`>
                <td colspan="`)
-//line app/vmalert/web.qtpl:538
+//line app/vmalert/web.qtpl:555
 			if seriesFetchedEnabled {
-//line app/vmalert/web.qtpl:538
+//line app/vmalert/web.qtpl:555
 				qw422016.N().S(`6`)
-//line app/vmalert/web.qtpl:538
+//line app/vmalert/web.qtpl:555
 			} else {
-//line app/vmalert/web.qtpl:538
+//line app/vmalert/web.qtpl:555
 				qw422016.N().S(`5`)
-//line app/vmalert/web.qtpl:538
+//line app/vmalert/web.qtpl:555
 			}
-//line app/vmalert/web.qtpl:538
+//line app/vmalert/web.qtpl:555
 			qw422016.N().S(`">
                    <span class="alert-danger">`)
-//line app/vmalert/web.qtpl:539
+//line app/vmalert/web.qtpl:556
 			qw422016.E().V(u.err)
-//line app/vmalert/web.qtpl:539
+//line app/vmalert/web.qtpl:556
 			qw422016.N().S(`</span>
                </td>
              </tr>
           `)
-//line app/vmalert/web.qtpl:542
+//line app/vmalert/web.qtpl:559
 		}
-//line app/vmalert/web.qtpl:542
+//line app/vmalert/web.qtpl:559
 		qw422016.N().S(`
      `)
-//line app/vmalert/web.qtpl:543
+//line app/vmalert/web.qtpl:560
 	}
-//line app/vmalert/web.qtpl:543
+//line app/vmalert/web.qtpl:560
 	qw422016.N().S(`
 
     `)
-//line app/vmalert/web.qtpl:545
+//line app/vmalert/web.qtpl:562
 	tpl.StreamFooter(qw422016, r)
-//line app/vmalert/web.qtpl:545
+//line app/vmalert/web.qtpl:562
 	qw422016.N().S(`
 `)
-//line app/vmalert/web.qtpl:546
+//line app/vmalert/web.qtpl:563
 }
 
-//line app/vmalert/web.qtpl:546
+//line app/vmalert/web.qtpl:563
 func WriteRuleDetails(qq422016 qtio422016.Writer, r *http.Request, rule APIRule) {
-//line app/vmalert/web.qtpl:546
+//line app/vmalert/web.qtpl:563
 	qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmalert/web.qtpl:546
+//line app/vmalert/web.qtpl:563
 	StreamRuleDetails(qw422016, r, rule)
-//line app/vmalert/web.qtpl:546
+//line app/vmalert/web.qtpl:563
 	qt422016.ReleaseWriter(qw422016)
-//line app/vmalert/web.qtpl:546
+//line app/vmalert/web.qtpl:563
 }
 
-//line app/vmalert/web.qtpl:546
+//line app/vmalert/web.qtpl:563
 func RuleDetails(r *http.Request, rule APIRule) string {
-//line app/vmalert/web.qtpl:546
+//line app/vmalert/web.qtpl:563
 	qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmalert/web.qtpl:546
+//line app/vmalert/web.qtpl:563
 	WriteRuleDetails(qb422016, r, rule)
-//line app/vmalert/web.qtpl:546
+//line app/vmalert/web.qtpl:563
 	qs422016 := string(qb422016.B)
-//line app/vmalert/web.qtpl:546
+//line app/vmalert/web.qtpl:563
 	qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmalert/web.qtpl:546
+//line app/vmalert/web.qtpl:563
 	return qs422016
-//line app/vmalert/web.qtpl:546
+//line app/vmalert/web.qtpl:563
 }
 
-//line app/vmalert/web.qtpl:550
+//line app/vmalert/web.qtpl:567
 func streambadgeState(qw422016 *qt422016.Writer, state string) {
-//line app/vmalert/web.qtpl:550
+//line app/vmalert/web.qtpl:567
 	qw422016.N().S(`
 `)
-//line app/vmalert/web.qtpl:552
+//line app/vmalert/web.qtpl:569
 	badgeClass := "bg-warning text-dark"
 	if state == "firing" {
 		badgeClass = "bg-danger"
 	}
 
-//line app/vmalert/web.qtpl:556
+//line app/vmalert/web.qtpl:573
 	qw422016.N().S(`
 <span class="badge `)
-//line app/vmalert/web.qtpl:557
+//line app/vmalert/web.qtpl:574
 	qw422016.E().S(badgeClass)
-//line app/vmalert/web.qtpl:557
+//line app/vmalert/web.qtpl:574
 	qw422016.N().S(`">`)
-//line app/vmalert/web.qtpl:557
+//line app/vmalert/web.qtpl:574
 	qw422016.E().S(state)
-//line app/vmalert/web.qtpl:557
+//line app/vmalert/web.qtpl:574
 	qw422016.N().S(`</span>
 `)
-//line app/vmalert/web.qtpl:558
+//line app/vmalert/web.qtpl:575
 }
 
-//line app/vmalert/web.qtpl:558
+//line app/vmalert/web.qtpl:575
 func writebadgeState(qq422016 qtio422016.Writer, state string) {
-//line app/vmalert/web.qtpl:558
+//line app/vmalert/web.qtpl:575
 	qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmalert/web.qtpl:558
+//line app/vmalert/web.qtpl:575
 	streambadgeState(qw422016, state)
-//line app/vmalert/web.qtpl:558
+//line app/vmalert/web.qtpl:575
 	qt422016.ReleaseWriter(qw422016)
-//line app/vmalert/web.qtpl:558
+//line app/vmalert/web.qtpl:575
 }
 
-//line app/vmalert/web.qtpl:558
+//line app/vmalert/web.qtpl:575
 func badgeState(state string) string {
-//line app/vmalert/web.qtpl:558
+//line app/vmalert/web.qtpl:575
 	qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmalert/web.qtpl:558
+//line app/vmalert/web.qtpl:575
 	writebadgeState(qb422016, state)
-//line app/vmalert/web.qtpl:558
+//line app/vmalert/web.qtpl:575
 	qs422016 := string(qb422016.B)
-//line app/vmalert/web.qtpl:558
+//line app/vmalert/web.qtpl:575
 	qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmalert/web.qtpl:558
+//line app/vmalert/web.qtpl:575
 	return qs422016
-//line app/vmalert/web.qtpl:558
+//line app/vmalert/web.qtpl:575
 }
 
-//line app/vmalert/web.qtpl:560
+//line app/vmalert/web.qtpl:577
 func streambadgeRestored(qw422016 *qt422016.Writer) {
-//line app/vmalert/web.qtpl:560
+//line app/vmalert/web.qtpl:577
 	qw422016.N().S(`
 <span class="badge bg-warning text-dark" title="Alert state was restored after the service restart from remote storage">restored</span>
 `)
-//line app/vmalert/web.qtpl:562
+//line app/vmalert/web.qtpl:579
 }
 
-//line app/vmalert/web.qtpl:562
+//line app/vmalert/web.qtpl:579
 func writebadgeRestored(qq422016 qtio422016.Writer) {
-//line app/vmalert/web.qtpl:562
+//line app/vmalert/web.qtpl:579
 	qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmalert/web.qtpl:562
+//line app/vmalert/web.qtpl:579
 	streambadgeRestored(qw422016)
-//line app/vmalert/web.qtpl:562
+//line app/vmalert/web.qtpl:579
 	qt422016.ReleaseWriter(qw422016)
-//line app/vmalert/web.qtpl:562
+//line app/vmalert/web.qtpl:579
 }
 
-//line app/vmalert/web.qtpl:562
+//line app/vmalert/web.qtpl:579
 func badgeRestored() string {
-//line app/vmalert/web.qtpl:562
+//line app/vmalert/web.qtpl:579
 	qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmalert/web.qtpl:562
+//line app/vmalert/web.qtpl:579
 	writebadgeRestored(qb422016)
-//line app/vmalert/web.qtpl:562
+//line app/vmalert/web.qtpl:579
 	qs422016 := string(qb422016.B)
-//line app/vmalert/web.qtpl:562
+//line app/vmalert/web.qtpl:579
 	qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmalert/web.qtpl:562
+//line app/vmalert/web.qtpl:579
 	return qs422016
-//line app/vmalert/web.qtpl:562
+//line app/vmalert/web.qtpl:579
 }
 
-//line app/vmalert/web.qtpl:564
+//line app/vmalert/web.qtpl:581
+func streambadgeStabilizing(qw422016 *qt422016.Writer) {
+//line app/vmalert/web.qtpl:581
+	qw422016.N().S(`
+<span class="badge bg-warning text-dark" title="This firing state is kept because of `)
+//line app/vmalert/web.qtpl:581
+	qw422016.N().S("`")
+//line app/vmalert/web.qtpl:581
+	qw422016.N().S(`keep_firing_for`)
+//line app/vmalert/web.qtpl:581
+	qw422016.N().S("`")
+//line app/vmalert/web.qtpl:581
+	qw422016.N().S(`">stabilizing</span>
+`)
+//line app/vmalert/web.qtpl:583
+}
+
+//line app/vmalert/web.qtpl:583
+func writebadgeStabilizing(qq422016 qtio422016.Writer) {
+//line app/vmalert/web.qtpl:583
+	qw422016 := qt422016.AcquireWriter(qq422016)
+//line app/vmalert/web.qtpl:583
+	streambadgeStabilizing(qw422016)
+//line app/vmalert/web.qtpl:583
+	qt422016.ReleaseWriter(qw422016)
+//line app/vmalert/web.qtpl:583
+}
+
+//line app/vmalert/web.qtpl:583
+func badgeStabilizing() string {
+//line app/vmalert/web.qtpl:583
+	qb422016 := qt422016.AcquireByteBuffer()
+//line app/vmalert/web.qtpl:583
+	writebadgeStabilizing(qb422016)
+//line app/vmalert/web.qtpl:583
+	qs422016 := string(qb422016.B)
+//line app/vmalert/web.qtpl:583
+	qt422016.ReleaseByteBuffer(qb422016)
+//line app/vmalert/web.qtpl:583
+	return qs422016
+//line app/vmalert/web.qtpl:583
+}
+
+//line app/vmalert/web.qtpl:585
 func streamseriesFetchedWarn(qw422016 *qt422016.Writer, r APIRule) {
-//line app/vmalert/web.qtpl:564
+//line app/vmalert/web.qtpl:585
 	qw422016.N().S(`
 `)
-//line app/vmalert/web.qtpl:565
+//line app/vmalert/web.qtpl:586
 	if isNoMatch(r) {
-//line app/vmalert/web.qtpl:565
+//line app/vmalert/web.qtpl:586
 		qw422016.N().S(`
 <svg xmlns="http://www.w3.org/2000/svg"
     data-bs-toggle="tooltip"
@@ -1766,41 +1870,41 @@ func streamseriesFetchedWarn(qw422016 *qt422016.Writer, r APIRule) {
        <path d="M8 16A8 8 0 1 0 8 0a8 8 0 0 0 0 16zm.93-9.412-1 4.705c-.07.34.029.533.304.533.194 0 .487-.07.686-.246l-.088.416c-.287.346-.92.598-1.465.598-.703 0-1.002-.422-.808-1.319l.738-3.468c.064-.293.006-.399-.287-.47l-.451-.081.082-.381 2.29-.287zM8 5.5a1 1 0 1 1 0-2 1 1 0 0 1 0 2z"/>
 </svg>
 `)
-//line app/vmalert/web.qtpl:574
+//line app/vmalert/web.qtpl:595
 	}
-//line app/vmalert/web.qtpl:574
+//line app/vmalert/web.qtpl:595
 	qw422016.N().S(`
 `)
-//line app/vmalert/web.qtpl:575
+//line app/vmalert/web.qtpl:596
 }
 
-//line app/vmalert/web.qtpl:575
+//line app/vmalert/web.qtpl:596
 func writeseriesFetchedWarn(qq422016 qtio422016.Writer, r APIRule) {
-//line app/vmalert/web.qtpl:575
+//line app/vmalert/web.qtpl:596
 	qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmalert/web.qtpl:575
+//line app/vmalert/web.qtpl:596
 	streamseriesFetchedWarn(qw422016, r)
-//line app/vmalert/web.qtpl:575
+//line app/vmalert/web.qtpl:596
 	qt422016.ReleaseWriter(qw422016)
-//line app/vmalert/web.qtpl:575
+//line app/vmalert/web.qtpl:596
 }
 
-//line app/vmalert/web.qtpl:575
+//line app/vmalert/web.qtpl:596
 func seriesFetchedWarn(r APIRule) string {
-//line app/vmalert/web.qtpl:575
+//line app/vmalert/web.qtpl:596
 	qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmalert/web.qtpl:575
+//line app/vmalert/web.qtpl:596
 	writeseriesFetchedWarn(qb422016, r)
-//line app/vmalert/web.qtpl:575
+//line app/vmalert/web.qtpl:596
 	qs422016 := string(qb422016.B)
-//line app/vmalert/web.qtpl:575
+//line app/vmalert/web.qtpl:596
 	qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmalert/web.qtpl:575
+//line app/vmalert/web.qtpl:596
 	return qs422016
-//line app/vmalert/web.qtpl:575
+//line app/vmalert/web.qtpl:596
 }
 
-//line app/vmalert/web.qtpl:578
+//line app/vmalert/web.qtpl:599
 func isNoMatch(r APIRule) bool {
 	return r.LastSamples == 0 && r.LastSeriesFetched != nil && *r.LastSeriesFetched == 0
 }
diff --git a/app/vmalert/web_types.go b/app/vmalert/web_types.go
index f9a162769d..4f4a8f911b 100644
--- a/app/vmalert/web_types.go
+++ b/app/vmalert/web_types.go
@@ -32,6 +32,9 @@ type APIAlert struct {
 	SourceLink string `json:"source"`
 	// Restored shows whether Alert's state was restored on restart
 	Restored bool `json:"restored"`
+	// Stabilizing shows whether the alert's firing state is kept
+	// by `keep_firing_for` after its expression stopped returning results
+	Stabilizing bool `json:"stabilizing"`
 }
 
 // WebLink returns a link to the alert which can be used in UI.
@@ -96,9 +99,11 @@ type APIRule struct {
 	// Query represents Rule's `expression` field
 	Query string `json:"query"`
 	// Duration represents Rule's `for` field
-	Duration    float64           `json:"duration"`
-	Labels      map[string]string `json:"labels,omitempty"`
-	Annotations map[string]string `json:"annotations,omitempty"`
+	Duration float64 `json:"duration"`
+	// Alert will continue firing for this long even when the alerting expression no longer has results.
+	KeepFiringFor float64           `json:"keep_firing_for"`
+	Labels        map[string]string `json:"labels,omitempty"`
+	Annotations   map[string]string `json:"annotations,omitempty"`
 	// LastError contains the error faced while executing the rule.
 	LastError string `json:"lastError"`
 	// EvaluationTime is the time taken to completely evaluate the rule in float seconds.
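
For reference, a minimal Go sketch of how a client might pick up the two new JSON fields introduced above; the payloads are hand-written samples based on the field names in this diff, not captured vmalert responses:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// rule mirrors the subset of vmalert's APIRule touched by this change.
type rule struct {
	Query         string  `json:"query"`
	Duration      float64 `json:"duration"`
	KeepFiringFor float64 `json:"keep_firing_for"`
}

// alert mirrors the subset of vmalert's APIAlert touched by this change.
type alert struct {
	State       string `json:"state"`
	Restored    bool   `json:"restored"`
	Stabilizing bool   `json:"stabilizing"`
}

func main() {
	var r rule
	if err := json.Unmarshal([]byte(`{"query":"up == 0","duration":300,"keep_firing_for":600}`), &r); err != nil {
		panic(err)
	}
	// The alert keeps firing for KeepFiringFor seconds after the expression stops matching.
	fmt.Printf("keep firing for %.0fs after %q stops matching\n", r.KeepFiringFor, r.Query)

	var a alert
	if err := json.Unmarshal([]byte(`{"state":"firing","restored":false,"stabilizing":true}`), &a); err != nil {
		panic(err)
	}
	// Stabilizing reports that the firing state is currently held by keep_firing_for.
	fmt.Println("stabilizing:", a.Stabilizing)
}
```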
diff --git a/app/vminsert/main.go b/app/vminsert/main.go
index 39ee611582..ee86f0eb5d 100644
--- a/app/vminsert/main.go
+++ b/app/vminsert/main.go
@@ -15,6 +15,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/graphite"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/influx"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/native"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentelemetry"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentsdb"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentsdbhttp"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/prometheusimport"
@@ -210,6 +211,15 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 		addInfluxResponseHeaders(w)
 		influxutils.WriteDatabaseNames(w)
 		return true
+	case "/opentelemetry/api/v1/push":
+		opentelemetryPushRequests.Inc()
+		if err := opentelemetry.InsertHandler(r); err != nil {
+			opentelemetryPushErrors.Inc()
+			httpserver.Errorf(w, r, "%s", err)
+			return true
+		}
+		w.WriteHeader(http.StatusOK)
+		return true
 	case "/datadog/api/v1/series":
 		datadogWriteRequests.Inc()
 		if err := datadog.InsertHandlerForHTTP(r); err != nil {
@@ -344,6 +354,9 @@ var (
 	datadogIntakeRequests   = metrics.NewCounter(`vm_http_requests_total{path="/datadog/intake", protocol="datadog"}`)
 	datadogMetadataRequests = metrics.NewCounter(`vm_http_requests_total{path="/datadog/api/v1/metadata", protocol="datadog"}`)
 
+	opentelemetryPushRequests = metrics.NewCounter(`vm_http_requests_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)
+	opentelemetryPushErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`)
+
 	promscrapeTargetsRequests          = metrics.NewCounter(`vm_http_requests_total{path="/targets"}`)
 	promscrapeServiceDiscoveryRequests = metrics.NewCounter(`vm_http_requests_total{path="/service-discovery"}`)
 
diff --git a/app/vminsert/opentelemetry/request_handler.go b/app/vminsert/opentelemetry/request_handler.go
new file mode 100644
index 0000000000..245b63f978
--- /dev/null
+++ b/app/vminsert/opentelemetry/request_handler.go
@@ -0,0 +1,74 @@
+package opentelemetry
+
+import (
+	"net/http"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/stream"
+	"github.com/VictoriaMetrics/metrics"
+)
+
+var (
+	rowsInserted  = metrics.NewCounter(`vm_rows_inserted_total{type="opentelemetry"}`)
+	rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="opentelemetry"}`)
+)
+
+// InsertHandler processes opentelemetry metrics.
+func InsertHandler(req *http.Request) error {
+	extraLabels, err := parserCommon.GetExtraLabels(req)
+	if err != nil {
+		return err
+	}
+	isGzipped := req.Header.Get("Content-Encoding") == "gzip"
+	return stream.ParseStream(req.Body, isGzipped, func(tss []prompbmarshal.TimeSeries) error {
+		return insertRows(tss, extraLabels)
+	})
+}
+
+func insertRows(tss []prompbmarshal.TimeSeries, extraLabels []prompbmarshal.Label) error {
+	ctx := common.GetInsertCtx()
+	defer common.PutInsertCtx(ctx)
+
+	rowsLen := 0
+	for i := range tss {
+		rowsLen += len(tss[i].Samples)
+	}
+	ctx.Reset(rowsLen)
+	rowsTotal := 0
+	hasRelabeling := relabel.HasRelabeling()
+	for i := range tss {
+		ts := &tss[i]
+		rowsTotal += len(ts.Samples)
+		ctx.Labels = ctx.Labels[:0]
+		for _, label := range ts.Labels {
+			ctx.AddLabel(label.Name, label.Value)
+		}
+		for _, label := range extraLabels {
+			ctx.AddLabel(label.Name, label.Value)
+		}
+		if hasRelabeling {
+			ctx.ApplyRelabeling()
+		}
+		if len(ctx.Labels) == 0 {
+			// Skip metric without labels.
+			continue
+		}
+		ctx.SortLabelsIfNeeded()
+		var metricNameRaw []byte
+		var err error
+		samples := ts.Samples
+		for i := range samples {
+			r := &samples[i]
+			metricNameRaw, err = ctx.WriteDataPointExt(metricNameRaw, ctx.Labels, r.Timestamp, r.Value)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	rowsInserted.Add(rowsTotal)
+	rowsPerInsert.Update(float64(rowsTotal))
+	return ctx.FlushBufs()
+}
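
For reference, a minimal client sketch (not part of this diff) that pushes a single gauge sample to the new endpoint. It assumes the `go.opentelemetry.io/proto/otlp` protobuf bindings and a single-node VictoriaMetrics listening on `localhost:8428`:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"net/http"
	"time"

	collectorpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
	metricspb "go.opentelemetry.io/proto/otlp/metrics/v1"
	"google.golang.org/protobuf/proto"
)

func main() {
	// One gauge data point in the OpenTelemetry metrics data model.
	req := &collectorpb.ExportMetricsServiceRequest{
		ResourceMetrics: []*metricspb.ResourceMetrics{{
			ScopeMetrics: []*metricspb.ScopeMetrics{{
				Metrics: []*metricspb.Metric{{
					Name: "demo_temperature_celsius",
					Data: &metricspb.Metric_Gauge{Gauge: &metricspb.Gauge{
						DataPoints: []*metricspb.NumberDataPoint{{
							Attributes: []*commonpb.KeyValue{{
								Key:   "room",
								Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "kitchen"}},
							}},
							TimeUnixNano: uint64(time.Now().UnixNano()),
							Value:        &metricspb.NumberDataPoint_AsDouble{AsDouble: 21.5},
						}},
					}},
				}},
			}},
		}},
	}
	body, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}
	// Gzip is optional; InsertHandler detects it via the Content-Encoding header.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(body); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
	httpReq, err := http.NewRequest(http.MethodPost, "http://localhost:8428/opentelemetry/api/v1/push", &buf)
	if err != nil {
		panic(err)
	}
	httpReq.Header.Set("Content-Encoding", "gzip")
	resp, err := http.DefaultClient.Do(httpReq)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```

Sending uncompressed protobuf works too: skip the gzip step and the `Content-Encoding` header, matching the `isGzipped` check in `InsertHandler` above.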
diff --git a/deployment/docker/docker-compose-cluster.yml b/deployment/docker/docker-compose-cluster.yml
index 83cdf8a77f..d66ba0c76f 100644
--- a/deployment/docker/docker-compose-cluster.yml
+++ b/deployment/docker/docker-compose-cluster.yml
@@ -2,7 +2,7 @@ version: '3.5'
 services:
   vmagent:
     container_name: vmagent
-    image: victoriametrics/vmagent:v1.91.3
+    image: victoriametrics/vmagent:v1.92.0
     depends_on:
       - "vminsert"
     ports:
@@ -32,7 +32,7 @@ services:
 
   vmstorage-1:
     container_name: vmstorage-1
-    image: victoriametrics/vmstorage:v1.91.3-cluster
+    image: victoriametrics/vmstorage:v1.92.0-cluster
     ports:
       - 8482
       - 8400
@@ -44,7 +44,7 @@ services:
     restart: always
   vmstorage-2:
     container_name: vmstorage-2
-    image: victoriametrics/vmstorage:v1.91.3-cluster
+    image: victoriametrics/vmstorage:v1.92.0-cluster
     ports:
       - 8482
       - 8400
@@ -56,7 +56,7 @@ services:
     restart: always
   vminsert:
     container_name: vminsert
-    image: victoriametrics/vminsert:v1.91.3-cluster
+    image: victoriametrics/vminsert:v1.92.0-cluster
     depends_on:
       - "vmstorage-1"
       - "vmstorage-2"
@@ -68,7 +68,7 @@ services:
     restart: always
   vmselect:
     container_name: vmselect
-    image: victoriametrics/vmselect:v1.91.3-cluster
+    image: victoriametrics/vmselect:v1.92.0-cluster
     depends_on:
       - "vmstorage-1"
       - "vmstorage-2"
@@ -82,7 +82,7 @@ services:
 
   vmalert:
     container_name: vmalert
-    image: victoriametrics/vmalert:v1.91.3
+    image: victoriametrics/vmalert:v1.92.0
     depends_on:
       - "vmselect"
     ports:
diff --git a/deployment/docker/docker-compose.yml b/deployment/docker/docker-compose.yml
index eed8b26436..850bfd22d9 100644
--- a/deployment/docker/docker-compose.yml
+++ b/deployment/docker/docker-compose.yml
@@ -2,7 +2,7 @@ version: "3.5"
 services:
   vmagent:
     container_name: vmagent
-    image: victoriametrics/vmagent:v1.91.3
+    image: victoriametrics/vmagent:v1.92.0
     depends_on:
       - "victoriametrics"
     ports:
@@ -18,7 +18,7 @@ services:
     restart: always
   victoriametrics:
     container_name: victoriametrics
-    image: victoriametrics/victoria-metrics:v1.91.3
+    image: victoriametrics/victoria-metrics:v1.92.0
     ports:
       - 8428:8428
       - 8089:8089
@@ -56,7 +56,7 @@ services:
     restart: always
   vmalert:
     container_name: vmalert
-    image: victoriametrics/vmalert:v1.91.3
+    image: victoriametrics/vmalert:v1.92.0
     depends_on:
       - "victoriametrics"
       - "alertmanager"
diff --git a/deployment/logs-benchmark/docker-compose.yml b/deployment/logs-benchmark/docker-compose.yml
index 9ed0de234f..fa28a1e22c 100644
--- a/deployment/logs-benchmark/docker-compose.yml
+++ b/deployment/logs-benchmark/docker-compose.yml
@@ -105,7 +105,7 @@ services:
       - '--config=/config.yml'
 
   vmsingle:
-    image: victoriametrics/victoria-metrics:v1.91.3
+    image: victoriametrics/victoria-metrics:v1.92.0
     ports:
       - '8428:8428'
     command:
diff --git a/deployment/marketplace/digitialocean/one-click-droplet/RELEASE_GUIDE.md b/deployment/marketplace/digitialocean/one-click-droplet/RELEASE_GUIDE.md
index f10e1a7099..e8e2d6f90f 100644
--- a/deployment/marketplace/digitialocean/one-click-droplet/RELEASE_GUIDE.md
+++ b/deployment/marketplace/digitialocean/one-click-droplet/RELEASE_GUIDE.md
@@ -8,7 +8,7 @@
 4. Set the `DIGITALOCEAN_API_TOKEN` and `VM_VERSION` variables for the `packer` environment, then run `make` as in the example below:
 
 ```console
-make release-victoria-metrics-digitalocean-oneclick-droplet DIGITALOCEAN_API_TOKEN="dop_v23_2e46f4759ceeeba0d0248" VM_VERSION="1.91.3"
+make release-victoria-metrics-digitalocean-oneclick-droplet DIGITALOCEAN_API_TOKEN="dop_v23_2e46f4759ceeeba0d0248" VM_VERSION="1.92.0"
 ```
 
 
diff --git a/deployment/marketplace/digitialocean/one-click-droplet/files/etc/update-motd.d/99-one-click b/deployment/marketplace/digitialocean/one-click-droplet/files/etc/update-motd.d/99-one-click
index 4b1a97726a..8a0c0744fb 100755
--- a/deployment/marketplace/digitialocean/one-click-droplet/files/etc/update-motd.d/99-one-click
+++ b/deployment/marketplace/digitialocean/one-click-droplet/files/etc/update-motd.d/99-one-click
@@ -19,8 +19,8 @@ On the server:
   * VictoriaMetrics is running on ports: 8428, 8089, 4242, 2003 and they are bound to the local interface.
 
 ********************************************************************************
-  # This image includes 1.91.3 version of VictoriaMetrics.
-  # See Release notes https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.91.3
+  # This image includes VictoriaMetrics version 1.92.0.
+  # See Release notes https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.92.0
 
   # Welcome to VictoriaMetrics droplet!
 
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index b59cd20f12..5a51b74204 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -24,8 +24,12 @@ The following `tip` changes can be tested by building VictoriaMetrics components
 
 ## tip
 
-**Update note: starting from this release, [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html) writes
-to the configured storage the following samples by default:
+## [v1.92.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.92.0)
+
+Released at 2023-07-27
+
+**Update note**: starting from this release, [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html) writes
+the following samples to the configured remote storage by default:
 
 - aggregated samples;
 - the original input samples, which match zero `match` options from the provided [config](https://docs.victoriametrics.com/stream-aggregation.html#stream-aggregation-config).
@@ -34,7 +38,7 @@ Previously only aggregated samples were written to the storage by default.
 The previous behavior can be restored in the following ways:
 
 - by passing `-streamAggr.dropInput` command-line flag to single-node VictoriaMetrics;
-- by passing `-remoteWrite.streamAggr.dropInput` command-line flag per each configured `-remoteWrite.streamAggr.config` at `vmagent`.**
+- by passing `-remoteWrite.streamAggr.dropInput` command-line flag per each configured `-remoteWrite.streamAggr.config` at `vmagent`.
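
For illustration, the restored behavior could be enabled like this (binary names, config paths and the remote write URL below are placeholders):

```console
# single-node VictoriaMetrics: drop the original input samples after aggregation
./victoria-metrics -streamAggr.config=stream-aggr.yml -streamAggr.dropInput

# vmagent: drop input samples per configured stream aggregation config
./vmagent -remoteWrite.url=http://victoriametrics:8428/api/v1/write \
  -remoteWrite.streamAggr.config=stream-aggr.yml \
  -remoteWrite.streamAggr.dropInput
```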
 
 * SECURITY: upgrade base docker image (alpine) from 3.18.0 to 3.18.2. See [alpine 3.18.2 release notes](https://alpinelinux.org/posts/Alpine-3.15.9-3.16.6-3.17.4-3.18.2-released.html).
 * SECURITY: upgrade Go builder from Go1.20.5 to Go1.20.6. See [the list of issues addressed in Go1.20.6](https://github.com/golang/go/issues?q=milestone%3AGo1.20.6+label%3ACherryPickApproved).
@@ -49,6 +53,7 @@ The previous behavior can be restored in the following ways:
   - `WITH (f(window, step, off) = m[window:step] offset off) f(5m, 10s, 1h)` is automatically transformed to `m[5m:10s] offset 1h`
   Thanks to @lujiajing1126 for the initial idea and [implementation](https://github.com/VictoriaMetrics/metricsql/pull/13). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4025).
 * FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): added a new page with the list of currently running queries. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4598) and [these docs](https://docs.victoriametrics.com/#active-queries).
+* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for data ingestion via [OpenTelemetry protocol](https://opentelemetry.io/docs/reference/specification/metrics/). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-data-via-opentelemetry), [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2424) and [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2570).
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow sharding outgoing time series among the configured remote storage systems. This can be useful for building horizontally scalable [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html), when samples for the same time series must be aggregated by the same `vmagent` instance at the second level. See [these docs](https://docs.victoriametrics.com/vmagent.html#sharding-among-remote-storages) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4637) for details.
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow configuring staleness interval in [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html) config. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4667) for details.
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow specifying a list of [series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering) inside the `if` option of relabeling rules. The corresponding relabeling rule is executed when at least one series selector matches. See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling-enhancements).
@@ -66,7 +71,8 @@ The previous behavior can be restored in the following ways:
 * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): allow disabling the `step` param attached to [instant queries](https://docs.victoriametrics.com/keyConcepts.html#instant-query). This might be useful when using vmalert with datasources that, unlike VictoriaMetrics, do not support this param. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4573) for details.
 * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): support "blackholing" alerting notifications when the `-notifier.blackhole` command-line flag is set. Enable this flag if you want vmalert to evaluate alerting rules without sending any notifications to external receivers (e.g. alertmanager). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4122) for details. Thanks to @venkatbvc for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4639).
 * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add unit test for alerting and recording rules, see more [details](https://docs.victoriametrics.com/vmalert.html#unit-testing-for-rules) here. Thanks to @Haleygo for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4596).
-* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): allow overriding default GET params for rules  with `graphite` datasource type, in the same way as it happens for `prometheus` type. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4685). 
+* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): allow overriding default GET params for rules with `graphite` datasource type, in the same way as it happens for `prometheus` type. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4685).
+* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): support `keep_firing_for` field for alerting rules. See docs updated [here](https://docs.victoriametrics.com/vmalert.html#alerting-rules) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4529). Thanks to @Haleygo for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4669).
 * FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): expose `vmauth_user_request_duration_seconds` and `vmauth_unauthorized_user_request_duration_seconds` summary metrics for measuring requests latency per user.
 * FEATURE: [vmbackup](https://docs.victoriametrics.com/vmbackup.html): show backup progress percentage in log during backup uploading. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4460).
 * FEATURE: [vmrestore](https://docs.victoriametrics.com/vmrestore.html): show restoring progress percentage in log during backup downloading. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4460).
@@ -86,7 +92,9 @@ The previous behavior can be restored in the following ways:
 * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): correctly calculate evaluation time for rules. Before, there was a low probability of a discrepancy between the actual time and the rule evaluation time if the evaluation interval was lower than the execution time of rules within the group.
 * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): reset the evaluation timestamp after modifying the group interval. Before, there could be a latency in rule evaluation time.
 * BUGFIX: vmselect: fix timestamp alignment for Prometheus querying API if time argument is less than 10m from the beginning of Unix epoch.
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): close HTTP connections to [service discovery](https://docs.victoriametrics.com/sd_configs.html) servers when they are no longer needed. This should prevent possible connection exhaustion in some cases. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4724).
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): do not show [relabel debug](https://docs.victoriametrics.com/vmagent.html#relabel-debug) links at the `/targets` page when `vmagent` runs with the `-promscrape.dropOriginalLabels` command-line flag, since it doesn't have the original labels needed for relabel debug. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4597).
+* BUGFIX: vminsert: fix decoding of label values containing a slash when accepting data via [pushgateway protocol](https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format). This fixes compatibility with the Prometheus Go client. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4692).
 * BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly parse binary operations with reserved words on the right side such as `foo + (on{bar="baz"})`. Previously such queries could lead to panic. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4422).
 * BUGFIX: [Official Grafana dashboards for VictoriaMetrics](https://grafana.com/orgs/victoriametrics): display cache usage for all components on panel `Cache usage % by type` for cluster dashboard. Before, only vmstorage caches were shown.
 
diff --git a/docs/Cluster-VictoriaMetrics.md b/docs/Cluster-VictoriaMetrics.md
index a365ba6fd8..af7289dbbe 100644
--- a/docs/Cluster-VictoriaMetrics.md
+++ b/docs/Cluster-VictoriaMetrics.md
@@ -335,13 +335,14 @@ Check practical examples of VictoriaMetrics API [here](https://docs.victoriametr
     The `<accountID>` can be set to `multitenant` string, e.g. `http://<vminsert>:8480/insert/multitenant/<suffix>`. Such urls accept data from multiple tenants
     specified via `vm_account_id` and `vm_project_id` labels. See [multitenancy via labels](#multitenancy-via-labels) for more details.
   - `<suffix>` may have the following values:
-    - `prometheus` and `prometheus/api/v1/write` - for inserting data with [Prometheus remote write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write).
+    - `prometheus` and `prometheus/api/v1/write` - for ingesting data with [Prometheus remote write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write).
     - `prometheus/api/v1/import` - for importing data obtained via `api/v1/export` at `vmselect` (see below), JSON line format.
     - `prometheus/api/v1/import/native` - for importing data obtained via `api/v1/export/native` on `vmselect` (see below).
     - `prometheus/api/v1/import/csv` - for importing arbitrary CSV data. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-csv-data) for details.
     - `prometheus/api/v1/import/prometheus` - for importing data in [Prometheus text exposition format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format) and in [OpenMetrics format](https://github.com/OpenObservability/OpenMetrics/blob/master/specification/OpenMetrics.md). This endpoint also supports [Pushgateway protocol](https://github.com/prometheus/pushgateway#url). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-prometheus-exposition-format) for details.
-    - `datadog/api/v1/series` - for inserting data with [DataDog submit metrics API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent) for details.
-    - `influx/write` and `influx/api/v2/write` - for inserting data with [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details.
+    - `opentelemetry/api/v1/push` - for ingesting data via [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-data-via-opentelemetry).
+    - `datadog/api/v1/series` - for ingesting data with [DataDog submit metrics API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent) for details.
+    - `influx/write` and `influx/api/v2/write` - for ingesting data with [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details.
     - `opentsdb/api/put` - for accepting [OpenTSDB HTTP /api/put requests](http://opentsdb.net/docs/build/html/api_http/put.html). This handler is disabled by default. It is exposed on a distinct TCP address set via `-opentsdbHTTPListenAddr` command-line flag. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-opentsdb-data-via-http-apiput-requests) for details.
 
 - URLs for [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/): `http://<vmselect>:8481/select/<accountID>/prometheus/<suffix>`, where:
diff --git a/docs/README.md b/docs/README.md
index 17d13545cb..52d1de8510 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -89,6 +89,7 @@ VictoriaMetrics has the following prominent features:
   * [Arbitrary CSV data](#how-to-import-csv-data).
   * [Native binary format](#how-to-import-data-in-native-format).
   * [DataDog agent or DogStatsD](#how-to-send-data-from-datadog-agent).
+  * [OpenTelemetry metrics format](#sending-data-via-opentelemetry).
 * It supports powerful [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html), which can be used as a [statsd](https://github.com/statsd/statsd) alternative.
 * It supports metrics [relabeling](#relabeling).
 * It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and
@@ -1176,6 +1177,7 @@ Additionally, VictoriaMetrics can accept metrics via the following popular data
 * DataDog `submit metrics` API. See [these docs](#how-to-send-data-from-datadog-agent) for details.
 * InfluxDB line protocol. See [these docs](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details.
 * Graphite plaintext protocol. See [these docs](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) for details.
+* OpenTelemetry http API. See [these docs](#sending-data-via-opentelemetry) for details.
 * OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details.
 * OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details.
 * `/api/v1/import` for importing data obtained from [/api/v1/export](#how-to-export-data-in-json-line-format).
@@ -1359,6 +1361,13 @@ Note that it could be required to flush response cache after importing historica
 
 VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter).
 
+## Sending data via OpenTelemetry
+
+VictoriaMetrics supports data ingestion via the [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md) at the `/opentelemetry/api/v1/push` path.
+
+VictoriaMetrics expects `protobuf`-encoded requests at `/opentelemetry/api/v1/push`.
+Set the `Content-Encoding: gzip` HTTP request header when sending gzip-compressed data to this endpoint.
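+
+For example, here is a minimal sketch of pushing metrics from a Go application through the OpenTelemetry SDK's OTLP/HTTP exporter. Package paths and options below match `go.opentelemetry.io/otel` v1.16 / metric v0.39 and may differ in other SDK versions; the VictoriaMetrics address `localhost:8428` is an assumption:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	ctx := context.Background()
+	// Point the exporter at VictoriaMetrics instead of an OTLP collector.
+	exp, err := otlpmetrichttp.New(ctx,
+		otlpmetrichttp.WithEndpoint("localhost:8428"),
+		otlpmetrichttp.WithURLPath("/opentelemetry/api/v1/push"),
+		otlpmetrichttp.WithInsecure(),
+		// The exporter gzip-compresses payloads and sets the Content-Encoding: gzip header.
+		otlpmetrichttp.WithCompression(otlpmetrichttp.GzipCompression),
+	)
+	if err != nil {
+		log.Fatalf("cannot create OTLP exporter: %s", err)
+	}
+	mp := sdkmetric.NewMeterProvider(
+		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp, sdkmetric.WithInterval(10*time.Second))),
+	)
+	defer func() { _ = mp.Shutdown(ctx) }()
+
+	counter, err := mp.Meter("example").Float64Counter("example_requests_total")
+	if err != nil {
+		log.Fatalf("cannot create counter: %s", err)
+	}
+	counter.Add(ctx, 1)
+	time.Sleep(15 * time.Second) // let the periodic reader export at least once
+}
+```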
+
 ## Relabeling
 
 VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points
diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md
index cdd5c3e376..2c3517e9b8 100644
--- a/docs/Single-server-VictoriaMetrics.md
+++ b/docs/Single-server-VictoriaMetrics.md
@@ -97,6 +97,7 @@ VictoriaMetrics has the following prominent features:
   * [Arbitrary CSV data](#how-to-import-csv-data).
   * [Native binary format](#how-to-import-data-in-native-format).
   * [DataDog agent or DogStatsD](#how-to-send-data-from-datadog-agent).
+  * [OpenTelemetry metrics format](#sending-data-via-opentelemetry).
 * It supports powerful [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html), which can be used as a [statsd](https://github.com/statsd/statsd) alternative.
 * It supports metrics [relabeling](#relabeling).
 * It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and
@@ -1184,6 +1185,7 @@ Additionally, VictoriaMetrics can accept metrics via the following popular data
 * DataDog `submit metrics` API. See [these docs](#how-to-send-data-from-datadog-agent) for details.
 * InfluxDB line protocol. See [these docs](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details.
 * Graphite plaintext protocol. See [these docs](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) for details.
+* OpenTelemetry http API. See [these docs](#sending-data-via-opentelemetry) for details.
 * OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details.
 * OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details.
 * `/api/v1/import` for importing data obtained from [/api/v1/export](#how-to-export-data-in-json-line-format).
@@ -1367,6 +1369,13 @@ Note that it could be required to flush response cache after importing historica
 
 VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter).
 
+## Sending data via OpenTelemetry
+
+VictoriaMetrics supports data ingestion via the [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md) at the `/opentelemetry/api/v1/push` path.
+
+VictoriaMetrics expects `protobuf`-encoded requests at `/opentelemetry/api/v1/push`.
+Set the `Content-Encoding: gzip` HTTP request header when sending gzip-compressed data to this endpoint.
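+
+If the OpenTelemetry SDK isn't available, a hand-rolled push only needs gzip plus the header above. A minimal sketch, assuming `payload` holds a protobuf-marshaled OTLP `ExportMetricsServiceRequest` produced elsewhere:
+
+```go
+package main
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"log"
+	"net/http"
+)
+
+// pushOTLP gzip-compresses payload and sends it to the VictoriaMetrics OpenTelemetry endpoint.
+func pushOTLP(vmAddr string, payload []byte) error {
+	var buf bytes.Buffer
+	zw := gzip.NewWriter(&buf)
+	if _, err := zw.Write(payload); err != nil {
+		return err
+	}
+	if err := zw.Close(); err != nil {
+		return err
+	}
+	req, err := http.NewRequest(http.MethodPost, vmAddr+"/opentelemetry/api/v1/push", &buf)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Content-Type", "application/x-protobuf")
+	// Required when sending gzip-compressed data.
+	req.Header.Set("Content-Encoding", "gzip")
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode/100 != 2 {
+		return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
+	}
+	return nil
+}
+
+func main() {
+	// nil payload shown for brevity; a real request must be a marshaled OTLP message.
+	if err := pushOTLP("http://localhost:8428", nil); err != nil {
+		log.Fatal(err)
+	}
+}
+```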
+
 ## Relabeling
 
 VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points
diff --git a/docs/vmagent.md b/docs/vmagent.md
index c2011f2dce..585ada0195 100644
--- a/docs/vmagent.md
+++ b/docs/vmagent.md
@@ -104,6 +104,7 @@ additionally to pull-based Prometheus-compatible targets' scraping:
 * DataDog "submit metrics" API. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent).
 * InfluxDB line protocol via `http://<vmagent>:8429/write`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf).
 * Graphite plaintext protocol if `-graphiteListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-graphite-compatible-agents-such-as-statsd).
+* OpenTelemetry http API. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-data-via-opentelemetry).
 * OpenTSDB telnet and http protocols if `-opentsdbListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-opentsdb-compatible-agents).
 * Prometheus remote write protocol via `http://<vmagent>:8429/api/v1/write`.
 * JSON lines import protocol via `http://<vmagent>:8429/api/v1/import`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-json-line-format).
diff --git a/docs/vmalert.md b/docs/vmalert.md
index 8585af0e86..fc4d6604b6 100644
--- a/docs/vmalert.md
+++ b/docs/vmalert.md
@@ -214,6 +214,10 @@ expr: <string>
 # as firing once they return.
 [ for: <duration> | default = 0s ]
 
+# The alert will continue firing for this duration even when the alerting expression
+# no longer returns results. This allows delaying alert resolution.
+[ keep_firing_for: <duration> | default = 0s ]
+
 # Whether to print debug information into logs.
 # Information includes alerts state changes and requests sent to the datasource.
 # Please note, that if rule's query params contain sensitive
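
For reference, a minimal rule using the new field might look as follows (the metric name and thresholds are hypothetical):

```yaml
groups:
  - name: example
    rules:
      - alert: HighErrorRate
        expr: rate(http_errors_total[5m]) > 0.05
        for: 5m
        # keep the alert firing for 10 more minutes after expr stops returning results
        keep_firing_for: 10m
```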
@@ -368,19 +372,24 @@ For recording rules to work `-remoteWrite.url` must be specified.
 
 ### Alerts state on restarts
 
-`vmalert` has no local storage, so alerts state is stored in the process memory. Hence, after restart of `vmalert`
-the process alerts state will be lost. To avoid this situation, `vmalert` should be configured via the following flags:
+`vmalert` is stateless: it holds alerts state in process memory, so restarting the `vmalert` process
+resets that state. To prevent `vmalert` from losing alerts state, it should be configured
+to persist the state to a remote destination via the following flags:
 
 * `-remoteWrite.url` - URL to VictoriaMetrics (Single) or vminsert (Cluster). `vmalert` will persist alerts state
-  into the configured address in the form of time series named `ALERTS` and `ALERTS_FOR_STATE` via remote-write protocol.
-  These are regular time series and maybe queried from VM just as any other time series.
-  The state is stored to the configured address on every rule evaluation.
+  to the configured address in the form of [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series)
+  `ALERTS` and `ALERTS_FOR_STATE` via remote-write protocol.
+  These time series can be queried from VictoriaMetrics just as any other time series.
+  The state will be persisted to the configured address on each evaluation.
 * `-remoteRead.url` - URL to VictoriaMetrics (Single) or vmselect (Cluster). `vmalert` will try to restore alerts state
-  from configured address by querying time series with name `ALERTS_FOR_STATE`.
+  from the configured address by querying time series with the name `ALERTS_FOR_STATE`. The restore happens only once,
+  when the `vmalert` process starts, and only for the configured rules. Config [hot reload](#hot-config-reload) doesn't trigger
+  state restore.
 
 Both flags are required for proper state restoration. The restore process may fail if time series are missing
 from the configured `-remoteRead.url`, weren't updated within the last `1h` (controlled by `-remoteRead.lookback`)
-or received state doesn't match current `vmalert` rules configuration.
+or the received state doesn't match the current `vmalert` rules configuration. `vmalert` marks successfully restored rules
+with the `restored` label in the [web UI](#WEB).
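+
+For example, a single-node setup where `vmalert` persists and restores its state from the same VictoriaMetrics instance could be started as follows (the addresses are placeholders):
+
+```console
+./vmalert -rule=rules.yml \
+  -datasource.url=http://victoriametrics:8428 \
+  -remoteWrite.url=http://victoriametrics:8428 \
+  -remoteRead.url=http://victoriametrics:8428
+```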
 
 ### Multitenancy
 
@@ -742,6 +751,7 @@ See full description for these flags in `./vmalert -help`.
 * Graphite engine isn't supported yet;
 * `query` template function is disabled for performance reasons (might be changed in future);
 * `limit` group's param has no effect during replay (might be changed in future);
+* `keep_firing_for` alerting rule param has no effect during replay (might be changed in future).
 
 ## Unit Testing for Rules
 
diff --git a/lib/promscrape/discovery/azure/azure.go b/lib/promscrape/discovery/azure/azure.go
index b1fe35fe96..f9ec794e2d 100644
--- a/lib/promscrape/discovery/azure/azure.go
+++ b/lib/promscrape/discovery/azure/azure.go
@@ -58,7 +58,11 @@ func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
 
 // MustStop stops further usage for sdc.
 func (sdc *SDConfig) MustStop() {
-	configMap.Delete(sdc)
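+	// configMap.Delete returns the apiConfig created for this SDConfig (if any),
+	// so its HTTP client can be stopped and idle connections released.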
+	v := configMap.Delete(sdc)
+	if v != nil {
+		cfg := v.(*apiConfig)
+		cfg.c.Stop()
+	}
 }
 
 func appendMachineLabels(vms []virtualMachine, port int, sdc *SDConfig) []*promutils.Labels {
diff --git a/lib/promscrape/discovery/azure/machine_test.go b/lib/promscrape/discovery/azure/machine_test.go
index e323c9202c..bb13a2f5af 100644
--- a/lib/promscrape/discovery/azure/machine_test.go
+++ b/lib/promscrape/discovery/azure/machine_test.go
@@ -71,6 +71,7 @@ func TestGetVirtualMachinesSuccess(t *testing.T) {
 			if err != nil {
 				t.Fatalf("unexpected error at client create: %s", err)
 			}
+			defer c.Stop()
 			ac := &apiConfig{
 				c:              c,
 				subscriptionID: "some-id",
diff --git a/lib/promscrape/discovery/consul/api.go b/lib/promscrape/discovery/consul/api.go
index 630a31d277..8d815a39f9 100644
--- a/lib/promscrape/discovery/consul/api.go
+++ b/lib/promscrape/discovery/consul/api.go
@@ -90,6 +90,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
 	}
 	dc, err := getDatacenter(client, sdc.Datacenter)
 	if err != nil {
+		client.Stop()
 		return nil, fmt.Errorf("cannot obtain consul datacenter: %w", err)
 	}
 
diff --git a/lib/promscrape/discovery/consulagent/api.go b/lib/promscrape/discovery/consulagent/api.go
index a859bcf931..ba773ac182 100644
--- a/lib/promscrape/discovery/consulagent/api.go
+++ b/lib/promscrape/discovery/consulagent/api.go
@@ -84,6 +84,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
 	}
 	agent, err := consul.GetAgentInfo(client)
 	if err != nil {
+		client.Stop()
 		return nil, fmt.Errorf("cannot obtain consul datacenter: %w", err)
 	}
 	dc := sdc.Datacenter
diff --git a/lib/promscrape/discovery/digitalocean/digitalocean.go b/lib/promscrape/discovery/digitalocean/digitalocean.go
index cf25f2da3c..aa4534c587 100644
--- a/lib/promscrape/discovery/digitalocean/digitalocean.go
+++ b/lib/promscrape/discovery/digitalocean/digitalocean.go
@@ -155,5 +155,9 @@ func addDropletLabels(droplets []droplet, defaultPort int) []*promutils.Labels {
 
 // MustStop stops further usage for sdc.
 func (sdc *SDConfig) MustStop() {
-	configMap.Delete(sdc)
+	v := configMap.Delete(sdc)
+	if v != nil {
+		cfg := v.(*apiConfig)
+		cfg.client.Stop()
+	}
 }
diff --git a/lib/promscrape/discovery/docker/docker.go b/lib/promscrape/discovery/docker/docker.go
index 6be9086541..ade73572f9 100644
--- a/lib/promscrape/discovery/docker/docker.go
+++ b/lib/promscrape/discovery/docker/docker.go
@@ -47,5 +47,9 @@ func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
 
 // MustStop stops further usage for sdc.
 func (sdc *SDConfig) MustStop() {
-	configMap.Delete(sdc)
+	v := configMap.Delete(sdc)
+	if v != nil {
+		cfg := v.(*apiConfig)
+		cfg.client.Stop()
+	}
 }
diff --git a/lib/promscrape/discovery/dockerswarm/dockerswarm.go b/lib/promscrape/discovery/dockerswarm/dockerswarm.go
index bfe7a06971..f527a51fc1 100644
--- a/lib/promscrape/discovery/dockerswarm/dockerswarm.go
+++ b/lib/promscrape/discovery/dockerswarm/dockerswarm.go
@@ -56,5 +56,9 @@ func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
 
 // MustStop stops further usage for sdc.
 func (sdc *SDConfig) MustStop() {
-	configMap.Delete(sdc)
+	v := configMap.Delete(sdc)
+	if v != nil {
+		cfg := v.(*apiConfig)
+		cfg.client.Stop()
+	}
 }
diff --git a/lib/promscrape/discovery/eureka/eureka.go b/lib/promscrape/discovery/eureka/eureka.go
index 4b73cf81c4..e677e1ef23 100644
--- a/lib/promscrape/discovery/eureka/eureka.go
+++ b/lib/promscrape/discovery/eureka/eureka.go
@@ -101,7 +101,11 @@ func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
 
 // MustStop stops further usage for sdc.
 func (sdc *SDConfig) MustStop() {
-	configMap.Delete(sdc)
+	v := configMap.Delete(sdc)
+	if v != nil {
+		cfg := v.(*apiConfig)
+		cfg.client.Stop()
+	}
 }
 
 func addInstanceLabels(apps *applications) []*promutils.Labels {
diff --git a/lib/promscrape/discovery/gce/api.go b/lib/promscrape/discovery/gce/api.go
index 8fb9d76ecc..c2953ebfcf 100644
--- a/lib/promscrape/discovery/gce/api.go
+++ b/lib/promscrape/discovery/gce/api.go
@@ -42,6 +42,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
 	if len(project) == 0 {
 		proj, err := getCurrentProject()
 		if err != nil {
+			client.CloseIdleConnections()
 			return nil, fmt.Errorf("cannot determine the current project; make sure `vmagent` runs inside GCE; error: %w", err)
 		}
 		project = proj
@@ -52,6 +53,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
 		// Autodetect the current zone.
 		zone, err := getCurrentZone()
 		if err != nil {
+			client.CloseIdleConnections()
 			return nil, fmt.Errorf("cannot determine the current zone; make sure `vmagent` runs inside GCE; error: %w", err)
 		}
 		zones = append(zones, zone)
@@ -62,6 +64,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
 		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3202
 		zs, err := getZonesForProject(client, project)
 		if err != nil {
+			client.CloseIdleConnections()
 			return nil, fmt.Errorf("cannot obtain zones for project %q: %w", project, err)
 		}
 		zones = zs
diff --git a/lib/promscrape/discovery/gce/gce.go b/lib/promscrape/discovery/gce/gce.go
index 70ece8a04f..a52e359626 100644
--- a/lib/promscrape/discovery/gce/gce.go
+++ b/lib/promscrape/discovery/gce/gce.go
@@ -73,5 +73,9 @@ func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
 
 // MustStop stops further usage for sdc.
 func (sdc *SDConfig) MustStop() {
-	configMap.Delete(sdc)
+	v := configMap.Delete(sdc)
+	if v != nil {
+		cfg := v.(*apiConfig)
+		cfg.client.CloseIdleConnections()
+	}
 }
diff --git a/lib/promscrape/discovery/http/http.go b/lib/promscrape/discovery/http/http.go
index 23f4a5c013..479895131b 100644
--- a/lib/promscrape/discovery/http/http.go
+++ b/lib/promscrape/discovery/http/http.go
@@ -57,5 +57,9 @@ func addHTTPTargetLabels(src []httpGroupTarget, sourceURL string) []*promutils.L
 
 // MustStop stops further usage for sdc.
 func (sdc *SDConfig) MustStop() {
-	configMap.Delete(sdc)
+	v := configMap.Delete(sdc)
+	if v != nil {
+		cfg := v.(*apiConfig)
+		cfg.client.Stop()
+	}
 }
diff --git a/lib/promscrape/discovery/kuma/api.go b/lib/promscrape/discovery/kuma/api.go
index ce93572d07..26b62f0812 100644
--- a/lib/promscrape/discovery/kuma/api.go
+++ b/lib/promscrape/discovery/kuma/api.go
@@ -80,6 +80,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
 	// The synchronous targets' update is needed for returning non-empty list of targets
 	// just after the initialization.
 	if err := cfg.updateTargetsLabels(ctx); err != nil {
+		client.Stop()
 		return nil, fmt.Errorf("cannot discover Kuma targets: %w", err)
 	}
 	cfg.wg.Add(1)
diff --git a/lib/promscrape/discovery/openstack/api.go b/lib/promscrape/discovery/openstack/api.go
index c946a08030..fa04aadbda 100644
--- a/lib/promscrape/discovery/openstack/api.go
+++ b/lib/promscrape/discovery/openstack/api.go
@@ -91,6 +91,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
 		}
 		ac, err := opts.NewConfig()
 		if err != nil {
+			cfg.client.CloseIdleConnections()
 			return nil, err
 		}
 		cfg.client.Transport = &http.Transport{
@@ -111,6 +112,7 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
 		sdcAuth = readCredentialsFromEnv()
 	}
 	if strings.HasSuffix(sdcAuth.IdentityEndpoint, "v2.0") {
+		cfg.client.CloseIdleConnections()
 		return nil, errors.New("identity_endpoint v2.0 is not supported")
 	}
 	// trim .0 from v3.0 for prometheus cfg compatibility
@@ -118,11 +120,13 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
 
 	parsedURL, err := url.Parse(sdcAuth.IdentityEndpoint)
 	if err != nil {
+		cfg.client.CloseIdleConnections()
 		return nil, fmt.Errorf("cannot parse identity_endpoint: %s as url, err: %w", sdcAuth.IdentityEndpoint, err)
 	}
 	cfg.endpoint = parsedURL
 	tokenReq, err := buildAuthRequestBody(&sdcAuth)
 	if err != nil {
+		cfg.client.CloseIdleConnections()
 		return nil, err
 	}
 	cfg.authTokenReq = tokenReq
diff --git a/lib/promscrape/discovery/openstack/openstack.go b/lib/promscrape/discovery/openstack/openstack.go
index df26b92b1c..e380f66f8d 100644
--- a/lib/promscrape/discovery/openstack/openstack.go
+++ b/lib/promscrape/discovery/openstack/openstack.go
@@ -57,5 +57,9 @@ func (sdc *SDConfig) GetLabels(baseDir string) ([]*promutils.Labels, error) {
 
 // MustStop stops further usage for sdc.
 func (sdc *SDConfig) MustStop() {
-	configMap.Delete(sdc)
+	v := configMap.Delete(sdc)
+	if v != nil {
+		cfg := v.(*apiConfig)
+		cfg.client.CloseIdleConnections()
+	}
 }
diff --git a/lib/promscrape/discoveryutils/client.go b/lib/promscrape/discoveryutils/client.go
index ad54f3bebc..161c207af1 100644
--- a/lib/promscrape/discoveryutils/client.go
+++ b/lib/promscrape/discoveryutils/client.go
@@ -81,6 +81,12 @@ type HTTPClient struct {
 	ReadTimeout time.Duration
 }
 
+func (hc *HTTPClient) stop() {
+	// Close idle connections to the server in order to free up resources.
+	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4724
+	hc.client.CloseIdleConnections()
+}
+
 var defaultDialer = &net.Dialer{}
 
 // NewClient returns new Client for the given args.
@@ -276,6 +282,8 @@ func (c *Client) APIServer() string {
-// Stop cancels all in-flight requests
+// Stop cancels all in-flight requests and closes idle connections to the configured server
 func (c *Client) Stop() {
 	c.clientCancel()
+	c.client.stop()
+	c.blockingClient.stop()
 }
 
 func doRequestWithPossibleRetry(hc *HTTPClient, req *http.Request) (*http.Response, error) {
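
The essence of the change above: an `http.Client` keeps idle keep-alive connections pooled until they are closed explicitly, so stopping a discovery client must call `CloseIdleConnections`. A minimal stdlib sketch of the lifecycle (the URL is a placeholder):

```go
package main

import (
	"io"
	"net/http"
)

func main() {
	c := &http.Client{}
	resp, err := c.Get("http://localhost:8428/health")
	if err == nil {
		_, _ = io.Copy(io.Discard, resp.Body) // drain the body so the connection can be reused
		_ = resp.Body.Close()                 // return the connection to the idle pool
	}
	// Without this call, pooled connections stay open until the server
	// times them out; this is the leak fixed by Client.Stop above.
	c.CloseIdleConnections()
}
```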
diff --git a/lib/protoparser/common/extra_labels.go b/lib/protoparser/common/extra_labels.go
index 6f9e9255c2..b68d94fe5f 100644
--- a/lib/protoparser/common/extra_labels.go
+++ b/lib/protoparser/common/extra_labels.go
@@ -64,7 +64,7 @@ func getPushgatewayLabels(path string) ([]prompbmarshal.Label, error) {
 			s = s[n+1:]
 		}
 		if isBase64 {
-			data, err := base64.URLEncoding.DecodeString(value)
+			data, err := base64.RawURLEncoding.DecodeString(strings.TrimRight(value, "="))
 			if err != nil {
 				return nil, fmt.Errorf("cannot base64-decode value=%q for label=%q: %w", value, name, err)
 			}
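
The difference is easy to reproduce: `base64.URLEncoding` requires padded input, while the Prometheus Go client emits unpadded values, and the pushgateway URL convention uses a bare `=` for an empty value. A small sketch of the decoding now used:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// The padded decoder rejects unpadded input produced by the Prometheus Go client.
	if _, err := base64.URLEncoding.DecodeString("YWJjZA"); err != nil {
		fmt.Println("URLEncoding:", err)
	}
	// Trimming padding and decoding with the raw URL-safe alphabet accepts
	// padded, unpadded and bare-"=" (empty) values alike.
	for _, s := range []string{"YWJjZA", "YWJjZA==", "="} {
		data, err := base64.RawURLEncoding.DecodeString(strings.TrimRight(s, "="))
		fmt.Printf("%q -> %q, err=%v\n", s, data, err)
	}
}
```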
diff --git a/lib/protoparser/common/extra_labels_test.go b/lib/protoparser/common/extra_labels_test.go
index 42b0af96f6..483fb88654 100644
--- a/lib/protoparser/common/extra_labels_test.go
+++ b/lib/protoparser/common/extra_labels_test.go
@@ -62,6 +62,9 @@ func TestGetPushgatewayLabelsSuccess(t *testing.T) {
 	f("/foo/metrics/job@base64/Zm9v", `{job="foo"}`)
 	f("/foo/metrics/job/x/a/foo/aaa/bar", `{a="foo",aaa="bar",job="x"}`)
 	f("/foo/metrics/job/x/a@base64/Zm9v", `{a="foo",job="x"}`)
+	f("/metrics/job/test/region@base64/YXotc291dGhlYXN0LTEtZjAxL3d6eS1hei1zb3V0aGVhc3QtMQ", `{job="test",region="az-southeast-1-f01/wzy-az-southeast-1"}`)
+	f("/metrics/job/test/empty@base64/=", `{job="test"}`)
+	f("/metrics/job/test/test@base64/PT0vPT0", `{job="test",test="==/=="}`)
 }
 
 func TestGetPushgatewayLabelsFailure(t *testing.T) {
diff --git a/lib/protoparser/opentelemetry/pb/common.pb.go b/lib/protoparser/opentelemetry/pb/common.pb.go
new file mode 100644
index 0000000000..6e2b25c291
--- /dev/null
+++ b/lib/protoparser/opentelemetry/pb/common.pb.go
@@ -0,0 +1,120 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.21.12
+// source: lib/protoparser/opentelemetry/proto/common.proto
+
+package pb
+
+// AnyValue is used to represent any type of attribute value. AnyValue may contain a
+// primitive value such as a string or integer or it may contain an arbitrary nested
+// object containing arrays, key-value lists and primitives.
+type AnyValue struct {
+	unknownFields []byte
+
+	// The value is one of the listed fields. It is valid for all values to be unspecified
+	// in which case this AnyValue is considered to be "empty".
+	//
+	// Types that are assignable to Value:
+	//
+	//	*AnyValue_StringValue
+	//	*AnyValue_BoolValue
+	//	*AnyValue_IntValue
+	//	*AnyValue_DoubleValue
+	//	*AnyValue_ArrayValue
+	//	*AnyValue_KvlistValue
+	//	*AnyValue_BytesValue
+	Value isAnyValue_Value `protobuf_oneof:"value"`
+}
+
+type isAnyValue_Value interface {
+	isAnyValue_Value()
+}
+
+type AnyValue_StringValue struct {
+	StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type AnyValue_BoolValue struct {
+	BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type AnyValue_IntValue struct {
+	IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof"`
+}
+
+type AnyValue_DoubleValue struct {
+	DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type AnyValue_ArrayValue struct {
+	ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof"`
+}
+
+type AnyValue_KvlistValue struct {
+	KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof"`
+}
+
+type AnyValue_BytesValue struct {
+	BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+func (*AnyValue_StringValue) isAnyValue_Value() {}
+
+func (*AnyValue_BoolValue) isAnyValue_Value() {}
+
+func (*AnyValue_IntValue) isAnyValue_Value() {}
+
+func (*AnyValue_DoubleValue) isAnyValue_Value() {}
+
+func (*AnyValue_ArrayValue) isAnyValue_Value() {}
+
+func (*AnyValue_KvlistValue) isAnyValue_Value() {}
+
+func (*AnyValue_BytesValue) isAnyValue_Value() {}
+
+// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
+// since oneof in AnyValue does not allow repeated fields.
+type ArrayValue struct {
+	unknownFields []byte
+	// Array of values. The array may be empty (contain 0 elements).
+	Values []*AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
+// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
+// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
+// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
+// are semantically equivalent.
+type KeyValueList struct {
+	unknownFields []byte
+
+	// A collection of key/value pairs. The list may be empty (may
+	// contain 0 elements).
+	// The keys MUST be unique (it is not allowed to have more than one
+	// value with the same key).
+	Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+// KeyValue is a key-value pair that is used to store Span attributes, Link
+// attributes, etc.
+type KeyValue struct {
+	unknownFields []byte
+
+	Key   string    `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
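
A quick sketch of how these oneof wrappers are constructed and read back, relying on the `FormatString` helper added in `helpers.go` later in this diff:

```go
package pb

import "testing"

func TestAnyValueFormatString(t *testing.T) {
	kv := &KeyValue{
		Key:   "env",
		Value: &AnyValue{Value: &AnyValue_StringValue{StringValue: "prod"}},
	}
	if got := kv.Value.FormatString(); got != "prod" {
		t.Fatalf("unexpected value: %q", got)
	}
}
```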
diff --git a/lib/protoparser/opentelemetry/pb/common_vtproto.pb.go b/lib/protoparser/opentelemetry/pb/common_vtproto.pb.go
new file mode 100644
index 0000000000..15036b2e77
--- /dev/null
+++ b/lib/protoparser/opentelemetry/pb/common_vtproto.pb.go
@@ -0,0 +1,1079 @@
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// protoc-gen-go-vtproto version: v0.4.0
+// source: lib/protoparser/opentelemetry/proto/common.proto
+
+package pb
+
+import (
+	binary "encoding/binary"
+	fmt "fmt"
+	io "io"
+	math "math"
+	bits "math/bits"
+)
+
+func (m *AnyValue) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AnyValue) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *AnyValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if vtmsg, ok := m.Value.(interface {
+		MarshalToSizedBufferVT([]byte) (int, error)
+	}); ok {
+		size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AnyValue_StringValue) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *AnyValue_StringValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i -= len(m.StringValue)
+	copy(dAtA[i:], m.StringValue)
+	i = encodeVarint(dAtA, i, uint64(len(m.StringValue)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+func (m *AnyValue_BoolValue) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *AnyValue_BoolValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i--
+	if m.BoolValue {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	return len(dAtA) - i, nil
+}
+func (m *AnyValue_IntValue) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *AnyValue_IntValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i = encodeVarint(dAtA, i, uint64(m.IntValue))
+	i--
+	dAtA[i] = 0x18
+	return len(dAtA) - i, nil
+}
+func (m *AnyValue_DoubleValue) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *AnyValue_DoubleValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i -= 8
+	binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue))))
+	i--
+	dAtA[i] = 0x21
+	return len(dAtA) - i, nil
+}
+func (m *AnyValue_ArrayValue) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *AnyValue_ArrayValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ArrayValue != nil {
+		size, err := m.ArrayValue.MarshalToSizedBufferVT(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarint(dAtA, i, uint64(size))
+		i--
+		dAtA[i] = 0x2a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *AnyValue_KvlistValue) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *AnyValue_KvlistValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.KvlistValue != nil {
+		size, err := m.KvlistValue.MarshalToSizedBufferVT(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarint(dAtA, i, uint64(size))
+		i--
+		dAtA[i] = 0x32
+	}
+	return len(dAtA) - i, nil
+}
+func (m *AnyValue_BytesValue) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *AnyValue_BytesValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i -= len(m.BytesValue)
+	copy(dAtA[i:], m.BytesValue)
+	i = encodeVarint(dAtA, i, uint64(len(m.BytesValue)))
+	i--
+	dAtA[i] = 0x3a
+	return len(dAtA) - i, nil
+}
+func (m *ArrayValue) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ArrayValue) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ArrayValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if len(m.Values) > 0 {
+		for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.Values[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *KeyValueList) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *KeyValueList) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *KeyValueList) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if len(m.Values) > 0 {
+		for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.Values[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *KeyValue) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *KeyValue) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *KeyValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if m.Value != nil {
+		size, err := m.Value.MarshalToSizedBufferVT(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarint(dAtA, i, uint64(size))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarint(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarint(dAtA []byte, offset int, v uint64) int {
+	offset -= sov(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *AnyValue) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if vtmsg, ok := m.Value.(interface{ SizeVT() int }); ok {
+		n += vtmsg.SizeVT()
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *AnyValue_StringValue) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.StringValue)
+	n += 1 + l + sov(uint64(l))
+	return n
+}
+func (m *AnyValue_BoolValue) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 2
+	return n
+}
+func (m *AnyValue_IntValue) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sov(uint64(m.IntValue))
+	return n
+}
+func (m *AnyValue_DoubleValue) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 9
+	return n
+}
+func (m *AnyValue_ArrayValue) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ArrayValue != nil {
+		l = m.ArrayValue.SizeVT()
+		n += 1 + l + sov(uint64(l))
+	}
+	return n
+}
+func (m *AnyValue_KvlistValue) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.KvlistValue != nil {
+		l = m.KvlistValue.SizeVT()
+		n += 1 + l + sov(uint64(l))
+	}
+	return n
+}
+func (m *AnyValue_BytesValue) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.BytesValue)
+	n += 1 + l + sov(uint64(l))
+	return n
+}
+func (m *ArrayValue) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Values) > 0 {
+		for _, e := range m.Values {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *KeyValueList) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Values) > 0 {
+		for _, e := range m.Values {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *KeyValue) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sov(uint64(l))
+	}
+	if m.Value != nil {
+		l = m.Value.SizeVT()
+		n += 1 + l + sov(uint64(l))
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func sov(x uint64) (n int) {
+	return (bits.Len64(x|1) + 6) / 7
+}
+func soz(x uint64) (n int) {
+	return sov(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *AnyValue) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AnyValue: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AnyValue: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = &AnyValue_StringValue{StringValue: string(dAtA[iNdEx:postIndex])}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Value = &AnyValue_BoolValue{BoolValue: b}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Value = &AnyValue_IntValue{IntValue: v}
+		case 4:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			m.Value = &AnyValue_DoubleValue{DoubleValue: float64(math.Float64frombits(v))}
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if oneof, ok := m.Value.(*AnyValue_ArrayValue); ok {
+				if err := oneof.ArrayValue.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+			} else {
+				v := &ArrayValue{}
+				if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+				m.Value = &AnyValue_ArrayValue{ArrayValue: v}
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KvlistValue", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if oneof, ok := m.Value.(*AnyValue_KvlistValue); ok {
+				if err := oneof.KvlistValue.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+			} else {
+				v := &KeyValueList{}
+				if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+				m.Value = &AnyValue_KvlistValue{KvlistValue: v}
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BytesValue", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := make([]byte, postIndex-iNdEx)
+			copy(v, dAtA[iNdEx:postIndex])
+			m.Value = &AnyValue_BytesValue{BytesValue: v}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ArrayValue) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ArrayValue: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ArrayValue: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Values = append(m.Values, &AnyValue{})
+			if err := m.Values[len(m.Values)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *KeyValueList) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: KeyValueList: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: KeyValueList: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Values = append(m.Values, &KeyValue{})
+			if err := m.Values[len(m.Values)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *KeyValue) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Value == nil {
+				m.Value = &AnyValue{}
+			}
+			if err := m.Value.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
+func skip(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLength
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroup
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLength
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLength        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflow          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group")
+)
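+
+// decodeVarint is an illustrative sketch (not part of the generated code; the
+// name is hypothetical): it is the stand-alone form of the varint loop used by
+// the UnmarshalVT methods and skip above — seven payload bits per byte,
+// least-significant group first, with the high bit marking continuation.
+func decodeVarint(dAtA []byte) (v uint64, n int, err error) {
+	for shift := uint(0); ; shift += 7 {
+		if shift >= 64 {
+			return 0, 0, ErrIntOverflow
+		}
+		if n >= len(dAtA) {
+			return 0, 0, io.ErrUnexpectedEOF
+		}
+		b := dAtA[n]
+		n++
+		v |= uint64(b&0x7F) << shift
+		if b < 0x80 {
+			return v, n, nil
+		}
+	}
+}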
diff --git a/lib/protoparser/opentelemetry/pb/helpers.go b/lib/protoparser/opentelemetry/pb/helpers.go
new file mode 100644
index 0000000000..b8683101ba
--- /dev/null
+++ b/lib/protoparser/opentelemetry/pb/helpers.go
@@ -0,0 +1,69 @@
+package pb
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"math"
+	"strconv"
+)
+
+// FormatString returns a string representation of the value stored in x.
+func (x *AnyValue) FormatString() string {
+	switch v := x.Value.(type) {
+	case *AnyValue_StringValue:
+		return v.StringValue
+
+	case *AnyValue_BoolValue:
+		return strconv.FormatBool(v.BoolValue)
+
+	case *AnyValue_DoubleValue:
+		return float64AsString(v.DoubleValue)
+
+	case *AnyValue_IntValue:
+		return strconv.FormatInt(v.IntValue, 10)
+
+	case *AnyValue_KvlistValue:
+		jsonStr, _ := json.Marshal(v.KvlistValue.Values)
+		return string(jsonStr)
+
+	case *AnyValue_BytesValue:
+		return base64.StdEncoding.EncodeToString(v.BytesValue)
+
+	case *AnyValue_ArrayValue:
+		jsonStr, _ := json.Marshal(v.ArrayValue.Values)
+		return string(jsonStr)
+
+	default:
+		return ""
+	}
+}
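+
+// anyValueToLabel is an illustrative sketch (not part of the original file;
+// the name is hypothetical): it shows how FormatString may be used to render
+// an attribute as a `key=value` pair regardless of the concrete value type.
+func anyValueToLabel(kv *KeyValue) string {
+	if kv == nil || kv.Value == nil {
+		return ""
+	}
+	return kv.Key + "=" + kv.Value.FormatString()
+}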
+
+func float64AsString(f float64) string {
+	if math.IsInf(f, 0) || math.IsNaN(f) {
+		return fmt.Sprintf("json: unsupported value: %s", strconv.FormatFloat(f, 'g', -1, 64))
+	}
+
+	// Convert as if by ES6 number to string conversion.
+	// This matches most other JSON generators.
+	// See golang.org/issue/6384 and golang.org/issue/14135.
+	// Like fmt %g, but the exponent cutoffs are different
+	// and exponents themselves are not padded to two digits.
+	scratch := [64]byte{}
+	b := scratch[:0]
+	abs := math.Abs(f)
+	fmt := byte('f')
+	if abs != 0 && (abs < 1e-6 || abs >= 1e21) {
+		fmt = 'e'
+	}
+	b = strconv.AppendFloat(b, f, fmt, -1, 64)
+	if fmt == 'e' {
+		// clean up e-09 to e-9
+		n := len(b)
+		if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' {
+			b[n-2] = b[n-1]
+			b = b[:n-1]
+		}
+	}
+	return string(b)
+}
diff --git a/lib/protoparser/opentelemetry/pb/metrics.pb.go b/lib/protoparser/opentelemetry/pb/metrics.pb.go
new file mode 100644
index 0000000000..57cf5ea337
--- /dev/null
+++ b/lib/protoparser/opentelemetry/pb/metrics.pb.go
@@ -0,0 +1,736 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.21.12
+// source: lib/protoparser/opentelemetry/proto/metrics.proto
+
+package pb
+
+// AggregationTemporality defines how a metric aggregator reports aggregated
+// values. It describes how those values relate to the time interval over
+// which they are aggregated.
+type AggregationTemporality int32
+
+const (
+	// UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
+	AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0
+	// DELTA is an AggregationTemporality for a metric aggregator which reports
+	// changes since last report time. Successive metrics contain aggregation of
+	// values from continuous and non-overlapping intervals.
+	//
+	// The values for a DELTA metric are based only on the time interval
+	// associated with one measurement cycle. There is no dependency on
+	// previous measurements like is the case for CUMULATIVE metrics.
+	//
+	// For example, consider a system measuring the number of requests that
+	// it receives and reports the sum of these requests every second as a
+	// DELTA metric:
+	//
+	//  1. The system starts receiving at time=t_0.
+	//  2. A request is received, the system measures 1 request.
+	//  3. A request is received, the system measures 1 request.
+	//  4. A request is received, the system measures 1 request.
+	//  5. The 1 second collection cycle ends. A metric is exported for the
+	//     number of requests received over the interval of time t_0 to
+	//     t_0+1 with a value of 3.
+	//  6. A request is received, the system measures 1 request.
+	//  7. A request is received, the system measures 1 request.
+	//  8. The 1 second collection cycle ends. A metric is exported for the
+	//     number of requests received over the interval of time t_0+1 to
+	//     t_0+2 with a value of 2.
+	AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1
+	// CUMULATIVE is an AggregationTemporality for a metric aggregator which
+	// reports changes since a fixed start time. This means that current values
+	// of a CUMULATIVE metric depend on all previous measurements since the
+	// start time. Because of this, the sender is required to retain this state
+	// in some form. If this state is lost or invalidated, the CUMULATIVE metric
+	// values MUST be reset and a new fixed start time following the last
+	// reported measurement time sent MUST be used.
+	//
+	// For example, consider a system measuring the number of requests that
+	// it receives and reports the sum of these requests every second as a
+	// CUMULATIVE metric:
+	//
+	//  1. The system starts receiving at time=t_0.
+	//  2. A request is received, the system measures 1 request.
+	//  3. A request is received, the system measures 1 request.
+	//  4. A request is received, the system measures 1 request.
+	//  5. The 1 second collection cycle ends. A metric is exported for the
+	//     number of requests received over the interval of time t_0 to
+	//     t_0+1 with a value of 3.
+	//  6. A request is received, the system measures 1 request.
+	//  7. A request is received, the system measures 1 request.
+	//  8. The 1 second collection cycle ends. A metric is exported for the
+	//     number of requests received over the interval of time t_0 to
+	//     t_0+2 with a value of 5.
+	//  9. The system experiences a fault and loses state.
+	//  10. The system recovers and resumes receiving at time=t_1.
+	//  11. A request is received, the system measures 1 request.
+	//  12. The 1 second collection cycle ends. A metric is exported for the
+	//     number of requests received over the interval of time t_1 to
+	//     t_1+1 with a value of 1.
+	//
+	// Note: Even though reporting changes since the last report time with
+	// CUMULATIVE is valid, it is not recommended. This may cause problems
+	// for systems that do not use start_time to determine when the
+	// aggregation value was reset (e.g. Prometheus).
+	AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2
+)
+
+// Enum value maps for AggregationTemporality.
+var (
+	AggregationTemporality_name = map[int32]string{
+		0: "AGGREGATION_TEMPORALITY_UNSPECIFIED",
+		1: "AGGREGATION_TEMPORALITY_DELTA",
+		2: "AGGREGATION_TEMPORALITY_CUMULATIVE",
+	}
+	AggregationTemporality_value = map[string]int32{
+		"AGGREGATION_TEMPORALITY_UNSPECIFIED": 0,
+		"AGGREGATION_TEMPORALITY_DELTA":       1,
+		"AGGREGATION_TEMPORALITY_CUMULATIVE":  2,
+	}
+)
+
+func (x AggregationTemporality) Enum() *AggregationTemporality {
+	p := new(AggregationTemporality)
+	*p = x
+	return p
+}
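+
+// deltaToCumulative is an illustrative sketch (not part of the generated
+// code; the name is hypothetical): it shows how a consumer can turn the DELTA
+// example above into CUMULATIVE values by keeping a running total per series.
+func deltaToCumulative(deltas []int64) []int64 {
+	cumulative := make([]int64, len(deltas))
+	var total int64
+	for i, d := range deltas {
+		total += d
+		cumulative[i] = total
+	}
+	return cumulative
+}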
+
+// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
+// bit-field representing 32 distinct boolean flags.  Each flag defined in this
+// enum is a bit-mask.  To test the presence of a single flag in the flags of
+// a data point, for example, use an expression like:
+//
+//	(point.flags & FLAG_NO_RECORDED_VALUE) == FLAG_NO_RECORDED_VALUE
+type DataPointFlags int32
+
+const (
+	DataPointFlags_FLAG_NONE DataPointFlags = 0
+	// This DataPoint is valid but has no recorded value.  This value
+	// SHOULD be used to reflect explicitly missing data in a series, as
+	// for an equivalent to the Prometheus "staleness marker".
+	DataPointFlags_FLAG_NO_RECORDED_VALUE DataPointFlags = 1
+)
+
+// Enum value maps for DataPointFlags.
+var (
+	DataPointFlags_name = map[int32]string{
+		0: "FLAG_NONE",
+		1: "FLAG_NO_RECORDED_VALUE",
+	}
+	DataPointFlags_value = map[string]int32{
+		"FLAG_NONE":              0,
+		"FLAG_NO_RECORDED_VALUE": 1,
+	}
+)
+
+func (x DataPointFlags) Enum() *DataPointFlags {
+	p := new(DataPointFlags)
+	*p = x
+	return p
+}
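+
+// hasNoRecordedValue is an illustrative sketch (not part of the generated
+// code; the name is hypothetical): it applies the bit-mask test described
+// above to detect the Prometheus-style staleness marker on a data point.
+func hasNoRecordedValue(flags uint32) bool {
+	return flags&uint32(DataPointFlags_FLAG_NO_RECORDED_VALUE) != 0
+}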
+
+// MetricsData represents the metrics data that can be stored in persistent
+// storage, OR can be embedded by other protocols that transfer OTLP metrics
+// data but do not implement the OTLP protocol.
+//
+// The main difference between this message and the collector protocol is
+// that this message does not carry any "control" or "metadata" specific to
+// the OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type MetricsData struct {
+	unknownFields []byte
+
+	// An array of ResourceMetrics.
+	// For data coming from a single resource this array will typically contain
+	// one element. Intermediary nodes that receive data from multiple origins
+	// typically batch the data before forwarding further and in that case this
+	// array will contain multiple elements.
+	ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
+}
+
+// A collection of ScopeMetrics from a Resource.
+type ResourceMetrics struct {
+	unknownFields []byte
+
+	// The resource for the metrics in this message.
+	// If this field is not set then no resource info is known.
+	Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+	// A list of metrics that originate from a resource.
+	ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"`
+	// This schema_url applies to the data in the "resource" field. It does not apply
+	// to the data in the "scope_metrics" field which have their own schema_url field.
+	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+// A collection of Metrics produced by a Scope.
+type ScopeMetrics struct {
+	unknownFields []byte
+
+	// A list of metrics that originate from an instrumentation library.
+	Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
+	// This schema_url applies to all metrics in the "metrics" field.
+	SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+// Defines a Metric which has one or more timeseries.  The following is a
+// brief summary of the Metric data model.  For more details, see:
+//
+//	https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
+//
+// The data model and relation between entities is shown in the
+// diagram below. Here, "DataPoint" is the term used to refer to any
+// one of the specific data point value types, and "points" is the term used
+// to refer to any one of the lists of points contained in the Metric.
+//
+//   - Metric is composed of metadata and data.
+//
+//   - Metadata part contains a name, description, unit.
+//
+//   - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
+//
+//   - DataPoint contains timestamps, attributes, and one of the possible value type
+//     fields.
+//
+//     Metric
+//     +------------+
+//     |name        |
+//     |description |
+//     |unit        |     +------------------------------------+
+//     |data        |---> |Gauge, Sum, Histogram, Summary, ... |
+//     +------------+     +------------------------------------+
+//
+//     Data [One of Gauge, Sum, Histogram, Summary, ...]
+//     +-----------+
+//     |...        |  // Metadata about the Data.
+//     |points     |--+
+//     +-----------+  |
+//                    |      +---------------------------+
+//                    |      |DataPoint 1                |
+//                    v      |+------+------+   +------+ |
+//                 +-----+   ||label |label |...|label | |
+//                 |  1  |-->||value1|value2|...|valueN| |
+//                 +-----+   |+------+------+   +------+ |
+//                 |  .  |   |+-----+                    |
+//                 |  .  |   ||value|                    |
+//                 |  .  |   |+-----+                    |
+//                 |  .  |   +---------------------------+
+//                 |  .  |                   .
+//                 |  .  |                   .
+//                 |  .  |                   .
+//                 |  .  |   +---------------------------+
+//                 |  .  |   |DataPoint M                |
+//                 +-----+   |+------+------+   +------+ |
+//                 |  M  |-->||label |label |...|label | |
+//                 +-----+   ||value1|value2|...|valueN| |
+//                           |+------+------+   +------+ |
+//                           |+-----+                    |
+//                           ||value|                    |
+//                           |+-----+                    |
+//                           +---------------------------+
+//
+// Each distinct type of DataPoint represents the output of a specific
+// aggregation function, the result of applying the DataPoint's
+// associated function to one or more measurements.
+//
+// All DataPoint types have three common fields:
+//   - Attributes includes key-value pairs associated with the data point
+//   - TimeUnixNano is required, set to the end time of the aggregation
+//   - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
+//     having an AggregationTemporality field, as discussed below.
+//
+// Both TimeUnixNano and StartTimeUnixNano values are expressed as
+// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+//
+// # TimeUnixNano
+//
+// This field is required, having consistent interpretation across
+// DataPoint types.  TimeUnixNano is the moment corresponding to when
+// the data point's aggregate value was captured.
+//
+// Data points with the 0 value for TimeUnixNano SHOULD be rejected
+// by consumers.
+//
+// # StartTimeUnixNano
+//
+// StartTimeUnixNano in general allows detecting when a sequence of
+// observations is unbroken.  This field indicates to consumers the
+// start time for points with cumulative and delta
+// AggregationTemporality, and it should be included whenever possible
+// to support correct rate calculation.  Although it may be omitted
+// when the start time is truly unknown, setting StartTimeUnixNano is
+// strongly encouraged.
+type Metric struct {
+	unknownFields []byte
+
+	// name of the metric, including its DNS name prefix. It must be unique.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// description of the metric, which can be used in documentation.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// unit in which the metric value is reported. Follows the format
+	// described by http://unitsofmeasure.org/ucum.html.
+	Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
+	// Data determines the aggregation type (if any) of the metric, the
+	// reported value type for the data points, and the relationship to
+	// the time interval over which they are reported.
+	//
+	// Types that are assignable to Data:
+	//
+	//	*Metric_Gauge
+	//	*Metric_Sum
+	//	*Metric_Histogram
+	//	*Metric_ExponentialHistogram
+	//	*Metric_Summary
+	Data isMetric_Data `protobuf_oneof:"data"`
+}
+
+type isMetric_Data interface {
+	isMetric_Data()
+}
+
+type Metric_Gauge struct {
+	Gauge *Gauge `protobuf:"bytes,5,opt,name=gauge,proto3,oneof"`
+}
+
+type Metric_Sum struct {
+	Sum *Sum `protobuf:"bytes,7,opt,name=sum,proto3,oneof"`
+}
+
+type Metric_Histogram struct {
+	Histogram *Histogram `protobuf:"bytes,9,opt,name=histogram,proto3,oneof"`
+}
+
+type Metric_ExponentialHistogram struct {
+	ExponentialHistogram *ExponentialHistogram `protobuf:"bytes,10,opt,name=exponential_histogram,json=exponentialHistogram,proto3,oneof"`
+}
+
+type Metric_Summary struct {
+	Summary *Summary `protobuf:"bytes,11,opt,name=summary,proto3,oneof"`
+}
+
+func (*Metric_Gauge) isMetric_Data() {}
+
+func (*Metric_Sum) isMetric_Data() {}
+
+func (*Metric_Histogram) isMetric_Data() {}
+
+func (*Metric_ExponentialHistogram) isMetric_Data() {}
+
+func (*Metric_Summary) isMetric_Data() {}
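+
+// metricKind is an illustrative sketch (not part of the generated code; the
+// name is hypothetical): it shows how a consumer can discover which concrete
+// type backs the Data oneof via a type switch over the implementations above.
+func metricKind(m *Metric) string {
+	switch m.Data.(type) {
+	case *Metric_Gauge:
+		return "gauge"
+	case *Metric_Sum:
+		return "sum"
+	case *Metric_Histogram:
+		return "histogram"
+	case *Metric_ExponentialHistogram:
+		return "exponential_histogram"
+	case *Metric_Summary:
+		return "summary"
+	default:
+		return "unknown"
+	}
+}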
+
+// Gauge represents the type of a scalar metric that always exports the
+// "current value" for every data point. It should be used for an "unknown"
+// aggregation.
+//
+// A Gauge does not support different aggregation temporalities. Given the
+// aggregation is unknown, points cannot be combined using the same
+// aggregation, regardless of aggregation temporalities. Therefore,
+// AggregationTemporality is not included. Consequently, this also means
+// "StartTimeUnixNano" is ignored for all data points.
+type Gauge struct {
+	unknownFields []byte
+
+	DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+}
+
+// Sum represents the type of a scalar metric that is calculated as a sum of all
+// reported measurements over a time interval.
+type Sum struct {
+	unknownFields []byte
+
+	DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+	// aggregation_temporality describes if the aggregator reports delta changes
+	// since last report time, or cumulative changes since a fixed start time.
+	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.AggregationTemporality" json:"aggregation_temporality,omitempty"`
+	// If "true" means that the sum is monotonic.
+	IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"`
+}
+
+// Histogram represents the type of a metric that is calculated by aggregating
+// as a Histogram of all reported measurements over a time interval.
+type Histogram struct {
+	unknownFields []byte
+
+	DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+	// aggregation_temporality describes if the aggregator reports delta changes
+	// since last report time, or cumulative changes since a fixed start time.
+	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.AggregationTemporality" json:"aggregation_temporality,omitempty"`
+}
+
+// ExponentialHistogram represents the type of a metric that is calculated by aggregating
+// as an ExponentialHistogram of all reported double measurements over a time interval.
+type ExponentialHistogram struct {
+	unknownFields []byte
+
+	DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+	// aggregation_temporality describes if the aggregator reports delta changes
+	// since last report time, or cumulative changes since a fixed start time.
+	AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.AggregationTemporality" json:"aggregation_temporality,omitempty"`
+}
+
+// Summary metric data are used to convey quantile summaries,
+// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
+// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
+// data type. These data points cannot always be merged in a meaningful way.
+// While they can be useful in some applications, histogram data points are
+// recommended for new applications.
+type Summary struct {
+	unknownFields []byte
+
+	DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"`
+}
+
+// NumberDataPoint is a single data point in a timeseries that describes the
+// time-varying scalar value of a metric.
+type NumberDataPoint struct {
+	unknownFields []byte
+
+	// The set of key/value pairs that uniquely identify the timeseries to
+	// which this point belongs. The list may be empty (may contain 0 elements).
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// StartTimeUnixNano is optional but strongly encouraged, see the
+	// detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// TimeUnixNano is required, see the detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// The value itself.  A point is considered invalid when one of the recognized
+	// value fields is not present inside this oneof.
+	//
+	// Types that are assignable to Value:
+	//
+	//	*NumberDataPoint_AsDouble
+	//	*NumberDataPoint_AsInt
+	Value isNumberDataPoint_Value `protobuf_oneof:"value"`
+	// (Optional) List of exemplars collected from
+	// measurements that were used to form the data point
+	Exemplars []*Exemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
+	// Flags that apply to this specific data point.  See DataPointFlags
+	// for the available flags and their meaning.
+	Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
+}
+
+type isNumberDataPoint_Value interface {
+	isNumberDataPoint_Value()
+}
+
+type NumberDataPoint_AsDouble struct {
+	AsDouble float64 `protobuf:"fixed64,4,opt,name=as_double,json=asDouble,proto3,oneof"`
+}
+
+type NumberDataPoint_AsInt struct {
+	AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"`
+}
+
+func (*NumberDataPoint_AsDouble) isNumberDataPoint_Value() {}
+
+func (*NumberDataPoint_AsInt) isNumberDataPoint_Value() {}
+
+// HistogramDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Histogram. A Histogram contains summary statistics
+// for a population of values; it may optionally contain the distribution of
+// those values across a set of buckets.
+//
+// If the histogram contains the distribution of values, then both
+// "explicit_bounds" and "bucket counts" fields must be defined.
+// If the histogram does not contain the distribution of values, then both
+// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
+// "sum" are known.
+type HistogramDataPoint struct {
+	unknownFields []byte
+
+	// The set of key/value pairs that uniquely identify the timeseries to
+	// which this point belongs. The list may be empty (may contain 0 elements).
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// StartTimeUnixNano is optional but strongly encouraged, see the
+	// detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// TimeUnixNano is required, see the detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// count is the number of values in the population. Must be non-negative. This
+	// value must be equal to the sum of the "count" fields in buckets if a
+	// histogram is provided.
+	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
+	// sum of the values in the population. If count is zero then this field
+	// must be zero.
+	//
+	// Note: Sum should only be filled out when measuring non-negative discrete
+	// events, and is assumed to be monotonic over the values of these events.
+	// Negative events *can* be recorded, but sum should not be filled out when
+	// doing so.  This is specifically to enforce compatibility w/ OpenMetrics,
+	// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
+	Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
+	// bucket_counts is an optional field that contains the count values of the
+	// histogram for each bucket.
+	//
+	// The sum of the bucket_counts must equal the value in the count field.
+	//
+	// The number of elements in the bucket_counts array must be one greater
+	// than the number of elements in the explicit_bounds array.
+	BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
+	// explicit_bounds specifies buckets with explicitly defined bounds for values.
+	//
+	// The boundaries for bucket at index i are:
+	//
+	// (-infinity, explicit_bounds[i]] for i == 0
+	// (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
+	// (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
+	//
+	// The values in the explicit_bounds array must be strictly increasing.
+	//
+	// Histogram buckets are inclusive of their upper boundary, except the last
+	// bucket where the boundary is at infinity. This format is intentionally
+	// compatible with the OpenMetrics histogram definition.
+	ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"`
+	// (Optional) List of exemplars collected from
+	// measurements that were used to form the data point
+	Exemplars []*Exemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
+	// Flags that apply to this specific data point.  See DataPointFlags
+	// for the available flags and their meaning.
+	Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
+	// min is the minimum value over (start_time, end_time].
+	Min *float64 `protobuf:"fixed64,11,opt,name=min,proto3,oneof" json:"min,omitempty"`
+	// max is the maximum value over (start_time, end_time].
+	Max *float64 `protobuf:"fixed64,12,opt,name=max,proto3,oneof" json:"max,omitempty"`
+}
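+
+// bucketIndexForValue is an illustrative sketch (not part of the generated
+// code; the name is hypothetical): it maps a value to a bucket index using
+// the explicit_bounds rules above — buckets are inclusive of their upper
+// bound, and the last bucket extends to +infinity.
+func bucketIndexForValue(explicitBounds []float64, v float64) int {
+	for i, bound := range explicitBounds {
+		if v <= bound {
+			return i
+		}
+	}
+	return len(explicitBounds)
+}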
+
+// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the
+// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains
+// summary statistics for a population of values; it may optionally contain the
+// distribution of those values across a set of buckets.
+type ExponentialHistogramDataPoint struct {
+	unknownFields []byte
+
+	// The set of key/value pairs that uniquely identify the timeseries to
+	// which this point belongs. The list may be empty (may contain 0 elements).
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// StartTimeUnixNano is optional but strongly encouraged, see the
+	// detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// TimeUnixNano is required, see the detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// count is the number of values in the population. Must be
+	// non-negative. This value must be equal to the sum of the "bucket_counts"
+	// values in the positive and negative Buckets plus the "zero_count" field.
+	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
+	// sum of the values in the population. If count is zero then this field
+	// must be zero.
+	//
+	// Note: Sum should only be filled out when measuring non-negative discrete
+	// events, and is assumed to be monotonic over the values of these events.
+	// Negative events *can* be recorded, but sum should not be filled out when
+	// doing so.  This is specifically to enforce compatibility w/ OpenMetrics,
+	// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
+	Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
+	// scale describes the resolution of the histogram.  Boundaries are
+	// located at powers of the base, where:
+	//
+	//	base = (2^(2^-scale))
+	//
+	// The histogram bucket identified by `index`, a signed integer,
+	// contains values that are greater than (base^index) and
+	// less than or equal to (base^(index+1)).
+	//
+	// The positive and negative ranges of the histogram are expressed
+	// separately.  Negative values are mapped by their absolute value
+	// into the negative range using the same scale as the positive range.
+	//
+	// scale is not restricted by the protocol, as the permissible
+	// values depend on the range of the data.
+	Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"`
+	// zero_count is the count of values that are either exactly zero or
+	// within the region considered zero by the instrumentation at the
+	// tolerated degree of precision.  This bucket stores values that
+	// cannot be expressed using the standard exponential formula as
+	// well as values that have been rounded to zero.
+	//
+	// Implementations MAY consider the zero bucket to have probability
+	// mass equal to (zero_count / count).
+	ZeroCount uint64 `protobuf:"fixed64,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"`
+	// positive carries the positive range of exponential bucket counts.
+	Positive *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,8,opt,name=positive,proto3" json:"positive,omitempty"`
+	// negative carries the negative range of exponential bucket counts.
+	Negative *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,9,opt,name=negative,proto3" json:"negative,omitempty"`
+	// Flags that apply to this specific data point.  See DataPointFlags
+	// for the available flags and their meaning.
+	Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"`
+	// (Optional) List of exemplars collected from
+	// measurements that were used to form the data point
+	Exemplars []*Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars,omitempty"`
+	// min is the minimum value over (start_time, end_time].
+	Min *float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"`
+	// max is the maximum value over (start_time, end_time].
+	Max *float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"`
+}
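+
+// exponentialBucketUpperBound is an illustrative sketch (not part of the
+// generated code; the name is hypothetical, and it assumes "math" is imported
+// in this file): it derives the upper boundary of the bucket at `index` from
+// `scale` using the formula above, i.e. base = 2^(2^-scale), and the bucket
+// covers (base^index, base^(index+1)].
+func exponentialBucketUpperBound(scale, index int32) float64 {
+	base := math.Pow(2, math.Exp2(-float64(scale)))
+	return math.Pow(base, float64(index)+1)
+}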
+
+// SummaryDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Summary metric.
+type SummaryDataPoint struct {
+	unknownFields []byte
+
+	// The set of key/value pairs that uniquely identify the timeseries to
+	// which this point belongs. The list may be empty (may contain 0 elements).
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// StartTimeUnixNano is optional but strongly encouraged, see the
+	// detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+	// TimeUnixNano is required, see the detailed comments above Metric.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// count is the number of values in the population. Must be non-negative.
+	Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"`
+	// sum of the values in the population. If count is zero then this field
+	// must be zero.
+	//
+	// Note: Sum should only be filled out when measuring non-negative discrete
+	// events, and is assumed to be monotonic over the values of these events.
+	// Negative events *can* be recorded, but sum should not be filled out when
+	// doing so.  This is specifically to enforce compatibility w/ OpenMetrics,
+	// see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
+	Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"`
+	// (Optional) list of values at different quantiles of the distribution calculated
+	// from the current snapshot. The quantiles must be strictly increasing.
+	QuantileValues []*SummaryDataPoint_ValueAtQuantile `protobuf:"bytes,6,rep,name=quantile_values,json=quantileValues,proto3" json:"quantile_values,omitempty"`
+	// Flags that apply to this specific data point.  See DataPointFlags
+	// for the available flags and their meaning.
+	Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"`
+}
+
+// A representation of an exemplar, which is a sample input measurement.
+// Exemplars also hold information about the environment when the measurement
+// was recorded, for example the span and trace ID of the active span when the
+// exemplar was recorded.
+type Exemplar struct {
+	unknownFields []byte
+
+	// The set of key/value pairs that were filtered out by the aggregator, but
+	// recorded alongside the original measurement. Only key/value pairs that were
+	// filtered out by the aggregator should be included.
+	FilteredAttributes []*KeyValue `protobuf:"bytes,7,rep,name=filtered_attributes,json=filteredAttributes,proto3" json:"filtered_attributes,omitempty"`
+	// time_unix_nano is the exact time when this exemplar was recorded.
+	//
+	// Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+	// 1970.
+	TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+	// The value of the measurement that was recorded. An exemplar is
+	// considered invalid when one of the recognized value fields is not present
+	// inside this oneof.
+	//
+	// Types that are assignable to Value:
+	//
+	//	*Exemplar_AsDouble
+	//	*Exemplar_AsInt
+	Value isExemplar_Value `protobuf_oneof:"value"`
+	// (Optional) Span ID of the exemplar trace.
+	// span_id may be missing if the measurement is not recorded inside a trace
+	// or if the trace is not sampled.
+	SpanId []byte `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+	// (Optional) Trace ID of the exemplar trace.
+	// trace_id may be missing if the measurement is not recorded inside a trace
+	// or if the trace is not sampled.
+	TraceId []byte `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+}
+
+type isExemplar_Value interface {
+	isExemplar_Value()
+}
+
+type Exemplar_AsDouble struct {
+	AsDouble float64 `protobuf:"fixed64,3,opt,name=as_double,json=asDouble,proto3,oneof"`
+}
+
+type Exemplar_AsInt struct {
+	AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"`
+}
+
+func (*Exemplar_AsDouble) isExemplar_Value() {}
+
+func (*Exemplar_AsInt) isExemplar_Value() {}
+
+// Buckets are a set of bucket counts, encoded in a contiguous array
+// of counts.
+type ExponentialHistogramDataPoint_Buckets struct {
+	unknownFields []byte
+
+	// Offset is the bucket index of the first entry in the bucket_counts array.
+	//
+	// Note: This uses a varint encoding as a simple form of compression.
+	Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
+	// Count is an array of counts, where count[i] carries the count
+	// of the bucket at index (offset+i).  count[i] is the count of
+	// values greater than base^(offset+i) and less than or equal to
+	// base^(offset+i+1).
+	//
+	// Note: By contrast, the explicit HistogramDataPoint uses
+	// fixed64.  This field is expected to have many buckets,
+	// especially zeros, so uint64 has been selected to ensure
+	// varint encoding.
+	BucketCounts []uint64 `protobuf:"varint,2,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"`
+}
+
+// Represents the value at a given quantile of a distribution.
+//
+// To record Min and Max values, the following conventions are used:
+// - The 1.0 quantile is equivalent to the maximum value observed.
+// - The 0.0 quantile is equivalent to the minimum value observed.
+//
+// See the following issue for more context:
+// https://github.com/open-telemetry/opentelemetry-proto/issues/125
+type SummaryDataPoint_ValueAtQuantile struct {
+	unknownFields []byte
+
+	// The quantile of a distribution. Must be in the interval
+	// [0.0, 1.0].
+	Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"`
+	// The value at the given quantile of a distribution.
+	//
+	// Quantile values must NOT be negative.
+	Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
+}
diff --git a/lib/protoparser/opentelemetry/pb/metrics_service.pb.go b/lib/protoparser/opentelemetry/pb/metrics_service.pb.go
new file mode 100644
index 0000000000..b7ef375fe3
--- /dev/null
+++ b/lib/protoparser/opentelemetry/pb/metrics_service.pb.go
@@ -0,0 +1,32 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.21.12
+// source: lib/protoparser/opentelemetry/proto/metrics_service.proto
+
+package pb
+
+type ExportMetricsServiceRequest struct {
+	unknownFields []byte
+
+	// An array of ResourceMetrics.
+	// For data coming from a single resource this array will typically contain one
+	// element. Intermediary nodes (such as OpenTelemetry Collector) that receive
+	// data from multiple origins typically batch the data before forwarding further and
+	// in that case this array will contain multiple elements.
+	ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"`
+}
diff --git a/lib/protoparser/opentelemetry/pb/metrics_service_vtproto.pb.go b/lib/protoparser/opentelemetry/pb/metrics_service_vtproto.pb.go
new file mode 100644
index 0000000000..32aac28677
--- /dev/null
+++ b/lib/protoparser/opentelemetry/pb/metrics_service_vtproto.pb.go
@@ -0,0 +1,157 @@
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// protoc-gen-go-vtproto version: v0.4.0
+// source: lib/protoparser/opentelemetry/proto/metrics_service.proto
+
+package pb
+
+import (
+	fmt "fmt"
+	io "io"
+)
+
+func (m *ExportMetricsServiceRequest) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ExportMetricsServiceRequest) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ExportMetricsServiceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if len(m.ResourceMetrics) > 0 {
+		for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ExportMetricsServiceRequest) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.ResourceMetrics) > 0 {
+		for _, e := range m.ResourceMetrics {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *ExportMetricsServiceRequest) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExportMetricsServiceRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExportMetricsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceMetrics = append(m.ResourceMetrics, &ResourceMetrics{})
+			if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
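+
+// roundTripExample is an illustrative sketch (not part of the generated code;
+// the name and the schema URL are hypothetical): it demonstrates the expected
+// MarshalVT/UnmarshalVT round trip for an export request.
+func roundTripExample() error {
+	req := &ExportMetricsServiceRequest{
+		ResourceMetrics: []*ResourceMetrics{{SchemaUrl: "https://example.com/schema"}},
+	}
+	data, err := req.MarshalVT()
+	if err != nil {
+		return err
+	}
+	var decoded ExportMetricsServiceRequest
+	return decoded.UnmarshalVT(data)
+}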
diff --git a/lib/protoparser/opentelemetry/pb/metrics_vtproto.pb.go b/lib/protoparser/opentelemetry/pb/metrics_vtproto.pb.go
new file mode 100644
index 0000000000..f89703fcf8
--- /dev/null
+++ b/lib/protoparser/opentelemetry/pb/metrics_vtproto.pb.go
@@ -0,0 +1,4331 @@
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// protoc-gen-go-vtproto version: v0.4.0
+// source: lib/protoparser/opentelemetry/proto/metrics.proto
+
+package pb
+
+import (
+	binary "encoding/binary"
+	fmt "fmt"
+	io "io"
+	math "math"
+)
+
+func (m *MetricsData) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MetricsData) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *MetricsData) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if len(m.ResourceMetrics) > 0 {
+		for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceMetrics) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceMetrics) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ResourceMetrics) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if len(m.SchemaUrl) > 0 {
+		i -= len(m.SchemaUrl)
+		copy(dAtA[i:], m.SchemaUrl)
+		i = encodeVarint(dAtA, i, uint64(len(m.SchemaUrl)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.ScopeMetrics) > 0 {
+		for iNdEx := len(m.ScopeMetrics) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.ScopeMetrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Resource != nil {
+		size, err := m.Resource.MarshalToSizedBufferVT(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarint(dAtA, i, uint64(size))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ScopeMetrics) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ScopeMetrics) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ScopeMetrics) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if len(m.SchemaUrl) > 0 {
+		i -= len(m.SchemaUrl)
+		copy(dAtA[i:], m.SchemaUrl)
+		i = encodeVarint(dAtA, i, uint64(len(m.SchemaUrl)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Metrics) > 0 {
+		for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.Metrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Metric) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Metric) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *Metric) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if vtmsg, ok := m.Data.(interface {
+		MarshalToSizedBufferVT([]byte) (int, error)
+	}); ok {
+		size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+	}
+	if len(m.Unit) > 0 {
+		i -= len(m.Unit)
+		copy(dAtA[i:], m.Unit)
+		i = encodeVarint(dAtA, i, uint64(len(m.Unit)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Description) > 0 {
+		i -= len(m.Description)
+		copy(dAtA[i:], m.Description)
+		i = encodeVarint(dAtA, i, uint64(len(m.Description)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarint(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Metric_Gauge) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *Metric_Gauge) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.Gauge != nil {
+		size, err := m.Gauge.MarshalToSizedBufferVT(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarint(dAtA, i, uint64(size))
+		i--
+		dAtA[i] = 0x2a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *Metric_Sum) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *Metric_Sum) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.Sum != nil {
+		size, err := m.Sum.MarshalToSizedBufferVT(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarint(dAtA, i, uint64(size))
+		i--
+		dAtA[i] = 0x3a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *Metric_Histogram) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *Metric_Histogram) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.Histogram != nil {
+		size, err := m.Histogram.MarshalToSizedBufferVT(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarint(dAtA, i, uint64(size))
+		i--
+		dAtA[i] = 0x4a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *Metric_ExponentialHistogram) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *Metric_ExponentialHistogram) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ExponentialHistogram != nil {
+		size, err := m.ExponentialHistogram.MarshalToSizedBufferVT(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarint(dAtA, i, uint64(size))
+		i--
+		dAtA[i] = 0x52
+	}
+	return len(dAtA) - i, nil
+}
+func (m *Metric_Summary) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *Metric_Summary) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.Summary != nil {
+		size, err := m.Summary.MarshalToSizedBufferVT(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarint(dAtA, i, uint64(size))
+		i--
+		dAtA[i] = 0x5a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *Gauge) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Gauge) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *Gauge) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if len(m.DataPoints) > 0 {
+		for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.DataPoints[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Sum) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Sum) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *Sum) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if m.IsMonotonic {
+		i--
+		if m.IsMonotonic {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.AggregationTemporality != 0 {
+		i = encodeVarint(dAtA, i, uint64(m.AggregationTemporality))
+		i--
+		dAtA[i] = 0x10
+	}
+	if len(m.DataPoints) > 0 {
+		for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.DataPoints[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Histogram) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Histogram) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *Histogram) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if m.AggregationTemporality != 0 {
+		i = encodeVarint(dAtA, i, uint64(m.AggregationTemporality))
+		i--
+		dAtA[i] = 0x10
+	}
+	if len(m.DataPoints) > 0 {
+		for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.DataPoints[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ExponentialHistogram) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ExponentialHistogram) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ExponentialHistogram) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if m.AggregationTemporality != 0 {
+		i = encodeVarint(dAtA, i, uint64(m.AggregationTemporality))
+		i--
+		dAtA[i] = 0x10
+	}
+	if len(m.DataPoints) > 0 {
+		for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.DataPoints[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Summary) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Summary) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *Summary) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if len(m.DataPoints) > 0 {
+		for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.DataPoints[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *NumberDataPoint) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NumberDataPoint) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *NumberDataPoint) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
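+	// The Value oneof is marshaled through an interface assertion: whichever
+	// wrapper (AsDouble or AsInt) is set emits its own tag and payload.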
+	if vtmsg, ok := m.Value.(interface {
+		MarshalToSizedBufferVT([]byte) (int, error)
+	}); ok {
+		size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+	}
+	if m.Flags != 0 {
+		i = encodeVarint(dAtA, i, uint64(m.Flags))
+		i--
+		dAtA[i] = 0x40
+	}
+	if len(m.Attributes) > 0 {
+		for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.Attributes[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if len(m.Exemplars) > 0 {
+		for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.Exemplars[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
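+	// fixed64 fields: 8 little-endian bytes behind a one-byte tag with wire
+	// type 1 (0x19 = field 3, 0x11 = field 2).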
+	if m.TimeUnixNano != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
+		i--
+		dAtA[i] = 0x19
+	}
+	if m.StartTimeUnixNano != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
+		i--
+		dAtA[i] = 0x11
+	}
+	return len(dAtA) - i, nil
+}
+
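+// The oneof wrappers marshal only their own field, each as a fixed64: the
+// as_double tag is 0x21 (field 4<<3 | wire type 1) carrying the raw IEEE 754
+// bits, and the as_int tag is 0x31 (field 6<<3 | wire type 1).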
+func (m *NumberDataPoint_AsDouble) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *NumberDataPoint_AsDouble) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i -= 8
+	binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AsDouble))))
+	i--
+	dAtA[i] = 0x21
+	return len(dAtA) - i, nil
+}
+func (m *NumberDataPoint_AsInt) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *NumberDataPoint_AsInt) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i -= 8
+	binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.AsInt))
+	i--
+	dAtA[i] = 0x31
+	return len(dAtA) - i, nil
+}
+func (m *HistogramDataPoint) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HistogramDataPoint) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *HistogramDataPoint) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if m.Max != nil {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Max))))
+		i--
+		dAtA[i] = 0x61
+	}
+	if m.Min != nil {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Min))))
+		i--
+		dAtA[i] = 0x59
+	}
+	if m.Flags != 0 {
+		i = encodeVarint(dAtA, i, uint64(m.Flags))
+		i--
+		dAtA[i] = 0x50
+	}
+	if len(m.Attributes) > 0 {
+		for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.Attributes[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0x4a
+		}
+	}
+	if len(m.Exemplars) > 0 {
+		for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.Exemplars[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0x42
+		}
+	}
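+	// Packed repeated fixed64: every element is written as raw 8-byte
+	// little-endian data (doubles via math.Float64bits), then one
+	// length-delimited tag covers the run with byte length count*8
+	// (0x3a = field 7 explicit_bounds, 0x32 = field 6 bucket_counts).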
+	if len(m.ExplicitBounds) > 0 {
+		for iNdEx := len(m.ExplicitBounds) - 1; iNdEx >= 0; iNdEx-- {
+			f1 := math.Float64bits(float64(m.ExplicitBounds[iNdEx]))
+			i -= 8
+			binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1))
+		}
+		i = encodeVarint(dAtA, i, uint64(len(m.ExplicitBounds)*8))
+		i--
+		dAtA[i] = 0x3a
+	}
+	if len(m.BucketCounts) > 0 {
+		for iNdEx := len(m.BucketCounts) - 1; iNdEx >= 0; iNdEx-- {
+			i -= 8
+			binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.BucketCounts[iNdEx]))
+		}
+		i = encodeVarint(dAtA, i, uint64(len(m.BucketCounts)*8))
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.Sum != nil {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Sum))))
+		i--
+		dAtA[i] = 0x29
+	}
+	if m.Count != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count))
+		i--
+		dAtA[i] = 0x21
+	}
+	if m.TimeUnixNano != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
+		i--
+		dAtA[i] = 0x19
+	}
+	if m.StartTimeUnixNano != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
+		i--
+		dAtA[i] = 0x11
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ExponentialHistogramDataPoint_Buckets) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ExponentialHistogramDataPoint_Buckets) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ExponentialHistogramDataPoint_Buckets) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
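+	// bucket_counts is a packed varint field: its encoded size is precomputed
+	// with sov so the values can be written forward into a reserved window
+	// that is then length-prefixed. offset is a sint32, zigzag-encoded as
+	// (v<<1)^(v>>31) so small negative values stay small on the wire.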
+	if len(m.BucketCounts) > 0 {
+		var pksize2 int
+		for _, num := range m.BucketCounts {
+			pksize2 += sov(uint64(num))
+		}
+		i -= pksize2
+		j1 := i
+		for _, num := range m.BucketCounts {
+			for num >= 1<<7 {
+				dAtA[j1] = uint8(uint64(num)&0x7f | 0x80)
+				num >>= 7
+				j1++
+			}
+			dAtA[j1] = uint8(num)
+			j1++
+		}
+		i = encodeVarint(dAtA, i, uint64(pksize2))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Offset != 0 {
+		i = encodeVarint(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31))))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ExponentialHistogramDataPoint) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ExponentialHistogramDataPoint) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *ExponentialHistogramDataPoint) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if m.Max != nil {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Max))))
+		i--
+		dAtA[i] = 0x69
+	}
+	if m.Min != nil {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Min))))
+		i--
+		dAtA[i] = 0x61
+	}
+	if len(m.Exemplars) > 0 {
+		for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.Exemplars[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0x5a
+		}
+	}
+	if m.Flags != 0 {
+		i = encodeVarint(dAtA, i, uint64(m.Flags))
+		i--
+		dAtA[i] = 0x50
+	}
+	if m.Negative != nil {
+		size, err := m.Negative.MarshalToSizedBufferVT(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarint(dAtA, i, uint64(size))
+		i--
+		dAtA[i] = 0x4a
+	}
+	if m.Positive != nil {
+		size, err := m.Positive.MarshalToSizedBufferVT(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarint(dAtA, i, uint64(size))
+		i--
+		dAtA[i] = 0x42
+	}
+	if m.ZeroCount != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.ZeroCount))
+		i--
+		dAtA[i] = 0x39
+	}
+	if m.Scale != 0 {
+		i = encodeVarint(dAtA, i, uint64((uint32(m.Scale)<<1)^uint32((m.Scale>>31))))
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.Sum != nil {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Sum))))
+		i--
+		dAtA[i] = 0x29
+	}
+	if m.Count != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count))
+		i--
+		dAtA[i] = 0x21
+	}
+	if m.TimeUnixNano != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
+		i--
+		dAtA[i] = 0x19
+	}
+	if m.StartTimeUnixNano != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
+		i--
+		dAtA[i] = 0x11
+	}
+	if len(m.Attributes) > 0 {
+		for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.Attributes[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *SummaryDataPoint_ValueAtQuantile) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SummaryDataPoint_ValueAtQuantile) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *SummaryDataPoint_ValueAtQuantile) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if m.Value != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
+		i--
+		dAtA[i] = 0x11
+	}
+	if m.Quantile != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile))))
+		i--
+		dAtA[i] = 0x9
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *SummaryDataPoint) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SummaryDataPoint) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *SummaryDataPoint) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if m.Flags != 0 {
+		i = encodeVarint(dAtA, i, uint64(m.Flags))
+		i--
+		dAtA[i] = 0x40
+	}
+	if len(m.Attributes) > 0 {
+		for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.Attributes[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if len(m.QuantileValues) > 0 {
+		for iNdEx := len(m.QuantileValues) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.QuantileValues[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	if m.Sum != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum))))
+		i--
+		dAtA[i] = 0x29
+	}
+	if m.Count != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count))
+		i--
+		dAtA[i] = 0x21
+	}
+	if m.TimeUnixNano != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
+		i--
+		dAtA[i] = 0x19
+	}
+	if m.StartTimeUnixNano != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano))
+		i--
+		dAtA[i] = 0x11
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Exemplar) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Exemplar) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *Exemplar) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if vtmsg, ok := m.Value.(interface {
+		MarshalToSizedBufferVT([]byte) (int, error)
+	}); ok {
+		size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+	}
+	if len(m.FilteredAttributes) > 0 {
+		for iNdEx := len(m.FilteredAttributes) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.FilteredAttributes[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
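+	// trace_id and span_id are bytes fields: copied verbatim behind a varint
+	// length prefix (tags 0x2a = field 5, 0x22 = field 4).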
+	if len(m.TraceId) > 0 {
+		i -= len(m.TraceId)
+		copy(dAtA[i:], m.TraceId)
+		i = encodeVarint(dAtA, i, uint64(len(m.TraceId)))
+		i--
+		dAtA[i] = 0x2a
+	}
+	if len(m.SpanId) > 0 {
+		i -= len(m.SpanId)
+		copy(dAtA[i:], m.SpanId)
+		i = encodeVarint(dAtA, i, uint64(len(m.SpanId)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.TimeUnixNano != 0 {
+		i -= 8
+		binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano))
+		i--
+		dAtA[i] = 0x11
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Exemplar_AsDouble) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *Exemplar_AsDouble) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i -= 8
+	binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AsDouble))))
+	i--
+	dAtA[i] = 0x19
+	return len(dAtA) - i, nil
+}
+func (m *Exemplar_AsInt) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *Exemplar_AsInt) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i -= 8
+	binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.AsInt))
+	i--
+	dAtA[i] = 0x31
+	return len(dAtA) - i, nil
+}
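+
+// The SizeVT methods compute the exact wire size so MarshalVT can allocate
+// once. A length-delimited subfield of payload size l costs
+// 1 (tag) + sov(uint64(l)) (varint length prefix) + l bytes.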
+func (m *MetricsData) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.ResourceMetrics) > 0 {
+		for _, e := range m.ResourceMetrics {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *ResourceMetrics) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Resource != nil {
+		l = m.Resource.SizeVT()
+		n += 1 + l + sov(uint64(l))
+	}
+	if len(m.ScopeMetrics) > 0 {
+		for _, e := range m.ScopeMetrics {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	l = len(m.SchemaUrl)
+	if l > 0 {
+		n += 1 + l + sov(uint64(l))
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *ScopeMetrics) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Metrics) > 0 {
+		for _, e := range m.Metrics {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	l = len(m.SchemaUrl)
+	if l > 0 {
+		n += 1 + l + sov(uint64(l))
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *Metric) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sov(uint64(l))
+	}
+	l = len(m.Description)
+	if l > 0 {
+		n += 1 + l + sov(uint64(l))
+	}
+	l = len(m.Unit)
+	if l > 0 {
+		n += 1 + l + sov(uint64(l))
+	}
+	if vtmsg, ok := m.Data.(interface{ SizeVT() int }); ok {
+		n += vtmsg.SizeVT()
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *Metric_Gauge) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Gauge != nil {
+		l = m.Gauge.SizeVT()
+		n += 1 + l + sov(uint64(l))
+	}
+	return n
+}
+func (m *Metric_Sum) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Sum != nil {
+		l = m.Sum.SizeVT()
+		n += 1 + l + sov(uint64(l))
+	}
+	return n
+}
+func (m *Metric_Histogram) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Histogram != nil {
+		l = m.Histogram.SizeVT()
+		n += 1 + l + sov(uint64(l))
+	}
+	return n
+}
+func (m *Metric_ExponentialHistogram) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ExponentialHistogram != nil {
+		l = m.ExponentialHistogram.SizeVT()
+		n += 1 + l + sov(uint64(l))
+	}
+	return n
+}
+func (m *Metric_Summary) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Summary != nil {
+		l = m.Summary.SizeVT()
+		n += 1 + l + sov(uint64(l))
+	}
+	return n
+}
+func (m *Gauge) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.DataPoints) > 0 {
+		for _, e := range m.DataPoints {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *Sum) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.DataPoints) > 0 {
+		for _, e := range m.DataPoints {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	if m.AggregationTemporality != 0 {
+		n += 1 + sov(uint64(m.AggregationTemporality))
+	}
+	if m.IsMonotonic {
+		n += 2
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *Histogram) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.DataPoints) > 0 {
+		for _, e := range m.DataPoints {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	if m.AggregationTemporality != 0 {
+		n += 1 + sov(uint64(m.AggregationTemporality))
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *ExponentialHistogram) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.DataPoints) > 0 {
+		for _, e := range m.DataPoints {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	if m.AggregationTemporality != 0 {
+		n += 1 + sov(uint64(m.AggregationTemporality))
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *Summary) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.DataPoints) > 0 {
+		for _, e := range m.DataPoints {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *NumberDataPoint) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
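+	// A fixed64 field costs a constant 9 bytes: 1 tag byte + 8 data bytes.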
+	if m.StartTimeUnixNano != 0 {
+		n += 9
+	}
+	if m.TimeUnixNano != 0 {
+		n += 9
+	}
+	if vtmsg, ok := m.Value.(interface{ SizeVT() int }); ok {
+		n += vtmsg.SizeVT()
+	}
+	if len(m.Exemplars) > 0 {
+		for _, e := range m.Exemplars {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	if len(m.Attributes) > 0 {
+		for _, e := range m.Attributes {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	if m.Flags != 0 {
+		n += 1 + sov(uint64(m.Flags))
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *NumberDataPoint_AsDouble) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 9
+	return n
+}
+func (m *NumberDataPoint_AsInt) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 9
+	return n
+}
+func (m *HistogramDataPoint) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.StartTimeUnixNano != 0 {
+		n += 9
+	}
+	if m.TimeUnixNano != 0 {
+		n += 9
+	}
+	if m.Count != 0 {
+		n += 9
+	}
+	if m.Sum != nil {
+		n += 9
+	}
+	if len(m.BucketCounts) > 0 {
+		n += 1 + sov(uint64(len(m.BucketCounts)*8)) + len(m.BucketCounts)*8
+	}
+	if len(m.ExplicitBounds) > 0 {
+		n += 1 + sov(uint64(len(m.ExplicitBounds)*8)) + len(m.ExplicitBounds)*8
+	}
+	if len(m.Exemplars) > 0 {
+		for _, e := range m.Exemplars {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	if len(m.Attributes) > 0 {
+		for _, e := range m.Attributes {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	if m.Flags != 0 {
+		n += 1 + sov(uint64(m.Flags))
+	}
+	if m.Min != nil {
+		n += 9
+	}
+	if m.Max != nil {
+		n += 9
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *ExponentialHistogramDataPoint_Buckets) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Offset != 0 {
+		n += 1 + soz(uint64(m.Offset))
+	}
+	if len(m.BucketCounts) > 0 {
+		l = 0
+		for _, e := range m.BucketCounts {
+			l += sov(uint64(e))
+		}
+		n += 1 + sov(uint64(l)) + l
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *ExponentialHistogramDataPoint) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Attributes) > 0 {
+		for _, e := range m.Attributes {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	if m.StartTimeUnixNano != 0 {
+		n += 9
+	}
+	if m.TimeUnixNano != 0 {
+		n += 9
+	}
+	if m.Count != 0 {
+		n += 9
+	}
+	if m.Sum != nil {
+		n += 9
+	}
+	if m.Scale != 0 {
+		n += 1 + soz(uint64(m.Scale))
+	}
+	if m.ZeroCount != 0 {
+		n += 9
+	}
+	if m.Positive != nil {
+		l = m.Positive.SizeVT()
+		n += 1 + l + sov(uint64(l))
+	}
+	if m.Negative != nil {
+		l = m.Negative.SizeVT()
+		n += 1 + l + sov(uint64(l))
+	}
+	if m.Flags != 0 {
+		n += 1 + sov(uint64(m.Flags))
+	}
+	if len(m.Exemplars) > 0 {
+		for _, e := range m.Exemplars {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	if m.Min != nil {
+		n += 9
+	}
+	if m.Max != nil {
+		n += 9
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *SummaryDataPoint_ValueAtQuantile) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Quantile != 0 {
+		n += 9
+	}
+	if m.Value != 0 {
+		n += 9
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *SummaryDataPoint) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.StartTimeUnixNano != 0 {
+		n += 9
+	}
+	if m.TimeUnixNano != 0 {
+		n += 9
+	}
+	if m.Count != 0 {
+		n += 9
+	}
+	if m.Sum != 0 {
+		n += 9
+	}
+	if len(m.QuantileValues) > 0 {
+		for _, e := range m.QuantileValues {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	if len(m.Attributes) > 0 {
+		for _, e := range m.Attributes {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	if m.Flags != 0 {
+		n += 1 + sov(uint64(m.Flags))
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *Exemplar) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.TimeUnixNano != 0 {
+		n += 9
+	}
+	if vtmsg, ok := m.Value.(interface{ SizeVT() int }); ok {
+		n += vtmsg.SizeVT()
+	}
+	l = len(m.SpanId)
+	if l > 0 {
+		n += 1 + l + sov(uint64(l))
+	}
+	l = len(m.TraceId)
+	if l > 0 {
+		n += 1 + l + sov(uint64(l))
+	}
+	if len(m.FilteredAttributes) > 0 {
+		for _, e := range m.FilteredAttributes {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *Exemplar_AsDouble) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 9
+	return n
+}
+func (m *Exemplar_AsInt) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 9
+	return n
+}
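+
+// The UnmarshalVT methods walk the buffer key by key: each tag is a varint
+// decoded 7 bits per byte (the high bit marks continuation), then split into
+// fieldNum = wire >> 3 and wireType = wire & 0x7. Unknown fields are skipped
+// and preserved verbatim in m.unknownFields.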
+func (m *MetricsData) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MetricsData: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MetricsData: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceMetrics = append(m.ResourceMetrics, &ResourceMetrics{})
+			if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceMetrics) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceMetrics: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceMetrics: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Resource == nil {
+				m.Resource = &Resource{}
+			}
+			if err := m.Resource.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ScopeMetrics", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ScopeMetrics = append(m.ScopeMetrics, &ScopeMetrics{})
+			if err := m.ScopeMetrics[len(m.ScopeMetrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SchemaUrl = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ScopeMetrics) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ScopeMetrics: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ScopeMetrics: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Metrics = append(m.Metrics, &Metric{})
+			if err := m.Metrics[len(m.Metrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SchemaUrl = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Metric) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Metric: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Description = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Unit = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
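+			// oneof merge semantics: if Data already holds a Gauge from an
+			// earlier occurrence of this field, unmarshal into it; otherwise
+			// allocate a fresh wrapper and install it.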
+			if oneof, ok := m.Data.(*Metric_Gauge); ok {
+				if err := oneof.Gauge.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+			} else {
+				v := &Gauge{}
+				if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+				m.Data = &Metric_Gauge{Gauge: v}
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if oneof, ok := m.Data.(*Metric_Sum); ok {
+				if err := oneof.Sum.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+			} else {
+				v := &Sum{}
+				if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+				m.Data = &Metric_Sum{Sum: v}
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if oneof, ok := m.Data.(*Metric_Histogram); ok {
+				if err := oneof.Histogram.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+			} else {
+				v := &Histogram{}
+				if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+				m.Data = &Metric_Histogram{Histogram: v}
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExponentialHistogram", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if oneof, ok := m.Data.(*Metric_ExponentialHistogram); ok {
+				if err := oneof.ExponentialHistogram.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+			} else {
+				v := &ExponentialHistogram{}
+				if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+				m.Data = &Metric_ExponentialHistogram{ExponentialHistogram: v}
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if oneof, ok := m.Data.(*Metric_Summary); ok {
+				if err := oneof.Summary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+			} else {
+				v := &Summary{}
+				if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+					return err
+				}
+				m.Data = &Metric_Summary{Summary: v}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Gauge) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Gauge: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Gauge: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DataPoints = append(m.DataPoints, &NumberDataPoint{})
+			if err := m.DataPoints[len(m.DataPoints)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Sum) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Sum: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Sum: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DataPoints = append(m.DataPoints, &NumberDataPoint{})
+			if err := m.DataPoints[len(m.DataPoints)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
+			}
+			m.AggregationTemporality = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.IsMonotonic = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Histogram) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Histogram: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DataPoints = append(m.DataPoints, &HistogramDataPoint{})
+			if err := m.DataPoints[len(m.DataPoints)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
+			}
+			m.AggregationTemporality = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ExponentialHistogram) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExponentialHistogram: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExponentialHistogram: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DataPoints = append(m.DataPoints, &ExponentialHistogramDataPoint{})
+			if err := m.DataPoints[len(m.DataPoints)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType)
+			}
+			m.AggregationTemporality = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Summary) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Summary: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Summary: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DataPoints = append(m.DataPoints, &SummaryDataPoint{})
+			if err := m.DataPoints[len(m.DataPoints)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NumberDataPoint) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NumberDataPoint: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NumberDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 2:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
+			}
+			m.StartTimeUnixNano = 0
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StartTimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+		case 3:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
+			}
+			m.TimeUnixNano = 0
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
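+		// Field 4 (as_double): the 8 raw bytes are reinterpreted as an
+		// IEEE 754 double via math.Float64frombits and stored in the oneof.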
+		case 4:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			m.Value = &NumberDataPoint_AsDouble{AsDouble: float64(math.Float64frombits(v))}
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Exemplars = append(m.Exemplars, &Exemplar{})
+			if err := m.Exemplars[len(m.Exemplars)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
+			}
+			var v int64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = int64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			m.Value = &NumberDataPoint_AsInt{AsInt: v}
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Attributes = append(m.Attributes, &KeyValue{})
+			if err := m.Attributes[len(m.Attributes)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
+			}
+			m.Flags = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Flags |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HistogramDataPoint) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HistogramDataPoint: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 2:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
+			}
+			m.StartTimeUnixNano = 0
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StartTimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+		case 3:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
+			}
+			m.TimeUnixNano = 0
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+		case 4:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+			}
+			m.Count = 0
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Count = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+		case 5:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			v2 := float64(math.Float64frombits(v))
+			m.Sum = &v2
+		case 6:
+			if wireType == 1 {
+				var v uint64
+				if (iNdEx + 8) > l {
+					return io.ErrUnexpectedEOF
+				}
+				v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+				iNdEx += 8
+				m.BucketCounts = append(m.BucketCounts, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflow
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLength
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLength
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
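+				// packed fixed64 encoding: every element occupies exactly 8 bytes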
+				var elementCount int
+				elementCount = packedLen / 8
+				if elementCount != 0 && len(m.BucketCounts) == 0 {
+					m.BucketCounts = make([]uint64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					if (iNdEx + 8) > l {
+						return io.ErrUnexpectedEOF
+					}
+					v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+					iNdEx += 8
+					m.BucketCounts = append(m.BucketCounts, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
+			}
+		case 7:
+			if wireType == 1 {
+				var v uint64
+				if (iNdEx + 8) > l {
+					return io.ErrUnexpectedEOF
+				}
+				v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+				iNdEx += 8
+				v2 := float64(math.Float64frombits(v))
+				m.ExplicitBounds = append(m.ExplicitBounds, v2)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflow
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLength
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLength
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				elementCount = packedLen / 8
+				if elementCount != 0 && len(m.ExplicitBounds) == 0 {
+					m.ExplicitBounds = make([]float64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					if (iNdEx + 8) > l {
+						return io.ErrUnexpectedEOF
+					}
+					v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+					iNdEx += 8
+					v2 := float64(math.Float64frombits(v))
+					m.ExplicitBounds = append(m.ExplicitBounds, v2)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType)
+			}
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Exemplars = append(m.Exemplars, &Exemplar{})
+			if err := m.Exemplars[len(m.Exemplars)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Attributes = append(m.Attributes, &KeyValue{})
+			if err := m.Attributes[len(m.Attributes)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
+			}
+			m.Flags = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Flags |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 11:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			v2 := float64(math.Float64frombits(v))
+			m.Min = &v2
+		case 12:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			v2 := float64(math.Float64frombits(v))
+			m.Max = &v2
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ExponentialHistogramDataPoint_Buckets) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExponentialHistogramDataPoint_Buckets: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExponentialHistogramDataPoint_Buckets: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
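+			// zigzag-decode the sint32: encoded 0,1,2,3,... maps back to 0,-1,1,-2,...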
+			v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
+			m.Offset = v
+		case 2:
+			if wireType == 0 {
+				var v uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflow
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.BucketCounts = append(m.BucketCounts, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflow
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLength
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLength
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
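+				// pre-count the packed varints: each value ends at a byte with the high bit clear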
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.BucketCounts) == 0 {
+					m.BucketCounts = make([]uint64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflow
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.BucketCounts = append(m.BucketCounts, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType)
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ExponentialHistogramDataPoint) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExponentialHistogramDataPoint: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExponentialHistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Attributes = append(m.Attributes, &KeyValue{})
+			if err := m.Attributes[len(m.Attributes)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
+			}
+			m.StartTimeUnixNano = 0
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StartTimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+		case 3:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
+			}
+			m.TimeUnixNano = 0
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+		case 4:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+			}
+			m.Count = 0
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Count = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+		case 5:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			v2 := float64(math.Float64frombits(v))
+			m.Sum = &v2
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType)
+			}
+			var v int32
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31))
+			m.Scale = v
+		case 7:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ZeroCount", wireType)
+			}
+			m.ZeroCount = 0
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ZeroCount = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Positive", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Positive == nil {
+				m.Positive = &ExponentialHistogramDataPoint_Buckets{}
+			}
+			if err := m.Positive.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Negative", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Negative == nil {
+				m.Negative = &ExponentialHistogramDataPoint_Buckets{}
+			}
+			if err := m.Negative.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
+			}
+			m.Flags = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Flags |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Exemplars = append(m.Exemplars, &Exemplar{})
+			if err := m.Exemplars[len(m.Exemplars)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 12:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			v2 := float64(math.Float64frombits(v))
+			m.Min = &v2
+		case 13:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			v2 := float64(math.Float64frombits(v))
+			m.Max = &v2
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SummaryDataPoint_ValueAtQuantile) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SummaryDataPoint_ValueAtQuantile: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SummaryDataPoint_ValueAtQuantile: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			m.Quantile = float64(math.Float64frombits(v))
+		case 2:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			m.Value = float64(math.Float64frombits(v))
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SummaryDataPoint) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SummaryDataPoint: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SummaryDataPoint: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 2:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType)
+			}
+			m.StartTimeUnixNano = 0
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StartTimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+		case 3:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
+			}
+			m.TimeUnixNano = 0
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+		case 4:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+			}
+			m.Count = 0
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Count = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+		case 5:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			m.Sum = float64(math.Float64frombits(v))
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field QuantileValues", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.QuantileValues = append(m.QuantileValues, &SummaryDataPoint_ValueAtQuantile{})
+			if err := m.QuantileValues[len(m.QuantileValues)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Attributes = append(m.Attributes, &KeyValue{})
+			if err := m.Attributes[len(m.Attributes)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
+			}
+			m.Flags = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Flags |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Exemplar) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Exemplar: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 2:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType)
+			}
+			m.TimeUnixNano = 0
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+		case 3:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			m.Value = &Exemplar_AsDouble{AsDouble: float64(math.Float64frombits(v))}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
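+			// copy the bytes, reusing the existing backing array when it is large enough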
+			m.SpanId = append(m.SpanId[:0], dAtA[iNdEx:postIndex]...)
+			if m.SpanId == nil {
+				m.SpanId = []byte{}
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TraceId = append(m.TraceId[:0], dAtA[iNdEx:postIndex]...)
+			if m.TraceId == nil {
+				m.TraceId = []byte{}
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType)
+			}
+			var v int64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = int64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			m.Value = &Exemplar_AsInt{AsInt: v}
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FilteredAttributes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FilteredAttributes = append(m.FilteredAttributes, &KeyValue{})
+			if err := m.FilteredAttributes[len(m.FilteredAttributes)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
diff --git a/lib/protoparser/opentelemetry/pb/resource.pb.go b/lib/protoparser/opentelemetry/pb/resource.pb.go
new file mode 100644
index 0000000000..a89c817b29
--- /dev/null
+++ b/lib/protoparser/opentelemetry/pb/resource.pb.go
@@ -0,0 +1,48 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.28.1
+// 	protoc        v3.21.12
+// source: lib/protoparser/opentelemetry/proto/resource.proto
+
+package pb
+
+// Resource information.
+type Resource struct {
+	unknownFields []byte
+
+	// Set of attributes that describe the resource.
+	// Attribute keys MUST be unique (it is not allowed to have more than one
+	// attribute with the same key).
+	Attributes []*KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	// dropped_attributes_count is the number of dropped attributes. If the value is 0, then
+	// no attributes were dropped.
+	DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+}
+
+func (x *Resource) GetAttributes() []*KeyValue {
+	if x != nil {
+		return x.Attributes
+	}
+	return nil
+}
+
+func (x *Resource) GetDroppedAttributesCount() uint32 {
+	if x != nil {
+		return x.DroppedAttributesCount
+	}
+	return 0
+}
diff --git a/lib/protoparser/opentelemetry/pb/resource_vtproto.pb.go b/lib/protoparser/opentelemetry/pb/resource_vtproto.pb.go
new file mode 100644
index 0000000000..27eb573e06
--- /dev/null
+++ b/lib/protoparser/opentelemetry/pb/resource_vtproto.pb.go
@@ -0,0 +1,184 @@
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// protoc-gen-go-vtproto version: v0.4.0
+// source: lib/protoparser/opentelemetry/proto/resource.proto
+
+package pb
+
+import (
+	fmt "fmt"
+	io "io"
+)
+
+func (m *Resource) MarshalVT() (dAtA []byte, err error) {
+	if m == nil {
+		return nil, nil
+	}
+	size := m.SizeVT()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBufferVT(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Resource) MarshalToVT(dAtA []byte) (int, error) {
+	size := m.SizeVT()
+	return m.MarshalToSizedBufferVT(dAtA[:size])
+}
+
+func (m *Resource) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
+	if m == nil {
+		return 0, nil
+	}
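+	// fields are marshaled in reverse order, from the end of the buffer toward the start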
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.unknownFields != nil {
+		i -= len(m.unknownFields)
+		copy(dAtA[i:], m.unknownFields)
+	}
+	if m.DroppedAttributesCount != 0 {
+		i = encodeVarint(dAtA, i, uint64(m.DroppedAttributesCount))
+		i--
+		dAtA[i] = 0x10
+	}
+	if len(m.Attributes) > 0 {
+		for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- {
+			size, err := m.Attributes[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarint(dAtA, i, uint64(size))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Resource) SizeVT() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Attributes) > 0 {
+		for _, e := range m.Attributes {
+			l = e.SizeVT()
+			n += 1 + l + sov(uint64(l))
+		}
+	}
+	if m.DroppedAttributesCount != 0 {
+		n += 1 + sov(uint64(m.DroppedAttributesCount))
+	}
+	n += len(m.unknownFields)
+	return n
+}
+
+func (m *Resource) UnmarshalVT(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflow
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Resource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLength
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLength
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Attributes = append(m.Attributes, &KeyValue{})
+			if err := m.Attributes[len(m.Attributes)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
+			}
+			m.DroppedAttributesCount = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.DroppedAttributesCount |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
diff --git a/lib/protoparser/opentelemetry/proto/README.md b/lib/protoparser/opentelemetry/proto/README.md
new file mode 100644
index 0000000000..13cba6806f
--- /dev/null
+++ b/lib/protoparser/opentelemetry/proto/README.md
@@ -0,0 +1,32 @@
+# OpenTelemetry proto files
+
+Content copied from https://github.com/open-telemetry/opentelemetry-proto/tree/main/opentelemetry/proto
+
+## Requirements
+- protoc binary [link](http://google.github.io/proto-lens/installing-protoc.html)
+- protoc-gen-go [link](https://developers.google.com/protocol-buffers/docs/reference/go-generated)
+- protoc-gen-go-vtproto custom marshaller [link](https://github.com/planetscale/vtprotobuf)
+
+## Modifications
+
+The original proto files were modified as follows:
+1) changed the package name to `opentelemetry`.
+2) changed the import paths to match the new directory layout.
+3) changed `go_package` to `opentelemetry/pb`.
+
+
+## How to generate the pb files
+
+Run the following commands:
+```bash
+export GOBIN=~/go/bin
+protoc -I=. --go_out=./lib/protoparser/opentelemetry --go-vtproto_out=./lib/protoparser/opentelemetry --plugin protoc-gen-go-vtproto="$GOBIN/protoc-gen-go-vtproto" --go-vtproto_opt=features=marshal+unmarshal+size  lib/protoparser/opentelemetry/proto/*.proto
+ ```
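+
+Once generated (and manually edited as described below), the code can be used roughly
+like this minimal sketch; the module import path and the `metrics.bin` input file are
+illustrative assumptions:
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
+)
+
+func main() {
+	// hypothetical input: a protobuf-encoded MetricsData payload
+	data, err := os.ReadFile("metrics.bin")
+	if err != nil {
+		panic(err)
+	}
+	var md pb.MetricsData
+	if err := md.UnmarshalVT(data); err != nil {
+		panic(err)
+	}
+	// walk resource -> scope -> metric and print each metric name
+	for _, rm := range md.ResourceMetrics {
+		for _, sm := range rm.ScopeMetrics {
+			for _, m := range sm.Metrics {
+				fmt.Println(m.Name)
+			}
+		}
+	}
+}
+```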
+
+The generated code will be placed under `lib/protoparser/opentelemetry/opentelemetry/`.
+
+Then manually edit the generated code:
+
+1) remove all external imports
+2) remove all unneeded methods
+3) replace the generated `unknownFields` field with a plain `unknownFields []byte`
\ No newline at end of file
diff --git a/lib/protoparser/opentelemetry/proto/common.proto b/lib/protoparser/opentelemetry/proto/common.proto
new file mode 100644
index 0000000000..7517786223
--- /dev/null
+++ b/lib/protoparser/opentelemetry/proto/common.proto
@@ -0,0 +1,67 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package opentelemetry;
+
+option csharp_namespace = "OpenTelemetry.Proto.Common.V1";
+option java_multiple_files = true;
+option java_package = "io.opentelemetry.proto.common.v1";
+option java_outer_classname = "CommonProto";
+option go_package = "opentelemetry/pb";
+
+// AnyValue is used to represent any type of attribute value. AnyValue may contain a
+// primitive value such as a string or integer or it may contain an arbitrary nested
+// object containing arrays, key-value lists and primitives.
+message AnyValue {
+  // The value is one of the listed fields. It is valid for all values to be unspecified
+  // in which case this AnyValue is considered to be "empty".
+  oneof value {
+    string string_value = 1;
+    bool bool_value = 2;
+    int64 int_value = 3;
+    double double_value = 4;
+    ArrayValue array_value = 5;
+    KeyValueList kvlist_value = 6;
+    bytes bytes_value = 7;
+  }
+}
+
+// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
+// since oneof in AnyValue does not allow repeated fields.
+message ArrayValue {
+  // Array of values. The array may be empty (contain 0 elements).
+  repeated AnyValue values = 1;
+}
+
+// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
+// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
+// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
+// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
+// are semantically equivalent.
+message KeyValueList {
+  // A collection of key/value pairs. The list may be empty (may
+  // contain 0 elements).
+  // The keys MUST be unique (it is not allowed to have more than one
+  // value with the same key).
+  repeated KeyValue values = 1;
+}
+
+// KeyValue is a key-value pair that is used to store Span attributes, Link
+// attributes, etc.
+message KeyValue {
+  string key = 1;
+  AnyValue value = 2;
+}
diff --git a/lib/protoparser/opentelemetry/proto/metrics.proto b/lib/protoparser/opentelemetry/proto/metrics.proto
new file mode 100644
index 0000000000..b2dae7c149
--- /dev/null
+++ b/lib/protoparser/opentelemetry/proto/metrics.proto
@@ -0,0 +1,661 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package opentelemetry;
+
+import "lib/protoparser/opentelemetry/proto/common.proto";
+import "lib/protoparser/opentelemetry/proto/resource.proto";
+
+option csharp_namespace = "OpenTelemetry.Proto.Metrics.V1";
+option java_multiple_files = true;
+option java_package = "io.opentelemetry.proto.metrics.v1";
+option java_outer_classname = "MetricsProto";
+option go_package = "opentelemetry/pb";
+
+// MetricsData represents the metrics data that can be stored in a persistent
+// storage, OR can be embedded by other protocols that transfer OTLP metrics
+// data but do not implement the OTLP protocol.
+//
+// The main difference between this message and collector protocol is that
+// in this message there will not be any "control" or "metadata" specific to
+// OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+message MetricsData {
+  // An array of ResourceMetrics.
+  // For data coming from a single resource this array will typically contain
+  // one element. Intermediary nodes that receive data from multiple origins
+  // typically batch the data before forwarding further and in that case this
+  // array will contain multiple elements.
+  repeated ResourceMetrics resource_metrics = 1;
+}
+
+// A collection of ScopeMetrics from a Resource.
+message ResourceMetrics {
+  reserved 1000;
+
+  // The resource for the metrics in this message.
+  // If this field is not set then no resource info is known.
+  Resource resource = 1;
+
+  // A list of metrics that originate from a resource.
+  repeated ScopeMetrics scope_metrics = 2;
+
+  // This schema_url applies to the data in the "resource" field. It does not apply
+  // to the data in the "scope_metrics" field which have their own schema_url field.
+  string schema_url = 3;
+}
+
+// A collection of Metrics produced by a Scope.
+message ScopeMetrics {
+  // A list of metrics that originate from an instrumentation library.
+  repeated Metric metrics = 2;
+
+  // This schema_url applies to all metrics in the "metrics" field.
+  string schema_url = 3;
+}
+
+// Defines a Metric which has one or more timeseries.  The following is a
+// brief summary of the Metric data model.  For more details, see:
+//
+//   https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md
+//
+//
+// The data model and relation between entities is shown in the
+// diagram below. Here, "DataPoint" is the term used to refer to any
+// one of the specific data point value types, and "points" is the term used
+// to refer to any one of the lists of points contained in the Metric.
+//
+// - Metric is composed of a metadata and data.
+// - Metadata part contains a name, description, unit.
+// - Data is one of the possible types (Sum, Gauge, Histogram, Summary).
+// - DataPoint contains timestamps, attributes, and one of the possible value type
+//   fields.
+//
+//     Metric
+//  +------------+
+//  |name        |
+//  |description |
+//  |unit        |     +------------------------------------+
+//  |data        |---> |Gauge, Sum, Histogram, Summary, ... |
+//  +------------+     +------------------------------------+
+//
+//    Data [One of Gauge, Sum, Histogram, Summary, ...]
+//  +-----------+
+//  |...        |  // Metadata about the Data.
+//  |points     |--+
+//  +-----------+  |
+//                 |      +---------------------------+
+//                 |      |DataPoint 1                |
+//                 v      |+------+------+   +------+ |
+//              +-----+   ||label |label |...|label | |
+//              |  1  |-->||value1|value2|...|valueN| |
+//              +-----+   |+------+------+   +------+ |
+//              |  .  |   |+-----+                    |
+//              |  .  |   ||value|                    |
+//              |  .  |   |+-----+                    |
+//              |  .  |   +---------------------------+
+//              |  .  |                   .
+//              |  .  |                   .
+//              |  .  |                   .
+//              |  .  |   +---------------------------+
+//              |  .  |   |DataPoint M                |
+//              +-----+   |+------+------+   +------+ |
+//              |  M  |-->||label |label |...|label | |
+//              +-----+   ||value1|value2|...|valueN| |
+//                        |+------+------+   +------+ |
+//                        |+-----+                    |
+//                        ||value|                    |
+//                        |+-----+                    |
+//                        +---------------------------+
+//
+// Each distinct type of DataPoint represents the output of a specific
+// aggregation function, the result of applying the DataPoint's
+// associated function to one or more measurements.
+//
+// All DataPoint types have three common fields:
+// - Attributes includes key-value pairs associated with the data point
+// - TimeUnixNano is required, set to the end time of the aggregation
+// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints
+//   having an AggregationTemporality field, as discussed below.
+//
+// Both TimeUnixNano and StartTimeUnixNano values are expressed as
+// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+//
+// # TimeUnixNano
+//
+// This field is required, having consistent interpretation across
+// DataPoint types.  TimeUnixNano is the moment corresponding to when
+// the data point's aggregate value was captured.
+//
+// Data points with the 0 value for TimeUnixNano SHOULD be rejected
+// by consumers.
+//
+// # StartTimeUnixNano
+//
+// StartTimeUnixNano in general allows detecting when a sequence of
+// observations is unbroken.  This field indicates to consumers the
+// start time for points with cumulative and delta
+// AggregationTemporality, and it should be included whenever possible
+// to support correct rate calculation.  Although it may be omitted
+// when the start time is truly unknown, setting StartTimeUnixNano is
+// strongly encouraged.
+message Metric {
+  reserved 4, 6, 8;
+
+  // name of the metric, including its DNS name prefix. It must be unique.
+  string name = 1;
+
+  // description of the metric, which can be used in documentation.
+  string description = 2;
+
+  // unit in which the metric value is reported. Follows the format
+  // described by http://unitsofmeasure.org/ucum.html.
+  string unit = 3;
+
+  // Data determines the aggregation type (if any) of the metric, what is the
+  // reported value type for the data points, as well as the relationship to
+  // the time interval over which they are reported.
+  oneof data {
+    Gauge gauge = 5;
+    Sum sum = 7;
+    Histogram histogram = 9;
+    ExponentialHistogram exponential_histogram = 10;
+    Summary summary = 11;
+  }
+}
+
+// Gauge represents the type of a scalar metric that always exports the
+// "current value" for every data point. It should be used for an "unknown"
+// aggregation.
+//
+// A Gauge does not support different aggregation temporalities. Given the
+// aggregation is unknown, points cannot be combined using the same
+// aggregation, regardless of aggregation temporalities. Therefore,
+// AggregationTemporality is not included. Consequently, this also means
+// "StartTimeUnixNano" is ignored for all data points.
+message Gauge {
+  repeated NumberDataPoint data_points = 1;
+}
+
+// Sum represents the type of a scalar metric that is calculated as a sum of all
+// reported measurements over a time interval.
+message Sum {
+  repeated NumberDataPoint data_points = 1;
+
+  // aggregation_temporality describes if the aggregator reports delta changes
+  // since last report time, or cumulative changes since a fixed start time.
+  AggregationTemporality aggregation_temporality = 2;
+
+  // If "true", the sum is monotonic.
+  bool is_monotonic = 3;
+}
+
+// Histogram represents the type of a metric that is calculated by aggregating
+// as a Histogram of all reported measurements over a time interval.
+message Histogram {
+  repeated HistogramDataPoint data_points = 1;
+
+  // aggregation_temporality describes if the aggregator reports delta changes
+  // since last report time, or cumulative changes since a fixed start time.
+  AggregationTemporality aggregation_temporality = 2;
+}
+
+// ExponentialHistogram represents the type of a metric that is calculated by aggregating
+// as an ExponentialHistogram of all reported double measurements over a time interval.
+message ExponentialHistogram {
+  repeated ExponentialHistogramDataPoint data_points = 1;
+
+  // aggregation_temporality describes if the aggregator reports delta changes
+  // since last report time, or cumulative changes since a fixed start time.
+  AggregationTemporality aggregation_temporality = 2;
+}
+
+// Summary metric data are used to convey quantile summaries,
+// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary)
+// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45)
+// data type. These data points cannot always be merged in a meaningful way.
+// While they can be useful in some applications, histogram data points are
+// recommended for new applications.
+message Summary {
+  repeated SummaryDataPoint data_points = 1;
+}
+
+// AggregationTemporality defines how a metric aggregator reports aggregated
+// values. It describes how those values relate to the time interval over
+// which they are aggregated.
+enum AggregationTemporality {
+  // UNSPECIFIED is the default AggregationTemporality, it MUST not be used.
+  AGGREGATION_TEMPORALITY_UNSPECIFIED = 0;
+
+  // DELTA is an AggregationTemporality for a metric aggregator which reports
+  // changes since last report time. Successive metrics contain aggregation of
+  // values from continuous and non-overlapping intervals.
+  //
+  // The values for a DELTA metric are based only on the time interval
+  // associated with one measurement cycle. There is no dependency on
+  // previous measurements like is the case for CUMULATIVE metrics.
+  //
+  // For example, consider a system measuring the number of requests that
+  // it receives and reports the sum of these requests every second as a
+  // DELTA metric:
+  //
+  //   1. The system starts receiving at time=t_0.
+  //   2. A request is received, the system measures 1 request.
+  //   3. A request is received, the system measures 1 request.
+  //   4. A request is received, the system measures 1 request.
+  //   5. The 1 second collection cycle ends. A metric is exported for the
+  //      number of requests received over the interval of time t_0 to
+  //      t_0+1 with a value of 3.
+  //   6. A request is received, the system measures 1 request.
+  //   7. A request is received, the system measures 1 request.
+  //   8. The 1 second collection cycle ends. A metric is exported for the
+  //      number of requests received over the interval of time t_0+1 to
+  //      t_0+2 with a value of 2.
+  AGGREGATION_TEMPORALITY_DELTA = 1;
+
+  // CUMULATIVE is an AggregationTemporality for a metric aggregator which
+  // reports changes since a fixed start time. This means that current values
+  // of a CUMULATIVE metric depend on all previous measurements since the
+  // start time. Because of this, the sender is required to retain this state
+  // in some form. If this state is lost or invalidated, the CUMULATIVE metric
+  // values MUST be reset and a new fixed start time following the last
+  // reported measurement time sent MUST be used.
+  //
+  // For example, consider a system measuring the number of requests that
+  // it receives and reports the sum of these requests every second as a
+  // CUMULATIVE metric:
+  //
+  //   1. The system starts receiving at time=t_0.
+  //   2. A request is received, the system measures 1 request.
+  //   3. A request is received, the system measures 1 request.
+  //   4. A request is received, the system measures 1 request.
+  //   5. The 1 second collection cycle ends. A metric is exported for the
+  //      number of requests received over the interval of time t_0 to
+  //      t_0+1 with a value of 3.
+  //   6. A request is received, the system measures 1 request.
+  //   7. A request is received, the system measures 1 request.
+  //   8. The 1 second collection cycle ends. A metric is exported for the
+  //      number of requests received over the interval of time t_0 to
+  //      t_0+2 with a value of 5.
+  //   9. The system experiences a fault and loses state.
+  //   10. The system recovers and resumes receiving at time=t_1.
+  //   11. A request is received, the system measures 1 request.
+  //   12. The 1 second collection cycle ends. A metric is exported for the
+  //      number of requests received over the interval of time t_1 to
+  //      t_0+1 with a value of 1.
+  //
+  // Note: Even though using CUMULATIVE to report changes since the last
+  // report time is valid, it is not recommended. This may cause problems for
+  // systems that do not use start_time to determine when the aggregation
+  // value was reset (e.g. Prometheus).
+  AGGREGATION_TEMPORALITY_CUMULATIVE = 2;
+}
+
+// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a
+// bit-field representing 32 distinct boolean flags.  Each flag defined in this
+// enum is a bit-mask.  To test the presence of a single flag in the flags of
+// a data point, for example, use an expression like:
+//
+//   (point.flags & FLAG_NO_RECORDED_VALUE) == FLAG_NO_RECORDED_VALUE
+//
+enum DataPointFlags {
+  FLAG_NONE = 0;
+
+  // This DataPoint is valid but has no recorded value.  This value
+  // SHOULD be used to reflect explicitly missing data in a series, as
+  // for an equivalent to the Prometheus "staleness marker".
+  FLAG_NO_RECORDED_VALUE = 1;
+
+  // Bits 2-31 are reserved for future use.
+}
+
+// NumberDataPoint is a single data point in a timeseries that describes the
+// time-varying scalar value of a metric.
+message NumberDataPoint {
+  reserved 1;
+
+  // The set of key/value pairs that uniquely identify the timeseries to
+  // which this point belongs. The list may be empty (may contain 0 elements).
+  // Attribute keys MUST be unique (it is not allowed to have more than one
+  // attribute with the same key).
+  repeated KeyValue attributes = 7;
+
+  // StartTimeUnixNano is optional but strongly encouraged, see the
+  // detailed comments above Metric.
+  //
+  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+  // 1970.
+  fixed64 start_time_unix_nano = 2;
+
+  // TimeUnixNano is required, see the detailed comments above Metric.
+  //
+  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+  // 1970.
+  fixed64 time_unix_nano = 3;
+
+  // The value itself.  A point is considered invalid when none of the
+  // recognized value fields is set inside this oneof.
+  oneof value {
+    double as_double = 4;
+    sfixed64 as_int = 6;
+  }
+
+  // (Optional) List of exemplars collected from
+  // measurements that were used to form the data point
+  repeated Exemplar exemplars = 5;
+
+  // Flags that apply to this specific data point.  See DataPointFlags
+  // for the available flags and their meaning.
+  uint32 flags = 8;
+}
+
+// HistogramDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Histogram. A Histogram contains summary statistics
+// for a population of values; it may optionally contain the distribution of
+// those values across a set of buckets.
+//
+// If the histogram contains the distribution of values, then both
+// "explicit_bounds" and "bucket counts" fields must be defined.
+// If the histogram does not contain the distribution of values, then both
+// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and
+// "sum" are known.
+message HistogramDataPoint {
+  reserved 1;
+
+  // The set of key/value pairs that uniquely identify the timeseries to
+  // which this point belongs. The list may be empty (may contain 0 elements).
+  // Attribute keys MUST be unique (it is not allowed to have more than one
+  // attribute with the same key).
+  repeated KeyValue attributes = 9;
+
+  // StartTimeUnixNano is optional but strongly encouraged, see the
+  // detailed comments above Metric.
+  //
+  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+  // 1970.
+  fixed64 start_time_unix_nano = 2;
+
+  // TimeUnixNano is required, see the detailed comments above Metric.
+  //
+  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+  // 1970.
+  fixed64 time_unix_nano = 3;
+
+  // count is the number of values in the population. Must be non-negative. This
+  // value must be equal to the sum of the "count" fields in buckets if a
+  // histogram is provided.
+  fixed64 count = 4;
+
+  // sum of the values in the population. If count is zero then this field
+  // must be zero.
+  //
+  // Note: Sum should only be filled out when measuring non-negative discrete
+  // events, and is assumed to be monotonic over the values of these events.
+  // Negative events *can* be recorded, but sum should not be filled out when
+  // doing so.  This is specifically to enforce compatibility with OpenMetrics,
+  // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
+  optional double sum = 5;
+
+  // bucket_counts is an optional field that contains the count values of the
+  // histogram for each bucket.
+  //
+  // The sum of the bucket_counts must equal the value in the count field.
+  //
+  // The number of elements in the bucket_counts array must be one greater than
+  // the number of elements in the explicit_bounds array.
+  repeated fixed64 bucket_counts = 6;
+
+  // explicit_bounds specifies buckets with explicitly defined bounds for values.
+  //
+  // The boundaries for bucket at index i are:
+  //
+  // (-infinity, explicit_bounds[i]] for i == 0
+  // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds)
+  // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds)
+  //
+  // The values in the explicit_bounds array must be strictly increasing.
+  //
+  // Histogram buckets are inclusive of their upper boundary, except the last
+  // bucket where the boundary is at infinity. This format is intentionally
+  // compatible with the OpenMetrics histogram definition.
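+  //
+  // For example (illustrative, not part of the spec text):
+  // explicit_bounds = [0.1, 1] defines the three buckets (-infinity, 0.1],
+  // (0.1, 1] and (1, +infinity), so bucket_counts must contain exactly
+  // 3 elements.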
+  repeated double explicit_bounds = 7;
+
+  // (Optional) List of exemplars collected from
+  // measurements that were used to form the data point
+  repeated Exemplar exemplars = 8;
+
+  // Flags that apply to this specific data point.  See DataPointFlags
+  // for the available flags and their meaning.
+  uint32 flags = 10;
+
+  // min is the minimum value over (start_time, end_time].
+  optional double min = 11;
+
+  // max is the maximum value over (start_time, end_time].
+  optional double max = 12;
+}
+
+// ExponentialHistogramDataPoint is a single data point in a timeseries that
+// describes the time-varying values of an ExponentialHistogram of double
+// values. An ExponentialHistogram contains summary statistics for a population
+// of values; it may optionally contain the distribution of those values across
+// a set of buckets.
+//
+message ExponentialHistogramDataPoint {
+  // The set of key/value pairs that uniquely identify the timeseries to
+  // which this point belongs. The list may be empty (may contain 0 elements).
+  // Attribute keys MUST be unique (it is not allowed to have more than one
+  // attribute with the same key).
+  repeated KeyValue attributes = 1;
+
+  // StartTimeUnixNano is optional but strongly encouraged, see the
+  // detailed comments above Metric.
+  //
+  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+  // 1970.
+  fixed64 start_time_unix_nano = 2;
+
+  // TimeUnixNano is required, see the detailed comments above Metric.
+  //
+  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+  // 1970.
+  fixed64 time_unix_nano = 3;
+
+  // count is the number of values in the population. Must be
+  // non-negative. This value must be equal to the sum of the "bucket_counts"
+  // values in the positive and negative Buckets plus the "zero_count" field.
+  fixed64 count = 4;
+
+  // sum of the values in the population. If count is zero then this field
+  // must be zero.
+  //
+  // Note: Sum should only be filled out when measuring non-negative discrete
+  // events, and is assumed to be monotonic over the values of these events.
+  // Negative events *can* be recorded, but sum should not be filled out when
+  // doing so.  This is specifically to enforce compatibility with OpenMetrics,
+  // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
+  optional double sum = 5;
+  
+  // scale describes the resolution of the histogram.  Boundaries are
+  // located at powers of the base, where:
+  //
+  //   base = (2^(2^-scale))
+  //
+  // The histogram bucket identified by `index`, a signed integer,
+  // contains values that are greater than (base^index) and
+  // less than or equal to (base^(index+1)).
+  //
+  // The positive and negative ranges of the histogram are expressed
+  // separately.  Negative values are mapped by their absolute value
+  // into the negative range using the same scale as the positive range.
+  //
+  // scale is not restricted by the protocol, as the permissible
+  // values depend on the range of the data.
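+  //
+  // For example (illustrative): scale=0 gives base=2, so bucket index 0
+  // covers (1, 2] and index 1 covers (2, 4]; scale=3 gives
+  // base=2^(1/8) ~= 1.09, i.e. buckets roughly 9% wide.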
+  sint32 scale = 6;
+
+  // zero_count is the count of values that are either exactly zero or
+  // within the region considered zero by the instrumentation at the
+  // tolerated degree of precision.  This bucket stores values that
+  // cannot be expressed using the standard exponential formula as
+  // well as values that have been rounded to zero.
+  //
+  // Implementations MAY consider the zero bucket to have probability
+  // mass equal to (zero_count / count).
+  fixed64 zero_count = 7;
+
+  // positive carries the positive range of exponential bucket counts.
+  Buckets positive = 8;
+
+  // negative carries the negative range of exponential bucket counts.
+  Buckets negative = 9;
+
+  // Buckets are a set of bucket counts, encoded in a contiguous array
+  // of counts.
+  message Buckets {
+    // Offset is the bucket index of the first entry in the bucket_counts array.
+    // 
+    // Note: This uses a varint encoding as a simple form of compression.
+    sint32 offset = 1;
+
+    // Count is an array of counts, where count[i] carries the count
+    // of the bucket at index (offset+i).  count[i] is the count of
+    // values greater than base^(offset+i) and less than or equal to
+    // base^(offset+i+1).
+    //
+    // Note: By contrast, the explicit HistogramDataPoint uses
+    // fixed64.  This field is expected to have many buckets,
+    // especially zeros, so uint64 has been selected to ensure
+    // varint encoding.
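+    //
+    // For example (illustrative): with base=2, offset=-1 and
+    // bucket_counts=[3, 0, 5], 3 values fall into (0.5, 1] and
+    // 5 values fall into (2, 4].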
+    repeated uint64 bucket_counts = 2;
+  } 
+
+  // Flags that apply to this specific data point.  See DataPointFlags
+  // for the available flags and their meaning.
+  uint32 flags = 10;
+
+  // (Optional) List of exemplars collected from
+  // measurements that were used to form the data point
+  repeated Exemplar exemplars = 11;
+
+  // min is the minimum value over (start_time, end_time].
+  optional double min = 12;
+
+  // max is the maximum value over (start_time, end_time].
+  optional double max = 13;
+}
+
+// SummaryDataPoint is a single data point in a timeseries that describes the
+// time-varying values of a Summary metric.
+message SummaryDataPoint {
+  reserved 1;
+
+  // The set of key/value pairs that uniquely identify the timeseries to
+  // which this point belongs. The list may be empty (may contain 0 elements).
+  // Attribute keys MUST be unique (it is not allowed to have more than one
+  // attribute with the same key).
+  repeated KeyValue attributes = 7;
+
+  // StartTimeUnixNano is optional but strongly encouraged, see the
+  // detailed comments above Metric.
+  //
+  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+  // 1970.
+  fixed64 start_time_unix_nano = 2;
+
+  // TimeUnixNano is required, see the detailed comments above Metric.
+  //
+  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+  // 1970.
+  fixed64 time_unix_nano = 3;
+
+  // count is the number of values in the population. Must be non-negative.
+  fixed64 count = 4;
+
+  // sum of the values in the population. If count is zero then this field
+  // must be zero.
+  //
+  // Note: Sum should only be filled out when measuring non-negative discrete
+  // events, and is assumed to be monotonic over the values of these events.
+  // Negative events *can* be recorded, but sum should not be filled out when
+  // doing so.  This is specifically to enforce compatibility with OpenMetrics,
+  // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
+  double sum = 5;
+
+  // Represents the value at a given quantile of a distribution.
+  //
+  // To record Min and Max values, the following conventions are used:
+  // - The 1.0 quantile is equivalent to the maximum value observed.
+  // - The 0.0 quantile is equivalent to the minimum value observed.
+  //
+  // See the following issue for more context:
+  // https://github.com/open-telemetry/opentelemetry-proto/issues/125
+  message ValueAtQuantile {
+    // The quantile of a distribution. Must be in the interval
+    // [0.0, 1.0].
+    double quantile = 1;
+
+    // The value at the given quantile of a distribution.
+    //
+    // Quantile values must NOT be negative.
+    double value = 2;
+  }
+
+  // (Optional) list of values at different quantiles of the distribution calculated
+  // from the current snapshot. The quantiles must be strictly increasing.
+  repeated ValueAtQuantile quantile_values = 6;
+
+  // Flags that apply to this specific data point.  See DataPointFlags
+  // for the available flags and their meaning.
+  uint32 flags = 8;
+}
+
+// A representation of an exemplar, which is a sample input measurement.
+// Exemplars also hold information about the environment when the measurement
+// was recorded, for example the span and trace ID of the active span when the
+// exemplar was recorded.
+message Exemplar {
+  reserved 1;
+
+  // The set of key/value pairs that were filtered out by the aggregator, but
+  // recorded alongside the original measurement. Only key/value pairs that were
+  // filtered out by the aggregator should be included.
+  repeated KeyValue filtered_attributes = 7;
+
+  // time_unix_nano is the exact time when this exemplar was recorded.
+  //
+  // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January
+  // 1970.
+  fixed64 time_unix_nano = 2;
+
+  // The value of the measurement that was recorded. An exemplar is
+  // considered invalid when none of the recognized value fields is set
+  // inside this oneof.
+  oneof value {
+    double as_double = 3;
+    sfixed64 as_int = 6;
+  }
+
+  // (Optional) Span ID of the exemplar trace.
+  // span_id may be missing if the measurement is not recorded inside a trace
+  // or if the trace is not sampled.
+  bytes span_id = 4;
+
+  // (Optional) Trace ID of the exemplar trace.
+  // trace_id may be missing if the measurement is not recorded inside a trace
+  // or if the trace is not sampled.
+  bytes trace_id = 5;
+}
diff --git a/lib/protoparser/opentelemetry/proto/metrics_service.proto b/lib/protoparser/opentelemetry/proto/metrics_service.proto
new file mode 100644
index 0000000000..505f676823
--- /dev/null
+++ b/lib/protoparser/opentelemetry/proto/metrics_service.proto
@@ -0,0 +1,30 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package opentelemetry;
+
+import "lib/protoparser/opentelemetry/proto/metrics.proto";
+
+option go_package = "opentelemetry/pb";
+
+message ExportMetricsServiceRequest {
+  // An array of ResourceMetrics.
+  // For data coming from a single resource this array will typically contain one
+  // element. Intermediary nodes (such as the OpenTelemetry Collector) that receive
+  // data from multiple origins typically batch the data before forwarding it
+  // further, and in that case this array will contain multiple elements.
+  repeated ResourceMetrics resource_metrics = 1;
+}
diff --git a/lib/protoparser/opentelemetry/proto/resource.proto b/lib/protoparser/opentelemetry/proto/resource.proto
new file mode 100644
index 0000000000..572ccf1b67
--- /dev/null
+++ b/lib/protoparser/opentelemetry/proto/resource.proto
@@ -0,0 +1,37 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package opentelemetry;
+
+import "lib/protoparser/opentelemetry/proto/common.proto";
+
+option csharp_namespace = "OpenTelemetry.Proto.Resource.V1";
+option java_multiple_files = true;
+option java_package = "io.opentelemetry.proto.resource.v1";
+option java_outer_classname = "ResourceProto";
+option go_package = "opentelemetry/pb";
+
+// Resource information.
+message Resource {
+  // Set of attributes that describe the resource.
+  // Attribute keys MUST be unique (it is not allowed to have more than one
+  // attribute with the same key).
+  repeated KeyValue attributes = 1;
+
+  // dropped_attributes_count is the number of dropped attributes. If the value is 0, then
+  // no attributes were dropped.
+  uint32 dropped_attributes_count = 2;
+}
diff --git a/lib/protoparser/opentelemetry/stream/streamparser.go b/lib/protoparser/opentelemetry/stream/streamparser.go
new file mode 100644
index 0000000000..f5b79d5d85
--- /dev/null
+++ b/lib/protoparser/opentelemetry/stream/streamparser.go
@@ -0,0 +1,298 @@
+package stream
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+	"sync"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
+	"github.com/VictoriaMetrics/metrics"
+)
+
+// ParseStream parses OpenTelemetry protobuf-encoded data from r and calls callback for the parsed rows.
+//
+// callback shouldn't hold tss items after returning.
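+//
+// A minimal caller sketch (hypothetical names, for illustration only):
+//
+//	isGzipped := req.Header.Get("Content-Encoding") == "gzip"
+//	err := ParseStream(req.Body, isGzipped, func(tss []prompbmarshal.TimeSeries) error {
+//		return insertRows(tss) // insertRows is a hypothetical sink for the parsed series
+//	})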
+func ParseStream(r io.Reader, isGzipped bool, callback func(tss []prompbmarshal.TimeSeries) error) error {
+	wcr := writeconcurrencylimiter.GetReader(r)
+	defer writeconcurrencylimiter.PutReader(wcr)
+	r = wcr
+
+	if isGzipped {
+		zr, err := common.GetGzipReader(r)
+		if err != nil {
+			return fmt.Errorf("cannot read gzip-compressed OpenTelemetry protocol data: %w", err)
+		}
+		defer common.PutGzipReader(zr)
+		r = zr
+	}
+
+	wr := getWriteContext()
+	defer putWriteContext(wr)
+	req, err := wr.readAndUnpackRequest(r)
+	if err != nil {
+		return fmt.Errorf("cannot unpack OpenTelemetry metrics: %w", err)
+	}
+	wr.parseRequestToTss(req)
+
+	if err := callback(wr.tss); err != nil {
+		return fmt.Errorf("error when processing OpenTelemetry samples: %w", err)
+	}
+
+	return nil
+}
+
+func (wr *writeContext) appendSamplesFromScopeMetrics(sc *pb.ScopeMetrics) {
+	for _, m := range sc.Metrics {
+		if len(m.Name) == 0 {
+			// skip metrics without names
+			continue
+		}
+		switch t := m.Data.(type) {
+		case *pb.Metric_Gauge:
+			for _, p := range t.Gauge.DataPoints {
+				wr.appendSampleFromNumericPoint(m.Name, p)
+			}
+		case *pb.Metric_Sum:
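+			// Only CUMULATIVE sums map directly to Prometheus-style counters;
+			// DELTA sums are dropped and accounted for in vm_protoparser_rows_dropped_total.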
+			if t.Sum.AggregationTemporality != pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE {
+				rowsDroppedUnsupportedSum.Inc()
+				continue
+			}
+			for _, p := range t.Sum.DataPoints {
+				wr.appendSampleFromNumericPoint(m.Name, p)
+			}
+		case *pb.Metric_Summary:
+			for _, p := range t.Summary.DataPoints {
+				wr.appendSamplesFromSummary(m.Name, p)
+			}
+		case *pb.Metric_Histogram:
+			if t.Histogram.AggregationTemporality != pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE {
+				rowsDroppedUnsupportedHistogram.Inc()
+				continue
+			}
+			for _, p := range t.Histogram.DataPoints {
+				wr.appendSamplesFromHistogram(m.Name, p)
+			}
+		default:
+			rowsDroppedUnsupportedMetricType.Inc()
+			logger.Warnf("unsupported type %T for metric %q", t, m.Name)
+		}
+	}
+}
+
+// appendSampleFromNumericPoint appends p to wr.tss
+func (wr *writeContext) appendSampleFromNumericPoint(metricName string, p *pb.NumberDataPoint) {
+	var v float64
+	switch t := p.Value.(type) {
+	case *pb.NumberDataPoint_AsInt:
+		v = float64(t.AsInt)
+	case *pb.NumberDataPoint_AsDouble:
+		v = t.AsDouble
+	}
+
+	t := int64(p.TimeUnixNano / 1e6)
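+	// Flag bit 1 is FLAG_NO_RECORDED_VALUE, i.e. the OpenTelemetry analog of the Prometheus staleness marker.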
+	isStale := (p.Flags)&uint32(1) != 0
+	wr.pointLabels = appendAttributesToPromLabels(wr.pointLabels[:0], p.Attributes)
+
+	wr.appendSample(metricName, t, v, isStale)
+}
+
+// appendSamplesFromSummary appends summary p to wr.tss
+func (wr *writeContext) appendSamplesFromSummary(metricName string, p *pb.SummaryDataPoint) {
+	t := int64(p.TimeUnixNano / 1e6)
+	isStale := (p.Flags)&uint32(1) != 0
+	wr.pointLabels = appendAttributesToPromLabels(wr.pointLabels[:0], p.Attributes)
+
+	wr.appendSample(metricName+"_sum", t, p.Sum, isStale)
+	wr.appendSample(metricName+"_count", t, float64(p.Count), isStale)
+	for _, q := range p.QuantileValues {
+		qValue := strconv.FormatFloat(q.Quantile, 'f', -1, 64)
+		wr.appendSampleWithExtraLabel(metricName, "quantile", qValue, t, q.Value, isStale)
+	}
+}
+
+// appendSamplesFromHistogram appends histogram p to wr.tss
+func (wr *writeContext) appendSamplesFromHistogram(metricName string, p *pb.HistogramDataPoint) {
+	if len(p.BucketCounts) == 0 {
+		// nothing to append
+		return
+	}
+	if len(p.BucketCounts) != len(p.ExplicitBounds)+1 {
+		// broken data format: bucket_counts size must be one greater than explicit_bounds size
+		logger.Warnf("opentelemetry bad histogram format: %q, size of buckets: %d, size of bounds: %d", metricName, len(p.BucketCounts), len(p.ExplicitBounds))
+		return
+	}
+
+	t := int64(p.TimeUnixNano / 1e6)
+	isStale := (p.Flags)&uint32(1) != 0
+	wr.pointLabels = appendAttributesToPromLabels(wr.pointLabels[:0], p.Attributes)
+
+	wr.appendSample(metricName+"_sum", t, *p.Sum, isStale)
+	wr.appendSample(metricName+"_count", t, float64(p.Count), isStale)
+
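+	// Convert per-bucket OTLP counts to cumulative Prometheus `le` buckets.
+	// For example, bucket_counts=[0, 5, 10] with explicit_bounds=[0.1, 0.5]
+	// yields le="0.1" => 0, le="0.5" => 5 and le="+Inf" => 15.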
+	var cumulative uint64
+	for index, bound := range p.ExplicitBounds {
+		cumulative += p.BucketCounts[index]
+		boundLabelValue := strconv.FormatFloat(bound, 'f', -1, 64)
+		wr.appendSampleWithExtraLabel(metricName+"_bucket", "le", boundLabelValue, t, float64(cumulative), isStale)
+	}
+	cumulative += p.BucketCounts[len(p.BucketCounts)-1]
+	wr.appendSampleWithExtraLabel(metricName+"_bucket", "le", "+Inf", t, float64(cumulative), isStale)
+}
+
+// appendSample appends sample with the given metricName to wr.tss
+func (wr *writeContext) appendSample(metricName string, t int64, v float64, isStale bool) {
+	wr.appendSampleWithExtraLabel(metricName, "", "", t, v, isStale)
+}
+
+// appendSampleWithExtraLabel appends sample with the given metricName and the given (labelName=labelValue) extra label to wr.tss
+func (wr *writeContext) appendSampleWithExtraLabel(metricName, labelName, labelValue string, t int64, v float64, isStale bool) {
+	if isStale {
+		v = decimal.StaleNaN
+	}
+	if t <= 0 {
+		// Set the current timestamp if t isn't set.
+		t = int64(fasttime.UnixTimestamp()) * 1000
+	}
+
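+	// Labels and samples are appended to shared per-context pools and then
+	// sliced, so a single backing array is reused across all the series in the request.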
+	labelsPool := wr.labelsPool
+	labelsLen := len(labelsPool)
+	labelsPool = append(labelsPool, prompbmarshal.Label{
+		Name:  "__name__",
+		Value: metricName,
+	})
+	labelsPool = append(labelsPool, wr.baseLabels...)
+	labelsPool = append(labelsPool, wr.pointLabels...)
+	if labelName != "" && labelValue != "" {
+		labelsPool = append(labelsPool, prompbmarshal.Label{
+			Name:  labelName,
+			Value: labelValue,
+		})
+	}
+
+	samplesPool := wr.samplesPool
+	samplesLen := len(samplesPool)
+	samplesPool = append(samplesPool, prompbmarshal.Sample{
+		Timestamp: t,
+		Value:     v,
+	})
+
+	wr.tss = append(wr.tss, prompbmarshal.TimeSeries{
+		Labels:  labelsPool[labelsLen:],
+		Samples: samplesPool[samplesLen:],
+	})
+
+	wr.labelsPool = labelsPool
+	wr.samplesPool = samplesPool
+
+	rowsRead.Inc()
+}
+
+// appendAttributesToPromLabels appends attributes to dst and returns the result.
+func appendAttributesToPromLabels(dst []prompbmarshal.Label, attributes []*pb.KeyValue) []prompbmarshal.Label {
+	for _, at := range attributes {
+		dst = append(dst, prompbmarshal.Label{
+			Name:  at.Key,
+			Value: at.Value.FormatString(),
+		})
+	}
+	return dst
+}
+
+type writeContext struct {
+	// bb holds the original protobuf data, which must be parsed.
+	bb bytesutil.ByteBuffer
+
+	// tss holds parsed time series
+	tss []prompbmarshal.TimeSeries
+
+	// baseLabels are labels, which must be added to all the ingested samples
+	baseLabels []prompbmarshal.Label
+
+	// pointLabels are labels, which must be added to the ingested OpenTelemetry points
+	pointLabels []prompbmarshal.Label
+
+	// pools are used for reducing memory allocations when parsing time series
+	labelsPool  []prompbmarshal.Label
+	samplesPool []prompbmarshal.Sample
+}
+
+func (wr *writeContext) reset() {
+	wr.bb.Reset()
+
+	tss := wr.tss
+	for i := range tss {
+		ts := &tss[i]
+		ts.Labels = nil
+		ts.Samples = nil
+	}
+	wr.tss = tss[:0]
+
+	wr.baseLabels = resetLabels(wr.baseLabels)
+	wr.pointLabels = resetLabels(wr.pointLabels)
+
+	wr.labelsPool = resetLabels(wr.labelsPool)
+	wr.samplesPool = wr.samplesPool[:0]
+}
+
+func resetLabels(labels []prompbmarshal.Label) []prompbmarshal.Label {
+	for i := range labels {
+		label := &labels[i]
+		label.Name = ""
+		label.Value = ""
+	}
+	return labels[:0]
+}
+
+func (wr *writeContext) readAndUnpackRequest(r io.Reader) (*pb.ExportMetricsServiceRequest, error) {
+	if _, err := wr.bb.ReadFrom(r); err != nil {
+		return nil, fmt.Errorf("cannot read request: %w", err)
+	}
+	var req pb.ExportMetricsServiceRequest
+	if err := req.UnmarshalVT(wr.bb.B); err != nil {
+		return nil, fmt.Errorf("cannot unmarshal request from %d bytes: %w", len(wr.bb.B), err)
+	}
+	return &req, nil
+}
+
+func (wr *writeContext) parseRequestToTss(req *pb.ExportMetricsServiceRequest) {
+	for _, rm := range req.ResourceMetrics {
+		if rm.Resource == nil {
+			// skip metrics without resource part.
+			continue
+		}
+		wr.baseLabels = appendAttributesToPromLabels(wr.baseLabels[:0], rm.Resource.Attributes)
+		for _, sc := range rm.ScopeMetrics {
+			wr.appendSamplesFromScopeMetrics(sc)
+		}
+	}
+}
+
+var wrPool sync.Pool
+
+func getWriteContext() *writeContext {
+	v := wrPool.Get()
+	if v == nil {
+		return &writeContext{}
+	}
+	return v.(*writeContext)
+}
+
+func putWriteContext(wr *writeContext) {
+	wr.reset()
+	wrPool.Put(wr)
+}
+
+var (
+	rowsRead                         = metrics.NewCounter(`vm_protoparser_rows_read_total{type="opentelemetry"}`)
+	rowsDroppedUnsupportedHistogram  = metrics.NewCounter(`vm_protoparser_rows_dropped_total{type="opentelemetry",reason="unsupported_histogram_aggregation"}`)
+	rowsDroppedUnsupportedSum        = metrics.NewCounter(`vm_protoparser_rows_dropped_total{type="opentelemetry",reason="unsupported_sum_aggregation"}`)
+	rowsDroppedUnsupportedMetricType = metrics.NewCounter(`vm_protoparser_rows_dropped_total{type="opentelemetry",reason="unsupported_metric_type"}`)
+)
diff --git a/lib/protoparser/opentelemetry/stream/streamparser_test.go b/lib/protoparser/opentelemetry/stream/streamparser_test.go
new file mode 100644
index 0000000000..6b4f4c9f88
--- /dev/null
+++ b/lib/protoparser/opentelemetry/stream/streamparser_test.go
@@ -0,0 +1,317 @@
+package stream
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"reflect"
+	"sort"
+	"testing"
+	"time"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
+)
+
+func TestParseStream(t *testing.T) {
+	f := func(samples []*pb.Metric, tssExpected []prompbmarshal.TimeSeries) {
+		t.Helper()
+
+		checkSeries := func(tss []prompbmarshal.TimeSeries) error {
+			if len(tss) != len(tssExpected) {
+				return fmt.Errorf("not expected tss count, got: %d, want: %d", len(tss), len(tssExpected))
+			}
+			sortByMetricName(tss)
+			sortByMetricName(tssExpected)
+			for i := 0; i < len(tss); i++ {
+				ts := tss[i]
+				tsExpected := tssExpected[i]
+				if len(ts.Labels) != len(tsExpected.Labels) {
+					return fmt.Errorf("idx: %d, not expected labels count, got: %d, want: %d", i, len(ts.Labels), len(tsExpected.Labels))
+				}
+				sortLabels(ts.Labels)
+				sortLabels(tsExpected.Labels)
+				for j, label := range ts.Labels {
+					labelExpected := tsExpected.Labels[j]
+					if !reflect.DeepEqual(label, labelExpected) {
+						return fmt.Errorf("idx: %d, label idx: %d, not equal label pairs, \ngot: \n%s, \nwant: \n%s",
+							i, j, prettifyLabel(label), prettifyLabel(labelExpected))
+					}
+				}
+				if len(ts.Samples) != len(tsExpected.Samples) {
+					return fmt.Errorf("idx: %d, not expected samples count, got: %d, want: %d", i, len(ts.Samples), len(tsExpected.Samples))
+				}
+				for j, sample := range ts.Samples {
+					sampleExpected := tsExpected.Samples[j]
+					if !reflect.DeepEqual(sample, sampleExpected) {
+						return fmt.Errorf("idx: %d, label idx: %d, not equal sample pairs, \ngot: \n%s,\nwant: \n%s",
+							i, j, prettifySample(sample), prettifySample(sampleExpected))
+					}
+				}
+			}
+			return nil
+		}
+
+		req := &pb.ExportMetricsServiceRequest{
+			ResourceMetrics: []*pb.ResourceMetrics{
+				generateOTLPSamples(samples),
+			},
+		}
+
+		// Verify protobuf parsing
+		pbData, err := req.MarshalVT()
+		if err != nil {
+			t.Fatalf("cannot marshal to protobuf: %s", err)
+		}
+		if err := checkParseStream(pbData, checkSeries); err != nil {
+			t.Fatalf("cannot parse protobuf: %s", err)
+		}
+	}
+
+	jobLabelValue := prompbmarshal.Label{
+		Name:  "job",
+		Value: "vm",
+	}
+	leLabel := func(value string) prompbmarshal.Label {
+		return prompbmarshal.Label{
+			Name:  "le",
+			Value: value,
+		}
+	}
+	kvLabel := func(k, v string) prompbmarshal.Label {
+		return prompbmarshal.Label{
+			Name:  k,
+			Value: v,
+		}
+	}
+
+	// Test all metric types
+	f(
+		[]*pb.Metric{
+			generateGauge("my-gauge"),
+			generateHistogram("my-histogram"),
+			generateSum("my-sum"),
+			generateSummary("my-summary"),
+		},
+		[]prompbmarshal.TimeSeries{
+			newPromPBTs("my-gauge", 15000, 15.0, jobLabelValue, kvLabel("label1", "value1")),
+			newPromPBTs("my-histogram_count", 30000, 15.0, jobLabelValue, kvLabel("label2", "value2")),
+			newPromPBTs("my-histogram_sum", 30000, 30.0, jobLabelValue, kvLabel("label2", "value2")),
+			newPromPBTs("my-histogram_bucket", 30000, 0.0, jobLabelValue, kvLabel("label2", "value2"), leLabel("0.1")),
+			newPromPBTs("my-histogram_bucket", 30000, 5.0, jobLabelValue, kvLabel("label2", "value2"), leLabel("0.5")),
+			newPromPBTs("my-histogram_bucket", 30000, 15.0, jobLabelValue, kvLabel("label2", "value2"), leLabel("1")),
+			newPromPBTs("my-histogram_bucket", 30000, 15.0, jobLabelValue, kvLabel("label2", "value2"), leLabel("5")),
+			newPromPBTs("my-histogram_bucket", 30000, 15.0, jobLabelValue, kvLabel("label2", "value2"), leLabel("+Inf")),
+			newPromPBTs("my-sum", 150000, 15.5, jobLabelValue, kvLabel("label5", "value5")),
+			newPromPBTs("my-summary_sum", 35000, 32.5, jobLabelValue, kvLabel("label6", "value6")),
+			newPromPBTs("my-summary_count", 35000, 5.0, jobLabelValue, kvLabel("label6", "value6")),
+			newPromPBTs("my-summary", 35000, 7.5, jobLabelValue, kvLabel("label6", "value6"), kvLabel("quantile", "0.1")),
+			newPromPBTs("my-summary", 35000, 10.0, jobLabelValue, kvLabel("label6", "value6"), kvLabel("quantile", "0.5")),
+			newPromPBTs("my-summary", 35000, 15.0, jobLabelValue, kvLabel("label6", "value6"), kvLabel("quantile", "1")),
+		})
+
+	// Test gauge
+	f(
+		[]*pb.Metric{
+			generateGauge("my-gauge"),
+		},
+		[]prompbmarshal.TimeSeries{
+			newPromPBTs("my-gauge", 15000, 15.0, jobLabelValue, kvLabel("label1", "value1")),
+		},
+	)
+}
+
+func checkParseStream(data []byte, checkSeries func(tss []prompbmarshal.TimeSeries) error) error {
+	// Verify parsing without compression
+	if err := ParseStream(bytes.NewBuffer(data), false, checkSeries); err != nil {
+		return fmt.Errorf("error when parsing data: %w", err)
+	}
+
+	// Verify parsing with compression
+	var bb bytes.Buffer
+	zw := gzip.NewWriter(&bb)
+	if _, err := zw.Write(data); err != nil {
+		return fmt.Errorf("cannot compress data: %s", err)
+	}
+	if err := zw.Close(); err != nil {
+		return fmt.Errorf("cannot close gzip writer: %s", err)
+	}
+	if err := ParseStream(&bb, true, checkSeries); err != nil {
+		return fmt.Errorf("error when parsing compressed data: %w", err)
+	}
+
+	return nil
+}
+
+func attributesFromKV(k, v string) []*pb.KeyValue {
+	return []*pb.KeyValue{
+		{
+			Key: k,
+			Value: &pb.AnyValue{
+				Value: &pb.AnyValue_StringValue{
+					StringValue: v,
+				},
+			},
+		},
+	}
+}
+
+func generateGauge(name string) *pb.Metric {
+	points := []*pb.NumberDataPoint{
+		{
+			Attributes:   attributesFromKV("label1", "value1"),
+			Value:        &pb.NumberDataPoint_AsInt{AsInt: 15},
+			TimeUnixNano: uint64(15 * time.Second),
+		},
+	}
+	return &pb.Metric{
+		Name: name,
+		Data: &pb.Metric_Gauge{
+			Gauge: &pb.Gauge{
+				DataPoints: points,
+			},
+		},
+	}
+}
+
+func generateHistogram(name string) *pb.Metric {
+	points := []*pb.HistogramDataPoint{
+		{
+			Attributes:     attributesFromKV("label2", "value2"),
+			Count:          15,
+			Sum:            func() *float64 { v := 30.0; return &v }(),
+			ExplicitBounds: []float64{0.1, 0.5, 1.0, 5.0},
+			BucketCounts:   []uint64{0, 5, 10, 0, 0},
+			TimeUnixNano:   uint64(30 * time.Second),
+		},
+	}
+	return &pb.Metric{
+		Name: name,
+		Data: &pb.Metric_Histogram{
+			Histogram: &pb.Histogram{
+				AggregationTemporality: pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
+				DataPoints:             points,
+			},
+		},
+	}
+}
+
+func generateSum(name string) *pb.Metric {
+	points := []*pb.NumberDataPoint{
+		{
+			Attributes:   attributesFromKV("label5", "value5"),
+			Value:        &pb.NumberDataPoint_AsDouble{AsDouble: 15.5},
+			TimeUnixNano: uint64(150 * time.Second),
+		},
+	}
+	return &pb.Metric{
+		Name: name,
+		Data: &pb.Metric_Sum{
+			Sum: &pb.Sum{
+				AggregationTemporality: pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
+				DataPoints:             points,
+			},
+		},
+	}
+}
+
+func generateSummary(name string) *pb.Metric {
+	points := []*pb.SummaryDataPoint{
+		{
+			Attributes:   attributesFromKV("label6", "value6"),
+			TimeUnixNano: uint64(35 * time.Second),
+			Sum:          32.5,
+			Count:        5,
+			QuantileValues: []*pb.SummaryDataPoint_ValueAtQuantile{
+				{
+					Quantile: 0.1,
+					Value:    7.5,
+				},
+				{
+					Quantile: 0.5,
+					Value:    10.0,
+				},
+				{
+					Quantile: 1.0,
+					Value:    15.0,
+				},
+			},
+		},
+	}
+	return &pb.Metric{
+		Name: name,
+		Data: &pb.Metric_Summary{
+			Summary: &pb.Summary{
+				DataPoints: points,
+			},
+		},
+	}
+}
+
+func generateOTLPSamples(srcs []*pb.Metric) *pb.ResourceMetrics {
+	otlpMetrics := &pb.ResourceMetrics{
+		Resource: &pb.Resource{
+			Attributes: attributesFromKV("job", "vm"),
+		},
+	}
+	otlpMetrics.ScopeMetrics = []*pb.ScopeMetrics{
+		{
+			Metrics: append([]*pb.Metric{}, srcs...),
+		},
+	}
+	return otlpMetrics
+}
+
+func newPromPBTs(metricName string, t int64, v float64, extraLabels ...prompbmarshal.Label) prompbmarshal.TimeSeries {
+	if t <= 0 {
+		// Set the current timestamp if t isn't set.
+		t = int64(fasttime.UnixTimestamp()) * 1000
+	}
+	ts := prompbmarshal.TimeSeries{
+		Labels: []prompbmarshal.Label{
+			{
+				Name:  "__name__",
+				Value: metricName,
+			},
+		},
+		Samples: []prompbmarshal.Sample{
+			{
+				Value:     v,
+				Timestamp: t,
+			},
+		},
+	}
+	ts.Labels = append(ts.Labels, extraLabels...)
+	return ts
+}
+
+func prettifyLabel(label prompbmarshal.Label) string {
+	return fmt.Sprintf("name=%q value=%q", label.Name, label.Value)
+}
+
+func prettifySample(sample prompbmarshal.Sample) string {
+	return fmt.Sprintf("sample=%f timestamp: %d", sample.Value, sample.Timestamp)
+}
+
+func sortByMetricName(tss []prompbmarshal.TimeSeries) {
+	sort.Slice(tss, func(i, j int) bool {
+		return getMetricName(tss[i].Labels) < getMetricName(tss[j].Labels)
+	})
+}
+
+func getMetricName(labels []prompbmarshal.Label) string {
+	for _, l := range labels {
+		if l.Name == "__name__" {
+			return l.Value
+		}
+	}
+	return ""
+}
+
+func sortLabels(labels []prompbmarshal.Label) {
+	sort.Slice(labels, func(i, j int) bool {
+		return labels[i].Name < labels[j].Name
+	})
+}
diff --git a/lib/protoparser/opentelemetry/stream/streamparser_timing_test.go b/lib/protoparser/opentelemetry/stream/streamparser_timing_test.go
new file mode 100644
index 0000000000..368c0309d3
--- /dev/null
+++ b/lib/protoparser/opentelemetry/stream/streamparser_timing_test.go
@@ -0,0 +1,39 @@
+package stream
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
+)
+
+func BenchmarkParseStream(b *testing.B) {
+	samples := []*pb.Metric{
+		generateGauge("my-gauge"),
+		generateHistogram("my-histogram"),
+		generateSum("my-sum"),
+		generateSummary("my-summary"),
+	}
+	b.SetBytes(1)
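+	// SetBytes(1) makes the reported MB/s figure equal to millions of parsed requests per second.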
+	b.ReportAllocs()
+	b.RunParallel(func(p *testing.PB) {
+		pbRequest := pb.ExportMetricsServiceRequest{
+			ResourceMetrics: []*pb.ResourceMetrics{generateOTLPSamples(samples)},
+		}
+		data, err := pbRequest.MarshalVT()
+		if err != nil {
+			b.Fatalf("cannot marshal data: %s", err)
+		}
+
+		for p.Next() {
+			err := ParseStream(bytes.NewBuffer(data), false, func(tss []prompbmarshal.TimeSeries) error {
+				return nil
+			})
+			if err != nil {
+				b.Fatalf("cannot parse stream: %s", err)
+			}
+		}
+	})
+}