diff --git a/app/vmagent/README.md b/app/vmagent/README.md
index e4d32a6934..dd4cbcda36 100644
--- a/app/vmagent/README.md
+++ b/app/vmagent/README.md
@@ -140,7 +140,25 @@ While `vmagent` can accept data in several supported protocols (OpenTSDB, Influx
 
 ## Multitenancy
 
-By default `vmagent` collects the data without tenant identifiers and routes it to the configured `-remoteWrite.url`. But it can accept multitenant data if `-remoteWrite.multitenantURL` is set. In this case it accepts multitenant data at `http://vmagent:8429/insert/<accountID>/...` in the same way as cluster version of VictoriaMetrics does according to [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format) and routes it to `<-remoteWrite.multitenantURL>/insert/<accountID>/prometheus/api/v1/write`. If multiple `-remoteWrite.multitenantURL` command-line options are set, then `vmagent` replicates the collected data across all the configured urls. This allows using a single `vmagent` instance in front of VictoriaMetrics clusters for processing the data from all the tenants.
+By default `vmagent` collects the data without tenant identifiers and routes it to the configured `-remoteWrite.url`.
+
+[Multitenancy](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy) support is enabled when `-remoteWrite.multitenantURL` command-line flag is set. In this case `vmagent` accepts multitenant data at `http://vmagent:8429/insert/<accountID>/...` in the same way as cluster version of VictoriaMetrics does according to [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format) and routes it to `<-remoteWrite.multitenantURL>/insert/<accountID>/prometheus/api/v1/write`. If multiple `-remoteWrite.multitenantURL` command-line options are set, then `vmagent` replicates the collected data across all the configured urls. This allows using a single `vmagent` instance in front of VictoriaMetrics clusters for processing the data from all the tenants.
+
+If `-remoteWrite.multitenantURL` command-line flag is set and `vmagent` is configured to scrape Prometheus-compatible targets (e.g. if `-promscrape.config` command-line flag is set)
+then `vmagent` reads tenantID from `__tenant_id__` label for the discovered targets and routes all the metrics from this target to the given `__tenant_id__`, e.g. to the url `<-remoteWrite.multitenantURL>/insert/<__tenant_id__>/prometheus/api/v1/write`.
+
+For example, the following relabeling rule instructs sending metrics to tenantID defined in the `prometheus.io/tenant` annotation of Kubernetes pod deployment:
+
+```yaml
+scrape_configs:
+- kubernetes_sd_configs:
+  - role: pod
+  relabel_configs:
+  - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_tenant]
+    target_label: __tenant_id__
+```
+
+If the target has no associated `__tenant_id__` label, then its metrics are routed to zero tenantID, e.g. to `<-remoteWrite.multitenantURL>/insert/0/prometheus/api/v1/write`.
 
 ## How to collect metrics in Prometheus format
 
diff --git a/app/vmagent/csvimport/request_handler.go b/app/vmagent/csvimport/request_handler.go
index 6aa1ad2d1f..72dadc888d 100644
--- a/app/vmagent/csvimport/request_handler.go
+++ b/app/vmagent/csvimport/request_handler.go
@@ -67,7 +67,7 @@ func insertRows(at *auth.Token, rows []parser.Row, extraLabels []prompbmarshal.L
 	ctx.WriteRequest.Timeseries = tssDst
 	ctx.Labels = labels
 	ctx.Samples = samples
-	remotewrite.PushWithAuthToken(at, &ctx.WriteRequest)
+	remotewrite.Push(at, &ctx.WriteRequest)
 	rowsInserted.Add(len(rows))
 	if at != nil {
 		rowsTenantInserted.Get(at).Add(len(rows))
diff --git a/app/vmagent/datadog/request_handler.go b/app/vmagent/datadog/request_handler.go
index 269a46bfcf..f094f2c26c 100644
--- a/app/vmagent/datadog/request_handler.go
+++ b/app/vmagent/datadog/request_handler.go
@@ -82,7 +82,7 @@ func insertRows(at *auth.Token, series []parser.Series, extraLabels []prompbmars
 	ctx.WriteRequest.Timeseries = tssDst
 	ctx.Labels = labels
 	ctx.Samples = samples
-	remotewrite.PushWithAuthToken(at, &ctx.WriteRequest)
+	remotewrite.Push(at, &ctx.WriteRequest)
 	rowsInserted.Add(rowsTotal)
 	if at != nil {
 		rowsTenantInserted.Get(at).Add(rowsTotal)
diff --git a/app/vmagent/graphite/request_handler.go b/app/vmagent/graphite/request_handler.go
index fd0d96cd6a..c3ef22d8df 100644
--- a/app/vmagent/graphite/request_handler.go
+++ b/app/vmagent/graphite/request_handler.go
@@ -58,7 +58,7 @@ func insertRows(rows []parser.Row) error {
 	ctx.WriteRequest.Timeseries = tssDst
 	ctx.Labels = labels
 	ctx.Samples = samples
-	remotewrite.Push(&ctx.WriteRequest)
+	remotewrite.Push(nil, &ctx.WriteRequest)
 	rowsInserted.Add(len(rows))
 	rowsPerInsert.Update(float64(len(rows)))
 	return nil
diff --git a/app/vmagent/influx/request_handler.go b/app/vmagent/influx/request_handler.go
index ef409ef9c4..19177203d8 100644
--- a/app/vmagent/influx/request_handler.go
+++ b/app/vmagent/influx/request_handler.go
@@ -134,7 +134,7 @@ func insertRows(at *auth.Token, db string, rows []parser.Row, extraLabels []prom
 	ctx.ctx.Labels = labels
 	ctx.ctx.Samples = samples
 	ctx.commonLabels = commonLabels
-	remotewrite.PushWithAuthToken(at, &ctx.ctx.WriteRequest)
+	remotewrite.Push(at, &ctx.ctx.WriteRequest)
 	rowsInserted.Add(rowsTotal)
 	if at != nil {
 		rowsTenantInserted.Get(at).Add(rowsTotal)
diff --git a/app/vmagent/native/request_handler.go b/app/vmagent/native/request_handler.go
index 486e7bf32d..f0869e0425 100644
--- a/app/vmagent/native/request_handler.go
+++ b/app/vmagent/native/request_handler.go
@@ -87,6 +87,6 @@ func insertRows(at *auth.Token, block *parser.Block, extraLabels []prompbmarshal
 	ctx.WriteRequest.Timeseries = tssDst
 	ctx.Labels = labels
 	ctx.Samples = samples
-	remotewrite.PushWithAuthToken(at, &ctx.WriteRequest)
+	remotewrite.Push(at, &ctx.WriteRequest)
 	return nil
 }
diff --git a/app/vmagent/opentsdb/request_handler.go b/app/vmagent/opentsdb/request_handler.go
index 628676de94..2721912d63 100644
--- a/app/vmagent/opentsdb/request_handler.go
+++ b/app/vmagent/opentsdb/request_handler.go
@@ -58,7 +58,7 @@ func insertRows(rows []parser.Row) error {
 	ctx.WriteRequest.Timeseries = tssDst
 	ctx.Labels = labels
 	ctx.Samples = samples
-	remotewrite.Push(&ctx.WriteRequest)
+	remotewrite.Push(nil, &ctx.WriteRequest)
 	rowsInserted.Add(len(rows))
 	rowsPerInsert.Update(float64(len(rows)))
 	return nil
diff --git a/app/vmagent/opentsdbhttp/request_handler.go b/app/vmagent/opentsdbhttp/request_handler.go
index b3026ab861..7d2d409eb2 100644
--- a/app/vmagent/opentsdbhttp/request_handler.go
+++ b/app/vmagent/opentsdbhttp/request_handler.go
@@ -65,7 +65,7 @@ func insertRows(rows []parser.Row, extraLabels []prompbmarshal.Label) error {
 	ctx.WriteRequest.Timeseries = tssDst
 	ctx.Labels = labels
 	ctx.Samples = samples
-	remotewrite.Push(&ctx.WriteRequest)
+	remotewrite.Push(nil, &ctx.WriteRequest)
 	rowsInserted.Add(len(rows))
 	rowsPerInsert.Update(float64(len(rows)))
 	return nil
diff --git a/app/vmagent/prometheusimport/request_handler.go b/app/vmagent/prometheusimport/request_handler.go
index 350bd8be68..e5b86a7992 100644
--- a/app/vmagent/prometheusimport/request_handler.go
+++ b/app/vmagent/prometheusimport/request_handler.go
@@ -82,7 +82,7 @@ func insertRows(at *auth.Token, rows []parser.Row, extraLabels []prompbmarshal.L
 	ctx.WriteRequest.Timeseries = tssDst
 	ctx.Labels = labels
 	ctx.Samples = samples
-	remotewrite.PushWithAuthToken(at, &ctx.WriteRequest)
+	remotewrite.Push(at, &ctx.WriteRequest)
 	rowsInserted.Add(len(rows))
 	if at != nil {
 		rowsTenantInserted.Get(at).Add(len(rows))
diff --git a/app/vmagent/promremotewrite/request_handler.go b/app/vmagent/promremotewrite/request_handler.go
index 29af224ed8..f2ed1b6f6b 100644
--- a/app/vmagent/promremotewrite/request_handler.go
+++ b/app/vmagent/promremotewrite/request_handler.go
@@ -81,7 +81,7 @@ func insertRows(at *auth.Token, timeseries []prompb.TimeSeries, extraLabels []pr
 	ctx.WriteRequest.Timeseries = tssDst
 	ctx.Labels = labels
 	ctx.Samples = samples
-	remotewrite.PushWithAuthToken(at, &ctx.WriteRequest)
+	remotewrite.Push(at, &ctx.WriteRequest)
 	rowsInserted.Add(rowsTotal)
 	if at != nil {
 		rowsTenantInserted.Get(at).Add(rowsTotal)
diff --git a/app/vmagent/remotewrite/remotewrite.go b/app/vmagent/remotewrite/remotewrite.go
index ff77572df4..c863765aa1 100644
--- a/app/vmagent/remotewrite/remotewrite.go
+++ b/app/vmagent/remotewrite/remotewrite.go
@@ -234,15 +234,11 @@ func Stop() {
 
 // Push sends wr to remote storage systems set via `-remoteWrite.url`.
 //
-// Note that wr may be modified by Push due to relabeling and rounding.
-func Push(wr *prompbmarshal.WriteRequest) {
-	PushWithAuthToken(nil, wr)
-}
-
-// PushWithAuthToken sends wr to remote storage systems set via `-remoteWrite.multitenantURL`.
+// If at is nil, then the data is pushed to the configured `-remoteWrite.url`.
+// If at isn't nil, then the data is pushed to the configured `-remoteWrite.multitenantURL`.
 //
 // Note that wr may be modified by Push due to relabeling and rounding.
-func PushWithAuthToken(at *auth.Token, wr *prompbmarshal.WriteRequest) {
+func Push(at *auth.Token, wr *prompbmarshal.WriteRequest) {
 	if at == nil && len(*remoteWriteMultitenantURLs) > 0 {
 		// Write data to default tenant if at isn't set while -remoteWrite.multitenantURL is set.
 		at = defaultAuthToken
@@ -252,7 +248,7 @@ func PushWithAuthToken(at *auth.Token, wr *prompbmarshal.WriteRequest) {
 		rwctxs = rwctxsDefault
 	} else {
 		if len(*remoteWriteMultitenantURLs) == 0 {
-			logger.Panicf("BUG: remoteWriteMultitenantURLs must be non-empty for non-nil at")
+			logger.Panicf("BUG: -remoteWrite.multitenantURL command-line flag must be set when __tenant_id__=%q label is set", at)
 		}
 		rwctxsMapLock.Lock()
 		tenantID := tenantmetrics.TenantID{
diff --git a/app/vmagent/vmimport/request_handler.go b/app/vmagent/vmimport/request_handler.go
index c2f4ed71b0..0afe7389b6 100644
--- a/app/vmagent/vmimport/request_handler.go
+++ b/app/vmagent/vmimport/request_handler.go
@@ -88,7 +88,7 @@ func insertRows(at *auth.Token, rows []parser.Row, extraLabels []prompbmarshal.L
 	ctx.WriteRequest.Timeseries = tssDst
 	ctx.Labels = labels
 	ctx.Samples = samples
-	remotewrite.PushWithAuthToken(at, &ctx.WriteRequest)
+	remotewrite.Push(at, &ctx.WriteRequest)
 	rowsInserted.Add(rowsTotal)
 	if at != nil {
 		rowsTenantInserted.Get(at).Add(rowsTotal)
diff --git a/app/vminsert/main.go b/app/vminsert/main.go
index 90836da577..05c9fd85ea 100644
--- a/app/vminsert/main.go
+++ b/app/vminsert/main.go
@@ -21,6 +21,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/promremotewrite"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/vmimport"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/influxutils"
@@ -29,6 +30,7 @@ import (
 	opentsdbserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/opentsdb"
 	opentsdbhttpserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/opentsdbhttp"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
@@ -80,7 +82,9 @@ func Init() {
 	if len(*opentsdbHTTPListenAddr) > 0 {
 		opentsdbhttpServer = opentsdbhttpserver.MustStart(*opentsdbHTTPListenAddr, opentsdbhttp.InsertHandler)
 	}
-	promscrape.Init(prompush.Push)
+	promscrape.Init(func(at *auth.Token, wr *prompbmarshal.WriteRequest) {
+		prompush.Push(wr)
+	})
 }
 
 // Stop stops vminsert.
diff --git a/app/vminsert/prompush/push.go b/app/vminsert/prompush/push.go
index 40d495971c..a51e77dc09 100644
--- a/app/vminsert/prompush/push.go
+++ b/app/vminsert/prompush/push.go
@@ -14,7 +14,7 @@ var (
 
 const maxRowsPerBlock = 10000
 
-// Push pushes wr to storage.
+// Push pushes wr to storage.
 func Push(wr *prompbmarshal.WriteRequest) {
 	ctx := common.GetInsertCtx()
 	defer common.PutInsertCtx(ctx)
diff --git a/dashboards/victoriametrics.json b/dashboards/victoriametrics.json
index 911e686667..5efea695e9 100644
--- a/dashboards/victoriametrics.json
+++ b/dashboards/victoriametrics.json
@@ -1,12 +1,12 @@
 {
   "__inputs": [],
-  "__elements": [],
+  "__elements": {},
   "__requires": [
     {
       "type": "grafana",
       "id": "grafana",
       "name": "Grafana",
-      "version": "8.3.5"
+      "version": "9.0.3"
     },
     {
       "type": "panel",
@@ -37,7 +37,10 @@
     "list": [
       {
         "builtIn": 1,
-        "datasource": "-- Grafana --",
+        "datasource": {
+          "type": "datasource",
+          "uid": "grafana"
+        },
         "enable": true,
         "hide": true,
         "iconColor": "rgba(0, 211, 255, 1)",
@@ -58,7 +61,7 @@
   "gnetId": 10229,
   "graphTooltip": 0,
   "id": null,
-  "iteration": 1650637176348,
+  "iteration": 1659966607833,
   "links": [
     {
       "icon": "doc",
@@ -101,6 +104,14 @@
       },
       "id": 6,
       "panels": [],
+      "targets": [
+        {
+          "datasource": {
+            "uid": "$ds"
+          },
+          "refId": "A"
+        }
+      ],
       "title": "Stats",
       "type": "row"
     },
@@ -120,7 +131,15 @@
         "content": "<div style=\"text-align: center;\">$version</div>",
         "mode": "markdown"
       },
-      "pluginVersion": "8.3.5",
+      "pluginVersion": "9.0.3",
+      "targets": [
+        {
+          "datasource": {
+            "uid": "$ds"
+          },
+          "refId": "A"
+        }
+      ],
       "title": "Version",
       "type": "text"
     },
@@ -172,9 +191,12 @@
         "text": {},
         "textMode": "auto"
       },
-      "pluginVersion": "8.3.5",
+      "pluginVersion": "9.0.3",
       "targets": [
         {
+          "datasource": {
+            "uid": "$ds"
+          },
           "exemplar": true,
           "expr": "sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"})",
           "format": "time_series",
@@ -237,7 +259,7 @@
         "text": {},
         "textMode": "auto"
       },
-      "pluginVersion": "8.3.5",
+      "pluginVersion": "9.0.3",
       "targets": [
         {
           "datasource": {
@@ -305,9 +327,12 @@
         "text": {},
         "textMode": "auto"
       },
-      "pluginVersion": "8.3.5",
+      "pluginVersion": "9.0.3",
       "targets": [
         {
+          "datasource": {
+            "uid": "$ds"
+          },
           "exemplar": true,
           "expr": "sum(vm_data_size_bytes{job=~\"$job\", type!=\"indexdb\"}) / sum(vm_rows{job=~\"$job\", type!=\"indexdb\"})",
           "format": "time_series",
@@ -369,9 +394,12 @@
         "text": {},
         "textMode": "auto"
       },
-      "pluginVersion": "8.3.5",
+      "pluginVersion": "9.0.3",
       "targets": [
         {
+          "datasource": {
+            "uid": "$ds"
+          },
           "exemplar": true,
           "expr": "sum(vm_allowed_memory_bytes{job=~\"$job\", instance=~\"$instance\"})",
           "format": "time_series",
@@ -434,9 +462,12 @@
         "text": {},
         "textMode": "auto"
       },
-      "pluginVersion": "8.3.5",
+      "pluginVersion": "9.0.3",
       "targets": [
         {
+          "datasource": {
+            "uid": "$ds"
+          },
           "exemplar": true,
           "expr": "vm_app_uptime_seconds{job=~\"$job\", instance=~\"$instance\"}",
           "instant": true,
@@ -496,9 +527,12 @@
         "text": {},
         "textMode": "auto"
       },
-      "pluginVersion": "8.3.5",
+      "pluginVersion": "9.0.3",
       "targets": [
         {
+          "datasource": {
+            "uid": "$ds"
+          },
           "exemplar": true,
           "expr": "sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type=\"indexdb\"})",
           "format": "time_series",
@@ -560,9 +594,12 @@
         "text": {},
         "textMode": "auto"
       },
-      "pluginVersion": "8.3.5",
+      "pluginVersion": "9.0.3",
       "targets": [
         {
+          "datasource": {
+            "uid": "$ds"
+          },
           "exemplar": true,
           "expr": "min(vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"})",
           "format": "time_series",
@@ -628,9 +665,12 @@
         "text": {},
         "textMode": "auto"
       },
-      "pluginVersion": "8.3.5",
+      "pluginVersion": "9.0.3",
       "targets": [
         {
+          "datasource": {
+            "uid": "$ds"
+          },
           "exemplar": true,
           "expr": "sum(vm_available_cpu_cores{job=~\"$job\", instance=~\"$instance\"})",
           "format": "time_series",
@@ -692,9 +732,12 @@
         "text": {},
         "textMode": "auto"
       },
-      "pluginVersion": "8.3.5",
+      "pluginVersion": "9.0.3",
       "targets": [
         {
+          "datasource": {
+            "uid": "$ds"
+          },
           "exemplar": true,
           "expr": "sum(vm_available_memory_bytes{job=~\"$job\", instance=~\"$instance\"})",
           "format": "time_series",
@@ -721,6 +764,14 @@
       },
       "id": 24,
       "panels": [],
+      "targets": [
+        {
+          "datasource": {
+            "uid": "$ds"
+          },
+          "refId": "A"
+        }
+      ],
       "title": "Performance",
       "type": "row"
     },
@@ -769,7 +820,7 @@
         "alertThreshold": true
       },
       "percentage": false,
-      "pluginVersion": "8.3.5",
+      "pluginVersion": "9.0.3",
       "pointradius": 2,
       "points": false,
       "renderer": "flot",
@@ -779,6 +830,9 @@
       "steppedLine": false,
       "targets": [
         {
+          "datasource": {
+            "uid": "$ds"
+          },
           "expr": "sum(rate(vm_http_requests_total{job=~\"$job\", instance=~\"$instance\", path!~\"/favicon.ico\"}[$__interval])) by (path) > 0",
           "format": "time_series",
           "interval": "",
@@ -864,7 +918,7 @@
         "alertThreshold": true
       },
       "percentage": false,
-      "pluginVersion": "8.3.5",
+      "pluginVersion": "9.0.3",
       "pointradius": 2,
       "points": false,
       "renderer": "flot",
@@ -874,6 +928,9 @@
       "steppedLine": false,
       "targets": [
         {
+          "datasource": {
+            "uid": "$ds"
+          },
           "expr": "max(vm_request_duration_seconds{job=~\"$job\", instance=~\"$instance\", quantile=~\"(0.5|0.99)\"}) by (path, quantile) > 0",
           "format": "time_series",
           "intervalFactor": 1,
@@ -964,7 +1021,7 @@
         "alertThreshold": true
       },
       "percentage": false,
-      "pluginVersion": "8.3.5",
+      "pluginVersion": "9.0.3",
       "pointradius": 2,
       "points": false,
       "renderer": "flot",
@@ -974,6 +1031,9 @@
       "steppedLine": false,
       "targets": [
         {
+          "datasource": {
+            "uid": "$ds"
+          },
           "expr": "vm_cache_entries{job=~\"$job\", instance=~\"$instance\", type=\"storage/hour_metric_ids\"}",
           "format": "time_series",
           "intervalFactor": 1,
@@ -1058,7 +1118,7 @@
         "alertThreshold": true
       },
       "percentage": false,
-      "pluginVersion": "8.3.5",
+      "pluginVersion": "9.0.3",
       "pointradius": 2,
       "points": false,
       "renderer": "flot",
@@ -1068,6 +1128,9 @@
       "steppedLine": false,
       "targets": [
         {
+          "datasource": {
+            "uid": "$ds"
+          },
           "exemplar": false,
           "expr": "sum(rate(vm_http_request_errors_total{job=~\"$job\", instance=~\"$instance\"}[$__interval])) by (path) > 0",
           "format": "time_series",
@@ -1171,6 +1234,9 @@
       "steppedLine": false,
       "targets": [
         {
+          "datasource": {
+            "uid": "$ds"
+          },
           "expr": "sum(vm_concurrent_addrows_capacity{job=~\"$job\", instance=~\"$instance\"})",
           "format": "time_series",
           "interval": "",
@@ -1179,6 +1245,9 @@
           "refId": "A"
         },
         {
+          "datasource": {
+            "uid": "$ds"
+          },
           "expr": "sum(vm_concurrent_addrows_current{job=~\"$job\", instance=~\"$instance\"})",
           "format": "time_series",
           "intervalFactor": 1,
@@ -1224,6 +1293,10 @@
     },
     {
       "collapsed": true,
+      "datasource": {
+        "type": "prometheus",
+        "uid": "P4169E866C3094E38"
+      },
       "gridPos": {
         "h": 1,
         "w": 24,
@@ -1248,7 +1321,7 @@
             "h": 8,
             "w": 24,
             "x": 0,
-            "y": 31
+            "y": 7
           },
           "hiddenSeries": false,
           "id": 94,
@@ -1272,7 +1345,7 @@
             "alertThreshold": true
           },
           "percentage": false,
-          "pluginVersion": "8.3.5",
+          "pluginVersion": "9.0.3",
           "pointradius": 2,
           "points": false,
           "renderer": "flot",
@@ -1345,6 +1418,95 @@
             "align": false
           }
         },
+        {
+          "aliasColors": {},
+          "bars": false,
+          "dashLength": 10,
+          "dashes": false,
+          "datasource": {
+            "type": "prometheus",
+            "uid": "$ds"
+          },
+          "description": "Shows the percentage of used cache size from the allowed size by type. \nValues close to 100% show the maximum potential utilization.\nValues close to 0% show that cache is underutilized.",
+          "fill": 0,
+          "fillGradient": 0,
+          "gridPos": {
+            "h": 7,
+            "w": 24,
+            "x": 0,
+            "y": 15
+          },
+          "hiddenSeries": false,
+          "id": 97,
+          "legend": {
+            "avg": false,
+            "current": false,
+            "max": false,
+            "min": false,
+            "show": false,
+            "total": false,
+            "values": false
+          },
+          "lines": true,
+          "linewidth": 1,
+          "nullPointMode": "null",
+          "options": {
+            "alertThreshold": true
+          },
+          "percentage": false,
+          "pluginVersion": "9.0.3",
+          "pointradius": 2,
+          "points": false,
+          "renderer": "flot",
+          "seriesOverrides": [],
+          "spaceLength": 10,
+          "stack": false,
+          "steppedLine": false,
+          "targets": [
+            {
+              "datasource": {
+                "type": "prometheus",
+                "uid": "$ds"
+              },
+              "exemplar": true,
+              "expr": "vm_cache_size_bytes{job=~\"$job\", instance=~\"$instance\"} / vm_cache_size_max_bytes{job=~\"$job\", instance=~\"$instance\"}",
+              "interval": "",
+              "legendFormat": "{{type}}",
+              "refId": "A"
+            }
+          ],
+          "thresholds": [],
+          "timeRegions": [],
+          "title": "Cache usage % ($instance)",
+          "tooltip": {
+            "shared": true,
+            "sort": 2,
+            "value_type": "individual"
+          },
+          "type": "graph",
+          "xaxis": {
+            "mode": "time",
+            "show": true,
+            "values": []
+          },
+          "yaxes": [
+            {
+              "$$hashKey": "object:235",
+              "format": "percentunit",
+              "logBase": 1,
+              "show": true
+            },
+            {
+              "$$hashKey": "object:236",
+              "format": "short",
+              "logBase": 1,
+              "show": true
+            }
+          ],
+          "yaxis": {
+            "align": false
+          }
+        },
         {
           "aliasColors": {},
           "bars": false,
@@ -1355,13 +1517,13 @@
             "uid": "$ds"
           },
           "description": "Cache hit ratio shows cache efficiency. The higher is hit rate the better.",
-          "fill": 1,
+          "fill": 0,
           "fillGradient": 0,
           "gridPos": {
             "h": 8,
             "w": 24,
             "x": 0,
-            "y": 39
+            "y": 22
           },
           "hiddenSeries": false,
           "id": 95,
@@ -1385,7 +1547,7 @@
             "alertThreshold": true
           },
           "percentage": false,
-          "pluginVersion": "8.3.5",
+          "pluginVersion": "9.0.3",
           "pointradius": 2,
           "points": false,
           "renderer": "flot",
@@ -1447,6 +1609,15 @@
           }
         }
       ],
+      "targets": [
+        {
+          "datasource": {
+            "type": "prometheus",
+            "uid": "P4169E866C3094E38"
+          },
+          "refId": "A"
+        }
+      ],
       "title": "Caches",
       "type": "row"
     },
@@ -1519,6 +1690,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(rate(vm_rows_inserted_total{job=~\"$job\", instance=~\"$instance\"}[$__interval])) by (type) > 0",
               "format": "time_series",
               "hide": false,
@@ -1615,6 +1789,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"} / ignoring(path) ((rate(vm_rows_added_to_storage_total{job=~\"$job\", instance=~\"$instance\"}[1d]) - ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job\", instance=~\"$instance\", type=\"merge\"}[1d])) * scalar(sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"}) / sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"})))",
               "format": "time_series",
               "hide": false,
@@ -1716,6 +1893,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type != \"indexdb\"})",
               "format": "time_series",
               "interval": "",
@@ -1724,6 +1904,9 @@
               "refId": "A"
             },
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"}) / sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type != \"indexdb\"})",
               "format": "time_series",
               "interval": "",
@@ -1825,6 +2008,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "vm_pending_rows{job=~\"$job\", instance=~\"$instance\", type=\"storage\"}",
               "format": "time_series",
               "hide": false,
@@ -1833,6 +2019,9 @@
               "refId": "A"
             },
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "vm_pending_rows{job=~\"$job\", instance=~\"$instance\", type=\"indexdb\"}",
               "format": "time_series",
               "hide": false,
@@ -1930,6 +2119,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"})",
               "format": "time_series",
               "interval": "",
@@ -1938,6 +2130,9 @@
               "refId": "A"
             },
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"}",
               "format": "time_series",
               "interval": "",
@@ -2033,6 +2228,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(vm_parts{job=~\"$job\", instance=~\"$instance\"}) by (type)",
               "format": "time_series",
               "intervalFactor": 1,
@@ -2127,6 +2325,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "exemplar": true,
               "expr": "vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type=\"indexdb\"}",
               "format": "time_series",
@@ -2222,6 +2423,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(vm_active_merges{job=~\"$job\", instance=~\"$instance\"}) by(type)",
               "legendFormat": "{{type}}",
               "refId": "A"
@@ -2315,6 +2519,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "exemplar": true,
               "expr": "sum(vm_rows_ignored_total{job=~\"$job\", instance=~\"$instance\"}) by (reason)",
               "format": "time_series",
@@ -2411,6 +2618,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(rate(vm_rows_merged_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])) by(type)",
               "legendFormat": "{{type}}",
               "refId": "A"
@@ -2506,6 +2716,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(rate(vm_log_messages_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])) by (level) ",
               "format": "time_series",
               "hide": false,
@@ -2547,6 +2760,14 @@
           }
         }
       ],
+      "targets": [
+        {
+          "datasource": {
+            "uid": "$ds"
+          },
+          "refId": "A"
+        }
+      ],
       "title": "Storage",
       "type": "row"
     },
@@ -2584,7 +2805,7 @@
             "h": 8,
             "w": 12,
             "x": 0,
-            "y": 33
+            "y": 32
           },
           "hiddenSeries": false,
           "id": 66,
@@ -2607,7 +2828,7 @@
             "alertThreshold": true
           },
           "percentage": false,
-          "pluginVersion": "8.3.5",
+          "pluginVersion": "9.0.3",
           "pointradius": 2,
           "points": false,
           "renderer": "flot",
@@ -2622,12 +2843,18 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(rate(vm_new_timeseries_created_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
               "interval": "",
               "legendFormat": "churn rate",
               "refId": "A"
             },
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(increase(vm_new_timeseries_created_total{job=~\"$job\", instance=~\"$instance\"}[24h]))",
               "interval": "",
               "legendFormat": "new series over 24h",
@@ -2688,7 +2915,7 @@
             "h": 8,
             "w": 12,
             "x": 12,
-            "y": 33
+            "y": 32
           },
           "hiddenSeries": false,
           "id": 96,
@@ -2711,7 +2938,7 @@
             "alertThreshold": true
           },
           "percentage": false,
-          "pluginVersion": "8.3.5",
+          "pluginVersion": "9.0.3",
           "pointradius": 2,
           "points": false,
           "renderer": "flot",
@@ -2787,7 +3014,7 @@
             "h": 8,
             "w": 12,
             "x": 0,
-            "y": 41
+            "y": 40
           },
           "hiddenSeries": false,
           "id": 68,
@@ -2811,7 +3038,7 @@
             "alertThreshold": true
           },
           "percentage": false,
-          "pluginVersion": "8.3.5",
+          "pluginVersion": "9.0.3",
           "pointradius": 2,
           "points": false,
           "renderer": "flot",
@@ -2821,6 +3048,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(rate(vm_slow_row_inserts_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])) / sum(rate(vm_rows_inserted_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
               "format": "time_series",
               "hide": false,
@@ -2894,7 +3124,7 @@
             "h": 8,
             "w": 12,
             "x": 12,
-            "y": 41
+            "y": 40
           },
           "hiddenSeries": false,
           "id": 60,
@@ -2918,7 +3148,7 @@
             "alertThreshold": true
           },
           "percentage": false,
-          "pluginVersion": "8.3.5",
+          "pluginVersion": "9.0.3",
           "pointradius": 2,
           "points": false,
           "renderer": "flot",
@@ -2928,6 +3158,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(rate(vm_slow_queries_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
               "format": "time_series",
               "hide": false,
@@ -2984,7 +3217,7 @@
             "h": 8,
             "w": 12,
             "x": 0,
-            "y": 49
+            "y": 48
           },
           "hiddenSeries": false,
           "id": 90,
@@ -3004,7 +3237,7 @@
             "alertThreshold": true
           },
           "percentage": false,
-          "pluginVersion": "8.3.5",
+          "pluginVersion": "9.0.3",
           "pointradius": 2,
           "points": false,
           "renderer": "flot",
@@ -3078,7 +3311,7 @@
             "h": 8,
             "w": 12,
             "x": 12,
-            "y": 49
+            "y": 48
           },
           "hiddenSeries": false,
           "id": 74,
@@ -3100,7 +3333,7 @@
             "alertThreshold": true
           },
           "percentage": false,
-          "pluginVersion": "8.3.5",
+          "pluginVersion": "9.0.3",
           "pointradius": 2,
           "points": false,
           "renderer": "flot",
@@ -3110,6 +3343,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "exemplar": true,
               "expr": "sum(increase(vm_metrics_with_dropped_labels_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
               "format": "time_series",
@@ -3154,6 +3390,14 @@
           }
         }
       ],
+      "targets": [
+        {
+          "datasource": {
+            "uid": "$ds"
+          },
+          "refId": "A"
+        }
+      ],
       "title": "Troubleshooting",
       "type": "row"
     },
@@ -3225,6 +3469,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(go_memstats_sys_bytes{job=~\"$job\", instance=~\"$instance\"}) + sum(vm_cache_size_bytes{job=~\"$job\", instance=~\"$instance\"})",
               "format": "time_series",
               "hide": false,
@@ -3233,6 +3480,9 @@
               "refId": "A"
             },
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(go_memstats_heap_inuse_bytes{job=~\"$job\", instance=~\"$instance\"}) + sum(vm_cache_size_bytes{job=~\"$job\", instance=~\"$instance\"})",
               "format": "time_series",
               "hide": false,
@@ -3241,6 +3491,9 @@
               "refId": "B"
             },
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(go_memstats_stack_inuse_bytes{job=~\"$job\", instance=~\"$instance\"})",
               "format": "time_series",
               "hide": false,
@@ -3249,6 +3502,9 @@
               "refId": "C"
             },
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(process_resident_memory_bytes{job=~\"$job\", instance=~\"$instance\"})",
               "format": "time_series",
               "hide": false,
@@ -3258,6 +3514,9 @@
               "refId": "D"
             },
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "exemplar": true,
               "expr": "sum(process_resident_memory_anon_bytes{job=~\"$job\", instance=~\"$instance\"})",
               "format": "time_series",
@@ -3480,6 +3739,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(process_open_fds{job=~\"$job\", instance=~\"$instance\"})",
               "format": "time_series",
               "interval": "",
@@ -3488,6 +3750,9 @@
               "refId": "A"
             },
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "min(process_max_fds{job=~\"$job\", instance=~\"$instance\"})",
               "format": "time_series",
               "interval": "",
@@ -3589,6 +3854,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(rate(process_io_storage_read_bytes_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
               "format": "time_series",
               "hide": false,
@@ -3598,6 +3866,9 @@
               "refId": "A"
             },
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(rate(process_io_storage_written_bytes_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
               "format": "time_series",
               "hide": false,
@@ -3692,6 +3963,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(go_goroutines{job=~\"$job\", instance=~\"$instance\"})",
               "format": "time_series",
               "intervalFactor": 2,
@@ -3787,6 +4061,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(rate(go_gc_duration_seconds_sum{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))\n/\nsum(rate(go_gc_duration_seconds_count{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]))",
               "format": "time_series",
               "intervalFactor": 2,
@@ -3880,6 +4157,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(process_num_threads{job=~\"$job\", instance=~\"$instance\"})",
               "format": "time_series",
               "intervalFactor": 2,
@@ -3975,6 +4255,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(vm_tcplistener_conns{job=~\"$job\", instance=~\"$instance\"})",
               "format": "time_series",
               "hide": false,
@@ -4070,6 +4353,9 @@
           "steppedLine": false,
           "targets": [
             {
+              "datasource": {
+                "uid": "$ds"
+              },
               "expr": "sum(rate(vm_tcplistener_accepts_total{job=~\"$job\", instance=~\"$instance\"}[$__interval]))",
               "format": "time_series",
               "hide": false,
@@ -4111,12 +4397,20 @@
           }
         }
       ],
+      "targets": [
+        {
+          "datasource": {
+            "uid": "$ds"
+          },
+          "refId": "A"
+        }
+      ],
       "title": "Resource usage",
       "type": "row"
     }
   ],
   "refresh": "30s",
-  "schemaVersion": 34,
+  "schemaVersion": 36,
   "style": "dark",
   "tags": [
     "victoriametrics",
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index f46b46dcbf..b94a8a8560 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -15,16 +15,22 @@ The following tip changes can be tested by building VictoriaMetrics components f
 
 ## tip
 
+
+## [v1.80.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.80.0)
+
+Released at 08-08-2022
+
 * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): allow configuring additional HTTP request headers for `-datasource.url`, `-remoteWrite.url` and `-remoteRead.url` via `-datasource.headers`, `-remoteWrite.headers` and `-remoteRead.headers` command-line flags. Additional HTTP request headers also can be set on group level via `headers` param - see [these docs](https://docs.victoriametrics.com/vmalert.html#groups) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2860).
 * FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): execute left and right sides of certain operations in parallel. For example, `q1 or q2`, `aggr_func(q1) <op> q2`, `q1 <op> aggr_func(q1)`. This may improve query performance if VictoriaMetrics has enough free resources for parallel processing of both sides of the operation. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2886).
 * FEATURE: [vmauth](https://docs.victoriametrics.com/vmagent.html): allow multiple sections with duplicate `username` but with different `password` values at `-auth.config` file.
 * FEATURE: add ability to push internal metrics (e.g. metrics exposed at `/metrics` page) to the configured remote storage from all the VictoriaMetrics components. See [these docs](https://docs.victoriametrics.com/#push-metrics).
 * FEATURE: improve performance for heavy queries over big number of time series on systems with big number of CPU cores. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2896). Thanks to @zqyzyq for [the idea](https://github.com/VictoriaMetrics/VictoriaMetrics/commit/b596ac3745314fcc170a14e3ded062971cf7ced2).
 * FEATURE: improve performance for registering new time series in `indexdb` by up to 50%. Thanks to @ahfuzhang for [the issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2249).
+* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add ability to specify tenantID in target labels. In this case metrics from the given target are routed to the given `__tenant_id__`. See [these docs](https://docs.victoriametrics.com/vmagent.html#multitenancy) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2943).
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add service discovery for [Yandex Cloud](https://cloud.yandex.com/en/). See [these docs](https://docs.victoriametrics.com/sd_configs.html#yandexcloud_sd_configs) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1386).
 * FEATURE: [vmui](https://docs.victoriametrics.com/#vmui). Zoom in the graph by selecting the needed time range in the same way Grafana does. Hold `ctrl` (or `cmd` on MacOS) in order to move the graph to the left/right. Hold `ctrl` (or `cmd` on MacOS) and scroll up/down in order to zoom in/out the area under the cursor. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2812).
 
-* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): fix potential panic in [multi-level cluster setup](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multi-level-cluster-setup) when top-level `vmselect` is configured with `-replicationFactor` bigger than 1.
+* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): fix potential panic in [multi-level cluster setup](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multi-level-cluster-setup) when top-level `vmselect` is configured with `-replicationFactor` bigger than 1. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2961).
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly handle custom `endpoint` value in [ec2_sd_configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config). It was ignored since [v1.77.0](https://docs.victoriametrics.com/CHANGELOG.html#v1770) because of a bug in the implementation of [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1287). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2917).
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): add missing `__meta_kubernetes_ingress_class_name` meta-label for `role: ingress` service discovery in Kubernetes. See [this commit from Prometheus](https://github.com/prometheus/prometheus/commit/7e65ad3e432bd2837c17e3e63e85dcbcc30f4a8a).
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow stale responses from Consul service discovery (aka [consul_sd_configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config)) by default in the same way as Prometheus does. This should reduce load on Consul when discovering big number of targets. Stale responses can be disabled by specifying `allow_stale: false` option in `consul_sd_config`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2940).
@@ -38,10 +44,34 @@ The following tip changes can be tested by building VictoriaMetrics components f
 * BUGFIX: properly generate http redirects if `-http.pathPrefix` command-line flag is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2918).
 
 
+## [v1.79.2](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.2)
+
+Released at 08-08-2022
+
+**v1.79.x is a line of LTS releases (i.e. long-term support). It contains important up-to-date bugfixes.
+The v1.79.x line will be supported for at least 12 months since the [v1.79.0](https://docs.victoriametrics.com/CHANGELOG.html#v1790) release**
+
+* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): fix potential panic in [multi-level cluster setup](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multi-level-cluster-setup) when top-level `vmselect` is configured with `-replicationFactor` bigger than 1. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2961).
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly handle custom `endpoint` value in [ec2_sd_configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config). It was ignored since [v1.77.0](https://docs.victoriametrics.com/CHANGELOG.html#v1770) because of a bug in the implementation of [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1287).
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): add missing `__meta_kubernetes_ingress_class_name` meta-label for `role: ingress` service discovery in Kubernetes. See [this commit from Prometheus](https://github.com/prometheus/prometheus/commit/7e65ad3e432bd2837c17e3e63e85dcbcc30f4a8a).
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow stale responses from Consul service discovery (aka [consul_sd_configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config)) by default in the same way as Prometheus does. This should reduce load on Consul when discovering big number of targets. Stale responses can be disabled by specifying `allow_stale: false` option in `consul_sd_config`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2940).
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): [dockerswarm_sd_configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dockerswarm_sd_config): properly set `__meta_dockerswarm_container_label_*` labels instead of `__meta_dockerswarm_task_label_*` labels as Prometheus does. See [this issue](https://github.com/prometheus/prometheus/issues/9187).
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): set `up` metric to `0` for partial scrapes in [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode). Previously the `up` metric was set to `1` when at least a single metric has been scraped before the error. This aligns the behaviour of `vmselect` with Prometheus.
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): restart all the scrape jobs during [config reload](https://docs.victoriametrics.com/vmagent.html#configuration-update) after `global` section is changed inside `-promscrape.config`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2884).
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly assume role with AWS ECS credentials. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2875). Thanks to @transacid for [the fix](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2876).
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): do not split regex in [relabeling rules](https://docs.victoriametrics.com/vmagent.html#relabeling) into multiple lines if it contains groups. This fixes [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2928).
+* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): return series from `q1` if `q2` doesn't return matching time series in the query `q1 ifnot q2`. Previously series from `q1` weren't returned in this case.
+* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): properly show date picker at `Table` tab. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2874).
+* BUGFIX: properly generate http redirects if `-http.pathPrefix` command-line flag is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2918).
+
+
 ## [v1.79.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.1)
 
 Released at 02-08-2022
 
+**v1.79.x is a line of LTS releases (i.e. long-term support). It contains important up-to-date bugfixes.
+The v1.79.x line will be supported for at least 12 months since the [v1.79.0](https://docs.victoriametrics.com/CHANGELOG.html#v1790) release**
+
 * SECURITY FIX: upgrade base docker image (alpine) from 3.16.0 to 3.16.1 . See [alpine 3.16.1 release notes](https://alpinelinux.org/posts/Alpine-3.16.1-released.html).
 
 
@@ -49,6 +79,9 @@ Released at 02-08-2022
 
 Released at 14-07-2022
 
+**v1.79.x is a line of LTS releases (i.e. long-term support). It contains important up-to-date bugfixes.
+The v1.79.x line will be supported for at least 12 months since the [v1.79.0](https://docs.victoriametrics.com/CHANGELOG.html#v1790) release**
+
 **Update note 1:** this release introduces backwards-incompatible changes to `vm_partial_results_total` metric by changing its labels to be consistent with `vm_requests_total` metric. If you use alerting rules or Grafana dashboards, which rely on this metric, then they must be updated. The official dashboards for VictoriaMetrics don't use this metric.
 
 **Update note 2:** [vmalert](https://docs.victoriametrics.com/vmalert.html) adds `/vmalert/` prefix to [web urls](https://docs.victoriametrics.com/vmalert.html#web) according to [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2825). This may affect `vmalert` instances with non-empty `-http.pathPrefix` command-line flag. After the update, configuring this flag is no longer needed. Here's [why](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2799#issuecomment-1171392005).
diff --git a/docs/vmagent.md b/docs/vmagent.md
index f68fb37d46..449cf464c7 100644
--- a/docs/vmagent.md
+++ b/docs/vmagent.md
@@ -144,7 +144,25 @@ While `vmagent` can accept data in several supported protocols (OpenTSDB, Influx
 
 ## Multitenancy
 
-By default `vmagent` collects the data without tenant identifiers and routes it to the configured `-remoteWrite.url`. But it can accept multitenant data if `-remoteWrite.multitenantURL` is set. In this case it accepts multitenant data at `http://vmagent:8429/insert/<accountID>/...` in the same way as cluster version of VictoriaMetrics does according to [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format) and routes it to `<-remoteWrite.multitenantURL>/insert/<accountID>/prometheus/api/v1/write`. If multiple `-remoteWrite.multitenantURL` command-line options are set, then `vmagent` replicates the collected data across all the configured urls. This allows using a single `vmagent` instance in front of VictoriaMetrics clusters for processing the data from all the tenants.
+By default `vmagent` collects the data without tenant identifiers and routes it to the configured `-remoteWrite.url`.
+
+[Multitenancy](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy) support is enabled when `-remoteWrite.multitenantURL` command-line flag is set. In this case `vmagent` accepts multitenant data at `http://vmagent:8429/insert/<accountID>/...` in the same way as cluster version of VictoriaMetrics does according to [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format) and routes it to `<-remoteWrite.multitenantURL>/insert/<accountID>/prometheus/api/v1/write`. If multiple `-remoteWrite.multitenantURL` command-line options are set, then `vmagent` replicates the collected data across all the configured urls. This allows using a single `vmagent` instance in front of VictoriaMetrics clusters for processing the data from all the tenants.
+
+If the `-remoteWrite.multitenantURL` command-line flag is set and `vmagent` is configured to scrape Prometheus-compatible targets (e.g. if the `-promscrape.config` command-line flag is set),
+then `vmagent` reads the tenantID from the `__tenant_id__` label of the discovered targets and routes all the metrics from each such target to the given `__tenant_id__`, e.g. to the url `<-remoteWrite.multitenantURL>/insert/<__tenant_id__>/prometheus/api/v1/write`.
+
+For example, the following relabeling rule instructs `vmagent` to send metrics to the tenantID defined in the `prometheus.io/tenant` annotation of the Kubernetes pod:
+
+```yaml
+scrape_configs:
+- kubernetes_sd_configs:
+  - role: pod
+  relabel_configs:
+  - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_tenant]
+    target_label: __tenant_id__
+```
+
+If the target has no associated `__tenant_id__` label, then its metrics are routed to the zero tenantID, e.g. to `<-remoteWrite.multitenantURL>/insert/0/prometheus/api/v1/write`.
 
 ## How to collect metrics in Prometheus format
 
diff --git a/go.mod b/go.mod
index a75afdb011..6a1fe25c56 100644
--- a/go.mod
+++ b/go.mod
@@ -9,7 +9,7 @@ require (
 	// Do not use the original github.com/valyala/fasthttp because of issues
 	// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
 	github.com/VictoriaMetrics/fasthttp v1.1.0
-	github.com/VictoriaMetrics/metrics v1.21.0
+	github.com/VictoriaMetrics/metrics v1.22.1
 	github.com/VictoriaMetrics/metricsql v0.44.1
 	github.com/aws/aws-sdk-go v1.44.70
 	github.com/cespare/xxhash/v2 v2.1.2
diff --git a/go.sum b/go.sum
index 6a5ac3ac2f..9f27935ebc 100644
--- a/go.sum
+++ b/go.sum
@@ -109,8 +109,8 @@ github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJ
 github.com/VictoriaMetrics/fasthttp v1.1.0 h1:3crd4YWHsMwu60GUXRH6OstowiFvqrwS4a/ueoLdLL0=
 github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR2uydjiWvoLp5ZTqQ=
 github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
-github.com/VictoriaMetrics/metrics v1.21.0 h1:cjbToD4xrR+ZaDO49h2t67sdmmbCKfHfyTyAH3Sx+DM=
-github.com/VictoriaMetrics/metrics v1.21.0/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
+github.com/VictoriaMetrics/metrics v1.22.1 h1:ExNLLZ0HLI41imYDaWbeVXfMB2+0W4ovBSk3It+Y9+c=
+github.com/VictoriaMetrics/metrics v1.22.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
 github.com/VictoriaMetrics/metricsql v0.44.1 h1:qGoRt0g84uMUscVjS7P3uDZKmjJubWKaIx9v0iHKgck=
 github.com/VictoriaMetrics/metricsql v0.44.1/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
diff --git a/lib/auth/auth.go b/lib/auth/auth.go
index 27d1175d1a..6d27943e3b 100644
--- a/lib/auth/auth.go
+++ b/lib/auth/auth.go
@@ -8,8 +8,16 @@ import (
 
 // Token contains settings for request processing
 type Token struct {
-	ProjectID uint32
 	AccountID uint32
+	ProjectID uint32
+}
+
+// String returns string representation of t.
+func (t *Token) String() string {
+	if t.ProjectID == 0 {
+		return fmt.Sprintf("%d", t.AccountID)
+	}
+	return fmt.Sprintf("%d:%d", t.AccountID, t.ProjectID)
 }
 
 // NewToken returns new Token for the given authToken
diff --git a/lib/auth/auth_test.go b/lib/auth/auth_test.go
index edcaa3f2e2..aa2b4b4f92 100644
--- a/lib/auth/auth_test.go
+++ b/lib/auth/auth_test.go
@@ -1,7 +1,6 @@
 package auth
 
 import (
-	"fmt"
 	"testing"
 )
 
@@ -12,13 +11,13 @@ func TestNewTokenSuccess(t *testing.T) {
 		if err != nil {
 			t.Fatalf("unexpected error: %s", err)
 		}
-		got := fmt.Sprintf("%d:%d", newToken.AccountID, newToken.ProjectID)
+		got := newToken.String()
 		if got != want {
 			t.Fatalf("unexpected NewToken() result;got\n%s\nwant\n%s", got, want)
 		}
 	}
 	// token with accountID only
-	f("1", "1:0")
+	f("1", "1")
 	// token with accountID and projecTID
 	f("1:2", "1:2")
 	// max uint32 accountID
diff --git a/lib/promscrape/config.go b/lib/promscrape/config.go
index efedc29f3a..cbb5c654ac 100644
--- a/lib/promscrape/config.go
+++ b/lib/promscrape/config.go
@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"flag"
 	"fmt"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
 	"net/url"
 	"path/filepath"
 	"sort"
@@ -1227,6 +1228,17 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
 	if metricsPathRelabeled == "" {
 		metricsPathRelabeled = "/metrics"
 	}
+
+	var at *auth.Token
+	tenantID := promrelabel.GetLabelValueByName(labels, "__tenant_id__")
+	if tenantID != "" {
+		newToken, err := auth.NewToken(tenantID)
+		if err != nil {
+			return nil, fmt.Errorf("cannot parse __tenant_id__=%q for job=%s, err: %w", tenantID, swc.jobName, err)
+		}
+		at = newToken
+	}
+
 	if !strings.HasPrefix(metricsPathRelabeled, "/") {
 		metricsPathRelabeled = "/" + metricsPathRelabeled
 	}
@@ -1308,6 +1320,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
 		ScrapeAlignInterval:  swc.scrapeAlignInterval,
 		ScrapeOffset:         swc.scrapeOffset,
 		SeriesLimit:          seriesLimit,
+		AuthToken:            at,
 
 		jobNameOriginal: swc.jobName,
 	}
diff --git a/lib/promscrape/scraper.go b/lib/promscrape/scraper.go
index eb60d63a04..a4cdeffbc0 100644
--- a/lib/promscrape/scraper.go
+++ b/lib/promscrape/scraper.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"flag"
 	"fmt"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
 	"io"
 	"sync"
 	"sync/atomic"
@@ -53,7 +54,7 @@ func CheckConfig() error {
 // Init initializes Prometheus scraper with config from the `-promscrape.config`.
 //
 // Scraped data is passed to pushData.
-func Init(pushData func(wr *prompbmarshal.WriteRequest)) {
+func Init(pushData func(at *auth.Token, wr *prompbmarshal.WriteRequest)) {
 	mustInitClusterMemberID()
 	globalStopChan = make(chan struct{})
 	scraperWG.Add(1)
@@ -91,7 +92,7 @@ func WriteConfigData(w io.Writer) {
 	_, _ = w.Write(*b)
 }
 
-func runScraper(configFile string, pushData func(wr *prompbmarshal.WriteRequest), globalStopCh <-chan struct{}) {
+func runScraper(configFile string, pushData func(at *auth.Token, wr *prompbmarshal.WriteRequest), globalStopCh <-chan struct{}) {
 	if configFile == "" {
 		// Nothing to scrape.
 		return
@@ -185,14 +186,14 @@ func runScraper(configFile string, pushData func(wr *prompbmarshal.WriteRequest)
 var configReloads = metrics.NewCounter(`vm_promscrape_config_reloads_total`)
 
 type scrapeConfigs struct {
-	pushData     func(wr *prompbmarshal.WriteRequest)
+	pushData     func(at *auth.Token, wr *prompbmarshal.WriteRequest)
 	wg           sync.WaitGroup
 	stopCh       chan struct{}
 	globalStopCh <-chan struct{}
 	scfgs        []*scrapeConfig
 }
 
-func newScrapeConfigs(pushData func(wr *prompbmarshal.WriteRequest), globalStopCh <-chan struct{}) *scrapeConfigs {
+func newScrapeConfigs(pushData func(at *auth.Token, wr *prompbmarshal.WriteRequest), globalStopCh <-chan struct{}) *scrapeConfigs {
 	return &scrapeConfigs{
 		pushData:     pushData,
 		stopCh:       make(chan struct{}),
@@ -234,7 +235,7 @@ func (scs *scrapeConfigs) stop() {
 
 type scrapeConfig struct {
 	name          string
-	pushData      func(wr *prompbmarshal.WriteRequest)
+	pushData      func(at *auth.Token, wr *prompbmarshal.WriteRequest)
 	getScrapeWork func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork
 	checkInterval time.Duration
 	cfgCh         chan *Config
@@ -287,7 +288,7 @@ type scraperGroup struct {
 	wg       sync.WaitGroup
 	mLock    sync.Mutex
 	m        map[string]*scraper
-	pushData func(wr *prompbmarshal.WriteRequest)
+	pushData func(at *auth.Token, wr *prompbmarshal.WriteRequest)
 
 	changesCount    *metrics.Counter
 	activeScrapers  *metrics.Counter
@@ -297,7 +298,7 @@ type scraperGroup struct {
 	globalStopCh <-chan struct{}
 }
 
-func newScraperGroup(name string, pushData func(wr *prompbmarshal.WriteRequest), globalStopCh <-chan struct{}) *scraperGroup {
+func newScraperGroup(name string, pushData func(at *auth.Token, wr *prompbmarshal.WriteRequest), globalStopCh <-chan struct{}) *scraperGroup {
 	sg := &scraperGroup{
 		name:     name,
 		m:        make(map[string]*scraper),
@@ -413,7 +414,7 @@ type scraper struct {
 	stoppedCh chan struct{}
 }
 
-func newScraper(sw *ScrapeWork, group string, pushData func(wr *prompbmarshal.WriteRequest)) *scraper {
+func newScraper(sw *ScrapeWork, group string, pushData func(at *auth.Token, wr *prompbmarshal.WriteRequest)) *scraper {
 	sc := &scraper{
 		stopCh:    make(chan struct{}),
 		stoppedCh: make(chan struct{}),
diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go
index 529746f05e..eb411a30c6 100644
--- a/lib/promscrape/scrapework.go
+++ b/lib/promscrape/scrapework.go
@@ -3,6 +3,7 @@ package promscrape
 import (
 	"flag"
 	"fmt"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
 	"io/ioutil"
 	"math"
 	"math/bits"
@@ -119,6 +120,9 @@ type ScrapeWork struct {
 	// Optional limit on the number of unique series the scrape target can expose.
 	SeriesLimit int
 
+	// AuthToken contains optional tenant info, set from the target's __tenant_id__ label.
+	AuthToken *auth.Token
+
 	// The original 'job_name'
 	jobNameOriginal string
 }
@@ -188,7 +192,7 @@ type scrapeWork struct {
 	GetStreamReader func() (*streamReader, error)
 
 	// PushData is called for pushing collected data.
-	PushData func(wr *prompbmarshal.WriteRequest)
+	PushData func(at *auth.Token, wr *prompbmarshal.WriteRequest)
 
 	// ScrapeGroup is name of ScrapeGroup that
 	// scrapeWork belongs to
@@ -487,7 +491,7 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
 		// See https://github.com/VictoriaMetrics/operator/issues/497
 		sw.addAutoTimeseries(wc, "scrape_samples_limit", float64(sw.Config.SampleLimit), scrapeTimestamp)
 	}
-	sw.pushData(&wc.writeRequest)
+	sw.pushData(sw.Config.AuthToken, &wc.writeRequest)
 	sw.prevLabelsLen = len(wc.labels)
 	sw.prevBodyLen = len(bodyString)
 	wc.reset()
@@ -514,9 +518,9 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
 	return err
 }
 
-func (sw *scrapeWork) pushData(wr *prompbmarshal.WriteRequest) {
+func (sw *scrapeWork) pushData(at *auth.Token, wr *prompbmarshal.WriteRequest) {
 	startTime := time.Now()
-	sw.PushData(wr)
+	sw.PushData(at, wr)
 	pushDataDuration.UpdateDuration(startTime)
 }
 
@@ -568,7 +572,7 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
 				return fmt.Errorf("the response from %q exceeds sample_limit=%d; "+
 					"either reduce the sample count for the target or increase sample_limit", sw.Config.ScrapeURL, sw.Config.SampleLimit)
 			}
-			sw.pushData(&wc.writeRequest)
+			sw.pushData(sw.Config.AuthToken, &wc.writeRequest)
 			wc.resetNoRows()
 			return nil
 		}, sw.logError)
@@ -603,7 +607,7 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
 	sw.addAutoTimeseries(wc, "scrape_samples_post_metric_relabeling", float64(samplesPostRelabeling), scrapeTimestamp)
 	sw.addAutoTimeseries(wc, "scrape_series_added", float64(seriesAdded), scrapeTimestamp)
 	sw.addAutoTimeseries(wc, "scrape_timeout_seconds", sw.Config.ScrapeTimeout.Seconds(), scrapeTimestamp)
-	sw.pushData(&wc.writeRequest)
+	sw.pushData(sw.Config.AuthToken, &wc.writeRequest)
 	sw.prevLabelsLen = len(wc.labels)
 	sw.prevBodyLen = sbr.bodyLen
 	wc.reset()
@@ -770,7 +774,7 @@ func (sw *scrapeWork) sendStaleSeries(lastScrape, currScrape string, timestamp i
 		}
 		staleSamplesCreated.Add(len(samples))
 	}
-	sw.pushData(&wc.writeRequest)
+	sw.pushData(sw.Config.AuthToken, &wc.writeRequest)
 }
 
 var staleSamplesCreated = metrics.NewCounter(`vm_promscrape_stale_samples_created_total`)
diff --git a/lib/promscrape/scrapework_test.go b/lib/promscrape/scrapework_test.go
index 91784b611d..1e96449ea4 100644
--- a/lib/promscrape/scrapework_test.go
+++ b/lib/promscrape/scrapework_test.go
@@ -6,6 +6,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
@@ -62,7 +63,7 @@ func TestScrapeWorkScrapeInternalFailure(t *testing.T) {
 
 	pushDataCalls := 0
 	var pushDataErr error
-	sw.PushData = func(wr *prompbmarshal.WriteRequest) {
+	sw.PushData = func(at *auth.Token, wr *prompbmarshal.WriteRequest) {
 		if err := expectEqualTimeseries(wr.Timeseries, timeseriesExpected); err != nil {
 			pushDataErr = fmt.Errorf("unexpected data pushed: %w\ngot\n%#v\nwant\n%#v", err, wr.Timeseries, timeseriesExpected)
 		}
@@ -102,7 +103,7 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
 
 		pushDataCalls := 0
 		var pushDataErr error
-		sw.PushData = func(wr *prompbmarshal.WriteRequest) {
+		sw.PushData = func(at *auth.Token, wr *prompbmarshal.WriteRequest) {
 			pushDataCalls++
 			if len(wr.Timeseries) > len(timeseriesExpected) {
 				pushDataErr = fmt.Errorf("too many time series obtained; got %d; want %d\ngot\n%+v\nwant\n%+v",
diff --git a/lib/promscrape/scrapework_timing_test.go b/lib/promscrape/scrapework_timing_test.go
index 0720d698eb..91e8fc9648 100644
--- a/lib/promscrape/scrapework_timing_test.go
+++ b/lib/promscrape/scrapework_timing_test.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"testing"
 
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 )
 
@@ -39,7 +40,7 @@ vm_tcplistener_write_calls_total{name="https", addr=":443"} 132356
 		var sw scrapeWork
 		sw.Config = &ScrapeWork{}
 		sw.ReadData = readDataFunc
-		sw.PushData = func(wr *prompbmarshal.WriteRequest) {}
+		sw.PushData = func(at *auth.Token, wr *prompbmarshal.WriteRequest) {}
 		timestamp := int64(0)
 		for pb.Next() {
 			if err := sw.scrapeInternal(timestamp, timestamp); err != nil {
diff --git a/vendor/github.com/VictoriaMetrics/metrics/counter.go b/vendor/github.com/VictoriaMetrics/metrics/counter.go
index a7d9549235..dfe947794a 100644
--- a/vendor/github.com/VictoriaMetrics/metrics/counter.go
+++ b/vendor/github.com/VictoriaMetrics/metrics/counter.go
@@ -11,9 +11,9 @@ import (
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned counter is safe to use from concurrent goroutines.
 func NewCounter(name string) *Counter {
@@ -65,9 +65,9 @@ func (c *Counter) marshalTo(prefix string, w io.Writer) {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned counter is safe to use from concurrent goroutines.
 //
diff --git a/vendor/github.com/VictoriaMetrics/metrics/floatcounter.go b/vendor/github.com/VictoriaMetrics/metrics/floatcounter.go
index d01dd851eb..f898790995 100644
--- a/vendor/github.com/VictoriaMetrics/metrics/floatcounter.go
+++ b/vendor/github.com/VictoriaMetrics/metrics/floatcounter.go
@@ -11,9 +11,9 @@ import (
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned counter is safe to use from concurrent goroutines.
 func NewFloatCounter(name string) *FloatCounter {
@@ -70,9 +70,9 @@ func (fc *FloatCounter) marshalTo(prefix string, w io.Writer) {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned FloatCounter is safe to use from concurrent goroutines.
 //
diff --git a/vendor/github.com/VictoriaMetrics/metrics/gauge.go b/vendor/github.com/VictoriaMetrics/metrics/gauge.go
index 05bf1473ff..9084fc4d7d 100644
--- a/vendor/github.com/VictoriaMetrics/metrics/gauge.go
+++ b/vendor/github.com/VictoriaMetrics/metrics/gauge.go
@@ -11,9 +11,9 @@ import (
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // f must be safe for concurrent calls.
 //
@@ -53,9 +53,9 @@ func (g *Gauge) marshalTo(prefix string, w io.Writer) {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned gauge is safe to use from concurrent goroutines.
 //
diff --git a/vendor/github.com/VictoriaMetrics/metrics/histogram.go b/vendor/github.com/VictoriaMetrics/metrics/histogram.go
index b0e8d575fb..a576681778 100644
--- a/vendor/github.com/VictoriaMetrics/metrics/histogram.go
+++ b/vendor/github.com/VictoriaMetrics/metrics/histogram.go
@@ -25,20 +25,20 @@ var bucketMultiplier = math.Pow(10, 1.0/bucketsPerDecimal)
 // Each bucket contains a counter for values in the given range.
 // Each non-empty bucket is exposed via the following metric:
 //
-//     <metric_name>_bucket{<optional_tags>,vmrange="<start>...<end>"} <counter>
+//	<metric_name>_bucket{<optional_tags>,vmrange="<start>...<end>"} <counter>
 //
 // Where:
 //
-//     - <metric_name> is the metric name passed to NewHistogram
-//     - <optional_tags> is optional tags for the <metric_name>, which are passed to NewHistogram
-//     - <start> and <end> - start and end values for the given bucket
-//     - <counter> - the number of hits to the given bucket during Update* calls
+//   - <metric_name> is the metric name passed to NewHistogram
+//   - <optional_tags> is optional tags for the <metric_name>, which are passed to NewHistogram
+//   - <start> and <end> - start and end values for the given bucket
+//   - <counter> - the number of hits to the given bucket during Update* calls
 //
 // Histogram buckets can be converted to Prometheus-like buckets with `le` labels
 // with `prometheus_buckets(<metric_name>_bucket)` function from PromQL extensions in VictoriaMetrics.
 // (see https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/MetricsQL ):
 //
-//     prometheus_buckets(request_duration_bucket)
+//	prometheus_buckets(request_duration_bucket)
 //
 // Time series produced by the Histogram have better compression ratio comparing to
 // Prometheus histogram buckets with `le` labels, since they don't include counters
@@ -143,9 +143,9 @@ func (h *Histogram) VisitNonZeroBuckets(f func(vmrange string, count uint64)) {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned histogram is safe to use from concurrent goroutines.
 func NewHistogram(name string) *Histogram {
@@ -159,9 +159,9 @@ func NewHistogram(name string) *Histogram {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned histogram is safe to use from concurrent goroutines.
 //
diff --git a/vendor/github.com/VictoriaMetrics/metrics/metrics.go b/vendor/github.com/VictoriaMetrics/metrics/metrics.go
index 087026d074..532aa02c61 100644
--- a/vendor/github.com/VictoriaMetrics/metrics/metrics.go
+++ b/vendor/github.com/VictoriaMetrics/metrics/metrics.go
@@ -5,15 +5,18 @@
 //
 // Usage:
 //
-//     1. Register the required metrics via New* functions.
-//     2. Expose them to `/metrics` page via WritePrometheus.
-//     3. Update the registered metrics during application lifetime.
+//  1. Register the required metrics via New* functions.
+//  2. Expose them to `/metrics` page via WritePrometheus.
+//  3. Update the registered metrics during application lifetime.
 //
 // The package has been extracted from https://victoriametrics.com/
 package metrics
 
 import (
 	"io"
+	"sort"
+	"sync"
+	"unsafe"
 )
 
 type namedMetric struct {
@@ -27,19 +30,57 @@ type metric interface {
 
 var defaultSet = NewSet()
 
-// WritePrometheus writes all the registered metrics in Prometheus format to w.
+func init() {
+	RegisterSet(defaultSet)
+}
+
+var (
+	registeredSets     = make(map[*Set]struct{})
+	registeredSetsLock sync.Mutex
+)
+
+// RegisterSet registers the given set s for metrics export via global WritePrometheus() call.
+//
+// See also UnregisterSet.
+func RegisterSet(s *Set) {
+	registeredSetsLock.Lock()
+	registeredSets[s] = struct{}{}
+	registeredSetsLock.Unlock()
+}
+
+// UnregisterSet stops exporting metrics for the given s via global WritePrometheus() call.
+func UnregisterSet(s *Set) {
+	registeredSetsLock.Lock()
+	delete(registeredSets, s)
+	registeredSetsLock.Unlock()
+}
+
+// WritePrometheus writes all the metrics from default set and all the registered sets in Prometheus format to w.
+//
+// Additional sets can be registered via RegisterSet() call.
 //
 // If exposeProcessMetrics is true, then various `go_*` and `process_*` metrics
 // are exposed for the current process.
 //
 // The WritePrometheus func is usually called inside "/metrics" handler:
 //
-//     http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
-//         metrics.WritePrometheus(w, true)
-//     })
-//
+//	http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
+//	    metrics.WritePrometheus(w, true)
+//	})
 func WritePrometheus(w io.Writer, exposeProcessMetrics bool) {
-	defaultSet.WritePrometheus(w)
+	registeredSetsLock.Lock()
+	sets := make([]*Set, 0, len(registeredSets))
+	for s := range registeredSets {
+		sets = append(sets, s)
+	}
+	registeredSetsLock.Unlock()
+
+	sort.Slice(sets, func(i, j int) bool {
+		return uintptr(unsafe.Pointer(sets[i])) < uintptr(unsafe.Pointer(sets[j]))
+	})
+	for _, s := range sets {
+		s.WritePrometheus(w)
+	}
 	if exposeProcessMetrics {
 		WriteProcessMetrics(w)
 	}
@@ -50,50 +91,81 @@ func WritePrometheus(w io.Writer, exposeProcessMetrics bool) {
 // The following `go_*` and `process_*` metrics are exposed for the currently
 // running process. Below is a short description for the exposed `process_*` metrics:
 //
-//     - process_cpu_seconds_system_total - CPU time spent in syscalls
-//     - process_cpu_seconds_user_total - CPU time spent in userspace
-//     - process_cpu_seconds_total - CPU time spent by the process
-//     - process_major_pagefaults_total - page faults resulted in disk IO
-//     - process_minor_pagefaults_total - page faults resolved without disk IO
-//     - process_resident_memory_bytes - recently accessed memory (aka RSS or resident memory)
-//     - process_resident_memory_peak_bytes - the maximum RSS memory usage
-//     - process_resident_memory_anon_bytes - RSS for memory-mapped files
-//     - process_resident_memory_file_bytes - RSS for memory allocated by the process
-//     - process_resident_memory_shared_bytes - RSS for memory shared between multiple processes
-//     - process_virtual_memory_bytes - virtual memory usage
-//     - process_virtual_memory_peak_bytes - the maximum virtual memory usage
-//     - process_num_threads - the number of threads
-//     - process_start_time_seconds - process start time as unix timestamp
+//   - process_cpu_seconds_system_total - CPU time spent in syscalls
 //
-//     - process_io_read_bytes_total - the number of bytes read via syscalls
-//     - process_io_written_bytes_total - the number of bytes written via syscalls
-//     - process_io_read_syscalls_total - the number of read syscalls
-//     - process_io_write_syscalls_total - the number of write syscalls
-//     - process_io_storage_read_bytes_total - the number of bytes actually read from disk
-//     - process_io_storage_written_bytes_total - the number of bytes actually written to disk
+//   - process_cpu_seconds_user_total - CPU time spent in userspace
 //
-//     - go_memstats_alloc_bytes - memory usage for Go objects in the heap
-//     - go_memstats_alloc_bytes_total - the cumulative counter for total size of allocated Go objects
-//     - go_memstats_frees_total - the cumulative counter for number of freed Go objects
-//     - go_memstats_gc_cpu_fraction - the fraction of CPU spent in Go garbage collector
-//     - go_memstats_gc_sys_bytes - the size of Go garbage collector metadata
-//     - go_memstats_heap_alloc_bytes - the same as go_memstats_alloc_bytes
-//     - go_memstats_heap_idle_bytes - idle memory ready for new Go object allocations
-//     - go_memstats_heap_objects - the number of Go objects in the heap
-//     - go_memstats_heap_sys_bytes - memory requested for Go objects from the OS
-//     - go_memstats_mallocs_total - the number of allocations for Go objects
-//     - go_memstats_next_gc_bytes - the target heap size when the next garbage collection should start
-//     - go_memstats_stack_inuse_bytes - memory used for goroutine stacks
-//     - go_memstats_stack_sys_bytes - memory requested fromthe OS for goroutine stacks
-//     - go_memstats_sys_bytes - memory requested by Go runtime from the OS
+//   - process_cpu_seconds_total - CPU time spent by the process
+//
+//   - process_major_pagefaults_total - page faults resulted in disk IO
+//
+//   - process_minor_pagefaults_total - page faults resolved without disk IO
+//
+//   - process_resident_memory_bytes - recently accessed memory (aka RSS or resident memory)
+//
+//   - process_resident_memory_peak_bytes - the maximum RSS memory usage
+//
+//   - process_resident_memory_anon_bytes - RSS for memory-mapped files
+//
+//   - process_resident_memory_file_bytes - RSS for memory allocated by the process
+//
+//   - process_resident_memory_shared_bytes - RSS for memory shared between multiple processes
+//
+//   - process_virtual_memory_bytes - virtual memory usage
+//
+//   - process_virtual_memory_peak_bytes - the maximum virtual memory usage
+//
+//   - process_num_threads - the number of threads
+//
+//   - process_start_time_seconds - process start time as unix timestamp
+//
+//   - process_io_read_bytes_total - the number of bytes read via syscalls
+//
+//   - process_io_written_bytes_total - the number of bytes written via syscalls
+//
+//   - process_io_read_syscalls_total - the number of read syscalls
+//
+//   - process_io_write_syscalls_total - the number of write syscalls
+//
+//   - process_io_storage_read_bytes_total - the number of bytes actually read from disk
+//
+//   - process_io_storage_written_bytes_total - the number of bytes actually written to disk
+//
+//   - go_memstats_alloc_bytes - memory usage for Go objects in the heap
+//
+//   - go_memstats_alloc_bytes_total - the cumulative counter for total size of allocated Go objects
+//
+//   - go_memstats_frees_total - the cumulative counter for number of freed Go objects
+//
+//   - go_memstats_gc_cpu_fraction - the fraction of CPU spent in Go garbage collector
+//
+//   - go_memstats_gc_sys_bytes - the size of Go garbage collector metadata
+//
+//   - go_memstats_heap_alloc_bytes - the same as go_memstats_alloc_bytes
+//
+//   - go_memstats_heap_idle_bytes - idle memory ready for new Go object allocations
+//
+//   - go_memstats_heap_objects - the number of Go objects in the heap
+//
+//   - go_memstats_heap_sys_bytes - memory requested for Go objects from the OS
+//
+//   - go_memstats_mallocs_total - the number of allocations for Go objects
+//
+//   - go_memstats_next_gc_bytes - the target heap size when the next garbage collection should start
+//
+//   - go_memstats_stack_inuse_bytes - memory used for goroutine stacks
+//
+//   - go_memstats_stack_sys_bytes - memory requested fromthe OS for goroutine stacks
+//
+//   - go_memstats_sys_bytes - memory requested by Go runtime from the OS
 //
 // The WriteProcessMetrics func is usually called in combination with writing Set metrics
 // inside "/metrics" handler:
 //
-//     http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
-//         mySet.WritePrometheus(w)
-//         metrics.WriteProcessMetrics(w)
-//     })
+//	http.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
+//	    mySet.WritePrometheus(w)
+//	    metrics.WriteProcessMetrics(w)
+//	})
 //
 // See also WrteFDMetrics.
 func WriteProcessMetrics(w io.Writer) {
diff --git a/vendor/github.com/VictoriaMetrics/metrics/push.go b/vendor/github.com/VictoriaMetrics/metrics/push.go
index 5dd2033868..4215f48ab6 100644
--- a/vendor/github.com/VictoriaMetrics/metrics/push.go
+++ b/vendor/github.com/VictoriaMetrics/metrics/push.go
@@ -60,7 +60,7 @@ func InitPush(pushURL string, interval time.Duration, extraLabels string, pushPr
 // extraLabels may contain comma-separated list of `label="value"` labels, which will be added
 // to all the metrics before pushing them to pushURL.
 //
-/// The metrics are pushed to pushURL in Prometheus text exposition format.
+// / The metrics are pushed to pushURL in Prometheus text exposition format.
 // See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
 //
 // It is recommended pushing metrics to /api/v1/import/prometheus endpoint according to
diff --git a/vendor/github.com/VictoriaMetrics/metrics/set.go b/vendor/github.com/VictoriaMetrics/metrics/set.go
index ae55bb71c6..f98ff6a684 100644
--- a/vendor/github.com/VictoriaMetrics/metrics/set.go
+++ b/vendor/github.com/VictoriaMetrics/metrics/set.go
@@ -22,6 +22,8 @@ type Set struct {
 }
 
 // NewSet creates new set of metrics.
+//
+// Pass the set to RegisterSet() function in order to export its metrics via global WritePrometheus() call.
 func NewSet() *Set {
 	return &Set{
 		m: make(map[string]*namedMetric),
@@ -58,9 +60,9 @@ func (s *Set) WritePrometheus(w io.Writer) {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned histogram is safe to use from concurrent goroutines.
 func (s *Set) NewHistogram(name string) *Histogram {
@@ -75,9 +77,9 @@ func (s *Set) NewHistogram(name string) *Histogram {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned histogram is safe to use from concurrent goroutines.
 //
@@ -116,9 +118,9 @@ func (s *Set) GetOrCreateHistogram(name string) *Histogram {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned counter is safe to use from concurrent goroutines.
 func (s *Set) NewCounter(name string) *Counter {
@@ -133,9 +135,9 @@ func (s *Set) NewCounter(name string) *Counter {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned counter is safe to use from concurrent goroutines.
 //
@@ -174,9 +176,9 @@ func (s *Set) GetOrCreateCounter(name string) *Counter {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned FloatCounter is safe to use from concurrent goroutines.
 func (s *Set) NewFloatCounter(name string) *FloatCounter {
@@ -191,9 +193,9 @@ func (s *Set) NewFloatCounter(name string) *FloatCounter {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned FloatCounter is safe to use from concurrent goroutines.
 //
@@ -233,9 +235,9 @@ func (s *Set) GetOrCreateFloatCounter(name string) *FloatCounter {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // f must be safe for concurrent calls.
 //
@@ -257,9 +259,9 @@ func (s *Set) NewGauge(name string, f func() float64) *Gauge {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned gauge is safe to use from concurrent goroutines.
 //
@@ -303,9 +305,9 @@ func (s *Set) GetOrCreateGauge(name string, f func() float64) *Gauge {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned summary is safe to use from concurrent goroutines.
 func (s *Set) NewSummary(name string) *Summary {
@@ -318,9 +320,9 @@ func (s *Set) NewSummary(name string) *Summary {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned summary is safe to use from concurrent goroutines.
 func (s *Set) NewSummaryExt(name string, window time.Duration, quantiles []float64) *Summary {
@@ -347,9 +349,9 @@ func (s *Set) NewSummaryExt(name string, window time.Duration, quantiles []float
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned summary is safe to use from concurrent goroutines.
 //
@@ -365,9 +367,9 @@ func (s *Set) GetOrCreateSummary(name string) *Summary {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned summary is safe to use from concurrent goroutines.
 //
diff --git a/vendor/github.com/VictoriaMetrics/metrics/summary.go b/vendor/github.com/VictoriaMetrics/metrics/summary.go
index 0f01e9ae12..52183d22bb 100644
--- a/vendor/github.com/VictoriaMetrics/metrics/summary.go
+++ b/vendor/github.com/VictoriaMetrics/metrics/summary.go
@@ -36,9 +36,9 @@ type Summary struct {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned summary is safe to use from concurrent goroutines.
 func NewSummary(name string) *Summary {
@@ -51,9 +51,9 @@ func NewSummary(name string) *Summary {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned summary is safe to use from concurrent goroutines.
 func NewSummaryExt(name string, window time.Duration, quantiles []float64) *Summary {
@@ -140,9 +140,9 @@ func (sm *Summary) updateQuantiles() {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned summary is safe to use from concurrent goroutines.
 //
@@ -158,9 +158,9 @@ func GetOrCreateSummary(name string) *Summary {
 // name must be valid Prometheus-compatible metric with possible labels.
 // For instance,
 //
-//     * foo
-//     * foo{bar="baz"}
-//     * foo{bar="baz",aaa="b"}
+//   - foo
+//   - foo{bar="baz"}
+//   - foo{bar="baz",aaa="b"}
 //
 // The returned summary is safe to use from concurrent goroutines.
 //
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 9a1717798b..e151dbcb5b 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -25,7 +25,7 @@ github.com/VictoriaMetrics/fastcache
 github.com/VictoriaMetrics/fasthttp
 github.com/VictoriaMetrics/fasthttp/fasthttputil
 github.com/VictoriaMetrics/fasthttp/stackless
-# github.com/VictoriaMetrics/metrics v1.21.0
+# github.com/VictoriaMetrics/metrics v1.22.1
 ## explicit; go 1.12
 github.com/VictoriaMetrics/metrics
 # github.com/VictoriaMetrics/metricsql v0.44.1