From 55e98e265efb3996b2bd8bcb0aadb2d094a20c0b Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin <valyala@gmail.com>
Date: Sat, 7 Nov 2020 01:14:34 +0200
Subject: [PATCH 1/9] docs/CHANGELOG.md: add `CHANGELOG` header

---
 CHANGELOG.md      | 2 ++
 docs/CHANGELOG.md | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d445028465..71967a61a7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,5 @@
+# CHANGELOG
+
 # tip
 
 * FEATURE: optimize requests to `/api/v1/labels` and `/api/v1/label/<name>/values` when `start` and `end` args are set.
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index d445028465..71967a61a7 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -1,3 +1,5 @@
+# CHANGELOG
+
 # tip
 
 * FEATURE: optimize requests to `/api/v1/labels` and `/api/v1/label/<name>/values` when `start` and `end` args are set.

From 188325f0fc7f7a943ee4b4ed0430c517436837f6 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin <valyala@gmail.com>
Date: Sat, 7 Nov 2020 12:44:15 +0200
Subject: [PATCH 2/9] lib/promscrape: eliminate data race in `stream parse`
 mode

Previously `-promscrape.streamParse` mode could result in garbage labels for the scraped metrics because of a data race.
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825#issuecomment-723198247
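
A minimal standalone sketch of the hazard (the parser and names below are hypothetical, not the actual lib/protoparser API): a streaming parser reuses its row buffer between callback invocations, so anything derived from the rows must be pushed before the callback returns; in the real, concurrent case a retained reference turns into a data race and garbage labels.

```go
package main

import "fmt"

// streamParse is a hypothetical stand-in for a streaming parser that reuses
// the same backing slice for every block of rows it reports to the callback.
func streamParse(blocks [][]string, callback func(rows []string) error) error {
	buf := make([]string, 0, 4) // reused between callback invocations
	for _, b := range blocks {
		buf = append(buf[:0], b...)
		if err := callback(buf); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	var retained []string // simulates holding rows past the callback return

	_ = streamParse([][]string{{"metric_a 1"}, {"metric_b 2"}}, func(rows []string) error {
		// Correct: push the rows before returning from the callback.
		fmt.Println("pushed:", rows)

		// Incorrect: keeping a reference; the parser overwrites the backing
		// array on the next block. With concurrent parsing this becomes a
		// data race that yields garbage labels.
		retained = rows
		return nil
	})

	fmt.Println("retained view now shows:", retained) // [metric_b 2], not [metric_a 1]
}
```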
---
 CHANGELOG.md                 |  2 ++
 lib/promscrape/scrapework.go | 22 +++++++++-------------
 lib/uint64set/uint64set.go   |  6 +++++-
 3 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 71967a61a7..a07ab45dbd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,8 @@
 * FEATURE: vmagent: add `/ready` HTTP endpoint, which returns 200 OK status code when all the service discovery has been initialized.
   This may be useful during rolling upgrades. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/875
 
+* BUGFIX: vmagent: eliminate data race when the `-promscrape.streamParse` command-line flag is set. Previously this mode could result in scraped metrics with garbage labels.
+  See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825#issuecomment-723198247 for details.
 * BUGFIX: properly calculate `topk_*` and `bottomk_*` functions from [MetricsQL](https://victoriametrics.github.io/MetricsQL.html) for time series with gaps.
   See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/883
 
diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go
index 160c3b7f3f..30b0dea597 100644
--- a/lib/promscrape/scrapework.go
+++ b/lib/promscrape/scrapework.go
@@ -325,18 +325,16 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
 		samplesScraped += len(rows)
 		for i := range rows {
 			sw.addRowToTimeseries(wc, &rows[i], scrapeTimestamp, true)
-			if len(wc.labels) > 40000 {
-				// Limit the maximum size of wc.writeRequest.
-				// This should reduce memory usage when scraping targets with millions of metrics and/or labels.
-				// For example, when scraping /federate handler from Prometheus - see https://prometheus.io/docs/prometheus/latest/federation/
-				samplesPostRelabeling += len(wc.writeRequest.Timeseries)
-				sw.updateSeriesAdded(wc)
-				startTime := time.Now()
-				sw.PushData(&wc.writeRequest)
-				pushDataDuration.UpdateDuration(startTime)
-				wc.resetNoRows()
-			}
 		}
+		// Push the collected rows to sw before returning from the callback, since the rows cannot be held
+		// after the callback returns - that would result in a data race.
+		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825#issuecomment-723198247
+		samplesPostRelabeling += len(wc.writeRequest.Timeseries)
+		sw.updateSeriesAdded(wc)
+		startTime := time.Now()
+		sw.PushData(&wc.writeRequest)
+		pushDataDuration.UpdateDuration(startTime)
+		wc.resetNoRows()
 		return nil
 	})
 	scrapedSamples.Update(float64(samplesScraped))
@@ -352,8 +350,6 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
 		}
 		scrapesFailed.Inc()
 	}
-	samplesPostRelabeling += len(wc.writeRequest.Timeseries)
-	sw.updateSeriesAdded(wc)
 	seriesAdded := sw.finalizeSeriesAdded(samplesPostRelabeling)
 	sw.addAutoTimeseries(wc, "up", float64(up), scrapeTimestamp)
 	sw.addAutoTimeseries(wc, "scrape_duration_seconds", duration, scrapeTimestamp)
diff --git a/lib/uint64set/uint64set.go b/lib/uint64set/uint64set.go
index ecd7b2efa0..fdb38eeeda 100644
--- a/lib/uint64set/uint64set.go
+++ b/lib/uint64set/uint64set.go
@@ -927,9 +927,13 @@ func (b *bucket16) delFromSmallPool(x uint16) bool {
 func (b *bucket16) appendTo(dst []uint64, hi uint32, hi16 uint16) []uint64 {
 	hi64 := uint64(hi)<<32 | uint64(hi16)<<16
 	if b.bits == nil {
+		// Sort a copy of b.smallPool, since b must be readonly in order to prevent data races
+		// when b.appendTo is called from concurrent goroutines.
+		smallPool := b.smallPool
+
 		// Use uint16Sorter instead of sort.Slice here in order to reduce memory allocations.
 		a := uint16SorterPool.Get().(*uint16Sorter)
-		*a = uint16Sorter(b.smallPool[:b.smallPoolLen])
+		*a = uint16Sorter(smallPool[:b.smallPoolLen])
 		if len(*a) > 1 && !sort.IsSorted(a) {
 			sort.Sort(a)
 		}

From 5407eed2f642ecde14f052cdf0f70ccfac6542ba Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin <valyala@gmail.com>
Date: Sat, 7 Nov 2020 13:02:12 +0200
Subject: [PATCH 3/9] lib/promscrape/discovery/kubernetes: reduce memory usage
 for labels when discovering a big number of scrape targets by using string
 concatenation instead of fmt.Sprintf

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825
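
As a rough illustration of why this helps (the helper names and benchmark below are illustrative, not part of the discovery code): `fmt.Sprintf` parses the format string and boxes its arguments into `interface{}` values on every call, while plain concatenation allocates only the resulting string.

```go
package kubernetes_test

import (
	"fmt"
	"testing"
)

// keySprintf and keyConcat build the same label key in the two styles.
func keySprintf(prefix, name string) string { return fmt.Sprintf("%s_label_%s", prefix, name) }

func keyConcat(prefix, name string) string { return prefix + "_label_" + name }

var sink string // prevents the compiler from optimizing the calls away

func BenchmarkKeySprintf(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		sink = keySprintf("__meta_kubernetes_pod", "app")
	}
}

func BenchmarkKeyConcat(b *testing.B) {
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		sink = keyConcat("__meta_kubernetes_pod", "app")
	}
}
```

Running this with `go test -bench=. -benchmem` should show fewer allocations per op for the concatenation variant; the difference adds up when hundreds of thousands of discovered targets each register many labels.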
---
 lib/promscrape/discovery/kubernetes/common_types.go | 9 ++++-----
 lib/promscrape/discovery/kubernetes/node.go         | 2 +-
 2 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/lib/promscrape/discovery/kubernetes/common_types.go b/lib/promscrape/discovery/kubernetes/common_types.go
index 039fce6a94..12de389628 100644
--- a/lib/promscrape/discovery/kubernetes/common_types.go
+++ b/lib/promscrape/discovery/kubernetes/common_types.go
@@ -1,7 +1,6 @@
 package kubernetes
 
 import (
-	"fmt"
 	"net/url"
 	"strings"
 
@@ -23,13 +22,13 @@ type ObjectMeta struct {
 func (om *ObjectMeta) registerLabelsAndAnnotations(prefix string, m map[string]string) {
 	for _, lb := range om.Labels {
 		ln := discoveryutils.SanitizeLabelName(lb.Name)
-		m[fmt.Sprintf("%s_label_%s", prefix, ln)] = lb.Value
-		m[fmt.Sprintf("%s_labelpresent_%s", prefix, ln)] = "true"
+		m[prefix + "_label_" + ln] = lb.Value
+		m[prefix + "_labelpresent_" + ln] = "true"
 	}
 	for _, a := range om.Annotations {
 		an := discoveryutils.SanitizeLabelName(a.Name)
-		m[fmt.Sprintf("%s_annotation_%s", prefix, an)] = a.Value
-		m[fmt.Sprintf("%s_annotationpresent_%s", prefix, an)] = "true"
+		m[prefix + "_annotation_" + an] = a.Value
+		m[prefix + "_annotationpresent_" + an] = "true"
 	}
 }
 
diff --git a/lib/promscrape/discovery/kubernetes/node.go b/lib/promscrape/discovery/kubernetes/node.go
index 411b259786..02ba6b2564 100644
--- a/lib/promscrape/discovery/kubernetes/node.go
+++ b/lib/promscrape/discovery/kubernetes/node.go
@@ -95,7 +95,7 @@ func (n *Node) appendTargetLabels(ms []map[string]string) []map[string]string {
 		}
 		addrTypesUsed[a.Type] = true
 		ln := discoveryutils.SanitizeLabelName(a.Type)
-		m[fmt.Sprintf("__meta_kubernetes_node_address_%s", ln)] = a.Address
+		m["__meta_kubernetes_node_address_" + ln] = a.Address
 	}
 	ms = append(ms, m)
 	return ms

From 9e83335ca930cd0573157f8c0ff182fc7d6cfb90 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin <valyala@gmail.com>
Date: Sat, 7 Nov 2020 13:03:44 +0200
Subject: [PATCH 4/9] lib/promscrape/discovery/kubernetes: `go fmt`

---
 lib/promscrape/discovery/kubernetes/common_types.go | 8 ++++----
 lib/promscrape/discovery/kubernetes/node.go         | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/lib/promscrape/discovery/kubernetes/common_types.go b/lib/promscrape/discovery/kubernetes/common_types.go
index 12de389628..d1bc212034 100644
--- a/lib/promscrape/discovery/kubernetes/common_types.go
+++ b/lib/promscrape/discovery/kubernetes/common_types.go
@@ -22,13 +22,13 @@ type ObjectMeta struct {
 func (om *ObjectMeta) registerLabelsAndAnnotations(prefix string, m map[string]string) {
 	for _, lb := range om.Labels {
 		ln := discoveryutils.SanitizeLabelName(lb.Name)
-		m[prefix + "_label_" + ln] = lb.Value
-		m[prefix + "_labelpresent_" + ln] = "true"
+		m[prefix+"_label_"+ln] = lb.Value
+		m[prefix+"_labelpresent_"+ln] = "true"
 	}
 	for _, a := range om.Annotations {
 		an := discoveryutils.SanitizeLabelName(a.Name)
-		m[prefix + "_annotation_" + an] = a.Value
-		m[prefix + "_annotationpresent_" + an] = "true"
+		m[prefix+"_annotation_"+an] = a.Value
+		m[prefix+"_annotationpresent_"+an] = "true"
 	}
 }
 
diff --git a/lib/promscrape/discovery/kubernetes/node.go b/lib/promscrape/discovery/kubernetes/node.go
index 02ba6b2564..9a584c67e3 100644
--- a/lib/promscrape/discovery/kubernetes/node.go
+++ b/lib/promscrape/discovery/kubernetes/node.go
@@ -95,7 +95,7 @@ func (n *Node) appendTargetLabels(ms []map[string]string) []map[string]string {
 		}
 		addrTypesUsed[a.Type] = true
 		ln := discoveryutils.SanitizeLabelName(a.Type)
-		m["__meta_kubernetes_node_address_" + ln] = a.Address
+		m["__meta_kubernetes_node_address_"+ln] = a.Address
 	}
 	ms = append(ms, m)
 	return ms

From 83df20b5b5c409cd85c95f0a35167ffa58131b50 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin <valyala@gmail.com>
Date: Sat, 7 Nov 2020 16:16:56 +0200
Subject: [PATCH 5/9] lib/promscrape: clean references to label name and label
 value strings after applying per-target relabeling

This should reduce memory usage when per-target relabeling creates a big number of temporary labels
with long names and/or values.

See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825
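
The mechanism in isolation (the `Label` type and `cleanLabels` helper below only mirror the shape of `prompbmarshal.Label` and the new `promrelabel.CleanLabels`): relabeling shrinks the live part of a reused labels slice, but the dropped entries between `len` and `cap` still pin their `Name`/`Value` strings through the backing array until they are explicitly zeroed.

```go
package main

import "fmt"

// Label mirrors the shape of prompbmarshal.Label for illustration only.
type Label struct {
	Name  string
	Value string
}

// cleanLabels zeroes Name and Value so the garbage collector can reclaim the
// underlying strings even though the slice's backing array is kept for reuse.
func cleanLabels(labels []Label) {
	for i := range labels {
		labels[i].Name = ""
		labels[i].Value = ""
	}
}

func main() {
	labels := make([]Label, 0, 4)
	labels = append(labels,
		Label{Name: "job", Value: "node-exporter"},
		Label{Name: "__meta_kubernetes_pod_label_app", Value: "some long temporary value"},
		Label{Name: "__meta_kubernetes_pod_annotation_x", Value: "another long temporary value"},
	)

	// Relabeling keeps only the first label; re-slicing drops the rest,
	// but their strings are still referenced by the backing array.
	labels = labels[:1]

	// Zero the unused tail between len and cap so GC can collect those strings.
	cleanLabels(labels[len(labels):cap(labels)])

	fmt.Printf("kept: %+v, len=%d cap=%d\n", labels, len(labels), cap(labels))
}
```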
---
 CHANGELOG.md                             |  2 ++
 app/vmagent/common/push_ctx.go           |  8 ++------
 app/vmagent/influx/request_handler.go    |  9 +++------
 app/vmagent/remotewrite/pendingseries.go |  7 ++-----
 app/vmagent/remotewrite/relabel.go       |  7 +------
 app/vminsert/relabel/relabel.go          |  7 +------
 lib/promrelabel/relabel.go               | 11 +++++++++++
 lib/promscrape/config.go                 |  5 +++++
 8 files changed, 27 insertions(+), 29 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index a07ab45dbd..61c7500371 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,8 @@
 * FEATURE: vmagent: reduce memory usage when `kubernetes_sd_config` discovers big number of scrape targets (e.g. hundreds of thouthands) and the majority of these targets (99%)
   are dropped during relabeling. Previously labels for all the dropped targets were displayed at `/api/v1/targets` page. Now only up to `-promscrape.maxDroppedTargets` such
   targets are displayed. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/878 for details.
+* FEATURE: vmagent: reduce memory usage when scraping a big number of targets with a big number of temporary labels starting with `__`.
+  See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825
 * FEATURE: vmagent: add `/ready` HTTP endpoint, which returns 200 OK status code when all the service discovery has been initialized.
   This may be useful during rolling upgrades. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/875
 
diff --git a/app/vmagent/common/push_ctx.go b/app/vmagent/common/push_ctx.go
index 7d2686dd61..1c4e19d36a 100644
--- a/app/vmagent/common/push_ctx.go
+++ b/app/vmagent/common/push_ctx.go
@@ -5,6 +5,7 @@ import (
 	"sync"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
 )
 
 // PushCtx is a context used for populating WriteRequest.
@@ -28,12 +29,7 @@ func (ctx *PushCtx) Reset() {
 	}
 	ctx.WriteRequest.Timeseries = ctx.WriteRequest.Timeseries[:0]
 
-	labels := ctx.Labels
-	for i := range labels {
-		label := &labels[i]
-		label.Name = ""
-		label.Value = ""
-	}
+	promrelabel.CleanLabels(ctx.Labels)
 	ctx.Labels = ctx.Labels[:0]
 
 	ctx.Samples = ctx.Samples[:0]
diff --git a/app/vmagent/influx/request_handler.go b/app/vmagent/influx/request_handler.go
index f4447dbae8..b9d1e7c3f3 100644
--- a/app/vmagent/influx/request_handler.go
+++ b/app/vmagent/influx/request_handler.go
@@ -11,6 +11,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/influx"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
 	"github.com/VictoriaMetrics/metrics"
@@ -135,12 +136,8 @@ type pushCtx struct {
 func (ctx *pushCtx) reset() {
 	ctx.ctx.Reset()
 
-	commonLabels := ctx.commonLabels
-	for i := range commonLabels {
-		label := &commonLabels[i]
-		label.Name = ""
-		label.Value = ""
-	}
+	promrelabel.CleanLabels(ctx.commonLabels)
+	ctx.commonLabels = ctx.commonLabels[:0]
 
 	ctx.metricGroupBuf = ctx.metricGroupBuf[:0]
 	ctx.buf = ctx.buf[:0]
diff --git a/app/vmagent/remotewrite/pendingseries.go b/app/vmagent/remotewrite/pendingseries.go
index 7ef569abc2..69f2499891 100644
--- a/app/vmagent/remotewrite/pendingseries.go
+++ b/app/vmagent/remotewrite/pendingseries.go
@@ -11,6 +11,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
 	"github.com/VictoriaMetrics/metrics"
 	"github.com/golang/snappy"
 )
@@ -104,11 +105,7 @@ func (wr *writeRequest) reset() {
 	}
 	wr.tss = wr.tss[:0]
 
-	for i := range wr.labels {
-		label := &wr.labels[i]
-		label.Name = ""
-		label.Value = ""
-	}
+	promrelabel.CleanLabels(wr.labels)
 	wr.labels = wr.labels[:0]
 
 	wr.samples = wr.samples[:0]
diff --git a/app/vmagent/remotewrite/relabel.go b/app/vmagent/remotewrite/relabel.go
index 7ebb4165be..0fe16e6b48 100644
--- a/app/vmagent/remotewrite/relabel.go
+++ b/app/vmagent/remotewrite/relabel.go
@@ -117,12 +117,7 @@ type relabelCtx struct {
 }
 
 func (rctx *relabelCtx) reset() {
-	labels := rctx.labels
-	for i := range labels {
-		label := &labels[i]
-		label.Name = ""
-		label.Value = ""
-	}
+	promrelabel.CleanLabels(rctx.labels)
 	rctx.labels = rctx.labels[:0]
 }
 
diff --git a/app/vminsert/relabel/relabel.go b/app/vminsert/relabel/relabel.go
index 25274c9ad6..11b9cc774f 100644
--- a/app/vminsert/relabel/relabel.go
+++ b/app/vminsert/relabel/relabel.go
@@ -69,12 +69,7 @@ type Ctx struct {
 
 // Reset resets ctx.
 func (ctx *Ctx) Reset() {
-	labels := ctx.tmpLabels
-	for i := range labels {
-		label := &labels[i]
-		label.Name = ""
-		label.Value = ""
-	}
+	promrelabel.CleanLabels(ctx.tmpLabels)
 	ctx.tmpLabels = ctx.tmpLabels[:0]
 }
 
diff --git a/lib/promrelabel/relabel.go b/lib/promrelabel/relabel.go
index 887466754b..0a7a026461 100644
--- a/lib/promrelabel/relabel.go
+++ b/lib/promrelabel/relabel.go
@@ -336,3 +336,14 @@ func GetLabelValueByName(labels []prompbmarshal.Label, name string) string {
 	}
 	return label.Value
 }
+
+// CleanLabels sets label.Name and label.Value to an empty string for all the labels.
+//
+// This should help GC clean up label.Name and label.Value strings.
+func CleanLabels(labels []prompbmarshal.Label) {
+	for i := range labels {
+		label := &labels[i]
+		label.Name = ""
+		label.Value = ""
+	}
+}
diff --git a/lib/promscrape/config.go b/lib/promscrape/config.go
index d8ca914699..4d9cff2079 100644
--- a/lib/promscrape/config.go
+++ b/lib/promscrape/config.go
@@ -643,6 +643,11 @@ func appendScrapeWork(dst []ScrapeWork, swc *scrapeWorkConfig, target string, ex
 	promrelabel.SortLabels(originalLabels)
 	labels = promrelabel.ApplyRelabelConfigs(labels, 0, swc.relabelConfigs, false)
 	labels = promrelabel.RemoveMetaLabels(labels[:0], labels)
+	// Remove references to already deleted labels, so GC could clean strings for label name and label value.
+	// This should reduce memory usage when relabeling creates big number of temporary labels with long names and/or values.
+	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825 for details.
+	promrelabel.CleanLabels(labels[len(labels):cap(labels)])
+
 	if len(labels) == 0 {
 		// Drop target without labels.
 		droppedTargetsMap.Register(originalLabels)

From bb3b513bdd0bc86bf1fd6b9afe0d98fd2b6e0cfe Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin <valyala@gmail.com>
Date: Sat, 7 Nov 2020 16:25:53 +0200
Subject: [PATCH 6/9] docs/CHANGELOG.md: `make docs-sync`

---
 docs/CHANGELOG.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 71967a61a7..61c7500371 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -7,9 +7,13 @@
 * FEATURE: vmagent: reduce memory usage when `kubernetes_sd_config` discovers big number of scrape targets (e.g. hundreds of thouthands) and the majority of these targets (99%)
   are dropped during relabeling. Previously labels for all the dropped targets were displayed at `/api/v1/targets` page. Now only up to `-promscrape.maxDroppedTargets` such
   targets are displayed. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/878 for details.
+* FEATURE: vmagent: reduce memory usage when scraping a big number of targets with a big number of temporary labels starting with `__`.
+  See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825
 * FEATURE: vmagent: add `/ready` HTTP endpoint, which returns 200 OK status code when all the service discovery has been initialized.
   This may be useful during rolling upgrades. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/875
 
+* BUGFIX: vmagent: eliminate data race when the `-promscrape.streamParse` command-line flag is set. Previously this mode could result in scraped metrics with garbage labels.
+  See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825#issuecomment-723198247 for details.
 * BUGFIX: properly calculate `topk_*` and `bottomk_*` functions from [MetricsQL](https://victoriametrics.github.io/MetricsQL.html) for time series with gaps.
   See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/883
 

From 4e391a5e396a1780e2056d8c62653f9e3b12b822 Mon Sep 17 00:00:00 2001
From: Artem Navoiev <tenmozes@gmail.com>
Date: Sat, 7 Nov 2020 17:00:23 +0200
Subject: [PATCH 7/9] [deployment] add vmalert + alertmanager to docker compose
 (#885)

---
 deployment/docker/alerts.yml         | 23 +++++++++++++++++++++
 deployment/docker/docker-compose.yml | 30 ++++++++++++++++++++++++++++
 2 files changed, 53 insertions(+)
 create mode 100644 deployment/docker/alerts.yml

diff --git a/deployment/docker/alerts.yml b/deployment/docker/alerts.yml
new file mode 100644
index 0000000000..1d2e3398e8
--- /dev/null
+++ b/deployment/docker/alerts.yml
@@ -0,0 +1,23 @@
+groups:
+  - name: groupGorSingleAlert
+    rules:
+      - alert: VMRows
+        for: 10s
+        expr: vm_rows > 0
+        labels:
+          label: bar
+          host: "{{ $labels.instance }}"
+        annotations:
+          summary: "{{ $value|humanize }}"
+          description: "{{$labels}}"
+  - name: TestGroup
+    rules:
+      - alert: Conns
+        expr: sum(vm_tcplistener_conns) by(instance) > 1
+        for: 5s
+        annotations:
+          summary: "Too high connection number for {{$labels.instance}}"
+          description: "It is {{ $value }} connections for {{$labels.instance}}"
+      - alert: ExampleAlertAlwaysFiring
+        expr: sum by(job)
+          (up == 1)
diff --git a/deployment/docker/docker-compose.yml b/deployment/docker/docker-compose.yml
index 8ccab8569f..fc1e4f1157 100644
--- a/deployment/docker/docker-compose.yml
+++ b/deployment/docker/docker-compose.yml
@@ -52,6 +52,36 @@ services:
     networks:
       - vm_net
     restart: always
+  vmalert:
+    container_name: vmalert
+    image: victoriametrics/vmalert
+    depends_on:
+      - "victoriametrics"
+      - "alertmanager"
+    ports:
+      - 8880:8880
+    volumes:
+      - ./alerts.yml:/etc/alerts/alerts.yml
+    command:
+      - '--datasource.url=http://victoriametrics:8428/'
+      - '--remoteRead.url=http://victoriametrics:8428/'
+      - '--remoteWrite.url=http://victoriametrics:8428/'
+      - '--notifier.url=http://alertmanager:9093/'
+      - '--rule=/etc/alerts/*.yml'
+      # display source of alerts in grafana
+      - '-external.url=http://127.0.0.1:3000' #grafana outside container
+      - '--external.alert.source=explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr":"{{$$expr|quotesEscape|pathEscape}}"},{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]' ## when copy-pasting this line, note the '$$' escaping for '$expr'
+    networks:
+      - vm_net
+    restart: always
+  alertmanager:
+    container_name: alertmanager
+    image:  prom/alertmanager
+    ports:
+      - 9093:9093
+    networks:
+      - vm_net
+    restart: always
 volumes:
   vmagentdata: {}
   vmdata: {}

From 41813eb87a32a869ee6f54577f78d58177166c94 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin <valyala@gmail.com>
Date: Sat, 7 Nov 2020 17:52:45 +0200
Subject: [PATCH 8/9] CHANGELOG.md: cut v1.46.0

---
 CHANGELOG.md      | 3 +++
 docs/CHANGELOG.md | 3 +++
 2 files changed, 6 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 61c7500371..73bac70fc1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,9 @@
 
 # tip
 
+
+# [v1.46.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.46.0)
+
 * FEATURE: optimize requests to `/api/v1/labels` and `/api/v1/label/<name>/values` when `start` and `end` args are set.
 * FEATURE: reduce memory usage when query touches big number of time series.
 * FEATURE: vmagent: reduce memory usage when `kubernetes_sd_config` discovers big number of scrape targets (e.g. hundreds of thouthands) and the majority of these targets (99%)
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 61c7500371..73bac70fc1 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -2,6 +2,9 @@
 
 # tip
 
+
+# [v1.46.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.46.0)
+
 * FEATURE: optimize requests to `/api/v1/labels` and `/api/v1/label/<name>/values` when `start` and `end` args are set.
 * FEATURE: reduce memory usage when query touches big number of time series.
 * FEATURE: vmagent: reduce memory usage when `kubernetes_sd_config` discovers big number of scrape targets (e.g. hundreds of thouthands) and the majority of these targets (99%)

From ef6ab3d2c9e15ce0fa886afc8c1745ad7bccac3f Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin <valyala@gmail.com>
Date: Sun, 8 Nov 2020 13:40:19 +0200
Subject: [PATCH 9/9] docs/Single-server-VictoriaMetrics.md: typo fix

---
 README.md                             | 2 +-
 docs/Single-server-VictoriaMetrics.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index c110a6d908..3bd12a6765 100644
--- a/README.md
+++ b/README.md
@@ -772,7 +772,7 @@ Time series data can be imported via any supported ingestion protocol:
 * OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details.
 * OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details.
 * `/api/v1/import` for importing data obtained from [/api/v1/export](#how-to-export-data-in-json-line-format).
-  See [these docs](##how-to-import-data-in-json-line-format) for details.
+  See [these docs](#how-to-import-data-in-json-line-format) for details.
 * `/api/v1/import/native` for importing data obtained from [/api/v1/export/native](#how-to-export-data-in-native-format).
   See [these docs](#how-to-import-data-in-native-format) for details.
 * `/api/v1/import/csv` for importing arbitrary CSV data. See [these docs](#how-to-import-csv-data) for details.
diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md
index c110a6d908..3bd12a6765 100644
--- a/docs/Single-server-VictoriaMetrics.md
+++ b/docs/Single-server-VictoriaMetrics.md
@@ -772,7 +772,7 @@ Time series data can be imported via any supported ingestion protocol:
 * OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details.
 * OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details.
 * `/api/v1/import` for importing data obtained from [/api/v1/export](#how-to-export-data-in-json-line-format).
-  See [these docs](##how-to-import-data-in-json-line-format) for details.
+  See [these docs](#how-to-import-data-in-json-line-format) for details.
 * `/api/v1/import/native` for importing data obtained from [/api/v1/export/native](#how-to-export-data-in-native-format).
   See [these docs](#how-to-import-data-in-native-format) for details.
 * `/api/v1/import/csv` for importing arbitrary CSV data. See [these docs](#how-to-import-csv-data) for details.