diff --git a/CHANGELOG.md b/CHANGELOG.md
index d445028465..73bac70fc1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,13 +1,22 @@
+# CHANGELOG
+
 # tip
 
+
+# [v1.46.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.46.0)
+
 * FEATURE: optimize requests to `/api/v1/labels` and `/api/v1/label/<name>/values` when `start` and `end` args are set.
 * FEATURE: reduce memory usage when query touches big number of time series.
 * FEATURE: vmagent: reduce memory usage when `kubernetes_sd_config` discovers big number of scrape targets (e.g. hundreds of thouthands) and the majority of these targets (99%)
   are dropped during relabeling. Previously labels for all the dropped targets were displayed at `/api/v1/targets` page. Now only up to `-promscrape.maxDroppedTargets` such
   targets are displayed. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/878 for details.
+* FEATURE: vmagent: reduce memory usage when scraping big number of targets with big number of temporary labels starting with `__`.
+  See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825
 * FEATURE: vmagent: add `/ready` HTTP endpoint, which returns 200 OK status code when all the service discovery has been initialized.
   This may be useful during rolling upgrades. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/875
 
+* BUGFIX: vmagent: eliminate data race when `-promscrape.streamParse` command-line flag is set. Previously this mode could result in scraped metrics with garbage labels.
+  See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825#issuecomment-723198247 for details.
 * BUGFIX: properly calculate `topk_*` and `bottomk_*` functions from [MetricsQL](https://victoriametrics.github.io/MetricsQL.html) for time series with gaps.
   See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/883
 
diff --git a/README.md b/README.md
index c110a6d908..3bd12a6765 100644
--- a/README.md
+++ b/README.md
@@ -772,7 +772,7 @@ Time series data can be imported via any supported ingestion protocol:
 * OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details.
 * OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details.
 * `/api/v1/import` for importing data obtained from [/api/v1/export](#how-to-export-data-in-json-line-format).
-  See [these docs](##how-to-import-data-in-json-line-format) for details.
+  See [these docs](#how-to-import-data-in-json-line-format) for details.
 * `/api/v1/import/native` for importing data obtained from [/api/v1/export/native](#how-to-export-data-in-native-format).
   See [these docs](#how-to-import-data-in-native-format) for details.
 * `/api/v1/import/csv` for importing arbitrary CSV data. See [these docs](#how-to-import-csv-data) for details.
diff --git a/app/vmagent/common/push_ctx.go b/app/vmagent/common/push_ctx.go
index 7d2686dd61..1c4e19d36a 100644
--- a/app/vmagent/common/push_ctx.go
+++ b/app/vmagent/common/push_ctx.go
@@ -5,6 +5,7 @@ import (
 	"sync"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
 )
 
 // PushCtx is a context used for populating WriteRequest.
@@ -28,12 +29,7 @@ func (ctx *PushCtx) Reset() {
 	}
 	ctx.WriteRequest.Timeseries = ctx.WriteRequest.Timeseries[:0]
 
-	labels := ctx.Labels
-	for i := range labels {
-		label := &labels[i]
-		label.Name = ""
-		label.Value = ""
-	}
+	promrelabel.CleanLabels(ctx.Labels)
 	ctx.Labels = ctx.Labels[:0]
 
 	ctx.Samples = ctx.Samples[:0]
diff --git a/app/vmagent/influx/request_handler.go b/app/vmagent/influx/request_handler.go
index f4447dbae8..b9d1e7c3f3 100644
--- a/app/vmagent/influx/request_handler.go
+++ b/app/vmagent/influx/request_handler.go
@@ -11,6 +11,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/influx"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
 	"github.com/VictoriaMetrics/metrics"
@@ -135,12 +136,8 @@ type pushCtx struct {
 func (ctx *pushCtx) reset() {
 	ctx.ctx.Reset()
 
-	commonLabels := ctx.commonLabels
-	for i := range commonLabels {
-		label := &commonLabels[i]
-		label.Name = ""
-		label.Value = ""
-	}
+	promrelabel.CleanLabels(ctx.commonLabels)
+	ctx.commonLabels = ctx.commonLabels[:0]
 
 	ctx.metricGroupBuf = ctx.metricGroupBuf[:0]
 	ctx.buf = ctx.buf[:0]
diff --git a/app/vmagent/remotewrite/pendingseries.go b/app/vmagent/remotewrite/pendingseries.go
index 7ef569abc2..69f2499891 100644
--- a/app/vmagent/remotewrite/pendingseries.go
+++ b/app/vmagent/remotewrite/pendingseries.go
@@ -11,6 +11,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
 	"github.com/VictoriaMetrics/metrics"
 	"github.com/golang/snappy"
 )
@@ -104,11 +105,7 @@ func (wr *writeRequest) reset() {
 	}
 	wr.tss = wr.tss[:0]
 
-	for i := range wr.labels {
-		label := &wr.labels[i]
-		label.Name = ""
-		label.Value = ""
-	}
+	promrelabel.CleanLabels(wr.labels)
 	wr.labels = wr.labels[:0]
 
 	wr.samples = wr.samples[:0]
diff --git a/app/vmagent/remotewrite/relabel.go b/app/vmagent/remotewrite/relabel.go
index 7ebb4165be..0fe16e6b48 100644
--- a/app/vmagent/remotewrite/relabel.go
+++ b/app/vmagent/remotewrite/relabel.go
@@ -117,12 +117,7 @@ type relabelCtx struct {
 }
 
 func (rctx *relabelCtx) reset() {
-	labels := rctx.labels
-	for i := range labels {
-		label := &labels[i]
-		label.Name = ""
-		label.Value = ""
-	}
+	promrelabel.CleanLabels(rctx.labels)
 	rctx.labels = rctx.labels[:0]
 }
 
diff --git a/app/vminsert/relabel/relabel.go b/app/vminsert/relabel/relabel.go
index 25274c9ad6..11b9cc774f 100644
--- a/app/vminsert/relabel/relabel.go
+++ b/app/vminsert/relabel/relabel.go
@@ -69,12 +69,7 @@ type Ctx struct {
 
 // Reset resets ctx.
 func (ctx *Ctx) Reset() {
-	labels := ctx.tmpLabels
-	for i := range labels {
-		label := &labels[i]
-		label.Name = ""
-		label.Value = ""
-	}
+	promrelabel.CleanLabels(ctx.tmpLabels)
 	ctx.tmpLabels = ctx.tmpLabels[:0]
 }
 
diff --git a/deployment/docker/alerts.yml b/deployment/docker/alerts.yml
new file mode 100644
index 0000000000..1d2e3398e8
--- /dev/null
+++ b/deployment/docker/alerts.yml
@@ -0,0 +1,23 @@
+groups:
+  - name: groupGorSingleAlert
+    rules:
+      - alert: VMRows
+        for: 10s
+        expr: vm_rows > 0
+        labels:
+          label: bar
+          host: "{{ $labels.instance }}"
+        annotations:
+          summary: "{{ $value|humanize }}"
+          description: "{{$labels}}"
+  - name: TestGroup
+    rules:
+      - alert: Conns
+        expr: sum(vm_tcplistener_conns) by(instance) > 1
+        for: 5s
+        annotations:
+          summary: "Too high connection number for {{$labels.instance}}"
+          description: "It is {{ $value }} connections for {{$labels.instance}}"
+      - alert: ExampleAlertAlwaysFiring
+        expr: sum by(job)
+          (up == 1)
diff --git a/deployment/docker/docker-compose.yml b/deployment/docker/docker-compose.yml
index 8ccab8569f..fc1e4f1157 100644
--- a/deployment/docker/docker-compose.yml
+++ b/deployment/docker/docker-compose.yml
@@ -52,6 +52,36 @@ services:
     networks:
       - vm_net
     restart: always
+  vmalert:
+    container_name: vmalert
+    image: victoriametrics/vmalert
+    depends_on:
+      - "victoriametrics"
+      - "alertmanager"
+    ports:
+      - 8880:8880
+    volumes:
+      - ./alerts.yml:/etc/alerts/alerts.yml
+    command:
+      - '--datasource.url=http://victoriametrics:8428/'
+      - '--remoteRead.url=http://victoriametrics:8428/'
+      - '--remoteWrite.url=http://victoriametrics:8428/'
+      - '--notifier.url=http://alertmanager:9093/'
+      - '--rule=/etc/alerts/*.yml'
+      # display source of alerts in grafana
+      - '--external.url=http://127.0.0.1:3000' # grafana outside container
+      - '--external.alert.source=explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr":"{{$$expr|quotesEscape|pathEscape}}"},{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]' ## when copy-pasting this line, be aware that '$$' is needed to escape '$expr'
+    networks:
+      - vm_net
+    restart: always
+  alertmanager:
+    container_name: alertmanager
+    image: prom/alertmanager
+    ports:
+      - 9093:9093
+    networks:
+      - vm_net
+    restart: always
 volumes:
   vmagentdata: {}
   vmdata: {}
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index d445028465..73bac70fc1 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -1,13 +1,22 @@
+# CHANGELOG
+
 # tip
 
+
+# [v1.46.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.46.0)
+
 * FEATURE: optimize requests to `/api/v1/labels` and `/api/v1/label/<name>/values` when `start` and `end` args are set.
 * FEATURE: reduce memory usage when query touches big number of time series.
 * FEATURE: vmagent: reduce memory usage when `kubernetes_sd_config` discovers big number of scrape targets (e.g. hundreds of thouthands) and the majority of these targets (99%)
   are dropped during relabeling. Previously labels for all the dropped targets were displayed at `/api/v1/targets` page. Now only up to `-promscrape.maxDroppedTargets` such
   targets are displayed. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/878 for details.
+* FEATURE: vmagent: reduce memory usage when scraping big number of targets with big number of temporary labels starting with `__`.
+  See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825
 * FEATURE: vmagent: add `/ready` HTTP endpoint, which returns 200 OK status code when all the service discovery has been initialized.
   This may be useful during rolling upgrades. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/875
 
+* BUGFIX: vmagent: eliminate data race when `-promscrape.streamParse` command-line flag is set. Previously this mode could result in scraped metrics with garbage labels.
+  See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825#issuecomment-723198247 for details.
 * BUGFIX: properly calculate `topk_*` and `bottomk_*` functions from [MetricsQL](https://victoriametrics.github.io/MetricsQL.html) for time series with gaps.
   See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/883
 
diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md
index c110a6d908..3bd12a6765 100644
--- a/docs/Single-server-VictoriaMetrics.md
+++ b/docs/Single-server-VictoriaMetrics.md
@@ -772,7 +772,7 @@ Time series data can be imported via any supported ingestion protocol:
 * OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details.
 * OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details.
 * `/api/v1/import` for importing data obtained from [/api/v1/export](#how-to-export-data-in-json-line-format).
-  See [these docs](##how-to-import-data-in-json-line-format) for details.
+  See [these docs](#how-to-import-data-in-json-line-format) for details.
 * `/api/v1/import/native` for importing data obtained from [/api/v1/export/native](#how-to-export-data-in-native-format).
   See [these docs](#how-to-import-data-in-native-format) for details.
 * `/api/v1/import/csv` for importing arbitrary CSV data. See [these docs](#how-to-import-csv-data) for details.
diff --git a/lib/promrelabel/relabel.go b/lib/promrelabel/relabel.go
index 887466754b..0a7a026461 100644
--- a/lib/promrelabel/relabel.go
+++ b/lib/promrelabel/relabel.go
@@ -336,3 +336,14 @@ func GetLabelValueByName(labels []prompbmarshal.Label, name string) string {
 	}
 	return label.Value
 }
+
+// CleanLabels sets label.Name and label.Value to an empty string for all the labels.
+//
+// This should help GC cleaning up label.Name and label.Value strings.
+func CleanLabels(labels []prompbmarshal.Label) {
+	for i := range labels {
+		label := &labels[i]
+		label.Name = ""
+		label.Value = ""
+	}
+}
diff --git a/lib/promscrape/config.go b/lib/promscrape/config.go
index d8ca914699..4d9cff2079 100644
--- a/lib/promscrape/config.go
+++ b/lib/promscrape/config.go
@@ -643,6 +643,11 @@ func appendScrapeWork(dst []ScrapeWork, swc *scrapeWorkConfig, target string, ex
 	promrelabel.SortLabels(originalLabels)
 	labels = promrelabel.ApplyRelabelConfigs(labels, 0, swc.relabelConfigs, false)
 	labels = promrelabel.RemoveMetaLabels(labels[:0], labels)
+	// Remove references to already deleted labels, so GC could clean strings for label name and label value.
+	// This should reduce memory usage when relabeling creates big number of temporary labels with long names and/or values.
+	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825 for details.
+	promrelabel.CleanLabels(labels[len(labels):cap(labels)])
+
 	if len(labels) == 0 {
 		// Drop target without labels.
 		droppedTargetsMap.Register(originalLabels)
diff --git a/lib/promscrape/discovery/kubernetes/common_types.go b/lib/promscrape/discovery/kubernetes/common_types.go
index 039fce6a94..d1bc212034 100644
--- a/lib/promscrape/discovery/kubernetes/common_types.go
+++ b/lib/promscrape/discovery/kubernetes/common_types.go
@@ -1,7 +1,6 @@
 package kubernetes
 
 import (
-	"fmt"
 	"net/url"
 	"strings"
 
@@ -23,13 +22,13 @@ type ObjectMeta struct {
 func (om *ObjectMeta) registerLabelsAndAnnotations(prefix string, m map[string]string) {
 	for _, lb := range om.Labels {
 		ln := discoveryutils.SanitizeLabelName(lb.Name)
-		m[fmt.Sprintf("%s_label_%s", prefix, ln)] = lb.Value
-		m[fmt.Sprintf("%s_labelpresent_%s", prefix, ln)] = "true"
+		m[prefix+"_label_"+ln] = lb.Value
+		m[prefix+"_labelpresent_"+ln] = "true"
 	}
 	for _, a := range om.Annotations {
 		an := discoveryutils.SanitizeLabelName(a.Name)
-		m[fmt.Sprintf("%s_annotation_%s", prefix, an)] = a.Value
-		m[fmt.Sprintf("%s_annotationpresent_%s", prefix, an)] = "true"
+		m[prefix+"_annotation_"+an] = a.Value
+		m[prefix+"_annotationpresent_"+an] = "true"
 	}
 }
 
diff --git a/lib/promscrape/discovery/kubernetes/node.go b/lib/promscrape/discovery/kubernetes/node.go
index 411b259786..9a584c67e3 100644
--- a/lib/promscrape/discovery/kubernetes/node.go
+++ b/lib/promscrape/discovery/kubernetes/node.go
@@ -95,7 +95,7 @@ func (n *Node) appendTargetLabels(ms []map[string]string) []map[string]string {
 		}
 		addrTypesUsed[a.Type] = true
 		ln := discoveryutils.SanitizeLabelName(a.Type)
-		m[fmt.Sprintf("__meta_kubernetes_node_address_%s", ln)] = a.Address
+		m["__meta_kubernetes_node_address_"+ln] = a.Address
 	}
 	ms = append(ms, m)
 	return ms
diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go
index 160c3b7f3f..30b0dea597 100644
--- a/lib/promscrape/scrapework.go
+++ b/lib/promscrape/scrapework.go
@@ -325,18 +325,16 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
 		samplesScraped += len(rows)
 		for i := range rows {
 			sw.addRowToTimeseries(wc, &rows[i], scrapeTimestamp, true)
-			if len(wc.labels) > 40000 {
-				// Limit the maximum size of wc.writeRequest.
-				// This should reduce memory usage when scraping targets with millions of metrics and/or labels.
-				// For example, when scraping /federate handler from Prometheus - see https://prometheus.io/docs/prometheus/latest/federation/
-				samplesPostRelabeling += len(wc.writeRequest.Timeseries)
-				sw.updateSeriesAdded(wc)
-				startTime := time.Now()
-				sw.PushData(&wc.writeRequest)
-				pushDataDuration.UpdateDuration(startTime)
-				wc.resetNoRows()
-			}
 		}
+		// Push the collected rows to sw before returning from the callback, since they cannot be held
+		// after returning from the callback - this will result in data race.
+		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825#issuecomment-723198247
+		samplesPostRelabeling += len(wc.writeRequest.Timeseries)
+		sw.updateSeriesAdded(wc)
+		startTime := time.Now()
+		sw.PushData(&wc.writeRequest)
+		pushDataDuration.UpdateDuration(startTime)
+		wc.resetNoRows()
 		return nil
 	})
 	scrapedSamples.Update(float64(samplesScraped))
@@ -352,8 +350,6 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
 		}
 		scrapesFailed.Inc()
 	}
-	samplesPostRelabeling += len(wc.writeRequest.Timeseries)
-	sw.updateSeriesAdded(wc)
 	seriesAdded := sw.finalizeSeriesAdded(samplesPostRelabeling)
 	sw.addAutoTimeseries(wc, "up", float64(up), scrapeTimestamp)
 	sw.addAutoTimeseries(wc, "scrape_duration_seconds", duration, scrapeTimestamp)
diff --git a/lib/uint64set/uint64set.go b/lib/uint64set/uint64set.go
index ecd7b2efa0..fdb38eeeda 100644
--- a/lib/uint64set/uint64set.go
+++ b/lib/uint64set/uint64set.go
@@ -927,9 +927,13 @@ func (b *bucket16) delFromSmallPool(x uint16) bool {
 func (b *bucket16) appendTo(dst []uint64, hi uint32, hi16 uint16) []uint64 {
 	hi64 := uint64(hi)<<32 | uint64(hi16)<<16
 	if b.bits == nil {
+		// Sort a copy of b.smallPool, since b must be readonly in order to prevent from data races
+		// when b.appendTo is called from concurrent goroutines.
+		smallPool := b.smallPool
+
 		// Use uint16Sorter instead of sort.Slice here in order to reduce memory allocations.
 		a := uint16SorterPool.Get().(*uint16Sorter)
-		*a = uint16Sorter(b.smallPool[:b.smallPoolLen])
+		*a = uint16Sorter(smallPool[:b.smallPoolLen])
 		if len(*a) > 1 && !sort.IsSorted(a) {
 			sort.Sort(a)
 		}