From 2e2e4f7e21a426fb42b20cc2932e61f533e9515d Mon Sep 17 00:00:00 2001
From: Roman Khavronenko
Date: Sat, 9 Jan 2021 23:56:11 +0000
Subject: [PATCH 1/4] vmalert-989: return non-empty result in template func `query` stub to pass validation (#1002)

During the templates validation stage vmalert does not actually send queries,
so validation of complex chained expressions may fail. To avoid this, we add
a blank sample to the stub response so validation can pass successfully.
Later, during rule execution, the stub is replaced with the real `query`
function.

https://github.com/VictoriaMetrics/VictoriaMetrics/issues/989
---
 app/vmalert/config/testdata/rules2-good.rules | 1 +
 app/vmalert/notifier/template_func.go         | 4 +++-
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/app/vmalert/config/testdata/rules2-good.rules b/app/vmalert/config/testdata/rules2-good.rules
index 6c9141e90..00e4cb8ee 100644
--- a/app/vmalert/config/testdata/rules2-good.rules
+++ b/app/vmalert/config/testdata/rules2-good.rules
@@ -17,6 +17,7 @@ groups:
           (up == 1)
         labels:
           job: '{{ $labels.job }}'
+          dynamic: '{{ $x := query "up" | first | value }}{{ if eq 1.0 $x }}one{{ else }}unknown{{ end }}'
         annotations:
           description: Job {{ $labels.job }} is up!
           summary: All instances up {{ range query "up" }}
diff --git a/app/vmalert/notifier/template_func.go b/app/vmalert/notifier/template_func.go
index 043339b87..8751abcc7 100644
--- a/app/vmalert/notifier/template_func.go
+++ b/app/vmalert/notifier/template_func.go
@@ -178,7 +178,9 @@ func InitTemplateFunc(externalURL *url.URL) {
 		// it is present here only for validation purposes, when there is no
 		// provided datasource.
 		"query": func(q string) ([]datasource.Metric, error) {
-			return nil, nil
+			// return a non-empty slice to pass validation with chained functions in templates;
+			// see issue #989 for details
+			return []datasource.Metric{{}}, nil
 		},
 		"first": func(metrics []datasource.Metric) (datasource.Metric, error) {
 			if len(metrics) > 0 {

From 6740294ebb7625c8f2342ec71c88c4b3914834d5 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Mon, 11 Jan 2021 12:17:44 +0200
Subject: [PATCH 2/4] vendor: update github.com/VictoriaMetrics/fasthttp

---
 go.mod                                                  | 2 +-
 go.sum                                                  | 4 ++--
 vendor/github.com/VictoriaMetrics/fasthttp/tcpdialer.go | 5 ++++-
 vendor/modules.txt                                      | 2 +-
 4 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/go.mod b/go.mod
index 04862cc14..36ad3774b 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ require (
 
 	// Do not use the original github.com/valyala/fasthttp because of issues
 	// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
-	github.com/VictoriaMetrics/fasthttp v1.0.9
+	github.com/VictoriaMetrics/fasthttp v1.0.10
 	github.com/VictoriaMetrics/metrics v1.12.3
 	github.com/VictoriaMetrics/metricsql v0.9.1
 	github.com/aws/aws-sdk-go v1.36.23
diff --git a/go.sum b/go.sum
index 1961d66ca..fe7689199 100644
--- a/go.sum
+++ b/go.sum
@@ -41,8 +41,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/VictoriaMetrics/fastcache v1.5.7 h1:4y6y0G8PRzszQUYIQHHssv/jgPHAb5qQuuDNdCbyAgw=
 github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8=
-github.com/VictoriaMetrics/fasthttp v1.0.9 h1:Fja1tfcNMNoUD7RJDYpjGx2CsSfXkUbISKY4kNafdN4=
-github.com/VictoriaMetrics/fasthttp v1.0.9/go.mod h1:3SeUL4zwB/p/a9aEeRc6gdlbrtNHXBJR6N376EgiSHU=
+github.com/VictoriaMetrics/fasthttp v1.0.10 h1:1UbdmWK59j7znylu55r0y66/zTaHbw+Xk+ObSGVywmE=
+github.com/VictoriaMetrics/fasthttp v1.0.10/go.mod h1:3SeUL4zwB/p/a9aEeRc6gdlbrtNHXBJR6N376EgiSHU=
 github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
 github.com/VictoriaMetrics/metrics v1.12.3 h1:Fe6JHC6MSEKa+BtLhPN8WIvS+HKPzMc2evEpNeCGy7I=
 github.com/VictoriaMetrics/metrics v1.12.3/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/tcpdialer.go b/vendor/github.com/VictoriaMetrics/fasthttp/tcpdialer.go
index e31fd7585..8554306a9 100644
--- a/vendor/github.com/VictoriaMetrics/fasthttp/tcpdialer.go
+++ b/vendor/github.com/VictoriaMetrics/fasthttp/tcpdialer.go
@@ -190,6 +190,9 @@ func (d *tcpDialer) NewDial(timeout time.Duration) DialFunc {
 		if err == ErrDialTimeout {
 			return nil, err
 		}
+		if err, ok := err.(net.Error); ok && err.Timeout() {
+			return nil, err
+		}
 		idx++
 		n--
 	}
@@ -232,7 +235,7 @@ func tryDial(network string, addr *net.TCPAddr, deadline time.Time, concurrencyC
 	ch := chv.(chan dialResult)
 	go func() {
 		var dr dialResult
-		dr.conn, dr.err = net.DialTCP(network, nil, addr)
+		dr.conn, dr.err = net.DialTimeout(network, addr.String(), timeout)
 		ch <- dr
 		<-concurrencyCh
 	}()
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c07ffa30f..43fb09030 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -10,7 +10,7 @@ cloud.google.com/go/internal/version
 cloud.google.com/go/storage
 # github.com/VictoriaMetrics/fastcache v1.5.7
 github.com/VictoriaMetrics/fastcache
-# github.com/VictoriaMetrics/fasthttp v1.0.9
+# github.com/VictoriaMetrics/fasthttp v1.0.10
 github.com/VictoriaMetrics/fasthttp
 github.com/VictoriaMetrics/fasthttp/fasthttputil
 github.com/VictoriaMetrics/fasthttp/stackless

From 24ffad74c10e885aa499a2b305585072f41e52cd Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Mon, 11 Jan 2021 12:50:10 +0200
Subject: [PATCH 3/4] all: use `net.Dial` instead of `fasthttp.Dial`, because `fasthttp.Dial` limits the number of concurrent dials to 1000

---
 app/vmagent/remotewrite/statconn.go                     | 9 ++++-----
 docs/CHANGELOG.md                                       | 2 ++
 go.mod                                                  | 2 +-
 go.sum                                                  | 4 ++--
 lib/proxy/proxy.go                                      | 7 +++++--
 vendor/github.com/VictoriaMetrics/fasthttp/tcpdialer.go | 5 +----
 vendor/modules.txt                                      | 2 +-
 7 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/app/vmagent/remotewrite/statconn.go b/app/vmagent/remotewrite/statconn.go
index 93cbf0a15..3c227dc19 100644
--- a/app/vmagent/remotewrite/statconn.go
+++ b/app/vmagent/remotewrite/statconn.go
@@ -5,9 +5,9 @@ import (
 	"net"
 	"strings"
 	"sync/atomic"
+	"time"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
-	"github.com/VictoriaMetrics/fasthttp"
 	"github.com/VictoriaMetrics/metrics"
 )
 
@@ -15,11 +15,10 @@ func statDial(network, addr string) (conn net.Conn, err error) {
 	if !strings.HasPrefix(network, "tcp") {
 		return nil, fmt.Errorf("unexpected network passed to statDial: %q; it must start from `tcp`", network)
 	}
-	if netutil.TCP6Enabled() {
-		conn, err = fasthttp.DialDualStack(addr)
-	} else {
-		conn, err = fasthttp.Dial(addr)
+	if !netutil.TCP6Enabled() {
+		network = "tcp4"
 	}
+	conn, err = net.DialTimeout(network, addr, 5*time.Second)
 	dialsTotal.Inc()
 	if err != nil {
 		dialErrors.Inc()
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 4ef7a1d49..11cb74908 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -2,6 +2,8 @@
 
 # tip
 
+* BUGFIX: vmagent: prevent `dialing to the given TCP address time out` errors when scraping a big number of unavailable targets. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/987
+
 * FEATURE: disable final merge for data for the previous month at the beginning of new month, since it may result in high disk IO and CPU usage. Final merge can be enabled by setting `-finalMergeDelay` command-line flag to positive duration.
 
diff --git a/go.mod b/go.mod
index 36ad3774b..6a1f20561 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ require (
 
 	// Do not use the original github.com/valyala/fasthttp because of issues
 	// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
-	github.com/VictoriaMetrics/fasthttp v1.0.10
+	github.com/VictoriaMetrics/fasthttp v1.0.11
 	github.com/VictoriaMetrics/metrics v1.12.3
 	github.com/VictoriaMetrics/metricsql v0.9.1
 	github.com/aws/aws-sdk-go v1.36.23
diff --git a/go.sum b/go.sum
index fe7689199..1d48c9e86 100644
--- a/go.sum
+++ b/go.sum
@@ -41,8 +41,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/VictoriaMetrics/fastcache v1.5.7 h1:4y6y0G8PRzszQUYIQHHssv/jgPHAb5qQuuDNdCbyAgw=
 github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8=
-github.com/VictoriaMetrics/fasthttp v1.0.10 h1:1UbdmWK59j7znylu55r0y66/zTaHbw+Xk+ObSGVywmE=
-github.com/VictoriaMetrics/fasthttp v1.0.10/go.mod h1:3SeUL4zwB/p/a9aEeRc6gdlbrtNHXBJR6N376EgiSHU=
+github.com/VictoriaMetrics/fasthttp v1.0.11 h1:6XOvE1pF/EhW8qoi7V5qJQJ2rhNV+UGrb1/a9vMbTiw=
+github.com/VictoriaMetrics/fasthttp v1.0.11/go.mod h1:3SeUL4zwB/p/a9aEeRc6gdlbrtNHXBJR6N376EgiSHU=
 github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
 github.com/VictoriaMetrics/metrics v1.12.3 h1:Fe6JHC6MSEKa+BtLhPN8WIvS+HKPzMc2evEpNeCGy7I=
 github.com/VictoriaMetrics/metrics v1.12.3/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
diff --git a/lib/proxy/proxy.go b/lib/proxy/proxy.go
index 82cb7b46a..bb3c339ef 100644
--- a/lib/proxy/proxy.go
+++ b/lib/proxy/proxy.go
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"net"
 	"net/url"
+	"time"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
 	"github.com/VictoriaMetrics/fasthttp"
@@ -80,10 +81,12 @@ func (u *URL) NewDialFunc(tlsConfig *tls.Config) (fasthttp.DialFunc, error) {
 }
 
 func defaultDialFunc(addr string) (net.Conn, error) {
+	network := "tcp4"
 	if netutil.TCP6Enabled() {
-		return fasthttp.DialDualStack(addr)
+		network = "tcp"
 	}
-	return fasthttp.Dial(addr)
+	// Do not use fasthttp.Dial because of https://github.com/VictoriaMetrics/VictoriaMetrics/issues/987
+	return net.DialTimeout(network, addr, 5*time.Second)
 }
 
 // sendConnectRequest sends CONNECT request to proxyConn for the given addr and authHeader and returns the established connection to dstAddr.
diff --git a/vendor/github.com/VictoriaMetrics/fasthttp/tcpdialer.go b/vendor/github.com/VictoriaMetrics/fasthttp/tcpdialer.go
index 8554306a9..e31fd7585 100644
--- a/vendor/github.com/VictoriaMetrics/fasthttp/tcpdialer.go
+++ b/vendor/github.com/VictoriaMetrics/fasthttp/tcpdialer.go
@@ -190,9 +190,6 @@ func (d *tcpDialer) NewDial(timeout time.Duration) DialFunc {
 		if err == ErrDialTimeout {
 			return nil, err
 		}
-		if err, ok := err.(net.Error); ok && err.Timeout() {
-			return nil, err
-		}
 		idx++
 		n--
 	}
@@ -235,7 +232,7 @@ func tryDial(network string, addr *net.TCPAddr, deadline time.Time, concurrencyC
 	ch := chv.(chan dialResult)
 	go func() {
 		var dr dialResult
-		dr.conn, dr.err = net.DialTimeout(network, addr.String(), timeout)
+		dr.conn, dr.err = net.DialTCP(network, nil, addr)
 		ch <- dr
 		<-concurrencyCh
 	}()
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 43fb09030..4c80dec6f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -10,7 +10,7 @@ cloud.google.com/go/internal/version
 cloud.google.com/go/storage
 # github.com/VictoriaMetrics/fastcache v1.5.7
 github.com/VictoriaMetrics/fastcache
-# github.com/VictoriaMetrics/fasthttp v1.0.10
+# github.com/VictoriaMetrics/fasthttp v1.0.11
 github.com/VictoriaMetrics/fasthttp
 github.com/VictoriaMetrics/fasthttp/fasthttputil
 github.com/VictoriaMetrics/fasthttp/stackless

From 14f0f905071fb3e5e67088931942dc7efc8ff1ab Mon Sep 17 00:00:00 2001
From: Roman Khavronenko
Date: Mon, 11 Jan 2021 11:03:15 +0000
Subject: [PATCH 4/4] docker-compose: provide the example list of alerting rules for vm components (#1005)

The list contains examples of alerting rules which might be executed via
`vmalert` to track the health state of VM components. It is assumed that the
list will be revised and calibrated for each system individually.
---
 README.md                        |   1 +
 deployment/docker/alerts.yml     | 185 ++++++++++++++++++++++++++++---
 deployment/docker/prometheus.yml |   2 +-
 3 files changed, 170 insertions(+), 18 deletions(-)

diff --git a/README.md b/README.md
index 67db1496c..b0df5c4e6 100644
--- a/README.md
+++ b/README.md
@@ -1279,6 +1279,7 @@ The most interesting metrics are:
 
 VictoriaMetrics also exposes currently running queries with their execution times at `/api/v1/status/active_queries` page.
 
+See the example of alerting rules for VM components [here](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/alerts.yml).
 
 ## Troubleshooting
 
diff --git a/deployment/docker/alerts.yml b/deployment/docker/alerts.yml
index 1d2e3398e..77f72854b 100644
--- a/deployment/docker/alerts.yml
+++ b/deployment/docker/alerts.yml
@@ -1,23 +1,174 @@
+# This file contains the default list of alerts for vm-single and vmagent services.
+# The alerts below are just recommendations and may require some updates
+# and threshold calibration according to each specific setup.
 groups:
-  - name: groupGorSingleAlert
+  - name: serviceHealth
     rules:
-      - alert: VMRows
-        for: 10s
-        expr: vm_rows > 0
+      # note the `job` filter and update it according to your setup
+      - alert: TooManyRestarts
+        expr: changes(process_start_time_seconds{job=~"victoriametrics|vmagent|vmalert"}[15m]) > 2
         labels:
-          label: bar
-          host: "{{ $labels.instance }}"
+          severity: critical
         annotations:
-          summary: "{{ $value|humanize }}"
-          description: "{{$labels}}"
+          summary: "{{ $labels.job }} too many restarts (instance {{ $labels.instance }})"
+          description: "Job {{ $labels.job }} has restarted more than twice in the last 15 minutes.
+            It might be crashlooping."
+
+  # Alerts group for VM single assumes that the Grafana dashboard
+  # https://grafana.com/grafana/dashboards/10229 is installed.
+  # Please update the `dashboard` annotation according to your setup.
+  - name: vmsingle
+    interval: 30s
+    concurrency: 2
     rules:
+      - alert: DiskRunsOutOfSpaceIn3Days
+        expr: |
+          vm_free_disk_space_bytes / ignoring(path) (
+            (
+              sum(rate(vm_rows_added_to_storage_total[1d])) -
+              sum(rate(vm_deduplicated_samples_total[1d])) without(type)
+            )
+            *
+            (
+              sum(vm_data_size_bytes{type!="indexdb"}) /
+              sum(vm_rows{type!="indexdb"})
+            )
+          ) < 3 * 24 * 3600
+        for: 30m
+        labels:
+          severity: critical
         annotations:
-          summary: "Too high connection number for {{$labels.instance}}"
-          description: "It is {{ $value }} connections for {{$labels.instance}}"
-      - alert: ExampleAlertAlwaysFiring
-        expr: sum by(job)
-          (up == 1)
+          dashboard: "http://localhost:3000/d/wNf0q_kZk?viewPanel=73&var-instance={{ $labels.instance }}"
+          summary: "Instance {{ $labels.instance }} will run out of disk space soon"
+          description: "Taking into account the current ingestion rate, free disk space will be enough only
+            for {{ $value | humanizeDuration }} on instance {{ $labels.instance }}.\n
+            Consider limiting the ingestion rate, decreasing retention or scaling up the disk space if possible."
+
+      - alert: RequestErrorsToAPI
+        expr: increase(vm_http_request_errors_total[5m]) > 0
+        for: 15m
+        labels:
+          severity: warning
+        annotations:
+          dashboard: "http://localhost:3000/d/wNf0q_kZk?viewPanel=35&var-instance={{ $labels.instance }}"
+          summary: "Too many errors served for path {{ $labels.path }} (instance {{ $labels.instance }})"
+          description: "Requests to path {{ $labels.path }} are returning errors.
+            Please verify if clients are sending correct requests."
+
+      - alert: ConcurrentFlushesHitTheLimit
+        expr: vm_concurrent_addrows_current >= vm_concurrent_addrows_capacity
+        for: 15m
+        labels:
+          severity: warning
+        annotations:
+          dashboard: "http://localhost:3000/d/wNf0q_kZk?viewPanel=59&var-instance={{ $labels.instance }}"
+          summary: "VictoriaMetrics on instance {{ $labels.instance }} is constantly hitting the concurrent flushes limit"
+          description: "The limit of concurrent flushes on instance {{ $labels.instance }} is equal to the number of CPUs.\n
+            When VictoriaMetrics constantly hits the limit it means that the storage is overloaded and requires more CPU."
+
+      - alert: TooManyLogs
+        expr: sum(increase(vm_log_messages_total{level!="info"}[5m])) by (job, instance) > 0
+        for: 15m
+        labels:
+          severity: warning
+        annotations:
+          dashboard: "http://localhost:3000/d/wNf0q_kZk?viewPanel=67&var-instance={{ $labels.instance }}"
+          summary: "Too many logs printed for job \"{{ $labels.job }}\" ({{ $labels.instance }})"
+          description: "Logging rate for job \"{{ $labels.job }}\" ({{ $labels.instance }}) is {{ $value }} for the last 15m.\n
+            It is worth checking the logs for specific error messages."
+
+      - alert: RowsRejectedOnIngestion
+        expr: sum(rate(vm_rows_ignored_total[5m])) by (instance, reason) > 0
+        for: 15m
+        labels:
+          severity: warning
+        annotations:
+          dashboard: "http://localhost:3000/d/wNf0q_kZk?viewPanel=58&var-instance={{ $labels.instance }}"
+          summary: "Some rows are rejected on \"{{ $labels.instance }}\" on ingestion attempt"
+          description: "VM refuses to ingest rows on \"{{ $labels.instance }}\" due to the
+            following reason: \"{{ $labels.reason }}\""
+
+      - alert: TooHighChurnRate
+        expr: |
+          (
+            sum(rate(vm_new_timeseries_created_total[5m])) by(instance)
+            /
+            sum(rate(vm_rows_inserted_total[5m])) by (instance)
+          ) > 0.1
+        for: 15m
+        labels:
+          severity: warning
+        annotations:
+          dashboard: "http://localhost:3000/d/wNf0q_kZk?viewPanel=66&var-instance={{ $labels.instance }}"
+          summary: "Churn rate is more than 10% on \"{{ $labels.instance }}\" for the last 15m"
+          description: "VM constantly creates new time series on \"{{ $labels.instance }}\".\n
+            This effect is known as Churn Rate.\n
+            High Churn Rate is tightly connected with database performance and may
+            result in unexpected OOMs or slow queries."
+
+      - alert: TooHighSlowInsertsRate
+        expr: |
+          (
+            sum(rate(vm_slow_row_inserts_total[5m])) by(instance)
+            /
+            sum(rate(vm_rows_inserted_total[5m])) by (instance)
+          ) > 0.5
+        for: 15m
+        labels:
+          severity: warning
+        annotations:
+          dashboard: "http://localhost:3000/d/wNf0q_kZk?viewPanel=68&var-instance={{ $labels.instance }}"
+          summary: "Percentage of slow inserts is more than 50% on \"{{ $labels.instance }}\" for the last 15m"
+          description: "A high rate of slow inserts on \"{{ $labels.instance }}\" may be a sign of resource exhaustion
+            for the current load. It is likely more RAM is needed for optimal handling of the current number of active time series."
+
+  # Alerts group for vmagent assumes that the Grafana dashboard
+  # https://grafana.com/grafana/dashboards/12683 is installed.
+  # Please update the `dashboard` annotation according to your setup.
+  - name: vmagent
+    interval: 30s
+    concurrency: 2
+    rules:
+      - alert: PersistentQueueIsDroppingData
+        expr: sum(increase(vm_persistentqueue_bytes_dropped_total[5m])) by (job, instance) > 0
+        for: 10m
+        labels:
+          severity: critical
+        annotations:
+          dashboard: "http://localhost:3000/d/G7Z9GzMGz?viewPanel=49&var-instance={{ $labels.instance }}"
+          summary: "Instance {{ $labels.instance }} is dropping data from persistent queue"
+          description: "Vmagent dropped {{ $value | humanize1024 }} from its persistent queue
+            on instance {{ $labels.instance }} for the last 10m."
+
+      - alert: TooManyScrapeErrors
+        expr: sum(increase(vm_promscrape_scrapes_failed_total[5m])) by (job, instance) > 0
+        for: 15m
+        labels:
+          severity: warning
+        annotations:
+          dashboard: "http://localhost:3000/d/G7Z9GzMGz?viewPanel=31&var-instance={{ $labels.instance }}"
+          summary: "Job \"{{ $labels.job }}\" on instance {{ $labels.instance }} fails to scrape targets for the last 15m"
+
+      - alert: TooManyWriteErrors
+        expr: |
+          (sum(increase(vm_ingestserver_request_errors_total[5m])) by (job, instance)
+          +
+          sum(increase(vmagent_http_request_errors_total[5m])) by (job, instance)) > 0
+        for: 15m
+        labels:
+          severity: warning
+        annotations:
+          dashboard: "http://localhost:3000/d/G7Z9GzMGz?viewPanel=77&var-instance={{ $labels.instance }}"
+          summary: "Job \"{{ $labels.job }}\" on instance {{ $labels.instance }} responds with errors to write requests for the last 15m."
+
+      - alert: TooManyRemoteWriteErrors
+        expr: sum(rate(vmagent_remotewrite_retries_count_total[5m])) by(job, instance, url) > 0
+        for: 15m
+        labels:
+          severity: warning
+        annotations:
+          dashboard: "http://localhost:3000/d/G7Z9GzMGz?viewPanel=61&var-instance={{ $labels.instance }}"
+          summary: "Job \"{{ $labels.job }}\" on instance {{ $labels.instance }} fails to push to remote storage"
+          description: "Vmagent fails to push data via the remote write protocol to destination \"{{ $labels.url }}\"\n
+            Ensure that the destination is up and reachable."
+
diff --git a/deployment/docker/prometheus.yml b/deployment/docker/prometheus.yml
index 17e46a943..451ed70ae 100644
--- a/deployment/docker/prometheus.yml
+++ b/deployment/docker/prometheus.yml
@@ -1,5 +1,5 @@
 global:
-  scrape_interval: 10s 
+  scrape_interval: 10s
 
 scrape_configs:
   - job_name: 'vmagent'
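
The effect of the `query` stub change in patch 1 can be reproduced with a small standalone Go program. The sketch below is only an illustration, not vmalert code: the `metric` type is a simplified stand-in for `datasource.Metric`, and the func map merely mimics the `query`, `first` and `value` template functions from `template_func.go`. It shows that a stub returning an empty result makes validation of the chained expression `query "up" | first | value` (used by the `dynamic` label in rules2-good.rules) fail, while a stub returning a single blank sample lets it pass.

// querystub.go: a minimal sketch (not vmalert code) showing why the `query`
// validation stub must return a non-empty slice. The `metric` type below is a
// simplified stand-in for datasource.Metric.
package main

import (
	"fmt"
	"io"
	"text/template"
)

type metric struct {
	Value float64
}

// newTemplate registers stand-ins for the `query`, `first` and `value`
// template functions and parses the chained expression used by the
// `dynamic` label in rules2-good.rules.
func newTemplate(queryStub func(q string) ([]metric, error)) (*template.Template, error) {
	funcs := template.FuncMap{
		"query": queryStub,
		"first": func(ms []metric) (metric, error) {
			if len(ms) > 0 {
				return ms[0], nil
			}
			return metric{}, fmt.Errorf("first() called on vector with no elements")
		},
		"value": func(m metric) float64 { return m.Value },
	}
	return template.New("dynamic").Funcs(funcs).Parse(
		`{{ $x := query "up" | first | value }}{{ if eq 1.0 $x }}one{{ else }}unknown{{ end }}`)
}

// validate mimics the validation stage: the template is executed against
// stub data only, without touching a real datasource.
func validate(queryStub func(q string) ([]metric, error)) error {
	tpl, err := newTemplate(queryStub)
	if err != nil {
		return err
	}
	return tpl.Execute(io.Discard, nil)
}

func main() {
	// Old stub: an empty result makes the chained `first` call fail, so a valid
	// rule is rejected during validation.
	emptyStub := func(q string) ([]metric, error) { return nil, nil }
	fmt.Println("empty stub:", validate(emptyStub))

	// New stub: a single blank sample lets chained expressions pass validation.
	blankStub := func(q string) ([]metric, error) { return []metric{{}}, nil }
	fmt.Println("blank sample stub:", validate(blankStub))
}

Running it prints a `first() called on vector with no elements` error for the empty stub and `<nil>` for the blank-sample stub, which is the behaviour the validation stage relies on.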