diff --git a/docs/changelog/CHANGELOG.md b/docs/changelog/CHANGELOG.md index c237595fd..392facc3c 100644 --- a/docs/changelog/CHANGELOG.md +++ b/docs/changelog/CHANGELOG.md @@ -18,16 +18,13 @@ See also [LTS releases](https://docs.victoriametrics.com/lts-releases/). ## tip -**Update note 1: The `external_labels` field in vmalert-tool [test file](https://docs.victoriametrics.com/vmalert-tool/#test-file-format) will be deprecated soon. Please use `-external.label` command-line flag instead, in the same way as vmalert uses it. This change is done for the sake of consistency between vmalert and vmalert-tool configuration. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6735).** - -**Update note 2: starting from this release, [vmagent](https://docs.victoriametrics.com/vmagent/) and [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) will no longer add the default port 80/443 for scrape URLs without a specified port. -The value of `instance` label for those scrape targets will be changed from `
:<80|443>` to ``, generating new time series. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6792) for details.** +**Update note: The `external_labels` field in vmalert-tool [test file](https://docs.victoriametrics.com/vmalert-tool/#test-file-format) will be deprecated soon. Please use `-external.label` command-line flag instead, in the same way as vmalert uses it. This change is done for the sake of consistency between vmalert and vmalert-tool configuration. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6735).** * FEATURE: add `/influx/health` health-check handler for Influx endpoints. This is needed as some clients use the health endpoint to determine if the server is healthy and ready for data ingestion. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6653) for the details. * FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl/): add `--vm-backoff-retries`, `--vm-backoff-factor`, `--vm-backoff-min-duration` and `--vm-native-backoff-retries`, `--vm-native-backoff-factor`, `--vm-native-backoff-min-duration` command-line flags. These flags allow to change backoff policy config for import requests to VictoriaMetrics. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6622). * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent/): allow overriding the `sample_limit` option at [scrape_configs](https://docs.victoriametrics.com/sd_configs/#scrape_configs) when a label `__sample_limit__` is specified for target. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6665). Thanks to @zoglam for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6666). * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent/): reduce memory usage when scraping targets with big response body. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6759). 
-* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent/) and [Single-node VictoriaMetrics](https://docs.victoriametrics.com/): stop adding default port 80/443 for scrape URLs without a port. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6792). +* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent/) and [Single-node VictoriaMetrics](https://docs.victoriametrics.com/): stop adding default port 80/443 for scrape URLs without a port. The value in the `instance` label will still carry the port for backward compatibility. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6792). * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add flags `-remoteWrite.retryMinInterval` and `-remoteWrite.retryMaxTime` for adjusting remote-write requests retry policy. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5486). Thanks to @yorik for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/6289). * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert/): add command-line flag `-notifier.headers` to allow configuring additional headers for all requests sent to the corresponding `-notifier.url`. * FEATURE: [vmalert-tool](https://docs.victoriametrics.com/vmalert-tool/): add `-external.label` and `-external.url` command-line flags, in the same way as these flags are supported by vmalert. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6735). diff --git a/lib/promrelabel/scrape_url.go b/lib/promrelabel/scrape_url.go index 7533d7547..cedbe6caf 100644 --- a/lib/promrelabel/scrape_url.go +++ b/lib/promrelabel/scrape_url.go @@ -38,6 +38,13 @@ func GetScrapeURL(labels *promutils.Labels, extraParams map[string][]string) (st address = address[:n] } + // If port is missing, typically it should be 80/443. This WAS written in a label and used as scrapeURL.
+ // However, adding the port by default can cause some issues, see: https://github.com/prometheus/prometheus/pull/9523#issuecomment-2059314966 + // After https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6792: + // - don't add the default port to scrapeURL. + // - continue adding the default port to the label value for backward compatibility and avoid generating new time series. + addressMustWithPort := addMissingPort(address, scheme == "https") + if !strings.HasPrefix(metricsPath, "/") { metricsPath = "/" + metricsPath } @@ -51,7 +58,7 @@ func GetScrapeURL(labels *promutils.Labels, extraParams map[string][]string) (st } paramsStr := url.Values(params).Encode() scrapeURL := buildScrapeURL(scheme, address, metricsPath, optionalQuestion, paramsStr) - return scrapeURL, address + return scrapeURL, addressMustWithPort } func getParamsFromLabels(labels *promutils.Labels, extraParams map[string][]string) map[string][]string { @@ -89,4 +96,25 @@ func buildScrapeURL(scheme, address, metricsPath, optionalQuestion, paramsStr st return s } +func addMissingPort(addr string, isTLS bool) string { + if strings.Contains(addr, ":") { + return addr + } + if isTLS { + return concatTwoStrings(addr, ":443") + } + return concatTwoStrings(addr, ":80") +} + +func concatTwoStrings(x, y string) string { + bb := bbPool.Get() + b := bb.B[:0] + b = append(b, x...) + b = append(b, y...) 
+ s := bytesutil.InternBytes(b) + bb.B = b + bbPool.Put(bb) + return s +} + var bbPool bytesutil.ByteBufferPool diff --git a/lib/promrelabel/scrape_url_test.go b/lib/promrelabel/scrape_url_test.go index 7bdbdb5a1..8660bb9d0 100644 --- a/lib/promrelabel/scrape_url_test.go +++ b/lib/promrelabel/scrape_url_test.go @@ -24,35 +24,39 @@ func TestGetScrapeURL(t *testing.T) { f(`{foo="bar"}`, "", "") // __address__ without port - f(`{__address__="foo"}`, "http://foo/metrics", "foo") + f(`{__address__="foo"}`, "http://foo/metrics", "foo:80") // __address__ with explicit port f(`{__address__="foo:1234"}`, "http://foo:1234/metrics", "foo:1234") // explicit __scheme__ - f(`{__address__="foo",__scheme__="https"}`, "https://foo/metrics", "foo") + f(`{__address__="foo",__scheme__="https"}`, "https://foo/metrics", "foo:443") f(`{__address__="foo:1234",__scheme__="https"}`, "https://foo:1234/metrics", "foo:1234") // explicit __metrics_path__ - f(`{__address__="foo",__metrics_path__="abc"}`, "http://foo/abc", "foo") - f(`{__address__="foo",__metrics_path__="/abc"}`, "http://foo/abc", "foo") - f(`{__address__="foo",__metrics_path__="/ab/c?d=ef&aa=bb"}`, "http://foo/ab/c?d=ef&aa=bb", "foo") + f(`{__address__="foo",__metrics_path__="abc"}`, "http://foo/abc", "foo:80") + f(`{__address__="foo",__metrics_path__="/abc"}`, "http://foo/abc", "foo:80") + f(`{__address__="foo",__metrics_path__="/ab/c?d=ef&aa=bb"}`, "http://foo/ab/c?d=ef&aa=bb", "foo:80") // explitit __param_* - f(`{__address__="foo",__param_x="y"}`, "http://foo/metrics?x=y", "foo") - f(`{__address__="foo",__param_x="y",__param_y="aa"}`, "http://foo/metrics?x=y&y=aa", "foo") - f(`{__address__="foo",__param_x="y",__metrics_path__="?abc=de"}`, "http://foo/?abc=de&x=y", "foo") - f(`{__address__="foo",__param_abc="y",__metrics_path__="?abc=de"}`, "http://foo/?abc=de&abc=y", "foo") + f(`{__address__="foo",__param_x="y"}`, "http://foo/metrics?x=y", "foo:80") + f(`{__address__="foo",__param_x="y",__param_y="aa"}`, 
"http://foo/metrics?x=y&y=aa", "foo:80") + f(`{__address__="foo",__param_x="y",__metrics_path__="?abc=de"}`, "http://foo/?abc=de&x=y", "foo:80") + f(`{__address__="foo",__param_abc="y",__metrics_path__="?abc=de"}`, "http://foo/?abc=de&abc=y", "foo:80") // __address__ with metrics path and/or scheme - f(`{__address__="foo/bar/baz?abc=de"}`, "http://foo/bar/baz?abc=de", "foo") + f(`{__address__="foo/bar/baz?abc=de"}`, "http://foo/bar/baz?abc=de", "foo:80") f(`{__address__="foo:784/bar/baz?abc=de"}`, "http://foo:784/bar/baz?abc=de", "foo:784") f(`{__address__="foo:784/bar/baz?abc=de",__param_xx="yy"}`, "http://foo:784/bar/baz?abc=de&xx=yy", "foo:784") f(`{__address__="foo:784/bar/baz?abc=de",__param_xx="yy",__scheme__="https"}`, "https://foo:784/bar/baz?abc=de&xx=yy", "foo:784") - f(`{__address__="http://foo/bar/baz?abc=de",__param_xx="yy"}`, "http://foo/bar/baz?abc=de&xx=yy", "foo") - f(`{__address__="https://foo/bar/baz?abc=de",__param_xx="yy"}`, "https://foo/bar/baz?abc=de&xx=yy", "foo") + f(`{__address__="http://foo/bar/baz?abc=de",__param_xx="yy"}`, "http://foo/bar/baz?abc=de&xx=yy", "foo:80") + f(`{__address__="https://foo/bar/baz?abc=de",__param_xx="yy"}`, "https://foo/bar/baz?abc=de&xx=yy", "foo:443") - // __address__ with 80 or 443 as port + // __address__ already carry 80/443 port + f(`{__address__="foo:80"}`, "http://foo:80/metrics", "foo:80") + f(`{__address__="foo:443"}`, "http://foo:443/metrics", "foo:443") + f(`{__address__="http://foo"}`, "http://foo/metrics", "foo:80") + f(`{__address__="https://foo"}`, "https://foo/metrics", "foo:443") f(`{__address__="http://foo:80"}`, "http://foo:80/metrics", "foo:80") f(`{__address__="https://foo:443"}`, "https://foo:443/metrics", "foo:443") } diff --git a/lib/promscrape/config_test.go b/lib/promscrape/config_test.go index a73c5b5fd..84c1bd72f 100644 --- a/lib/promscrape/config_test.go +++ b/lib/promscrape/config_test.go @@ -229,7 +229,7 @@ scrape_configs: ScrapeTimeout: defaultScrapeTimeout, MaxScrapeSize: 
maxScrapeSize.N, Labels: promutils.NewLabelsFromMap(map[string]string{ - "instance": "host1", + "instance": "host1:80", "job": "abc", }), jobNameOriginal: "abc", @@ -240,7 +240,7 @@ scrape_configs: ScrapeTimeout: defaultScrapeTimeout, MaxScrapeSize: maxScrapeSize.N, Labels: promutils.NewLabelsFromMap(map[string]string{ - "instance": "host2", + "instance": "host2:443", "job": "abc", }), jobNameOriginal: "abc", @@ -464,7 +464,7 @@ scrape_configs: ScrapeTimeout: defaultScrapeTimeout, MaxScrapeSize: maxScrapeSize.N, Labels: promutils.NewLabelsFromMap(map[string]string{ - "instance": "host1", + "instance": "host1:80", "job": "foo", "qwe": "rty", }), @@ -476,7 +476,7 @@ scrape_configs: ScrapeTimeout: defaultScrapeTimeout, MaxScrapeSize: maxScrapeSize.N, Labels: promutils.NewLabelsFromMap(map[string]string{ - "instance": "host2", + "instance": "host2:80", "job": "foo", "qwe": "rty", }), @@ -695,7 +695,7 @@ scrape_configs: ScrapeTimeout: defaultScrapeTimeout, MaxScrapeSize: maxScrapeSize.N, Labels: promutils.NewLabelsFromMap(map[string]string{ - "instance": "s", + "instance": "s:80", "job": "aa", }), jobNameOriginal: "aa", @@ -717,7 +717,7 @@ scrape_configs: ScrapeTimeout: defaultScrapeTimeout, MaxScrapeSize: maxScrapeSize.N, Labels: promutils.NewLabelsFromMap(map[string]string{ - "instance": "s", + "instance": "s:80", "job": "aa", }), jobNameOriginal: "aa", @@ -739,7 +739,7 @@ scrape_configs: ScrapeTimeout: defaultScrapeTimeout, MaxScrapeSize: maxScrapeSize.N, Labels: promutils.NewLabelsFromMap(map[string]string{ - "instance": "s", + "instance": "s:80", "job": "aa", }), jobNameOriginal: "aa", @@ -761,7 +761,7 @@ scrape_configs: ScrapeTimeout: defaultScrapeTimeout, MaxScrapeSize: maxScrapeSize.N, Labels: promutils.NewLabelsFromMap(map[string]string{ - "instance": "s", + "instance": "s:80", "job": "aa", }), jobNameOriginal: "aa", @@ -853,7 +853,7 @@ scrape_configs: HonorTimestamps: true, DenyRedirects: true, Labels: promutils.NewLabelsFromMap(map[string]string{ - 
"instance": "foo.bar", + "instance": "foo.bar:443", "job": "foo", "x": "y", }), @@ -869,7 +869,7 @@ scrape_configs: HonorTimestamps: true, DenyRedirects: true, Labels: promutils.NewLabelsFromMap(map[string]string{ - "instance": "aaa", + "instance": "aaa:443", "job": "foo", "x": "y", }), @@ -882,7 +882,7 @@ scrape_configs: ScrapeTimeout: 8 * time.Second, MaxScrapeSize: maxScrapeSize.N, Labels: promutils.NewLabelsFromMap(map[string]string{ - "instance": "1.2.3.4", + "instance": "1.2.3.4:80", "job": "qwer", }), jobNameOriginal: "qwer", @@ -893,7 +893,7 @@ scrape_configs: ScrapeTimeout: 8 * time.Second, MaxScrapeSize: maxScrapeSize.N, Labels: promutils.NewLabelsFromMap(map[string]string{ - "instance": "foobar", + "instance": "foobar:80", "job": "asdf", }), jobNameOriginal: "asdf", @@ -1109,7 +1109,7 @@ scrape_configs: MaxScrapeSize: maxScrapeSize.N, Labels: promutils.NewLabelsFromMap(map[string]string{ "foo": "bar", - "instance": "pp", + "instance": "pp:80", "job": "yyy", }), ExternalLabels: promutils.NewLabelsFromMap(map[string]string{