Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2024-12-11 14:53:49 +00:00)
Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files
Commit b42e5627fb
19 changed files with 599 additions and 240 deletions
@@ -1,9 +1,7 @@
package datadog

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
@@ -59,12 +57,7 @@ func insertRows(at *auth.Token, series []parser.Series, extraLabels []prompbmars
Value: ss.Host,
})
for _, tag := range ss.Tags {
n := strings.IndexByte(tag, ':')
if n < 0 {
return fmt.Errorf("cannot find ':' in tag %q", tag)
}
name := tag[:n]
value := tag[n+1:]
name, value := parser.SplitTag(tag)
if name == "host" {
name = "exported_host"
}
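The hunk above swaps the inline `strings.IndexByte` parsing for `parser.SplitTag`, which also handles DataDog tags that carry no value (see the `SplitTag` definition further down in this commit). A minimal standalone sketch of the resulting behavior, with a local `splitTag` helper standing in for `parser.SplitTag` and the example tags chosen purely for illustration:

```go
package main

import (
	"fmt"
	"strings"
)

// splitTag mirrors the behavior of parser.SplitTag from this commit:
// a tag without ":" gets the placeholder value "no_label_value".
func splitTag(tag string) (string, string) {
	n := strings.IndexByte(tag, ':')
	if n < 0 {
		return tag, "no_label_value"
	}
	return tag[:n], tag[n+1:]
}

func main() {
	for _, tag := range []string{"env:prod", "host:node-1", "standalone"} {
		name, value := splitTag(tag)
		if name == "host" {
			// Rename to avoid clashing with the label already set from ss.Host.
			name = "exported_host"
		}
		fmt.Printf("%s=%s\n", name, value)
	}
}
```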
@@ -390,6 +390,10 @@ Alertmanagers.
To avoid duplication of recording rule results and alert state in the VictoriaMetrics server,
don't forget to configure [deduplication](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#deduplication).
It is recommended to set `-dedup.minScrapeInterval` to a value greater than or equal to vmalert's `evaluation_interval`.
If you observe inconsistent or "jumping" values in series produced by vmalert, try disabling the `-datasource.queryTimeAlignment`
command-line flag. Because of alignment, two or more vmalert HA pairs will produce results with the same timestamps.
But due to backfilling (data delivered to the datasource with some delay), the values of such results may differ,
which would affect the deduplication logic and result in "jumping" datapoints.

Alertmanager will automatically deduplicate alerts with identical labels, so ensure that
all `vmalert` instances use the same config.
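For illustration only, here is a rough sketch of what per-interval deduplication does to duplicate samples written by two vmalert HA pairs. This is a simplified stand-in, not VictoriaMetrics' actual deduplication code, and the interval and timestamps are made up:

```go
package main

import (
	"fmt"
	"time"
)

type sample struct {
	ts    time.Time
	value float64
}

// dedup keeps a single sample per interval window (the latest one wins),
// assuming the input is sorted by timestamp. This is only a toy illustration
// of the deduplication idea, not VictoriaMetrics' actual implementation.
func dedup(samples []sample, interval time.Duration) []sample {
	var out []sample
	lastWindow := int64(-1)
	for _, s := range samples {
		w := s.ts.UnixNano() / int64(interval)
		if len(out) > 0 && w == lastWindow {
			out[len(out)-1] = s
			continue
		}
		out = append(out, s)
		lastWindow = w
	}
	return out
}

func main() {
	base := time.Unix(0, 0)
	// Two vmalert HA pairs writing the same series produce duplicates within a window.
	samples := []sample{
		{base, 1},
		{base.Add(1 * time.Second), 1},
		{base.Add(30 * time.Second), 2},
		{base.Add(31 * time.Second), 2},
	}
	for _, s := range dedup(samples, 30*time.Second) {
		fmt.Println(s.ts.Unix(), s.value)
	}
}
```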
@@ -3,7 +3,6 @@ package datadog
import (
	"fmt"
	"net/http"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
@@ -57,12 +56,7 @@ func insertRows(series []parser.Series, extraLabels []prompbmarshal.Label) error
ctx.AddLabel("", ss.Metric)
ctx.AddLabel("host", ss.Host)
for _, tag := range ss.Tags {
n := strings.IndexByte(tag, ':')
if n < 0 {
return fmt.Errorf("cannot find ':' in tag %q", tag)
}
name := tag[:n]
value := tag[n+1:]
name, value := parser.SplitTag(tag)
if name == "host" {
name = "exported_host"
}
@@ -1,12 +1,12 @@
{
  "files": {
    "main.css": "./static/css/main.7e6d0c89.css",
    "main.js": "./static/js/main.84a0d8f8.js",
    "main.js": "./static/js/main.4dca3866.js",
    "static/js/27.939f971b.chunk.js": "./static/js/27.939f971b.chunk.js",
    "index.html": "./index.html"
  },
  "entrypoints": [
    "static/css/main.7e6d0c89.css",
    "static/js/main.84a0d8f8.js"
    "static/js/main.4dca3866.js"
  ]
}
@@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script src="./dashboards/index.js" type="module"></script><script defer="defer" src="./static/js/main.84a0d8f8.js"></script><link href="./static/css/main.7e6d0c89.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script src="./dashboards/index.js" type="module"></script><script defer="defer" src="./static/js/main.4dca3866.js"></script><link href="./static/css/main.7e6d0c89.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
File diff suppressed because one or more lines are too long
@@ -40,13 +40,13 @@ const NestedNav: FC<RecursiveProps> = ({ trace, totalMsec}) => {
<AddCircleRoundedIcon fontSize={"large"} color={"info"} />}
</ListItemIcon>: null}
<Box display="flex" flexDirection="column" flexGrow={0.5} sx={{ ml: 4, mr: 4, width: "100%" }}>
<ListItemText
primary={`duration: ${trace.duration} ms`}
secondary={trace.message}
/>
<ListItemText>
<BorderLinearProgressWithLabel variant="determinate" value={progress} />
</ListItemText>
<ListItemText
primary={trace.message}
secondary={`duration: ${trace.duration} ms`}
/>
</Box>
</ListItemButton>
</ListItem>
@@ -23,6 +23,7 @@ If you use alerting rules or Grafana dashboards, which rely on this metric, then
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add a UI for [query tracing](https://docs.victoriametrics.com/#query-tracing). It can be enabled by clicking the `enable query tracing` checkbox and re-running the query. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2703).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add `-remoteWrite.headers` command-line option for specifying optional HTTP headers to send to the configured `-remoteWrite.url`. For example, `-remoteWrite.headers='Foo:Bar^^Baz:x'` would send `Foo: Bar` and `Baz: x` HTTP headers with every request to `-remoteWrite.url`. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2805).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): push the per-target `scrape_samples_limit` metric to the configured `-remoteWrite.url` if the `sample_limit` option is set for this target in [scrape_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config). See [this feature request](https://github.com/VictoriaMetrics/operator/issues/497).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): attach node-level labels to [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config) targets if `attach_metadata: {"node": true}` is set for `role: endpoints` and `role: endpointslice`. This is a feature backport from Prometheus 2.37 - see [this pull request](https://github.com/prometheus/prometheus/pull/10759).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add the ability to specify additional HTTP headers to send to scrape targets via the `headers` section in `scrape_configs`. This can be used when the scrape target requires custom authorization and authentication, as in [this stackoverflow question](https://stackoverflow.com/questions/66032498/prometheus-scrape-metric-with-custom-header). For example, the following config instructs vmagent to send the `My-Auth: top-secret` and `TenantID: FooBar` headers with each request to `http://host123:8080/metrics`:

```yaml

@@ -45,7 +46,7 @@ scrape_configs:
* `vm_rows_read_per_series` - the number of raw samples read per queried series.
* `vm_series_read_per_query` - the number of series read per query.

* BUGFIX: properly register time series in per-day inverted index. Previously some series could miss registration in the per-day inverted index. This could result in missing time series during querying. The issue has been introduced in [v1.78.0](#v1780).
* BUGFIX: properly register time series in per-day inverted index. Previously some series could miss registration in the per-day inverted index. This could result in missing time series during querying. The issue has been introduced in [v1.78.0](#v1780). See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2798) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2793) issues.
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): allow using the `__name__` label (aka [metric name](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors)) in alerting annotations. For example:

{% raw %}

@@ -57,10 +58,14 @@ scrape_configs:
* BUGFIX: limit the max memory occupied by the cache which stores parsed regular expressions. Previously, too long regular expressions passed in [MetricsQL queries](https://docs.victoriametrics.com/MetricsQL.html) could result in big amounts of used memory (e.g. multiple gigabytes). Now the max cache size for parsed regexps is limited to a few megabytes.
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly handle partial counter resets when calculating [rate](https://docs.victoriametrics.com/MetricsQL.html#rate), [irate](https://docs.victoriametrics.com/MetricsQL.html#irate) and [increase](https://docs.victoriametrics.com/MetricsQL.html#increase) functions. Previously these functions could return zero values after partial counter resets until the counter increases to the last value before the partial counter reset. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2787).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly calculate [histogram_quantile](https://docs.victoriametrics.com/MetricsQL.html#histogram_quantile) over Prometheus buckets with unexpected values. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2819).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly add service-level labels (`__meta_kubernetes_service_*`) to discovered targets for `role: endpointslice` in [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config). Previously these labels were missing. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2823).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): make sure that [stale markers](https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers) are generated with the actual timestamp when an unsuccessful scrape occurs. This should prevent possible time series overlap on scrape target restart in dynamic environments such as Kubernetes.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly reload the changed `-promscrape.config` file when the `-promscrape.configCheckInterval` option is set. The changed config file wasn't reloaded in this case since [v1.69.0](#v1690). See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2786). Thanks to @ttyv for the fix.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly set the `Host` header during target scraping when `proxy_url` is set to an http proxy. Previously the `Host` header was set to the proxy hostname instead of the target hostname. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2794).
* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): assume that the response is complete if `-search.denyPartialResponse` is enabled and up to `-replicationFactor - 1` `vmstorage` nodes are unavailable. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1767).
* BUGFIX: [vmselect](https://docs.victoriametrics.com/#vmselect): update `vm_partial_results_total` metric labels to be consistent with `vm_requests_total` labels.
* FEATURE: accept tags without values when reading data in [DataDog format](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent). Thanks to @PerGon for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2839).


## [v1.78.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.78.0)

@@ -394,6 +394,10 @@ Alertmanagers.
To avoid duplication of recording rule results and alert state in the VictoriaMetrics server,
don't forget to configure [deduplication](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#deduplication).
It is recommended to set `-dedup.minScrapeInterval` to a value greater than or equal to vmalert's `evaluation_interval`.
If you observe inconsistent or "jumping" values in series produced by vmalert, try disabling the `-datasource.queryTimeAlignment`
command-line flag. Because of alignment, two or more vmalert HA pairs will produce results with the same timestamps.
But due to backfilling (data delivered to the datasource with some delay), the values of such results may differ,
which would affect the deduplication logic and result in "jumping" datapoints.

Alertmanager will automatically deduplicate alerts with identical labels, so ensure that
all `vmalert` instances use the same config.
@ -46,7 +46,7 @@ type client struct {
|
|||
|
||||
scrapeURL string
|
||||
scrapeTimeoutSecondsStr string
|
||||
host string
|
||||
hostPort string
|
||||
requestURI string
|
||||
setHeaders func(req *http.Request)
|
||||
setProxyHeaders func(req *http.Request)
|
||||
|
@@ -57,10 +57,21 @@ type client struct {
	disableKeepAlive bool
}

func addMissingPort(addr string, isTLS bool) string {
	if strings.Contains(addr, ":") {
		return addr
	}
	if isTLS {
		return addr + ":443"
	}
	return addr + ":80"
}

func newClient(sw *ScrapeWork) *client {
	var u fasthttp.URI
	u.Update(sw.ScrapeURL)
	host := string(u.Host())
	hostPort := string(u.Host())
	dialAddr := hostPort
	requestURI := string(u.RequestURI())
	isTLS := string(u.Scheme()) == "https"
	var tlsCfg *tls.Config
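For quick reference, here is a standalone usage sketch of the `addMissingPort` helper introduced above, which lets the client keep the target's host/port for the `Host` header while dialing a separate address. The hostnames are illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

// addMissingPort reproduces the helper from the hunk above: append the
// default port only when the address does not already contain one.
func addMissingPort(addr string, isTLS bool) string {
	if strings.Contains(addr, ":") {
		return addr
	}
	if isTLS {
		return addr + ":443"
	}
	return addr + ":80"
}

func main() {
	fmt.Println(addMissingPort("example.com", false))     // example.com:80
	fmt.Println(addMissingPort("example.com", true))      // example.com:443
	fmt.Println(addMissingPort("example.com:9090", true)) // example.com:9090 (port already present)
}
```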
@ -75,7 +86,7 @@ func newClient(sw *ScrapeWork) *client {
|
|||
// like net/http package from Go does.
|
||||
// See https://en.wikipedia.org/wiki/Proxy_server#Web_proxy_servers
|
||||
pu := proxyURL.GetURL()
|
||||
host = pu.Host
|
||||
dialAddr = pu.Host
|
||||
requestURI = sw.ScrapeURL
|
||||
isTLS = pu.Scheme == "https"
|
||||
if isTLS {
|
||||
|
@ -90,19 +101,14 @@ func newClient(sw *ScrapeWork) *client {
|
|||
}
|
||||
proxyURL = &proxy.URL{}
|
||||
}
|
||||
if !strings.Contains(host, ":") {
|
||||
if !isTLS {
|
||||
host += ":80"
|
||||
} else {
|
||||
host += ":443"
|
||||
}
|
||||
}
|
||||
hostPort = addMissingPort(hostPort, isTLS)
|
||||
dialAddr = addMissingPort(dialAddr, isTLS)
|
||||
dialFunc, err := newStatDialFunc(proxyURL, sw.ProxyAuthConfig)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot create dial func: %s", err)
|
||||
}
|
||||
hc := &fasthttp.HostClient{
|
||||
Addr: host,
|
||||
Addr: dialAddr,
|
||||
Name: "vm_promscrape",
|
||||
Dial: dialFunc,
|
||||
IsTLS: isTLS,
|
||||
|
@ -152,7 +158,7 @@ func newClient(sw *ScrapeWork) *client {
|
|||
sc: sc,
|
||||
scrapeURL: sw.ScrapeURL,
|
||||
scrapeTimeoutSecondsStr: fmt.Sprintf("%.3f", sw.ScrapeTimeout.Seconds()),
|
||||
host: host,
|
||||
hostPort: hostPort,
|
||||
requestURI: requestURI,
|
||||
setHeaders: func(req *http.Request) { sw.AuthConfig.SetHeaders(req, true) },
|
||||
setProxyHeaders: setProxyHeaders,
|
||||
|
@ -218,7 +224,7 @@ func (c *client) ReadData(dst []byte) ([]byte, error) {
|
|||
deadline := time.Now().Add(c.hc.ReadTimeout)
|
||||
req := fasthttp.AcquireRequest()
|
||||
req.SetRequestURI(c.requestURI)
|
||||
req.Header.SetHost(c.host)
|
||||
req.Header.SetHost(c.hostPort)
|
||||
// The following `Accept` header has been copied from Prometheus sources.
|
||||
// See https://github.com/prometheus/prometheus/blob/f9d21f10ecd2a343a381044f131ea4e46381ce09/scrape/scrape.go#L532 .
|
||||
// This is needed as a workaround for scraping stupid Java-based servers such as Spring Boot.
|
||||
|
|
|
@@ -1143,7 +1143,7 @@ func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabel
droppedTargetsMap.Register(originalLabels)
return nil, nil
}
addressRelabeled = addMissingPort(schemeRelabeled, addressRelabeled)
addressRelabeled = addMissingPort(addressRelabeled, schemeRelabeled == "https")
metricsPathRelabeled := promrelabel.GetLabelValueByName(labels, "__metrics_path__")
if metricsPathRelabeled == "" {
metricsPathRelabeled = "/metrics"
@ -1360,18 +1360,6 @@ func appendLabel(dst []prompbmarshal.Label, name, value string) []prompbmarshal.
|
|||
})
|
||||
}
|
||||
|
||||
func addMissingPort(scheme, target string) string {
|
||||
if strings.Contains(target, ":") {
|
||||
return target
|
||||
}
|
||||
if scheme == "https" {
|
||||
target += ":443"
|
||||
} else {
|
||||
target += ":80"
|
||||
}
|
||||
return target
|
||||
}
|
||||
|
||||
const (
|
||||
defaultScrapeInterval = time.Minute
|
||||
defaultScrapeTimeout = 10 * time.Second
|
||||
|
|
|
@ -345,7 +345,7 @@ func (gw *groupWatcher) startWatchersForRole(role string, aw *apiWatcher) {
|
|||
gw.startWatchersForRole("pod", nil)
|
||||
gw.startWatchersForRole("service", nil)
|
||||
}
|
||||
if gw.attachNodeMetadata && role == "pod" {
|
||||
if gw.attachNodeMetadata && (role == "pod" || role == "endpoints" || role == "endpointslice") {
|
||||
gw.startWatchersForRole("node", nil)
|
||||
}
|
||||
paths := getAPIPathsWithNamespaces(role, gw.namespaces, gw.selectors)
|
||||
|
@ -803,8 +803,8 @@ func (uw *urlWatcher) maybeUpdateDependedScrapeWorksLocked() {
|
|||
uwx.needRecreateScrapeWorks = true
|
||||
continue
|
||||
}
|
||||
if attachNodeMetadata && role == "node" && uwx.role == "pod" {
|
||||
// pod objects depend on node objects if attachNodeMetadata is set
|
||||
if attachNodeMetadata && role == "node" && (uwx.role == "pod" || uwx.role == "endpoints" || uwx.role == "endpointslice") {
|
||||
// pod, endpoints and endpointslice objects depend on node objects if attachNodeMetadata is set
|
||||
uwx.needRecreateScrapeWorks = true
|
||||
continue
|
||||
}
|
||||
|
|
|
@ -114,13 +114,13 @@ func TestParseEndpointsListSuccess(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestGetEndpointLabels(t *testing.T) {
|
||||
func TestGetEndpointsLabels(t *testing.T) {
|
||||
type testArgs struct {
|
||||
containerPorts map[string][]ContainerPort
|
||||
endpointPorts []EndpointPort
|
||||
}
|
||||
f := func(name string, args testArgs, wantLabels [][]prompbmarshal.Label) {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
f := func(t *testing.T, args testArgs, wantLabels [][]prompbmarshal.Label) {
|
||||
t.Helper()
|
||||
eps := Endpoints{
|
||||
Metadata: ObjectMeta{
|
||||
Name: "test-eps",
|
||||
|
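The test changes in this file move from a single table-driven helper that created its own subtests to explicit `t.Run` blocks calling a `t.Helper`-marked assertion helper. A hypothetical minimal sketch of that pattern (names and values are illustrative, not taken from the actual tests):

```go
package kubernetes_test

import "testing"

// Each case is wrapped in t.Run, and the shared helper takes *testing.T and
// calls t.Helper so failures are reported at the call site inside the subtest.
func TestExample(t *testing.T) {
	f := func(t *testing.T, got, want string) {
		t.Helper()
		if got != want {
			t.Fatalf("unexpected result; got %q; want %q", got, want)
		}
	}

	t.Run("first case", func(t *testing.T) {
		f(t, "a", "a")
	})
	t.Run("second case", func(t *testing.T) {
		f(t, "b", "b")
	})
}
```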
@ -148,6 +148,8 @@ func TestGetEndpointLabels(t *testing.T) {
|
|||
Namespace: "default",
|
||||
},
|
||||
Spec: ServiceSpec{
|
||||
ClusterIP: "1.2.3.4",
|
||||
Type: "service-type",
|
||||
Ports: []ServicePort{
|
||||
{
|
||||
Name: "test-port",
|
||||
|
@ -158,10 +160,28 @@ func TestGetEndpointLabels(t *testing.T) {
|
|||
}
|
||||
pod := Pod{
|
||||
Metadata: ObjectMeta{
|
||||
UID: "pod-uid",
|
||||
Name: "test-pod",
|
||||
Namespace: "default",
|
||||
},
|
||||
Status: PodStatus{PodIP: "192.168.15.1"},
|
||||
Spec: PodSpec{
|
||||
NodeName: "test-node",
|
||||
},
|
||||
Status: PodStatus{
|
||||
Phase: "abc",
|
||||
PodIP: "192.168.15.1",
|
||||
HostIP: "4.5.6.7",
|
||||
},
|
||||
}
|
||||
node := Node{
|
||||
Metadata: ObjectMeta{
|
||||
Labels: []prompbmarshal.Label{
|
||||
{
|
||||
Name: "node-label",
|
||||
Value: "xyz",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for cn, ports := range args.containerPorts {
|
||||
pod.Spec.Containers = append(pod.Spec.Containers, Container{Name: cn, Ports: ports})
|
||||
|
@ -180,7 +200,14 @@ func TestGetEndpointLabels(t *testing.T) {
|
|||
"default/test-eps": &svc,
|
||||
},
|
||||
},
|
||||
"node": {
|
||||
role: "node",
|
||||
objectsByKey: map[string]object{
|
||||
"/test-node": &node,
|
||||
},
|
||||
},
|
||||
}
|
||||
gw.attachNodeMetadata = true
|
||||
var sortedLabelss [][]prompbmarshal.Label
|
||||
gotLabels := eps.getTargetLabels(&gw)
|
||||
for _, lbs := range gotLabels {
|
||||
|
@ -188,16 +215,16 @@ func TestGetEndpointLabels(t *testing.T) {
|
|||
}
|
||||
if !areEqualLabelss(sortedLabelss, wantLabels) {
|
||||
t.Fatalf("unexpected labels:\ngot\n%v\nwant\n%v", sortedLabelss, wantLabels)
|
||||
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
f("1 port from endpoint", testArgs{
|
||||
t.Run("1 port from endpoint", func(t *testing.T) {
|
||||
f(t, testArgs{
|
||||
endpointPorts: []EndpointPort{
|
||||
{
|
||||
Name: "web",
|
||||
Port: 8081,
|
||||
Protocol: "foobar",
|
||||
},
|
||||
},
|
||||
}, [][]prompbmarshal.Label{
|
||||
|
@ -206,32 +233,39 @@ func TestGetEndpointLabels(t *testing.T) {
|
|||
"__meta_kubernetes_endpoint_address_target_kind": "Pod",
|
||||
"__meta_kubernetes_endpoint_address_target_name": "test-pod",
|
||||
"__meta_kubernetes_endpoint_port_name": "web",
|
||||
"__meta_kubernetes_endpoint_port_protocol": "",
|
||||
"__meta_kubernetes_endpoint_port_protocol": "foobar",
|
||||
"__meta_kubernetes_endpoint_ready": "true",
|
||||
"__meta_kubernetes_endpoints_name": "test-eps",
|
||||
"__meta_kubernetes_namespace": "default",
|
||||
"__meta_kubernetes_pod_host_ip": "",
|
||||
"__meta_kubernetes_node_label_node_label": "xyz",
|
||||
"__meta_kubernetes_node_labelpresent_node_label": "true",
|
||||
"__meta_kubernetes_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_host_ip": "4.5.6.7",
|
||||
"__meta_kubernetes_pod_ip": "192.168.15.1",
|
||||
"__meta_kubernetes_pod_name": "test-pod",
|
||||
"__meta_kubernetes_pod_node_name": "",
|
||||
"__meta_kubernetes_pod_phase": "",
|
||||
"__meta_kubernetes_pod_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_phase": "abc",
|
||||
"__meta_kubernetes_pod_ready": "unknown",
|
||||
"__meta_kubernetes_pod_uid": "",
|
||||
"__meta_kubernetes_service_cluster_ip": "",
|
||||
"__meta_kubernetes_pod_uid": "pod-uid",
|
||||
"__meta_kubernetes_service_cluster_ip": "1.2.3.4",
|
||||
"__meta_kubernetes_service_name": "test-eps",
|
||||
"__meta_kubernetes_service_type": "",
|
||||
"__meta_kubernetes_service_type": "service-type",
|
||||
}),
|
||||
})
|
||||
})
|
||||
|
||||
f("1 port from endpoint and 1 from pod", testArgs{
|
||||
t.Run("1 port from endpoint and 1 from pod", func(t *testing.T) {
|
||||
f(t, testArgs{
|
||||
containerPorts: map[string][]ContainerPort{"metrics": {{
|
||||
Name: "http-metrics",
|
||||
ContainerPort: 8428,
|
||||
Protocol: "foobar",
|
||||
}}},
|
||||
endpointPorts: []EndpointPort{
|
||||
{
|
||||
Name: "web",
|
||||
Port: 8081,
|
||||
Protocol: "https",
|
||||
},
|
||||
},
|
||||
}, [][]prompbmarshal.Label{
|
||||
|
@ -240,50 +274,60 @@ func TestGetEndpointLabels(t *testing.T) {
|
|||
"__meta_kubernetes_endpoint_address_target_kind": "Pod",
|
||||
"__meta_kubernetes_endpoint_address_target_name": "test-pod",
|
||||
"__meta_kubernetes_endpoint_port_name": "web",
|
||||
"__meta_kubernetes_endpoint_port_protocol": "",
|
||||
"__meta_kubernetes_endpoint_port_protocol": "https",
|
||||
"__meta_kubernetes_endpoint_ready": "true",
|
||||
"__meta_kubernetes_endpoints_name": "test-eps",
|
||||
"__meta_kubernetes_namespace": "default",
|
||||
"__meta_kubernetes_pod_host_ip": "",
|
||||
"__meta_kubernetes_node_label_node_label": "xyz",
|
||||
"__meta_kubernetes_node_labelpresent_node_label": "true",
|
||||
"__meta_kubernetes_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_host_ip": "4.5.6.7",
|
||||
"__meta_kubernetes_pod_ip": "192.168.15.1",
|
||||
"__meta_kubernetes_pod_name": "test-pod",
|
||||
"__meta_kubernetes_pod_node_name": "",
|
||||
"__meta_kubernetes_pod_phase": "",
|
||||
"__meta_kubernetes_pod_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_phase": "abc",
|
||||
"__meta_kubernetes_pod_ready": "unknown",
|
||||
"__meta_kubernetes_pod_uid": "",
|
||||
"__meta_kubernetes_service_cluster_ip": "",
|
||||
"__meta_kubernetes_pod_uid": "pod-uid",
|
||||
"__meta_kubernetes_service_cluster_ip": "1.2.3.4",
|
||||
"__meta_kubernetes_service_name": "test-eps",
|
||||
"__meta_kubernetes_service_type": "",
|
||||
"__meta_kubernetes_service_type": "service-type",
|
||||
}),
|
||||
discoveryutils.GetSortedLabels(map[string]string{
|
||||
"__address__": "192.168.15.1:8428",
|
||||
"__meta_kubernetes_namespace": "default",
|
||||
"__meta_kubernetes_node_label_node_label": "xyz",
|
||||
"__meta_kubernetes_node_labelpresent_node_label": "true",
|
||||
"__meta_kubernetes_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_container_name": "metrics",
|
||||
"__meta_kubernetes_pod_container_port_name": "http-metrics",
|
||||
"__meta_kubernetes_pod_container_port_number": "8428",
|
||||
"__meta_kubernetes_pod_container_port_protocol": "",
|
||||
"__meta_kubernetes_pod_host_ip": "",
|
||||
"__meta_kubernetes_pod_container_port_protocol": "foobar",
|
||||
"__meta_kubernetes_pod_host_ip": "4.5.6.7",
|
||||
"__meta_kubernetes_pod_ip": "192.168.15.1",
|
||||
"__meta_kubernetes_pod_name": "test-pod",
|
||||
"__meta_kubernetes_pod_node_name": "",
|
||||
"__meta_kubernetes_pod_phase": "",
|
||||
"__meta_kubernetes_pod_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_phase": "abc",
|
||||
"__meta_kubernetes_pod_ready": "unknown",
|
||||
"__meta_kubernetes_pod_uid": "",
|
||||
"__meta_kubernetes_service_cluster_ip": "",
|
||||
"__meta_kubernetes_pod_uid": "pod-uid",
|
||||
"__meta_kubernetes_service_cluster_ip": "1.2.3.4",
|
||||
"__meta_kubernetes_service_name": "test-eps",
|
||||
"__meta_kubernetes_service_type": "",
|
||||
"__meta_kubernetes_service_type": "service-type",
|
||||
}),
|
||||
})
|
||||
})
|
||||
|
||||
f("1 port from endpoint", testArgs{
|
||||
t.Run("1 port from endpoint", func(t *testing.T) {
|
||||
f(t, testArgs{
|
||||
containerPorts: map[string][]ContainerPort{"metrics": {{
|
||||
Name: "web",
|
||||
ContainerPort: 8428,
|
||||
Protocol: "sdc",
|
||||
}}},
|
||||
endpointPorts: []EndpointPort{
|
||||
{
|
||||
Name: "web",
|
||||
Port: 8428,
|
||||
Protocol: "xabc",
|
||||
},
|
||||
},
|
||||
}, [][]prompbmarshal.Label{
|
||||
|
@ -292,24 +336,28 @@ func TestGetEndpointLabels(t *testing.T) {
|
|||
"__meta_kubernetes_endpoint_address_target_kind": "Pod",
|
||||
"__meta_kubernetes_endpoint_address_target_name": "test-pod",
|
||||
"__meta_kubernetes_endpoint_port_name": "web",
|
||||
"__meta_kubernetes_endpoint_port_protocol": "",
|
||||
"__meta_kubernetes_endpoint_port_protocol": "xabc",
|
||||
"__meta_kubernetes_endpoint_ready": "true",
|
||||
"__meta_kubernetes_endpoints_name": "test-eps",
|
||||
"__meta_kubernetes_namespace": "default",
|
||||
"__meta_kubernetes_node_label_node_label": "xyz",
|
||||
"__meta_kubernetes_node_labelpresent_node_label": "true",
|
||||
"__meta_kubernetes_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_container_name": "metrics",
|
||||
"__meta_kubernetes_pod_container_port_name": "web",
|
||||
"__meta_kubernetes_pod_container_port_number": "8428",
|
||||
"__meta_kubernetes_pod_container_port_protocol": "",
|
||||
"__meta_kubernetes_pod_host_ip": "",
|
||||
"__meta_kubernetes_pod_container_port_protocol": "sdc",
|
||||
"__meta_kubernetes_pod_host_ip": "4.5.6.7",
|
||||
"__meta_kubernetes_pod_ip": "192.168.15.1",
|
||||
"__meta_kubernetes_pod_name": "test-pod",
|
||||
"__meta_kubernetes_pod_node_name": "",
|
||||
"__meta_kubernetes_pod_phase": "",
|
||||
"__meta_kubernetes_pod_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_phase": "abc",
|
||||
"__meta_kubernetes_pod_ready": "unknown",
|
||||
"__meta_kubernetes_pod_uid": "",
|
||||
"__meta_kubernetes_service_cluster_ip": "",
|
||||
"__meta_kubernetes_pod_uid": "pod-uid",
|
||||
"__meta_kubernetes_service_cluster_ip": "1.2.3.4",
|
||||
"__meta_kubernetes_service_name": "test-eps",
|
||||
"__meta_kubernetes_service_type": "",
|
||||
"__meta_kubernetes_service_type": "service-type",
|
||||
}),
|
||||
})
|
||||
})
|
||||
}
|
||||
|
|
|
@ -38,8 +38,11 @@ func parseEndpointSlice(data []byte) (object, error) {
|
|||
//
|
||||
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#endpointslices
|
||||
func (eps *EndpointSlice) getTargetLabels(gw *groupWatcher) []map[string]string {
|
||||
// The associated service name is stored in kubernetes.io/service-name label.
|
||||
// See https://kubernetes.io/docs/reference/labels-annotations-taints/#kubernetesioservice-name
|
||||
svcName := eps.Metadata.Labels.GetByName("kubernetes.io/service-name")
|
||||
var svc *Service
|
||||
if o := gw.getObjectByRoleLocked("service", eps.Metadata.Namespace, eps.Metadata.Name); o != nil {
|
||||
if o := gw.getObjectByRoleLocked("service", eps.Metadata.Namespace, svcName); o != nil {
|
||||
svc = o.(*Service)
|
||||
}
|
||||
podPortsSeen := make(map[*Pod][]int)
|
||||
|
|
|
@ -224,3 +224,281 @@ func TestParseEndpointSliceListSuccess(t *testing.T) {
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
func TestGetEndpointsliceLabels(t *testing.T) {
|
||||
type testArgs struct {
|
||||
containerPorts map[string][]ContainerPort
|
||||
endpointPorts []EndpointPort
|
||||
}
|
||||
f := func(t *testing.T, args testArgs, wantLabels [][]prompbmarshal.Label) {
|
||||
t.Helper()
|
||||
eps := EndpointSlice{
|
||||
Metadata: ObjectMeta{
|
||||
Name: "test-eps",
|
||||
Namespace: "default",
|
||||
Labels: discoveryutils.GetSortedLabels(map[string]string{
|
||||
"kubernetes.io/service-name": "test-svc",
|
||||
}),
|
||||
},
|
||||
Endpoints: []Endpoint{
|
||||
{
|
||||
Addresses: []string{
|
||||
"10.13.15.15",
|
||||
},
|
||||
Conditions: EndpointConditions{
|
||||
Ready: true,
|
||||
},
|
||||
Hostname: "foo.bar",
|
||||
TargetRef: ObjectReference{
|
||||
Kind: "Pod",
|
||||
Namespace: "default",
|
||||
Name: "test-pod",
|
||||
},
|
||||
Topology: map[string]string{
|
||||
"x": "y",
|
||||
},
|
||||
},
|
||||
},
|
||||
AddressType: "foobar",
|
||||
Ports: args.endpointPorts,
|
||||
}
|
||||
svc := Service{
|
||||
Metadata: ObjectMeta{
|
||||
Name: "test-svc",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: ServiceSpec{
|
||||
ClusterIP: "1.2.3.4",
|
||||
Type: "service-type",
|
||||
Ports: []ServicePort{
|
||||
{
|
||||
Name: "test-port",
|
||||
Port: 8081,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
pod := Pod{
|
||||
Metadata: ObjectMeta{
|
||||
UID: "pod-uid",
|
||||
Name: "test-pod",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: PodSpec{
|
||||
NodeName: "test-node",
|
||||
},
|
||||
Status: PodStatus{
|
||||
Phase: "abc",
|
||||
PodIP: "192.168.15.1",
|
||||
HostIP: "4.5.6.7",
|
||||
},
|
||||
}
|
||||
node := Node{
|
||||
Metadata: ObjectMeta{
|
||||
Labels: []prompbmarshal.Label{
|
||||
{
|
||||
Name: "node-label",
|
||||
Value: "xyz",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for cn, ports := range args.containerPorts {
|
||||
pod.Spec.Containers = append(pod.Spec.Containers, Container{Name: cn, Ports: ports})
|
||||
}
|
||||
var gw groupWatcher
|
||||
gw.m = map[string]*urlWatcher{
|
||||
"pod": {
|
||||
role: "pod",
|
||||
objectsByKey: map[string]object{
|
||||
"default/test-pod": &pod,
|
||||
},
|
||||
},
|
||||
"service": {
|
||||
role: "service",
|
||||
objectsByKey: map[string]object{
|
||||
"default/test-svc": &svc,
|
||||
},
|
||||
},
|
||||
"node": {
|
||||
role: "node",
|
||||
objectsByKey: map[string]object{
|
||||
"/test-node": &node,
|
||||
},
|
||||
},
|
||||
}
|
||||
gw.attachNodeMetadata = true
|
||||
var sortedLabelss [][]prompbmarshal.Label
|
||||
gotLabels := eps.getTargetLabels(&gw)
|
||||
for _, lbs := range gotLabels {
|
||||
sortedLabelss = append(sortedLabelss, discoveryutils.GetSortedLabels(lbs))
|
||||
}
|
||||
if !areEqualLabelss(sortedLabelss, wantLabels) {
|
||||
t.Fatalf("unexpected labels:\ngot\n%v\nwant\n%v", sortedLabelss, wantLabels)
|
||||
}
|
||||
}
|
||||
|
||||
t.Run("1 port from endpoint", func(t *testing.T) {
|
||||
f(t, testArgs{
|
||||
endpointPorts: []EndpointPort{
|
||||
{
|
||||
Name: "web",
|
||||
Port: 8081,
|
||||
Protocol: "foobar",
|
||||
},
|
||||
},
|
||||
}, [][]prompbmarshal.Label{
|
||||
discoveryutils.GetSortedLabels(map[string]string{
|
||||
"__address__": "10.13.15.15:8081",
|
||||
"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
|
||||
"__meta_kubernetes_endpointslice_address_target_name": "test-pod",
|
||||
"__meta_kubernetes_endpointslice_address_type": "foobar",
|
||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||
"__meta_kubernetes_endpointslice_endpoint_hostname": "foo.bar",
|
||||
"__meta_kubernetes_endpointslice_endpoint_topology_present_x": "true",
|
||||
"__meta_kubernetes_endpointslice_endpoint_topology_x": "y",
|
||||
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "test-svc",
|
||||
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
|
||||
"__meta_kubernetes_endpointslice_name": "test-eps",
|
||||
"__meta_kubernetes_endpointslice_port": "8081",
|
||||
"__meta_kubernetes_endpointslice_port_name": "web",
|
||||
"__meta_kubernetes_endpointslice_port_protocol": "foobar",
|
||||
"__meta_kubernetes_namespace": "default",
|
||||
"__meta_kubernetes_node_label_node_label": "xyz",
|
||||
"__meta_kubernetes_node_labelpresent_node_label": "true",
|
||||
"__meta_kubernetes_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_host_ip": "4.5.6.7",
|
||||
"__meta_kubernetes_pod_ip": "192.168.15.1",
|
||||
"__meta_kubernetes_pod_name": "test-pod",
|
||||
"__meta_kubernetes_pod_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_phase": "abc",
|
||||
"__meta_kubernetes_pod_ready": "unknown",
|
||||
"__meta_kubernetes_pod_uid": "pod-uid",
|
||||
"__meta_kubernetes_service_cluster_ip": "1.2.3.4",
|
||||
"__meta_kubernetes_service_name": "test-svc",
|
||||
"__meta_kubernetes_service_type": "service-type",
|
||||
}),
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("1 port from endpoint and 1 from pod", func(t *testing.T) {
|
||||
f(t, testArgs{
|
||||
containerPorts: map[string][]ContainerPort{"metrics": {{
|
||||
Name: "http-metrics",
|
||||
ContainerPort: 8428,
|
||||
Protocol: "foobar",
|
||||
}}},
|
||||
endpointPorts: []EndpointPort{
|
||||
{
|
||||
Name: "web",
|
||||
Port: 8081,
|
||||
Protocol: "https",
|
||||
},
|
||||
},
|
||||
}, [][]prompbmarshal.Label{
|
||||
discoveryutils.GetSortedLabels(map[string]string{
|
||||
"__address__": "10.13.15.15:8081",
|
||||
"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
|
||||
"__meta_kubernetes_endpointslice_address_target_name": "test-pod",
|
||||
"__meta_kubernetes_endpointslice_address_type": "foobar",
|
||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||
"__meta_kubernetes_endpointslice_endpoint_hostname": "foo.bar",
|
||||
"__meta_kubernetes_endpointslice_endpoint_topology_present_x": "true",
|
||||
"__meta_kubernetes_endpointslice_endpoint_topology_x": "y",
|
||||
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "test-svc",
|
||||
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
|
||||
"__meta_kubernetes_endpointslice_name": "test-eps",
|
||||
"__meta_kubernetes_endpointslice_port": "8081",
|
||||
"__meta_kubernetes_endpointslice_port_name": "web",
|
||||
"__meta_kubernetes_endpointslice_port_protocol": "https",
|
||||
"__meta_kubernetes_namespace": "default",
|
||||
"__meta_kubernetes_node_label_node_label": "xyz",
|
||||
"__meta_kubernetes_node_labelpresent_node_label": "true",
|
||||
"__meta_kubernetes_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_host_ip": "4.5.6.7",
|
||||
"__meta_kubernetes_pod_ip": "192.168.15.1",
|
||||
"__meta_kubernetes_pod_name": "test-pod",
|
||||
"__meta_kubernetes_pod_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_phase": "abc",
|
||||
"__meta_kubernetes_pod_ready": "unknown",
|
||||
"__meta_kubernetes_pod_uid": "pod-uid",
|
||||
"__meta_kubernetes_service_cluster_ip": "1.2.3.4",
|
||||
"__meta_kubernetes_service_name": "test-svc",
|
||||
"__meta_kubernetes_service_type": "service-type",
|
||||
}),
|
||||
discoveryutils.GetSortedLabels(map[string]string{
|
||||
"__address__": "192.168.15.1:8428",
|
||||
"__meta_kubernetes_namespace": "default",
|
||||
"__meta_kubernetes_node_label_node_label": "xyz",
|
||||
"__meta_kubernetes_node_labelpresent_node_label": "true",
|
||||
"__meta_kubernetes_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_container_name": "metrics",
|
||||
"__meta_kubernetes_pod_container_port_name": "http-metrics",
|
||||
"__meta_kubernetes_pod_container_port_number": "8428",
|
||||
"__meta_kubernetes_pod_container_port_protocol": "foobar",
|
||||
"__meta_kubernetes_pod_host_ip": "4.5.6.7",
|
||||
"__meta_kubernetes_pod_ip": "192.168.15.1",
|
||||
"__meta_kubernetes_pod_name": "test-pod",
|
||||
"__meta_kubernetes_pod_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_phase": "abc",
|
||||
"__meta_kubernetes_pod_ready": "unknown",
|
||||
"__meta_kubernetes_pod_uid": "pod-uid",
|
||||
"__meta_kubernetes_service_cluster_ip": "1.2.3.4",
|
||||
"__meta_kubernetes_service_name": "test-svc",
|
||||
"__meta_kubernetes_service_type": "service-type",
|
||||
}),
|
||||
})
|
||||
})
|
||||
|
||||
t.Run("1 port from endpoint", func(t *testing.T) {
|
||||
f(t, testArgs{
|
||||
containerPorts: map[string][]ContainerPort{"metrics": {{
|
||||
Name: "web",
|
||||
ContainerPort: 8428,
|
||||
Protocol: "sdc",
|
||||
}}},
|
||||
endpointPorts: []EndpointPort{
|
||||
{
|
||||
Name: "web",
|
||||
Port: 8428,
|
||||
Protocol: "xabc",
|
||||
},
|
||||
},
|
||||
}, [][]prompbmarshal.Label{
|
||||
discoveryutils.GetSortedLabels(map[string]string{
|
||||
"__address__": "10.13.15.15:8428",
|
||||
"__meta_kubernetes_endpointslice_address_target_kind": "Pod",
|
||||
"__meta_kubernetes_endpointslice_address_target_name": "test-pod",
|
||||
"__meta_kubernetes_endpointslice_address_type": "foobar",
|
||||
"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
|
||||
"__meta_kubernetes_endpointslice_endpoint_hostname": "foo.bar",
|
||||
"__meta_kubernetes_endpointslice_endpoint_topology_present_x": "true",
|
||||
"__meta_kubernetes_endpointslice_endpoint_topology_x": "y",
|
||||
"__meta_kubernetes_endpointslice_label_kubernetes_io_service_name": "test-svc",
|
||||
"__meta_kubernetes_endpointslice_labelpresent_kubernetes_io_service_name": "true",
|
||||
"__meta_kubernetes_endpointslice_name": "test-eps",
|
||||
"__meta_kubernetes_endpointslice_port": "8428",
|
||||
"__meta_kubernetes_endpointslice_port_name": "web",
|
||||
"__meta_kubernetes_endpointslice_port_protocol": "xabc",
|
||||
"__meta_kubernetes_namespace": "default",
|
||||
"__meta_kubernetes_node_label_node_label": "xyz",
|
||||
"__meta_kubernetes_node_labelpresent_node_label": "true",
|
||||
"__meta_kubernetes_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_container_name": "metrics",
|
||||
"__meta_kubernetes_pod_container_port_name": "web",
|
||||
"__meta_kubernetes_pod_container_port_number": "8428",
|
||||
"__meta_kubernetes_pod_container_port_protocol": "sdc",
|
||||
"__meta_kubernetes_pod_host_ip": "4.5.6.7",
|
||||
"__meta_kubernetes_pod_ip": "192.168.15.1",
|
||||
"__meta_kubernetes_pod_name": "test-pod",
|
||||
"__meta_kubernetes_pod_node_name": "test-node",
|
||||
"__meta_kubernetes_pod_phase": "abc",
|
||||
"__meta_kubernetes_pod_ready": "unknown",
|
||||
"__meta_kubernetes_pod_uid": "pod-uid",
|
||||
"__meta_kubernetes_service_cluster_ip": "1.2.3.4",
|
||||
"__meta_kubernetes_service_name": "test-svc",
|
||||
"__meta_kubernetes_service_type": "service-type",
|
||||
}),
|
||||
})
|
||||
})
|
||||
}
|
||||
|
|
|
@ -48,6 +48,16 @@ type Client struct {
|
|||
sendFullURL bool
|
||||
}
|
||||
|
||||
func addMissingPort(addr string, isTLS bool) string {
|
||||
if strings.Contains(addr, ":") {
|
||||
return addr
|
||||
}
|
||||
if isTLS {
|
||||
return addr + ":443"
|
||||
}
|
||||
return addr + ":80"
|
||||
}
|
||||
|
||||
// NewClient returns new Client for the given args.
|
||||
func NewClient(apiServer string, ac *promauth.Config, proxyURL *proxy.URL, proxyAC *promauth.Config) (*Client, error) {
|
||||
var u fasthttp.URI
|
||||
|
@ -64,6 +74,7 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL *proxy.URL, proxy
|
|||
}
|
||||
|
||||
hostPort := string(u.Host())
|
||||
dialAddr := hostPort
|
||||
isTLS := string(u.Scheme()) == "https"
|
||||
var tlsCfg *tls.Config
|
||||
if isTLS {
|
||||
|
@ -76,7 +87,7 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL *proxy.URL, proxy
|
|||
// like net/http package from Go does.
|
||||
// See https://en.wikipedia.org/wiki/Proxy_server#Web_proxy_servers
|
||||
pu := proxyURL.GetURL()
|
||||
hostPort = pu.Host
|
||||
dialAddr = pu.Host
|
||||
isTLS = pu.Scheme == "https"
|
||||
if isTLS {
|
||||
tlsCfg = proxyAC.NewTLSConfig()
|
||||
|
@ -87,13 +98,8 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL *proxy.URL, proxy
|
|||
}
|
||||
proxyURL = &proxy.URL{}
|
||||
}
|
||||
if !strings.Contains(hostPort, ":") {
|
||||
port := "80"
|
||||
if isTLS {
|
||||
port = "443"
|
||||
}
|
||||
hostPort = net.JoinHostPort(hostPort, port)
|
||||
}
|
||||
hostPort = addMissingPort(hostPort, isTLS)
|
||||
dialAddr = addMissingPort(dialAddr, isTLS)
|
||||
if dialFunc == nil {
|
||||
var err error
|
||||
dialFunc, err = proxyURL.NewDialFunc(proxyAC)
|
||||
|
@ -102,7 +108,7 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL *proxy.URL, proxy
|
|||
}
|
||||
}
|
||||
hc := &fasthttp.HostClient{
|
||||
Addr: hostPort,
|
||||
Addr: dialAddr,
|
||||
Name: "vm_promscrape/discovery",
|
||||
IsTLS: isTLS,
|
||||
TLSConfig: tlsCfg,
|
||||
|
@ -113,7 +119,7 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL *proxy.URL, proxy
|
|||
Dial: dialFunc,
|
||||
}
|
||||
blockingClient := &fasthttp.HostClient{
|
||||
Addr: hostPort,
|
||||
Addr: dialAddr,
|
||||
Name: "vm_promscrape/discovery",
|
||||
IsTLS: isTLS,
|
||||
TLSConfig: tlsCfg,
|
||||
|
|
|
@ -3,10 +3,23 @@ package datadog
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
||||
)
|
||||
|
||||
// SplitTag splits DataDog tag into tag name and value.
|
||||
//
|
||||
// See https://docs.datadoghq.com/getting_started/tagging/#define-tags
|
||||
func SplitTag(tag string) (string, string) {
|
||||
n := strings.IndexByte(tag, ':')
|
||||
if n < 0 {
|
||||
// No tag value.
|
||||
return tag, "no_label_value"
|
||||
}
|
||||
return tag[:n], tag[n+1:]
|
||||
}
|
||||
|
||||
// Request represents DataDog POST request to /api/v1/series
|
||||
//
|
||||
// See https://docs.datadoghq.com/api/latest/metrics/#submit-metrics
|
||||
|
|
|
@ -5,6 +5,23 @@ import (
|
|||
"testing"
|
||||
)
|
||||
|
||||
func TestSplitTag(t *testing.T) {
|
||||
f := func(s, nameExpected, valueExpected string) {
|
||||
t.Helper()
|
||||
name, value := SplitTag(s)
|
||||
if name != nameExpected {
|
||||
t.Fatalf("unexpected name obtained from %q; got %q; want %q", s, name, nameExpected)
|
||||
}
|
||||
if value != valueExpected {
|
||||
t.Fatalf("unexpected value obtained from %q; got %q; want %q", s, value, valueExpected)
|
||||
}
|
||||
}
|
||||
f("", "", "no_label_value")
|
||||
f("foo", "foo", "no_label_value")
|
||||
f("foo:bar", "foo", "bar")
|
||||
f(":bar", "", "bar")
|
||||
}
|
||||
|
||||
func TestRequestUnmarshalFailure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
|
|