Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2024-11-21 14:44:00 +00:00
adds digital ocean sd (#1376)
* adds digital ocean sd config
* adds digital ocean sd https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1367
* typo fix
This commit is contained in: parent b8526e88d3, commit 729c4eeb9c

8 changed files with 644 additions and 10 deletions
@@ -343,6 +343,7 @@ Currently the following [scrape_config](https://prometheus.io/docs/prometheus/la
 * [openstack_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#openstack_sd_config)
 * [dockerswarm_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dockerswarm_sd_config)
 * [eureka_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#eureka_sd_config)
+* [digitalocean_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#digitalocean_sd_config)
 
 Other `*_sd_config` types will be supported in the future.
 
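Editor's note: a `digitalocean_sd_configs` section is enabled through the same '-promscrape.config' YAML file as the other types above. A minimal sketch of the config shape, unmarshaled into a reduced mirror struct (the `scrapeConfig` and `digitaloceanSDStub` types below are illustrative only; the repo's actual ScrapeConfig lives in lib/promscrape/config.go):

package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// scrapeConfig mirrors just the fields needed for this sketch; it is NOT
// the repo's full ScrapeConfig type.
type scrapeConfig struct {
	JobName               string               `yaml:"job_name"`
	DigitaloceanSDConfigs []digitaloceanSDStub `yaml:"digitalocean_sd_configs"`
}

// digitaloceanSDStub mirrors a subset of the SDConfig added in this commit.
type digitaloceanSDStub struct {
	Server string `yaml:"server,omitempty"`
	Port   int    `yaml:"port,omitempty"`
}

func main() {
	// A minimal '-promscrape.config' fragment enabling DigitalOcean discovery.
	data := []byte(`
job_name: digitalocean
digitalocean_sd_configs:
- port: 9100
`)
	var sc scrapeConfig
	if err := yaml.Unmarshal(data, &sc); err != nil {
		log.Fatalf("cannot parse scrape config: %s", err)
	}
	fmt.Printf("job=%q sd_configs=%d port=%d\n", sc.JobName, len(sc.DigitaloceanSDConfigs), sc.DigitaloceanSDConfigs[0].Port)
}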
@@ -1721,6 +1722,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
     Wait time used by Consul service discovery. Default value is used if not set
 -promscrape.consulSDCheckInterval duration
     Interval for checking for changes in Consul. This works only if consul_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config for details (default 30s)
+-promscrape.digitaloceanSDCheckInterval duration
+    Interval for checking for changes in DigitalOcean. This works only if digitalocean_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#digitalocean_sd_config for details (default 1m0s)
 -promscrape.disableCompression
     Whether to disable sending 'Accept-Encoding: gzip' request headers to all the scrape targets. This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization. It is possible to set 'disable_compression: true' individually per each 'scrape_config' section in '-promscrape.config' for fine-grained control
 -promscrape.disableKeepAlive
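Editor's note: the `*SDCheckInterval` flags above control how often promscrape re-runs service discovery for each SD type. A minimal sketch of such an interval-driven refresh loop (illustrative only; this is not the repo's actual scheduler):

package main

import (
	"fmt"
	"time"
)

// refreshLoop re-runs discover() every interval until stop is closed.
// This is the kind of loop that flags like
// -promscrape.digitaloceanSDCheckInterval parameterize.
func refreshLoop(interval time.Duration, discover func() int, stop <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			fmt.Printf("discovered %d targets\n", discover())
		case <-stop:
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	go refreshLoop(100*time.Millisecond, func() int { return 3 }, stop)
	time.Sleep(350 * time.Millisecond)
	close(stop)
}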
@@ -177,6 +177,8 @@ The following scrape types in [scrape_config](https://prometheus.io/docs/prometh
   See [dockerswarm_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dockerswarm_sd_config) for details.
 * `eureka_sd_configs` - is for scraping targets registered in [Netflix Eureka](https://github.com/Netflix/eureka).
   See [eureka_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#eureka_sd_config) for details.
+* `digitalocean_sd_configs` - is for scraping targets registered in [DigitalOcean](https://www.digitalocean.com/).
+  See [digitalocean_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#digitalocean_sd_config) for details.
 
 Please file feature requests to [our issue tracker](https://github.com/VictoriaMetrics/VictoriaMetrics/issues) if you need other service discovery mechanisms to be supported by `vmagent`.
 
@@ -627,6 +629,8 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
     Wait time used by Consul service discovery. Default value is used if not set
 -promscrape.consulSDCheckInterval duration
     Interval for checking for changes in Consul. This works only if consul_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config for details (default 30s)
+-promscrape.digitaloceanSDCheckInterval duration
+    Interval for checking for changes in DigitalOcean. This works only if digitalocean_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#digitalocean_sd_config for details (default 1m0s)
 -promscrape.disableCompression
     Whether to disable sending 'Accept-Encoding: gzip' request headers to all the scrape targets. This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization. It is possible to set 'disable_compression: true' individually per each 'scrape_config' section in '-promscrape.config' for fine-grained control
 -promscrape.disableKeepAlive
@@ -181,6 +181,8 @@ The following scrape types in [scrape_config](https://prometheus.io/docs/prometh
   See [dockerswarm_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dockerswarm_sd_config) for details.
 * `eureka_sd_configs` - is for scraping targets registered in [Netflix Eureka](https://github.com/Netflix/eureka).
   See [eureka_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#eureka_sd_config) for details.
+* `digitalocean_sd_configs` - is for scraping targets registered in [DigitalOcean](https://www.digitalocean.com/).
+  See [digitalocean_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#digitalocean_sd_config) for details.
 
 Please file feature requests to [our issue tracker](https://github.com/VictoriaMetrics/VictoriaMetrics/issues) if you need other service discovery mechanisms to be supported by `vmagent`.
 
@@ -631,6 +633,8 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
     Wait time used by Consul service discovery. Default value is used if not set
 -promscrape.consulSDCheckInterval duration
     Interval for checking for changes in Consul. This works only if consul_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config for details (default 30s)
+-promscrape.digitaloceanSDCheckInterval duration
+    Interval for checking for changes in DigitalOcean. This works only if digitalocean_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#digitalocean_sd_config for details (default 1m0s)
 -promscrape.disableCompression
     Whether to disable sending 'Accept-Encoding: gzip' request headers to all the scrape targets. This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization. It is possible to set 'disable_compression: true' individually per each 'scrape_config' section in '-promscrape.config' for fine-grained control
 -promscrape.disableKeepAlive
@@ -19,6 +19,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/consul"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/digitalocean"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/dns"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/dockerswarm"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/ec2"
@@ -106,16 +107,17 @@ type ScrapeConfig struct {
 	MetricRelabelConfigs []promrelabel.RelabelConfig `yaml:"metric_relabel_configs,omitempty"`
 	SampleLimit          int                         `yaml:"sample_limit,omitempty"`
 
-	StaticConfigs        []StaticConfig         `yaml:"static_configs,omitempty"`
-	FileSDConfigs        []FileSDConfig         `yaml:"file_sd_configs,omitempty"`
-	KubernetesSDConfigs  []kubernetes.SDConfig  `yaml:"kubernetes_sd_configs,omitempty"`
-	OpenStackSDConfigs   []openstack.SDConfig   `yaml:"openstack_sd_configs,omitempty"`
-	ConsulSDConfigs      []consul.SDConfig      `yaml:"consul_sd_configs,omitempty"`
-	EurekaSDConfigs      []eureka.SDConfig      `yaml:"eureka_sd_configs,omitempty"`
-	DockerSwarmSDConfigs []dockerswarm.SDConfig `yaml:"dockerswarm_sd_configs,omitempty"`
-	DNSSDConfigs         []dns.SDConfig         `yaml:"dns_sd_configs,omitempty"`
-	EC2SDConfigs         []ec2.SDConfig         `yaml:"ec2_sd_configs,omitempty"`
-	GCESDConfigs         []gce.SDConfig         `yaml:"gce_sd_configs,omitempty"`
+	StaticConfigs         []StaticConfig          `yaml:"static_configs,omitempty"`
+	FileSDConfigs         []FileSDConfig          `yaml:"file_sd_configs,omitempty"`
+	KubernetesSDConfigs   []kubernetes.SDConfig   `yaml:"kubernetes_sd_configs,omitempty"`
+	OpenStackSDConfigs    []openstack.SDConfig    `yaml:"openstack_sd_configs,omitempty"`
+	ConsulSDConfigs       []consul.SDConfig       `yaml:"consul_sd_configs,omitempty"`
+	EurekaSDConfigs       []eureka.SDConfig       `yaml:"eureka_sd_configs,omitempty"`
+	DockerSwarmSDConfigs  []dockerswarm.SDConfig  `yaml:"dockerswarm_sd_configs,omitempty"`
+	DNSSDConfigs          []dns.SDConfig          `yaml:"dns_sd_configs,omitempty"`
+	EC2SDConfigs          []ec2.SDConfig          `yaml:"ec2_sd_configs,omitempty"`
+	GCESDConfigs          []gce.SDConfig          `yaml:"gce_sd_configs,omitempty"`
+	DigitaloceanSDConfigs []digitalocean.SDConfig `yaml:"digitalocean_sd_configs,omitempty"`
 
 	// These options are supported only by lib/promscrape.
 	RelabelDebug bool `yaml:"relabel_debug,omitempty"`
@@ -488,6 +490,34 @@ func (cfg *Config) getGCESDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
 	return dst
 }
 
+// getDigitalOceanDScrapeWork returns `digitalocean_sd_configs` ScrapeWork from cfg.
+func (cfg *Config) getDigitalOceanDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
+	swsPrevByJob := getSWSByJob(prev)
+	dst := make([]*ScrapeWork, 0, len(prev))
+	for i := range cfg.ScrapeConfigs {
+		sc := &cfg.ScrapeConfigs[i]
+		dstLen := len(dst)
+		ok := true
+		for j := range sc.DigitaloceanSDConfigs {
+			sdc := &sc.DigitaloceanSDConfigs[j]
+			var okLocal bool
+			dst, okLocal = appendSDScrapeWork(dst, sdc, cfg.baseDir, sc.swc, "digitalocean_sd_config")
+			if ok {
+				ok = okLocal
+			}
+		}
+		if ok {
+			continue
+		}
+		swsPrev := swsPrevByJob[sc.swc.jobName]
+		if len(swsPrev) > 0 {
+			logger.Errorf("there were errors when discovering digitalocean targets for job %q, so preserving the previous targets", sc.swc.jobName)
+			dst = append(dst[:dstLen], swsPrev...)
+		}
+	}
+	return dst
+}
+
 // getFileSDScrapeWork returns `file_sd_configs` ScrapeWork from cfg.
 func (cfg *Config) getFileSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
 	// Create a map for the previous scrape work.
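Editor's note: the function above follows a pattern shared by all SD types in this file: if discovery for a job reports errors, the job's previously discovered targets are kept instead of being dropped, so transient API failures do not wipe out scrape targets. A simplified standalone sketch of that fallback (names here are illustrative, not the repo's actual API):

package main

import "fmt"

// keepPrevOnError returns the freshly discovered targets when discovery
// succeeded, and otherwise falls back to the targets discovered last time.
func keepPrevOnError(prevByJob map[string][]string, job string, discovered []string, ok bool) []string {
	if ok {
		return discovered
	}
	if prev := prevByJob[job]; len(prev) > 0 {
		fmt.Printf("discovery errors for job %q; preserving %d previous targets\n", job, len(prev))
		return prev
	}
	return discovered // nothing to fall back to
}

func main() {
	prev := map[string][]string{"digitalocean": {"10.0.0.1:9100", "10.0.0.2:9100"}}
	fmt.Println(keepPrevOnError(prev, "digitalocean", nil, false))
}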
92 lib/promscrape/discovery/digitalocean/api.go Normal file
@@ -0,0 +1,92 @@
package digitalocean

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
)

var configMap = discoveryutils.NewConfigMap()

type apiConfig struct {
	client *discoveryutils.Client
	port   int
}

func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
	ac, err := sdc.HTTPClientConfig.NewConfig(baseDir)
	if err != nil {
		return nil, fmt.Errorf("cannot parse auth config: %w", err)
	}

	apiServer := sdc.Server
	if apiServer == "" {
		apiServer = "https://api.digitalocean.com"
	}
	if !strings.Contains(apiServer, "://") {
		scheme := "http"
		if sdc.HTTPClientConfig.TLSConfig != nil {
			scheme = "https"
		}
		apiServer = scheme + "://" + apiServer
	}
	proxyAC, err := sdc.ProxyClientConfig.NewConfig(baseDir)
	if err != nil {
		return nil, fmt.Errorf("cannot parse proxy auth config: %w", err)
	}
	client, err := discoveryutils.NewClient(apiServer, ac, sdc.ProxyURL, proxyAC)
	if err != nil {
		return nil, fmt.Errorf("cannot create HTTP client for %q: %w", apiServer, err)
	}
	cfg := &apiConfig{
		client: client,
		port:   sdc.Port,
	}
	if cfg.port == 0 {
		cfg.port = 80
	}
	return cfg, nil
}

func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
	v, err := configMap.Get(sdc, func() (interface{}, error) { return newAPIConfig(sdc, baseDir) })
	if err != nil {
		return nil, err
	}
	return v.(*apiConfig), nil
}

const dropletsAPIPath = "/v2/droplets"

// getDroplets fetches all the droplets, following the paginated list API
// until there is no `next` page link.
func getDroplets(getAPIResponse func(string) ([]byte, error)) ([]droplet, error) {
	var droplets []droplet

	nextAPIURL := dropletsAPIPath
	for nextAPIURL != "" {
		data, err := getAPIResponse(nextAPIURL)
		if err != nil {
			return nil, fmt.Errorf("cannot fetch data from DigitalOcean list API: %w", err)
		}
		apiResp, err := parseAPIResponse(data)
		if err != nil {
			return nil, err
		}
		droplets = append(droplets, apiResp.Droplets...)
		nextAPIURL, err = apiResp.nextURLPath()
		if err != nil {
			return nil, err
		}
	}
	return droplets, nil
}

func parseAPIResponse(data []byte) (*listDropletResponse, error) {
	var dps listDropletResponse
	if err := json.Unmarshal(data, &dps); err != nil {
		return nil, fmt.Errorf("cannot parse DigitalOcean API response %q: %w", data, err)
	}
	return &dps, nil
}
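Editor's note: the `next` links returned by the DigitalOcean API are absolute URLs, while getDroplets issues requests against the configured apiServer. That is why nextURLPath (defined in digitalocean.go below) reduces the link to path-and-query via url.RequestURI before the next iteration. A small standalone demonstration of that reduction:

package main

import (
	"fmt"
	"log"
	"net/url"
)

func main() {
	// RequestURI keeps only the path and query, so the follow-up request is
	// issued against the configured apiServer, not the host echoed by the API.
	next := "https://api.digitalocean.com/v2/droplets?page=2&per_page=1"
	u, err := url.Parse(next)
	if err != nil {
		log.Fatalf("cannot parse next url: %s", err)
	}
	fmt.Println(u.RequestURI()) // /v2/droplets?page=2&per_page=1
}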
349 lib/promscrape/discovery/digitalocean/api_test.go Normal file
@@ -0,0 +1,349 @@
package digitalocean

import (
	"reflect"
	"testing"
)

func Test_parseAPIResponse(t *testing.T) {
	type args struct {
		data []byte
	}
	tests := []struct {
		name    string
		args    args
		want    *listDropletResponse
		wantErr bool
	}{
		{
			name: "simple parse",
			args: args{data: []byte(`{
  "droplets": [
    {
      "id": 3164444,
      "name": "example.com",
      "memory": 1024,
      "vcpus": 1,
      "status": "active",
      "kernel": {
        "id": 2233,
        "name": "Ubuntu 14.04 x64 vmlinuz-3.13.0-37-generic",
        "version": "3.13.0-37-generic"
      },
      "features": [
        "backups",
        "ipv6",
        "virtio"
      ],
      "snapshot_ids": [],
      "image": {
        "id": 6918990,
        "name": "14.04 x64",
        "distribution": "Ubuntu",
        "slug": "ubuntu-16-04-x64",
        "public": true,
        "regions": [
          "nyc1"
        ]
      },
      "size_slug": "s-1vcpu-1gb",
      "networks": {
        "v4": [
          {
            "ip_address": "104.236.32.182",
            "netmask": "255.255.192.0",
            "gateway": "104.236.0.1",
            "type": "public"
          }
        ],
        "v6": [
          {
            "ip_address": "2604:A880:0800:0010:0000:0000:02DD:4001",
            "netmask": 64,
            "gateway": "2604:A880:0800:0010:0000:0000:0000:0001",
            "type": "public"
          }
        ]
      },
      "region": {
        "name": "New York 3",
        "slug": "nyc3",
        "features": [
          "private_networking",
          "backups",
          "ipv6"
        ]
      },
      "tags": [
        "tag1",
        "tag2"
      ],
      "vpc_uuid": "f9b0769c-e118-42fb-a0c4-fed15ef69662"
    }
  ],
  "links": {
    "pages": {
      "last": "https://api.digitalocean.com/v2/droplets?page=3&per_page=1",
      "next": "https://api.digitalocean.com/v2/droplets?page=2&per_page=1"
    }
  }
}`)},
			want: &listDropletResponse{
				Droplets: []droplet{
					{
						Image: struct {
							Name string `json:"name"`
							Slug string `json:"slug"`
						}{Name: "14.04 x64", Slug: "ubuntu-16-04-x64"},
						Region: struct {
							Slug string `json:"slug"`
						}{Slug: "nyc3"},
						Networks: networks{
							V6: []network{
								{
									IPAddress: "2604:A880:0800:0010:0000:0000:02DD:4001",
									Type:      "public",
								},
							},
							V4: []network{
								{
									IPAddress: "104.236.32.182",
									Type:      "public",
								},
							},
						},
						SizeSlug: "s-1vcpu-1gb",
						Features: []string{"backups", "ipv6", "virtio"},
						Tags:     []string{"tag1", "tag2"},
						Status:   "active",
						Name:     "example.com",
						ID:       3164444,
						VpcUUID:  "f9b0769c-e118-42fb-a0c4-fed15ef69662",
					},
				},
				Links: links{
					Pages: struct {
						Last string `json:"last,omitempty"`
						Next string `json:"next,omitempty"`
					}{
						Last: "https://api.digitalocean.com/v2/droplets?page=3&per_page=1",
						Next: "https://api.digitalocean.com/v2/droplets?page=2&per_page=1",
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := parseAPIResponse(tt.args.data)
			if (err != nil) != tt.wantErr {
				t.Errorf("parseAPIResponse() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("parseAPIResponse() got = \n%v\n, \nwant \n%v\n", got, tt.want)
			}
		})
	}
}

func Test_getDroplets(t *testing.T) {
	type args struct {
		getAPIResponse func(string) ([]byte, error)
	}
	tests := []struct {
		name             string
		args             args
		wantDropletCount int
		wantErr          bool
	}{
		{
			name: "get 5 droplets",
			args: args{
				getAPIResponse: func(s string) ([]byte, error) {
					var resp []byte
					switch s {
					case dropletsAPIPath:
						// First page: three droplets plus a `next` link.
						resp = []byte(`{ "droplets": [
    {
      "id": 3164444,
      "name": "example.com",
      "status": "active",
      "image": {
        "id": 6918990,
        "name": "14.04 x64",
        "distribution": "Ubuntu",
        "slug": "ubuntu-16-04-x64",
        "public": true,
        "regions": [
          "nyc1"
        ]
      },
      "size_slug": "s-1vcpu-1gb",
      "networks": {
        "v4": [
          {
            "ip_address": "104.236.32.182",
            "netmask": "255.255.192.0",
            "gateway": "104.236.0.1",
            "type": "public"
          }
        ]
      },
      "region": {
        "name": "New York 3",
        "slug": "nyc3"
      },
      "tags": [
        "tag1",
        "tag2"
      ],
      "vpc_uuid": "f9b0769c-e118-42fb-a0c4-fed15ef69662"
    },
    {
      "id": 3164444,
      "name": "example.com",
      "status": "active",
      "image": {
        "id": 6918990,
        "name": "14.04 x64",
        "distribution": "Ubuntu",
        "slug": "ubuntu-16-04-x64"
      },
      "size_slug": "s-1vcpu-1gb",
      "networks": {
        "v4": [
          {
            "ip_address": "104.236.32.183",
            "netmask": "255.255.192.0",
            "gateway": "104.236.0.1",
            "type": "public"
          }
        ]
      },
      "region": {
        "name": "New York 3",
        "slug": "nyc3"
      },
      "vpc_uuid": "f9b0769c-e118-42fb-a0c4-fed15ef69662"
    },
    {
      "id": 3164444,
      "name": "example.com",
      "status": "active",
      "image": {
        "id": 6918990,
        "name": "14.04 x64",
        "distribution": "Ubuntu",
        "slug": "ubuntu-16-04-x64"
      },
      "size_slug": "s-1vcpu-1gb",
      "networks": {
        "v4": [
          {
            "ip_address": "104.236.32.183",
            "netmask": "255.255.192.0",
            "gateway": "104.236.0.1",
            "type": "public"
          }
        ]
      },
      "region": {
        "name": "New York 3",
        "slug": "nyc3"
      },
      "vpc_uuid": "f9b0769c-e118-42fb-a0c4-fed15ef69662"
    }
  ],
  "links": {
    "pages": {
      "last": "https://api.digitalocean.com/v2/droplets?page=3&per_page=1",
      "next": "https://api.digitalocean.com/v2/droplets?page=2&per_page=1"
    }
  }
}`)
					default:
						// Second page: two droplets with an empty `next` link.
						resp = []byte(`{ "droplets": [
    {
      "id": 3164444,
      "name": "example.com",
      "status": "active",
      "image": {
        "id": 6918990,
        "name": "14.04 x64",
        "distribution": "Ubuntu",
        "slug": "ubuntu-16-04-x64"
      },
      "size_slug": "s-1vcpu-1gb",
      "networks": {
        "v4": [
          {
            "ip_address": "104.236.32.183",
            "netmask": "255.255.192.0",
            "gateway": "104.236.0.1",
            "type": "public"
          }
        ]
      },
      "region": {
        "name": "New York 3",
        "slug": "nyc3"
      },
      "vpc_uuid": "f9b0769c-e118-42fb-a0c4-fed15ef69662"
    },
    {
      "id": 3164444,
      "name": "example.com",
      "status": "active",
      "image": {
        "id": 6918990,
        "name": "14.04 x64",
        "distribution": "Ubuntu",
        "slug": "ubuntu-16-04-x64"
      },
      "size_slug": "s-1vcpu-1gb",
      "networks": {
        "v4": [
          {
            "ip_address": "104.236.32.183",
            "netmask": "255.255.192.0",
            "gateway": "104.236.0.1",
            "type": "public"
          }
        ]
      },
      "region": {
        "name": "New York 3",
        "slug": "nyc3"
      },
      "vpc_uuid": "f9b0769c-e118-42fb-a0c4-fed15ef69662"
    }
  ]
}`)
					}
					return resp, nil
				},
			},
			wantDropletCount: 5,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := getDroplets(tt.args.getAPIResponse)
			if (err != nil) != tt.wantErr {
				t.Errorf("getDroplets() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if len(got) != tt.wantDropletCount {
				t.Fatalf("unexpected droplets count: %d, want: %d, \n droplets: %v\n", len(got), tt.wantDropletCount, got)
			}
		})
	}
}
148 lib/promscrape/discovery/digitalocean/digitalocean.go Normal file
@@ -0,0 +1,148 @@
package digitalocean

import (
	"fmt"
	"net/url"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
)

// SDConfig represents service discovery config for DigitalOcean.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#digitalocean_sd_config
type SDConfig struct {
	Server            string                     `yaml:"server,omitempty"`
	HTTPClientConfig  promauth.HTTPClientConfig  `yaml:",inline"`
	ProxyURL          proxy.URL                  `yaml:"proxy_url,omitempty"`
	ProxyClientConfig promauth.ProxyClientConfig `yaml:",inline"`
	Port              int                        `yaml:"port,omitempty"`
}

// GetLabels returns DigitalOcean droplet labels according to sdc.
func (sdc *SDConfig) GetLabels(baseDir string) ([]map[string]string, error) {
	cfg, err := getAPIConfig(sdc, baseDir)
	if err != nil {
		return nil, fmt.Errorf("cannot get API config: %w", err)
	}
	droplets, err := getDroplets(cfg.client.GetAPIResponse)
	if err != nil {
		return nil, err
	}

	return addDropletLabels(droplets, cfg.port), nil
}

// See https://developers.digitalocean.com/documentation/v2/#retrieve-an-existing-droplet-by-id
type droplet struct {
	ID     int    `json:"id"`
	Name   string `json:"name"`
	Status string `json:"status"`

	Features []string `json:"features"`
	Image    struct {
		Name string `json:"name"`
		Slug string `json:"slug"`
	} `json:"image"`
	SizeSlug string   `json:"size_slug"`
	Networks networks `json:"networks"`
	Region   struct {
		Slug string `json:"slug"`
	} `json:"region"`
	Tags    []string `json:"tags"`
	VpcUUID string   `json:"vpc_uuid"`
}

// getIPByNet returns the first IP of the given version ("v4" or "v6")
// and type ("public" or "private"), or an empty string if there is none.
func (d *droplet) getIPByNet(netVersion, netType string) string {
	var dropletNetworks []network
	switch netVersion {
	case "v4":
		dropletNetworks = d.Networks.V4
	case "v6":
		dropletNetworks = d.Networks.V6
	default:
		logger.Fatalf("BUG: unexpected network version: %s; want v4 or v6", netVersion)
	}
	for _, net := range dropletNetworks {
		if net.Type == netType {
			return net.IPAddress
		}
	}
	return ""
}

type networks struct {
	V4 []network `json:"v4"`
	V6 []network `json:"v6"`
}

type network struct {
	IPAddress string `json:"ip_address"`
	// Type is either `private` or `public`.
	Type string `json:"type"`
}

// See https://developers.digitalocean.com/documentation/v2/#list-all-droplets
type listDropletResponse struct {
	Droplets []droplet `json:"droplets,omitempty"`
	Links    links     `json:"links,omitempty"`
}

type links struct {
	Pages struct {
		Last string `json:"last,omitempty"`
		Next string `json:"next,omitempty"`
	} `json:"pages,omitempty"`
}

// nextURLPath returns the path and query of the `next` page link,
// so the next page can be requested from the configured API server.
func (r *listDropletResponse) nextURLPath() (string, error) {
	if r.Links.Pages.Next == "" {
		return "", nil
	}
	u, err := url.Parse(r.Links.Pages.Next)
	if err != nil {
		return "", fmt.Errorf("cannot parse DigitalOcean next url %q: %w", r.Links.Pages.Next, err)
	}
	return u.RequestURI(), nil
}

func addDropletLabels(droplets []droplet, defaultPort int) []map[string]string {
	var ms []map[string]string
	for _, droplet := range droplets {
		if len(droplet.Networks.V4) == 0 {
			continue
		}

		privateIPv4 := droplet.getIPByNet("v4", "private")
		publicIPv4 := droplet.getIPByNet("v4", "public")
		publicIPv6 := droplet.getIPByNet("v6", "public")

		addr := discoveryutils.JoinHostPort(publicIPv4, defaultPort)
		m := map[string]string{
			"__address__":                      addr,
			"__meta_digitalocean_droplet_id":   fmt.Sprintf("%d", droplet.ID),
			"__meta_digitalocean_droplet_name": droplet.Name,
			"__meta_digitalocean_image":        droplet.Image.Slug,
			"__meta_digitalocean_image_name":   droplet.Image.Name,
			"__meta_digitalocean_private_ipv4": privateIPv4,
			"__meta_digitalocean_public_ipv4":  publicIPv4,
			"__meta_digitalocean_public_ipv6":  publicIPv6,
			"__meta_digitalocean_region":       droplet.Region.Slug,
			"__meta_digitalocean_size":         droplet.SizeSlug,
			"__meta_digitalocean_status":       droplet.Status,
			"__meta_digitalocean_vpc":          droplet.VpcUUID,
		}
		if len(droplet.Features) > 0 {
			// Surround with commas so a single feature can be matched exactly
			// via a `,<feature>,` pattern in relabel rules.
			features := fmt.Sprintf(",%s,", strings.Join(droplet.Features, ","))
			m["__meta_digitalocean_features"] = features
		}
		if len(droplet.Tags) > 0 {
			tags := fmt.Sprintf(",%s,", strings.Join(droplet.Tags, ","))
			m["__meta_digitalocean_tags"] = tags
		}
		ms = append(ms, m)
	}
	return ms
}
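Editor's note: two bugs in the original hunk are fixed above: the features string overwrote `__meta_digitalocean_vpc` instead of populating `__meta_digitalocean_features`, and the tags label joined `droplet.Features` instead of `droplet.Tags`. The comma-surrounded join format itself is deliberate; a small standalone demonstration of why it allows exact matching in relabel rules:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Features are exported as ",backups,ipv6,virtio," (note the surrounding
	// commas), so an exact feature can be matched with `.*,ipv6,.*` without
	// `ipv6` accidentally matching a longer token such as `ipv64`.
	features := fmt.Sprintf(",%s,", strings.Join([]string{"backups", "ipv6", "virtio"}, ","))
	re := regexp.MustCompile(`.*,ipv6,.*`)
	fmt.Println(features, re.MatchString(features)) // ,backups,ipv6,virtio, true
}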
@@ -41,6 +41,9 @@ var (
 	dockerswarmSDCheckInterval = flag.Duration("promscrape.dockerswarmSDCheckInterval", 30*time.Second, "Interval for checking for changes in dockerswarm. "+
 		"This works only if dockerswarm_sd_configs is configured in '-promscrape.config' file. "+
 		"See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dockerswarm_sd_config for details")
+	digitaloceanSDCheckInterval = flag.Duration("promscrape.digitaloceanSDCheckInterval", time.Minute, "Interval for checking for changes in DigitalOcean. "+
+		"This works only if digitalocean_sd_configs is configured in '-promscrape.config' file. "+
+		"See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#digitalocean_sd_config for details")
 	promscrapeConfigFile = flag.String("promscrape.config", "", "Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. "+
 		"See https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter for details")
 	suppressDuplicateScrapeTargetErrors = flag.Bool("promscrape.suppressDuplicateScrapeTargetErrors", false, "Whether to suppress 'duplicate scrape target' errors; "+

@@ -111,6 +114,7 @@ func runScraper(configFile string, pushData func(wr *prompbmarshal.WriteRequest)
 	scs.add("ec2_sd_configs", *ec2SDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getEC2SDScrapeWork(swsPrev) })
 	scs.add("gce_sd_configs", *gceSDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getGCESDScrapeWork(swsPrev) })
 	scs.add("dockerswarm_sd_configs", *dockerswarmSDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getDockerSwarmSDScrapeWork(swsPrev) })
+	scs.add("digitalocean_sd_configs", *digitaloceanSDCheckInterval, func(cfg *Config, swsPrev []*ScrapeWork) []*ScrapeWork { return cfg.getDigitalOceanDScrapeWork(swsPrev) })
 
 	var tickerCh <-chan time.Time
 	if *configCheckInterval > 0 {
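Editor's note: scs.add above is the single registration point wiring the new SD type into the scraper: a name, a re-check interval, and a function that turns the current config into ScrapeWork. A standalone sketch of that registry pattern (illustrative only; the repo's scrapeConfigs type differs):

package main

import (
	"fmt"
	"time"
)

// sdEntry pairs an SD type with its re-check interval and discovery function.
type sdEntry struct {
	name     string
	interval time.Duration
	discover func() []string
}

type registry struct{ entries []sdEntry }

func (r *registry) add(name string, interval time.Duration, discover func() []string) {
	r.entries = append(r.entries, sdEntry{name, interval, discover})
}

func main() {
	var r registry
	r.add("digitalocean_sd_configs", time.Minute, func() []string { return []string{"104.236.32.182:80"} })
	for _, e := range r.entries {
		fmt.Printf("%s: every %s -> %v\n", e.name, e.interval, e.discover())
	}
}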