package promscrape

import (
	"flag"
	"fmt"
	"io"
	"net/http"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"
	"unsafe"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
	"github.com/cespare/xxhash/v2"
)

var maxDroppedTargets = flag.Int("promscrape.maxDroppedTargets", 1000, "The maximum number of droppedTargets to show at /api/v1/targets page. "+
	"Increase this value if your setup drops more scrape targets during relabeling and you need to investigate labels for all the dropped targets. "+
	"Note that the increased number of tracked dropped targets may result in increased memory usage")

var tsmGlobal = newTargetStatusMap()

// WriteTargetResponse serves requests to /target_response?id=<id>
//
// It fetches the response for the given target id and returns it.
func WriteTargetResponse(w http.ResponseWriter, r *http.Request) error {
	targetID := r.FormValue("id")
	sw := tsmGlobal.getScrapeWorkByTargetID(targetID)
	if sw == nil {
		return fmt.Errorf("cannot find target for id=%s", targetID)
	}
	data, err := sw.getTargetResponse()
	if err != nil {
		return fmt.Errorf("cannot fetch response from id=%s: %w", targetID, err)
	}
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	_, err = w.Write(data)
	return err
}
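
// Illustrative sketch (not part of this file): WriteTargetResponse is meant to
// be wired into an HTTP handler. The route registration below is an assumption
// for illustration; the actual wiring lives elsewhere in the binary.
//
//	http.HandleFunc("/target_response", func(w http.ResponseWriter, r *http.Request) {
//		if err := WriteTargetResponse(w, r); err != nil {
//			http.Error(w, err.Error(), http.StatusBadRequest)
//		}
//	})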

// WriteHumanReadableTargetsStatus writes human-readable status for all the scrape targets to w according to r.
func WriteHumanReadableTargetsStatus(w http.ResponseWriter, r *http.Request) {
	filter := getRequestFilter(r)
	tsr := tsmGlobal.getTargetsStatusByJob(filter)
	if accept := r.Header.Get("Accept"); strings.Contains(accept, "text/html") {
		w.Header().Set("Content-Type", "text/html; charset=utf-8")
		WriteTargetsResponseHTML(w, tsr, filter)
	} else {
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		WriteTargetsResponsePlain(w, tsr, filter)
	}
}

// WriteServiceDiscovery writes /service-discovery response to w similar to http://demo.robustperception.io:9090/service-discovery
func WriteServiceDiscovery(w http.ResponseWriter, r *http.Request) {
	filter := getRequestFilter(r)
	tsr := tsmGlobal.getTargetsStatusByJob(filter)
	w.Header().Set("Content-Type", "text/html; charset=utf-8")
	WriteServiceDiscoveryResponse(w, tsr, filter)
}

// WriteAPIV1Targets writes /api/v1/targets to w according to https://prometheus.io/docs/prometheus/latest/querying/api/#targets
func WriteAPIV1Targets(w io.Writer, state string) {
	if state == "" {
		state = "any"
	}
	fmt.Fprintf(w, `{"status":"success","data":{"activeTargets":`)
	if state == "active" || state == "any" {
		tsmGlobal.WriteActiveTargetsJSON(w)
	} else {
		fmt.Fprintf(w, `[]`)
	}
	fmt.Fprintf(w, `,"droppedTargets":`)
	if state == "dropped" || state == "any" {
		droppedTargetsMap.WriteDroppedTargetsJSON(w)
	} else {
		fmt.Fprintf(w, `[]`)
	}
	fmt.Fprintf(w, `}}`)
}
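
// The JSON emitted above follows the Prometheus /api/v1/targets schema; a
// hedged sketch of the output shape (all values are hypothetical):
//
//	{"status":"success","data":{"activeTargets":[
//		{"discoveredLabels":{...},"labels":{...},"scrapePool":"node","scrapeUrl":"http://host:9100/metrics",
//		 "lastError":"","lastScrape":"2023-01-01T00:00:00Z","lastScrapeDuration":0.012,
//		 "lastSamplesScraped":42,"health":"up"}
//	],"droppedTargets":[]}}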

type targetStatusMap struct {
	mu       sync.Mutex
	m        map[*scrapeWork]*targetStatus
	jobNames []string
}

func newTargetStatusMap() *targetStatusMap {
	return &targetStatusMap{
		m: make(map[*scrapeWork]*targetStatus),
	}
}

func (tsm *targetStatusMap) Reset() {
	tsm.mu.Lock()
	tsm.m = make(map[*scrapeWork]*targetStatus)
	tsm.mu.Unlock()
}

func (tsm *targetStatusMap) registerJobNames(jobNames []string) {
	tsm.mu.Lock()
	tsm.jobNames = append(tsm.jobNames[:0], jobNames...)
	tsm.mu.Unlock()
}

func (tsm *targetStatusMap) Register(sw *scrapeWork) {
	tsm.mu.Lock()
	tsm.m[sw] = &targetStatus{
		sw: sw,
	}
	tsm.mu.Unlock()
}

func (tsm *targetStatusMap) Unregister(sw *scrapeWork) {
	tsm.mu.Lock()
	delete(tsm.m, sw)
	tsm.mu.Unlock()
}

func (tsm *targetStatusMap) Update(sw *scrapeWork, up bool, scrapeTime, scrapeDuration int64, samplesScraped int, err error) {
	tsm.mu.Lock()
	ts := tsm.m[sw]
	if ts == nil {
		ts = &targetStatus{
			sw: sw,
		}
		tsm.m[sw] = ts
	}
	ts.up = up
	ts.scrapeTime = scrapeTime
	ts.scrapeDuration = scrapeDuration
	ts.samplesScraped = samplesScraped
	ts.scrapesTotal++
	if !up {
		ts.scrapesFailed++
	}
	ts.err = err
	tsm.mu.Unlock()
}
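
// A hedged sketch of the expected lifecycle (the scrape loop itself lives
// outside this file; only the names defined here are real):
//
//	tsmGlobal.Register(sw)                 // when the target is discovered
//	tsmGlobal.Update(sw, up, t, d, n, err) // after every scrape attempt
//	tsmGlobal.Unregister(sw)               // when the target goes away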

func (tsm *targetStatusMap) getScrapeWorkByTargetID(targetID string) *scrapeWork {
	tsm.mu.Lock()
	defer tsm.mu.Unlock()
	for sw := range tsm.m {
		// The target is uniquely identified by a pointer to its original labels.
		if getLabelsID(sw.Config.OriginalLabels) == targetID {
			return sw
		}
	}
	return nil
}

func getLabelsID(labels *promutils.Labels) string {
	return fmt.Sprintf("%016x", uintptr(unsafe.Pointer(labels)))
}
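
// getLabelsID derives the target id from the memory address of the labels
// object, so the id is stable only for the lifetime of that object. It renders
// as a 16-digit hex string, e.g. "00000000c0000a60" (hypothetical value).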

// StatusByGroup returns the number of targets with the given up status
// in the given group.
func (tsm *targetStatusMap) StatusByGroup(group string, up bool) int {
	var count int
	tsm.mu.Lock()
	for _, ts := range tsm.m {
		if ts.sw.ScrapeGroup == group && ts.up == up {
			count++
		}
	}
	tsm.mu.Unlock()
	return count
}

func (tsm *targetStatusMap) getActiveTargetStatuses() []targetStatus {
	tsm.mu.Lock()
	tss := make([]targetStatus, 0, len(tsm.m))
	for _, ts := range tsm.m {
		tss = append(tss, *ts)
	}
	tsm.mu.Unlock()
	// Sort discovered targets by __address__ label, so they stay in consistent order across calls
	sort.Slice(tss, func(i, j int) bool {
		addr1 := tss[i].sw.Config.OriginalLabels.Get("__address__")
		addr2 := tss[j].sw.Config.OriginalLabels.Get("__address__")
		return addr1 < addr2
	})
	return tss
}

// WriteActiveTargetsJSON writes `activeTargets` contents to w according to https://prometheus.io/docs/prometheus/latest/querying/api/#targets
func (tsm *targetStatusMap) WriteActiveTargetsJSON(w io.Writer) {
	tss := tsm.getActiveTargetStatuses()
	fmt.Fprintf(w, `[`)
	for i, ts := range tss {
		fmt.Fprintf(w, `{"discoveredLabels":`)
		writeLabelsJSON(w, ts.sw.Config.OriginalLabels)
		fmt.Fprintf(w, `,"labels":`)
		writeLabelsJSON(w, ts.sw.Config.Labels)
		fmt.Fprintf(w, `,"scrapePool":%q`, ts.sw.Config.Job())
		fmt.Fprintf(w, `,"scrapeUrl":%q`, ts.sw.Config.ScrapeURL)
		errMsg := ""
		if ts.err != nil {
			errMsg = ts.err.Error()
		}
		fmt.Fprintf(w, `,"lastError":%q`, errMsg)
		fmt.Fprintf(w, `,"lastScrape":%q`, time.Unix(ts.scrapeTime/1000, (ts.scrapeTime%1000)*1e6).Format(time.RFC3339Nano))
		fmt.Fprintf(w, `,"lastScrapeDuration":%g`, (time.Millisecond * time.Duration(ts.scrapeDuration)).Seconds())
		fmt.Fprintf(w, `,"lastSamplesScraped":%d`, ts.samplesScraped)
		state := "up"
		if !ts.up {
			state = "down"
		}
		fmt.Fprintf(w, `,"health":%q}`, state)
		if i+1 < len(tss) {
			fmt.Fprintf(w, `,`)
		}
	}
	fmt.Fprintf(w, `]`)
}

func writeLabelsJSON(w io.Writer, labels *promutils.Labels) {
	fmt.Fprintf(w, `{`)
	labelsList := labels.GetLabels()
	for i, label := range labelsList {
		fmt.Fprintf(w, "%q:%q", label.Name, label.Value)
		if i+1 < len(labelsList) {
			fmt.Fprintf(w, `,`)
		}
	}
	fmt.Fprintf(w, `}`)
}
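
// writeLabelsJSON emits a flat JSON object; a hedged example with hypothetical
// labels:
//
//	{"instance":"localhost:9100","job":"node_exporter"}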

type targetStatus struct {
	sw             *scrapeWork
	up             bool
	scrapeTime     int64
	scrapeDuration int64
	samplesScraped int
	scrapesTotal   int
	scrapesFailed  int
	err            error
}

func (ts *targetStatus) getDurationFromLastScrape() time.Duration {
	return time.Since(time.Unix(ts.scrapeTime/1000, (ts.scrapeTime%1000)*1e6))
}
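
// scrapeTime and scrapeDuration are stored in milliseconds; the conversion
// above splits unix milliseconds into seconds plus nanoseconds:
//
//	secs := ts.scrapeTime / 1000          // whole seconds
//	nsecs := (ts.scrapeTime % 1000) * 1e6 // leftover millis as nanoseconds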

type droppedTargets struct {
	mu              sync.Mutex
	m               map[uint64]droppedTarget
	lastCleanupTime uint64
}

type droppedTarget struct {
	originalLabels *promutils.Labels
	relabelConfigs *promrelabel.ParsedConfigs
	deadline       uint64
}

func (dt *droppedTargets) getTargetsList() []droppedTarget {
	dt.mu.Lock()
	dts := make([]droppedTarget, 0, len(dt.m))
	for _, v := range dt.m {
		dts = append(dts, v)
	}
	dt.mu.Unlock()
	// Sort discovered targets by __address__ label, so they stay in consistent order across calls
	sort.Slice(dts, func(i, j int) bool {
		addr1 := dts[i].originalLabels.Get("__address__")
		addr2 := dts[j].originalLabels.Get("__address__")
		return addr1 < addr2
	})
	return dts
}

func (dt *droppedTargets) Register(originalLabels *promutils.Labels, relabelConfigs *promrelabel.ParsedConfigs) {
	if *dropOriginalLabels {
		// The originalLabels must be dropped, so do not register it.
		return
	}
	// It is better to have hash collisions instead of spending additional CPU on originalLabels.String() call.
	key := labelsHash(originalLabels)
	currentTime := fasttime.UnixTimestamp()
	dt.mu.Lock()
	_, ok := dt.m[key]
	if ok || len(dt.m) < *maxDroppedTargets {
		dt.m[key] = droppedTarget{
			originalLabels: originalLabels,
			relabelConfigs: relabelConfigs,
			deadline:       currentTime + 10*60,
		}
	}
	if currentTime-dt.lastCleanupTime > 60 {
		for k, v := range dt.m {
			if currentTime > v.deadline {
				delete(dt.m, k)
			}
		}
		dt.lastCleanupTime = currentTime
	}
	dt.mu.Unlock()
}
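
// Design note derived from the constants above: each dropped target is kept
// for 10 minutes (deadline = now + 10*60 seconds), and expired entries are
// swept lazily, at most once per 60 seconds, on the next Register call.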

func labelsHash(labels *promutils.Labels) uint64 {
	d := xxhashPool.Get().(*xxhash.Digest)
	for _, label := range labels.GetLabels() {
		_, _ = d.WriteString(label.Name)
		_, _ = d.WriteString(label.Value)
	}
	h := d.Sum64()
	d.Reset()
	xxhashPool.Put(d)
	return h
}

var xxhashPool = &sync.Pool{
	New: func() interface{} {
		return xxhash.New()
	},
}
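
// Design note: pooling the xxhash.Digest avoids an allocation per labelsHash
// call on the hot relabeling path. The hash is order-sensitive, so the same
// label pairs in a different order would hash differently; that is acceptable
// here, since it only affects the best-effort droppedTargets bookkeeping.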

// WriteDroppedTargetsJSON writes `droppedTargets` contents to w according to https://prometheus.io/docs/prometheus/latest/querying/api/#targets
func (dt *droppedTargets) WriteDroppedTargetsJSON(w io.Writer) {
	dts := dt.getTargetsList()
	fmt.Fprintf(w, `[`)
	for i, dt := range dts {
		fmt.Fprintf(w, `{"discoveredLabels":`)
		writeLabelsJSON(w, dt.originalLabels)
		fmt.Fprintf(w, `}`)
		if i+1 < len(dts) {
			fmt.Fprintf(w, `,`)
		}
	}
	fmt.Fprintf(w, `]`)
}

var droppedTargetsMap = &droppedTargets{
	m: make(map[uint64]droppedTarget),
}

type jobTargetsStatuses struct {
	jobName       string
	upCount       int
	targetsTotal  int
	targetsStatus []targetStatus
}

func (tsm *targetStatusMap) getTargetsStatusByJob(filter *requestFilter) *targetsStatusResult {
	byJob := make(map[string][]targetStatus)
	tsm.mu.Lock()
	for _, ts := range tsm.m {
		jobName := ts.sw.Config.jobNameOriginal
		byJob[jobName] = append(byJob[jobName], *ts)
	}
	jobNames := append([]string{}, tsm.jobNames...)
	tsm.mu.Unlock()

	var jts []*jobTargetsStatuses
	for jobName, statuses := range byJob {
		sort.Slice(statuses, func(i, j int) bool {
			return statuses[i].sw.Config.ScrapeURL < statuses[j].sw.Config.ScrapeURL
		})
		ups := 0
		var targetsStatuses []targetStatus
		for _, ts := range statuses {
			if ts.up {
				ups++
			}
			if filter.showOnlyUnhealthy && ts.up {
				continue
			}
			targetsStatuses = append(targetsStatuses, ts)
		}
		jts = append(jts, &jobTargetsStatuses{
			jobName:       jobName,
			upCount:       ups,
			targetsTotal:  len(statuses),
			targetsStatus: targetsStatuses,
		})
	}
	sort.Slice(jts, func(i, j int) bool {
		return jts[i].jobName < jts[j].jobName
	})
	emptyJobs := getEmptyJobs(jts, jobNames)
	var err error
	jts, err = filterTargets(jts, filter.endpointSearch, filter.labelSearch)
	if len(filter.endpointSearch) > 0 || len(filter.labelSearch) > 0 {
		// Do not show empty jobs if target filters are set.
		emptyJobs = nil
	}
	dts := droppedTargetsMap.getTargetsList()
	return &targetsStatusResult{
		jobTargetsStatuses: jts,
		droppedTargets:     dts,
		emptyJobs:          emptyJobs,
		err:                err,
	}
}

func filterTargetsByEndpoint(jts []*jobTargetsStatuses, searchQuery string) ([]*jobTargetsStatuses, error) {
	if searchQuery == "" {
		return jts, nil
	}
	finder, err := regexp.Compile(searchQuery)
	if err != nil {
		return nil, fmt.Errorf("cannot parse %s: %w", searchQuery, err)
	}
	var jtsFiltered []*jobTargetsStatuses
	for _, job := range jts {
		var tss []targetStatus
		for _, ts := range job.targetsStatus {
			if finder.MatchString(ts.sw.Config.ScrapeURL) {
				tss = append(tss, ts)
			}
		}
		if len(tss) == 0 {
			// Skip jobs with zero targets after filtering, so users can see only the requested targets
			continue
		}
		job.targetsStatus = tss
		jtsFiltered = append(jtsFiltered, job)
	}
	return jtsFiltered, nil
}

func filterTargetsByLabels(jts []*jobTargetsStatuses, searchQuery string) ([]*jobTargetsStatuses, error) {
	if searchQuery == "" {
		return jts, nil
	}
	var ie promrelabel.IfExpression
	if err := ie.Parse(searchQuery); err != nil {
		return nil, fmt.Errorf("cannot parse %s: %w", searchQuery, err)
	}
	var jtsFiltered []*jobTargetsStatuses
	for _, job := range jts {
		var tss []targetStatus
		for _, ts := range job.targetsStatus {
			labels := ts.sw.Config.Labels.GetLabels()
			if ie.Match(labels) {
				tss = append(tss, ts)
			}
		}
		if len(tss) == 0 {
			// Skip jobs with zero targets after filtering, so users can see only the requested targets
			continue
		}
		job.targetsStatus = tss
		jtsFiltered = append(jtsFiltered, job)
	}
	return jtsFiltered, nil
}

func filterTargets(jts []*jobTargetsStatuses, endpointQuery, labelQuery string) ([]*jobTargetsStatuses, error) {
	var err error
	jts, err = filterTargetsByEndpoint(jts, endpointQuery)
	if err != nil {
		return nil, err
	}
	jts, err = filterTargetsByLabels(jts, labelQuery)
	if err != nil {
		return nil, err
	}
	return jts, nil
}
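
// A hedged usage sketch (queries are hypothetical): keep only targets whose
// scrape URL matches a regexp and whose labels match an if-expression:
//
//	jts, err := filterTargets(jts, `:9100/metrics$`, `{job="node_exporter"}`)
//	if err != nil {
//		// the regexp or the label expression failed to parse
//	}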

func getEmptyJobs(jts []*jobTargetsStatuses, jobNames []string) []string {
	jobNamesMap := make(map[string]struct{}, len(jobNames))
	for _, jobName := range jobNames {
		jobNamesMap[jobName] = struct{}{}
	}
	for i := range jts {
		delete(jobNamesMap, jts[i].jobName)
	}
	emptyJobs := make([]string, 0, len(jobNamesMap))
	for k := range jobNamesMap {
		emptyJobs = append(emptyJobs, k)
	}
	sort.Strings(emptyJobs)
	return emptyJobs
}

type requestFilter struct {
	showOriginalLabels bool
	showOnlyUnhealthy  bool
	endpointSearch     string
	labelSearch        string
}

func getRequestFilter(r *http.Request) *requestFilter {
	showOriginalLabels, _ := strconv.ParseBool(r.FormValue("show_original_labels"))
	showOnlyUnhealthy, _ := strconv.ParseBool(r.FormValue("show_only_unhealthy"))
	endpointSearch := strings.TrimSpace(r.FormValue("endpoint_search"))
	labelSearch := strings.TrimSpace(r.FormValue("label_search"))
	return &requestFilter{
		showOriginalLabels: showOriginalLabels,
		showOnlyUnhealthy:  showOnlyUnhealthy,
		endpointSearch:     endpointSearch,
		labelSearch:        labelSearch,
	}
}
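
// The filter maps one-to-one onto query args; a hypothetical request:
//
//	/targets?show_only_unhealthy=true&endpoint_search=node&label_search={env="prod"}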

type targetsStatusResult struct {
	jobTargetsStatuses []*jobTargetsStatuses
	droppedTargets     []droppedTarget
	emptyJobs          []string
	err                error
}

type targetLabels struct {
	up             bool
	originalLabels *promutils.Labels
	labels         *promutils.Labels
}

type targetLabelsByJob struct {
	jobName        string
	targets        []targetLabels
	activeTargets  int
	droppedTargets int
}

func getMetricRelabelContextByTargetID(targetID string) (*promrelabel.ParsedConfigs, *promutils.Labels, bool) {
	tsmGlobal.mu.Lock()
	defer tsmGlobal.mu.Unlock()

	for sw := range tsmGlobal.m {
		// The target is uniquely identified by a pointer to its original labels.
		if getLabelsID(sw.Config.OriginalLabels) == targetID {
			return sw.Config.MetricRelabelConfigs, sw.Config.Labels, true
		}
	}
	return nil, nil, false
}

func getTargetRelabelContextByTargetID(targetID string) (*promrelabel.ParsedConfigs, *promutils.Labels, bool) {
	var relabelConfigs *promrelabel.ParsedConfigs
	var labels *promutils.Labels
	found := false

	// Search for relabel context in tsmGlobal (aka active targets)
	tsmGlobal.mu.Lock()
	for sw := range tsmGlobal.m {
		// The target is uniquely identified by a pointer to its original labels.
		if getLabelsID(sw.Config.OriginalLabels) == targetID {
			relabelConfigs = sw.Config.RelabelConfigs
			labels = sw.Config.OriginalLabels
			found = true
			break
		}
	}
	tsmGlobal.mu.Unlock()

	if found {
		return relabelConfigs, labels, true
	}

	// Search for relabel context in droppedTargetsMap (aka deleted targets)
	droppedTargetsMap.mu.Lock()
	for _, dt := range droppedTargetsMap.m {
		if getLabelsID(dt.originalLabels) == targetID {
			relabelConfigs = dt.relabelConfigs
			labels = dt.originalLabels
			found = true
			break
		}
	}
	droppedTargetsMap.mu.Unlock()

	return relabelConfigs, labels, found
}
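
// Design note: the fallback to droppedTargetsMap above lets relabel debugging
// resolve targets that were dropped during relabeling, not only the currently
// active ones.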

func (tsr *targetsStatusResult) getTargetLabelsByJob() []*targetLabelsByJob {
	byJob := make(map[string]*targetLabelsByJob)
	for _, jts := range tsr.jobTargetsStatuses {
		jobName := jts.jobName
		for _, ts := range jts.targetsStatus {
			m := byJob[jobName]
			if m == nil {
				m = &targetLabelsByJob{
					jobName: jobName,
				}
				byJob[jobName] = m
			}
			m.activeTargets++
			m.targets = append(m.targets, targetLabels{
				up:             ts.up,
				originalLabels: ts.sw.Config.OriginalLabels,
				labels:         ts.sw.Config.Labels,
			})
		}
	}
	for _, dt := range tsr.droppedTargets {
		jobName := dt.originalLabels.Get("job")
		m := byJob[jobName]
		if m == nil {
			m = &targetLabelsByJob{
				jobName: jobName,
			}
			byJob[jobName] = m
		}
		m.droppedTargets++
		m.targets = append(m.targets, targetLabels{
			originalLabels: dt.originalLabels,
		})
	}
	a := make([]*targetLabelsByJob, 0, len(byJob))
	for _, tls := range byJob {
		a = append(a, tls)
	}
	sort.Slice(a, func(i, j int) bool {
		return a[i].jobName < a[j].jobName
	})
	return a
}