Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Commit 6712a8269c — 35 changed files with 1579 additions and 1190 deletions

README.md
@@ -1713,6 +1713,16 @@ and [vmrestore](https://docs.victoriametrics.com/vmrestore.html) tools.
 We also provide [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html) tool for enterprise subscribers.
 Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).

+## vmalert
+
+A single-node VictoriaMetrics is capable of proxying requests to [vmalert](https://docs.victoriametrics.com/vmalert.html)
+when `-vmalert.proxyURL` flag is set. Use this feature for the following cases:
+* for proxying requests from [Grafana Alerting UI](https://grafana.com/docs/grafana/latest/alerting/);
+* for accessing vmalert's UI through single-node VictoriaMetrics Web interface.
+
+For accessing vmalert's UI through single-node VictoriaMetrics configure `-vmalert.proxyURL` flag and visit
+`http://<victoriametrics-addr>:8428/vmalert/home` link.
+
 ## Benchmarks

 Note, that vendors (including VictoriaMetrics) are often biased when doing such tests. E.g. they try highlighting

@@ -2181,5 +2191,5 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -version
     	Show VictoriaMetrics version
   -vmalert.proxyURL string
-    	Optional URL for proxying alerting API requests from Grafana. For example, if -vmalert.proxyURL is set to http://vmalert:8880 , then requests to /api/v1/rules are proxied to http://vmalert:8880/api/v1/rules
+    	Optional URL for proxying requests to vmalert. For example, if -vmalert.proxyURL=http://vmalert:8880 , then alerting API requests such as /api/v1/rules from Grafana will be proxied to http://vmalert:8880/api/v1/rules
 ```
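The `-vmalert.proxyURL` flag documented above forwards alerting API and UI requests from single-node VictoriaMetrics to a vmalert instance. A minimal sketch of such a proxy using only the standard library — an illustration of the idea, not the actual VictoriaMetrics implementation (which appears later in this diff as `proxyVMAlertRequests`):

```go
package main

import (
	"flag"
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

var vmalertProxyURL = flag.String("vmalert.proxyURL", "", "Optional URL for proxying requests to vmalert")

func main() {
	flag.Parse()
	target, err := url.Parse(*vmalertProxyURL) // e.g. http://vmalert:8880
	if err != nil || *vmalertProxyURL == "" {
		log.Fatalf("cannot parse -vmalert.proxyURL: %v", err)
	}
	// Forward everything under /vmalert/ to the vmalert instance.
	proxy := httputil.NewSingleHostReverseProxy(target)
	http.Handle("/vmalert/", proxy)
	log.Fatal(http.ListenAndServe(":8428", nil))
}
```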
@@ -212,7 +212,15 @@ func getAuthConfig(argIdx int) (*promauth.Config, error) {
 		InsecureSkipVerify: tlsInsecureSkipVerify.GetOptionalArg(argIdx),
 	}

-	authCfg, err := promauth.NewConfig(".", nil, basicAuthCfg, token, tokenFile, oauth2Cfg, tlsCfg, hdrs)
+	opts := &promauth.Options{
+		BasicAuth:       basicAuthCfg,
+		BearerToken:     token,
+		BearerTokenFile: tokenFile,
+		OAuth2:          oauth2Cfg,
+		TLSConfig:       tlsCfg,
+		Headers:         hdrs,
+	}
+	authCfg, err := opts.NewConfig()
 	if err != nil {
 		return nil, fmt.Errorf("cannot populate OAuth2 config for remoteWrite idx: %d, err: %w", argIdx, err)
 	}
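The hunk above replaces a long positional parameter list with an options struct. A self-contained illustration of that pattern with invented names (this is not the real promauth API — only the field names come from the diff):

```go
package main

import "fmt"

// AuthOptions groups what used to be positional arguments into named fields,
// so call sites stay readable and new options don't break existing callers.
type AuthOptions struct {
	BearerToken     string
	BearerTokenFile string
	Headers         []string
}

// NewConfig validates the options and builds a config string (stand-in for
// the real *promauth.Config construction).
func (opts *AuthOptions) NewConfig() (string, error) {
	if opts.BearerToken != "" && opts.BearerTokenFile != "" {
		return "", fmt.Errorf("only one of BearerToken or BearerTokenFile may be set")
	}
	return fmt.Sprintf("token=%q file=%q headers=%d", opts.BearerToken, opts.BearerTokenFile, len(opts.Headers)), nil
}

func main() {
	opts := &AuthOptions{BearerToken: "secret"}
	cfg, err := opts.NewConfig()
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg)
}
```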
@ -1,12 +1,19 @@
|
|||
{% import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"net/http"
|
||||
"strings"
|
||||
) %}
|
||||
|
||||
{% func Footer() %}
|
||||
{% code pathPrefix := httpserver.GetPathPrefix() %}
|
||||
|
||||
{% func Footer(r *http.Request) %}
|
||||
{%code
|
||||
prefix := "/vmalert/"
|
||||
if strings.HasPrefix(r.URL.Path, prefix) {
|
||||
prefix = ""
|
||||
}
|
||||
%}
|
||||
</main>
|
||||
<script src="{%s pathPrefix %}/static/js/jquery-3.6.0.min.js" type="text/javascript"></script>
|
||||
<script src="{%s pathPrefix %}/static/js/bootstrap.bundle.min.js" type="text/javascript"></script>
|
||||
<script src="{%s prefix %}static/js/jquery-3.6.0.min.js" type="text/javascript"></script>
|
||||
<script src="{%s prefix %}static/js/bootstrap.bundle.min.js" type="text/javascript"></script>
|
||||
<script type="text/javascript">
|
||||
function expandAll() {
|
||||
$('.collapse').addClass('show');
|
||||
|
|
|
app/vmalert/tpl/footer.qtpl.go
@@ -6,43 +6,47 @@ package tpl

 //line app/vmalert/tpl/footer.qtpl:1
 import (
-	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
+	"net/http"
+	"strings"
 )

-//line app/vmalert/tpl/footer.qtpl:5
+//line app/vmalert/tpl/footer.qtpl:7
 import (
 	qtio422016 "io"

 	qt422016 "github.com/valyala/quicktemplate"
 )

-//line app/vmalert/tpl/footer.qtpl:5
+//line app/vmalert/tpl/footer.qtpl:7
 var (
 	_ = qtio422016.Copy
 	_ = qt422016.AcquireByteBuffer
 )

-//line app/vmalert/tpl/footer.qtpl:5
-func StreamFooter(qw422016 *qt422016.Writer) {
-//line app/vmalert/tpl/footer.qtpl:5
+//line app/vmalert/tpl/footer.qtpl:7
+func StreamFooter(qw422016 *qt422016.Writer, r *http.Request) {
+//line app/vmalert/tpl/footer.qtpl:7
 	qw422016.N().S(`
`)
-//line app/vmalert/tpl/footer.qtpl:6
-	pathPrefix := httpserver.GetPathPrefix()
+//line app/vmalert/tpl/footer.qtpl:9
+	prefix := "/vmalert/"
+	if strings.HasPrefix(r.URL.Path, prefix) {
+		prefix = ""
+	}

-//line app/vmalert/tpl/footer.qtpl:6
+//line app/vmalert/tpl/footer.qtpl:13
 	qw422016.N().S(`
 </main>
 <script src="`)
-//line app/vmalert/tpl/footer.qtpl:8
-	qw422016.E().S(pathPrefix)
-//line app/vmalert/tpl/footer.qtpl:8
-	qw422016.N().S(`/static/js/jquery-3.6.0.min.js" type="text/javascript"></script>
+//line app/vmalert/tpl/footer.qtpl:15
+	qw422016.E().S(prefix)
+//line app/vmalert/tpl/footer.qtpl:15
+	qw422016.N().S(`static/js/jquery-3.6.0.min.js" type="text/javascript"></script>
 <script src="`)
-//line app/vmalert/tpl/footer.qtpl:9
-	qw422016.E().S(pathPrefix)
-//line app/vmalert/tpl/footer.qtpl:9
-	qw422016.N().S(`/static/js/bootstrap.bundle.min.js" type="text/javascript"></script>
+//line app/vmalert/tpl/footer.qtpl:16
+	qw422016.E().S(prefix)
+//line app/vmalert/tpl/footer.qtpl:16
+	qw422016.N().S(`static/js/bootstrap.bundle.min.js" type="text/javascript"></script>
 <script type="text/javascript">
 	function expandAll() {
 		$('.collapse').addClass('show');

@@ -75,31 +79,31 @@ func StreamFooter(qw422016 *qt422016.Writer) {
 </body>
 </html>
`)
-//line app/vmalert/tpl/footer.qtpl:41
+//line app/vmalert/tpl/footer.qtpl:48
 }

-//line app/vmalert/tpl/footer.qtpl:41
-func WriteFooter(qq422016 qtio422016.Writer) {
-//line app/vmalert/tpl/footer.qtpl:41
+//line app/vmalert/tpl/footer.qtpl:48
+func WriteFooter(qq422016 qtio422016.Writer, r *http.Request) {
+//line app/vmalert/tpl/footer.qtpl:48
 	qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmalert/tpl/footer.qtpl:41
-	StreamFooter(qw422016)
-//line app/vmalert/tpl/footer.qtpl:41
+//line app/vmalert/tpl/footer.qtpl:48
+	StreamFooter(qw422016, r)
+//line app/vmalert/tpl/footer.qtpl:48
 	qt422016.ReleaseWriter(qw422016)
-//line app/vmalert/tpl/footer.qtpl:41
+//line app/vmalert/tpl/footer.qtpl:48
 }

-//line app/vmalert/tpl/footer.qtpl:41
-func Footer() string {
-//line app/vmalert/tpl/footer.qtpl:41
+//line app/vmalert/tpl/footer.qtpl:48
+func Footer(r *http.Request) string {
+//line app/vmalert/tpl/footer.qtpl:48
 	qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmalert/tpl/footer.qtpl:41
-	WriteFooter(qb422016)
-//line app/vmalert/tpl/footer.qtpl:41
+//line app/vmalert/tpl/footer.qtpl:48
+	WriteFooter(qb422016, r)
+//line app/vmalert/tpl/footer.qtpl:48
 	qs422016 := string(qb422016.B)
-//line app/vmalert/tpl/footer.qtpl:41
+//line app/vmalert/tpl/footer.qtpl:48
 	qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmalert/tpl/footer.qtpl:41
+//line app/vmalert/tpl/footer.qtpl:48
 	return qs422016
-//line app/vmalert/tpl/footer.qtpl:41
+//line app/vmalert/tpl/footer.qtpl:48
 }
app/vmalert/tpl/header.qtpl
@@ -1,14 +1,21 @@
 {% import (
-	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
+	"strings"
+	"net/http"
+	"path"
 ) %}

-{% func Header(title string, pages []NavItem) %}
-{% code pathPrefix := httpserver.GetPathPrefix() %}
+{% func Header(r *http.Request, navItems []NavItem, title string) %}
+{%code
+	prefix := "/vmalert/"
+	if strings.HasPrefix(r.URL.Path, prefix) {
+		prefix = ""
+	}
+%}
 <!DOCTYPE html>
 <html lang="en">
 <head>
     <title>vmalert{% if title != "" %} - {%s title %}{% endif %}</title>
-    <link href="{%s pathPrefix %}/static/css/bootstrap.min.css" rel="stylesheet" />
+    <link href="{%s prefix %}static/css/bootstrap.min.css" rel="stylesheet" />
     <style>
         body{
             min-height: 75rem;

@@ -57,6 +64,37 @@
     </style>
 </head>
 <body>
-    {%= PrintNavItems(title, pages) %}
+    {%= printNavItems(r, title, navItems) %}
     <main class="px-2">
 {% endfunc %}

+
+{% code
+type NavItem struct {
+	Name string
+	Url  string
+}
+%}
+
+{% func printNavItems(r *http.Request, current string, items []NavItem) %}
+{%code
+	prefix := "/vmalert/"
+	if strings.HasPrefix(r.URL.Path, prefix) {
+		prefix = ""
+	}
+%}
+<nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark">
+  <div class="container-fluid">
+    <div class="collapse navbar-collapse" id="navbarCollapse">
+      <ul class="navbar-nav me-auto mb-2 mb-md-0">
+        {% for _, item := range items %}
+          <li class="nav-item">
+            <a class="nav-link{% if current == item.Name %} active{% endif %}" href="{%s path.Join(prefix,item.Url) %}">
+              {%s item.Name %}
+            </a>
+          </li>
+        {% endfor %}
+      </ul>
+    </div>
+</nav>
+{% endfunc %}
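The templates above compute a `prefix` from the request path and build navigation links with `path.Join`. A runnable demonstration of that exact logic in isolation — when the current path already carries the `/vmalert/` prefix, links become relative; otherwise they are joined under `/vmalert/`:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// navHref mirrors the prefix logic from the printNavItems template.
func navHref(requestPath, itemURL string) string {
	prefix := "/vmalert/"
	if strings.HasPrefix(requestPath, prefix) {
		prefix = ""
	}
	return path.Join(prefix, itemURL)
}

func main() {
	// Page served at "/" (no /vmalert/ prefix): links point under /vmalert/.
	fmt.Println(navHref("/", "groups")) // "/vmalert/groups"
	// Page served under /vmalert/: links stay relative, so any outer
	// proxy prefix in the browser's address bar is preserved.
	fmt.Println(navHref("/vmalert/home", "groups")) // "groups"
}
```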
app/vmalert/tpl/header.qtpl.go
@@ -6,51 +6,56 @@ package tpl

 //line app/vmalert/tpl/header.qtpl:1
 import (
-	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
+	"net/http"
+	"path"
+	"strings"
 )

-//line app/vmalert/tpl/header.qtpl:5
+//line app/vmalert/tpl/header.qtpl:7
 import (
 	qtio422016 "io"

 	qt422016 "github.com/valyala/quicktemplate"
 )

-//line app/vmalert/tpl/header.qtpl:5
+//line app/vmalert/tpl/header.qtpl:7
 var (
 	_ = qtio422016.Copy
 	_ = qt422016.AcquireByteBuffer
 )

-//line app/vmalert/tpl/header.qtpl:5
-func StreamHeader(qw422016 *qt422016.Writer, title string, pages []NavItem) {
-//line app/vmalert/tpl/header.qtpl:5
+//line app/vmalert/tpl/header.qtpl:7
+func StreamHeader(qw422016 *qt422016.Writer, r *http.Request, navItems []NavItem, title string) {
+//line app/vmalert/tpl/header.qtpl:7
 	qw422016.N().S(`
`)
-//line app/vmalert/tpl/header.qtpl:6
-	pathPrefix := httpserver.GetPathPrefix()
+//line app/vmalert/tpl/header.qtpl:9
+	prefix := "/vmalert/"
+	if strings.HasPrefix(r.URL.Path, prefix) {
+		prefix = ""
+	}

-//line app/vmalert/tpl/header.qtpl:6
+//line app/vmalert/tpl/header.qtpl:13
 	qw422016.N().S(`
 <!DOCTYPE html>
 <html lang="en">
 <head>
     <title>vmalert`)
-//line app/vmalert/tpl/header.qtpl:10
+//line app/vmalert/tpl/header.qtpl:17
 	if title != "" {
-//line app/vmalert/tpl/header.qtpl:10
+//line app/vmalert/tpl/header.qtpl:17
 		qw422016.N().S(` - `)
-//line app/vmalert/tpl/header.qtpl:10
+//line app/vmalert/tpl/header.qtpl:17
 		qw422016.E().S(title)
-//line app/vmalert/tpl/header.qtpl:10
+//line app/vmalert/tpl/header.qtpl:17
 	}
-//line app/vmalert/tpl/header.qtpl:10
+//line app/vmalert/tpl/header.qtpl:17
 	qw422016.N().S(`</title>
     <link href="`)
-//line app/vmalert/tpl/header.qtpl:11
-	qw422016.E().S(pathPrefix)
-//line app/vmalert/tpl/header.qtpl:11
-	qw422016.N().S(`/static/css/bootstrap.min.css" rel="stylesheet" />
+//line app/vmalert/tpl/header.qtpl:18
+	qw422016.E().S(prefix)
+//line app/vmalert/tpl/header.qtpl:18
+	qw422016.N().S(`static/css/bootstrap.min.css" rel="stylesheet" />
     <style>
         body{
             min-height: 75rem;

@@ -100,37 +105,124 @@ func StreamHeader(qw422016 *qt422016.Writer, title string, pages []NavItem) {
 </head>
 <body>
`)
-//line app/vmalert/tpl/header.qtpl:60
-	StreamPrintNavItems(qw422016, title, pages)
-//line app/vmalert/tpl/header.qtpl:60
+//line app/vmalert/tpl/header.qtpl:67
+	streamprintNavItems(qw422016, r, title, navItems)
+//line app/vmalert/tpl/header.qtpl:67
 	qw422016.N().S(`
     <main class="px-2">
`)
-//line app/vmalert/tpl/header.qtpl:62
+//line app/vmalert/tpl/header.qtpl:69
 }

-//line app/vmalert/tpl/header.qtpl:62
-func WriteHeader(qq422016 qtio422016.Writer, title string, pages []NavItem) {
-//line app/vmalert/tpl/header.qtpl:62
+//line app/vmalert/tpl/header.qtpl:69
+func WriteHeader(qq422016 qtio422016.Writer, r *http.Request, navItems []NavItem, title string) {
+//line app/vmalert/tpl/header.qtpl:69
 	qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmalert/tpl/header.qtpl:62
-	StreamHeader(qw422016, title, pages)
-//line app/vmalert/tpl/header.qtpl:62
+//line app/vmalert/tpl/header.qtpl:69
+	StreamHeader(qw422016, r, navItems, title)
+//line app/vmalert/tpl/header.qtpl:69
 	qt422016.ReleaseWriter(qw422016)
-//line app/vmalert/tpl/header.qtpl:62
+//line app/vmalert/tpl/header.qtpl:69
 }

-//line app/vmalert/tpl/header.qtpl:62
-func Header(title string, pages []NavItem) string {
-//line app/vmalert/tpl/header.qtpl:62
+//line app/vmalert/tpl/header.qtpl:69
+func Header(r *http.Request, navItems []NavItem, title string) string {
+//line app/vmalert/tpl/header.qtpl:69
 	qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmalert/tpl/header.qtpl:62
-	WriteHeader(qb422016, title, pages)
-//line app/vmalert/tpl/header.qtpl:62
+//line app/vmalert/tpl/header.qtpl:69
+	WriteHeader(qb422016, r, navItems, title)
+//line app/vmalert/tpl/header.qtpl:69
 	qs422016 := string(qb422016.B)
-//line app/vmalert/tpl/header.qtpl:62
+//line app/vmalert/tpl/header.qtpl:69
 	qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmalert/tpl/header.qtpl:62
+//line app/vmalert/tpl/header.qtpl:69
 	return qs422016
-//line app/vmalert/tpl/header.qtpl:62
+//line app/vmalert/tpl/header.qtpl:69
 }

+//line app/vmalert/tpl/header.qtpl:73
+type NavItem struct {
+	Name string
+	Url  string
+}
+
+//line app/vmalert/tpl/header.qtpl:79
+func streamprintNavItems(qw422016 *qt422016.Writer, r *http.Request, current string, items []NavItem) {
+//line app/vmalert/tpl/header.qtpl:79
+	qw422016.N().S(`
+`)
+//line app/vmalert/tpl/header.qtpl:81
+	prefix := "/vmalert/"
+	if strings.HasPrefix(r.URL.Path, prefix) {
+		prefix = ""
+	}
+
+//line app/vmalert/tpl/header.qtpl:85
+	qw422016.N().S(`
+<nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark">
+  <div class="container-fluid">
+    <div class="collapse navbar-collapse" id="navbarCollapse">
+      <ul class="navbar-nav me-auto mb-2 mb-md-0">
+`)
+//line app/vmalert/tpl/header.qtpl:90
+	for _, item := range items {
+//line app/vmalert/tpl/header.qtpl:90
+		qw422016.N().S(`
+          <li class="nav-item">
+            <a class="nav-link`)
+//line app/vmalert/tpl/header.qtpl:92
+		if current == item.Name {
+//line app/vmalert/tpl/header.qtpl:92
+			qw422016.N().S(` active`)
+//line app/vmalert/tpl/header.qtpl:92
+		}
+//line app/vmalert/tpl/header.qtpl:92
+		qw422016.N().S(`" href="`)
+//line app/vmalert/tpl/header.qtpl:92
+		qw422016.E().S(path.Join(prefix, item.Url))
+//line app/vmalert/tpl/header.qtpl:92
+		qw422016.N().S(`">
+`)
+//line app/vmalert/tpl/header.qtpl:93
+		qw422016.E().S(item.Name)
+//line app/vmalert/tpl/header.qtpl:93
+		qw422016.N().S(`
+            </a>
+          </li>
+`)
+//line app/vmalert/tpl/header.qtpl:96
+	}
+//line app/vmalert/tpl/header.qtpl:96
+	qw422016.N().S(`
+      </ul>
+    </div>
+</nav>
+`)
+//line app/vmalert/tpl/header.qtpl:100
+}
+
+//line app/vmalert/tpl/header.qtpl:100
+func writeprintNavItems(qq422016 qtio422016.Writer, r *http.Request, current string, items []NavItem) {
+//line app/vmalert/tpl/header.qtpl:100
+	qw422016 := qt422016.AcquireWriter(qq422016)
+//line app/vmalert/tpl/header.qtpl:100
+	streamprintNavItems(qw422016, r, current, items)
+//line app/vmalert/tpl/header.qtpl:100
+	qt422016.ReleaseWriter(qw422016)
+//line app/vmalert/tpl/header.qtpl:100
+}
+
+//line app/vmalert/tpl/header.qtpl:100
+func printNavItems(r *http.Request, current string, items []NavItem) string {
+//line app/vmalert/tpl/header.qtpl:100
+	qb422016 := qt422016.AcquireByteBuffer()
+//line app/vmalert/tpl/header.qtpl:100
+	writeprintNavItems(qb422016, r, current, items)
+//line app/vmalert/tpl/header.qtpl:100
+	qs422016 := string(qb422016.B)
+//line app/vmalert/tpl/header.qtpl:100
+	qt422016.ReleaseByteBuffer(qb422016)
+//line app/vmalert/tpl/header.qtpl:100
+	return qs422016
+//line app/vmalert/tpl/header.qtpl:100
+}
app/vmalert/tpl/nav.qtpl (deleted)
@@ -1,25 +0,0 @@
-{% code
-type NavItem struct {
-	Name string
-	Url  string
-}
-%}
-
-{% func PrintNavItems(current string, items []NavItem) %}
-<nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark">
-  <div class="container-fluid">
-    <div class="collapse navbar-collapse" id="navbarCollapse">
-      <ul class="navbar-nav me-auto mb-2 mb-md-0">
-        {% for _, item := range items %}
-          <li class="nav-item">
-            <a class="nav-link{% if current == item.Name %} active{% endif %}" href="{%s item.Url %}">
-              {%s item.Name %}
-            </a>
-          </li>
-        {% endfor %}
-      </ul>
-    </div>
-</nav>
-{% endfunc %}
-
app/vmalert/tpl/nav.qtpl.go (deleted)
@@ -1,96 +0,0 @@
-// Code generated by qtc from "nav.qtpl". DO NOT EDIT.
-// See https://github.com/valyala/quicktemplate for details.
-
-//line app/vmalert/tpl/nav.qtpl:1
-package tpl
-
-//line app/vmalert/tpl/nav.qtpl:1
-import (
-	qtio422016 "io"
-
-	qt422016 "github.com/valyala/quicktemplate"
-)
-
-//line app/vmalert/tpl/nav.qtpl:1
-var (
-	_ = qtio422016.Copy
-	_ = qt422016.AcquireByteBuffer
-)
-
-//line app/vmalert/tpl/nav.qtpl:2
-type NavItem struct {
-	Name string
-	Url  string
-}
-
-//line app/vmalert/tpl/nav.qtpl:8
-func StreamPrintNavItems(qw422016 *qt422016.Writer, current string, items []NavItem) {
-//line app/vmalert/tpl/nav.qtpl:8
-	qw422016.N().S(`
-<nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark">
-  <div class="container-fluid">
-    <div class="collapse navbar-collapse" id="navbarCollapse">
-      <ul class="navbar-nav me-auto mb-2 mb-md-0">
-`)
-//line app/vmalert/tpl/nav.qtpl:13
-	for _, item := range items {
-//line app/vmalert/tpl/nav.qtpl:13
-		qw422016.N().S(`
-          <li class="nav-item">
-            <a class="nav-link`)
-//line app/vmalert/tpl/nav.qtpl:15
-		if current == item.Name {
-//line app/vmalert/tpl/nav.qtpl:15
-			qw422016.N().S(` active`)
-//line app/vmalert/tpl/nav.qtpl:15
-		}
-//line app/vmalert/tpl/nav.qtpl:15
-		qw422016.N().S(`" href="`)
-//line app/vmalert/tpl/nav.qtpl:15
-		qw422016.E().S(item.Url)
-//line app/vmalert/tpl/nav.qtpl:15
-		qw422016.N().S(`">
-`)
-//line app/vmalert/tpl/nav.qtpl:16
-		qw422016.E().S(item.Name)
-//line app/vmalert/tpl/nav.qtpl:16
-		qw422016.N().S(`
-            </a>
-          </li>
-`)
-//line app/vmalert/tpl/nav.qtpl:19
-	}
-//line app/vmalert/tpl/nav.qtpl:19
-	qw422016.N().S(`
-      </ul>
-    </div>
-</nav>
-`)
-//line app/vmalert/tpl/nav.qtpl:23
-}
-
-//line app/vmalert/tpl/nav.qtpl:23
-func WritePrintNavItems(qq422016 qtio422016.Writer, current string, items []NavItem) {
-//line app/vmalert/tpl/nav.qtpl:23
-	qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmalert/tpl/nav.qtpl:23
-	StreamPrintNavItems(qw422016, current, items)
-//line app/vmalert/tpl/nav.qtpl:23
-	qt422016.ReleaseWriter(qw422016)
-//line app/vmalert/tpl/nav.qtpl:23
-}
-
-//line app/vmalert/tpl/nav.qtpl:23
-func PrintNavItems(current string, items []NavItem) string {
-//line app/vmalert/tpl/nav.qtpl:23
-	qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmalert/tpl/nav.qtpl:23
-	WritePrintNavItems(qb422016, current, items)
-//line app/vmalert/tpl/nav.qtpl:23
-	qs422016 := string(qb422016.B)
-//line app/vmalert/tpl/nav.qtpl:23
-	qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmalert/tpl/nav.qtpl:23
-	return qs422016
-//line app/vmalert/tpl/nav.qtpl:23
-}
@@ -5,7 +5,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"net/http"
-	"path"
 	"sort"
 	"strconv"
 	"strings"

@@ -24,30 +23,24 @@ var (
 	navItems []tpl.NavItem
 )

-var (
-	//go:embed static
-	staticFiles  embed.FS
-	staticServer = http.FileServer(http.FS(staticFiles))
-)
-
 func initLinks() {
-	pathPrefix := httpserver.GetPathPrefix()
-	if pathPrefix == "" {
-		pathPrefix = "/"
-	}
 	apiLinks = [][2]string{
-		{path.Join(pathPrefix, "api/v1/rules"), "list all loaded groups and rules"},
-		{path.Join(pathPrefix, "api/v1/alerts"), "list all active alerts"},
-		{path.Join(pathPrefix, "api/v1/groupID/alertID/status"), "get alert status by ID"},
-		{path.Join(pathPrefix, "flags"), "command-line flags"},
-		{path.Join(pathPrefix, "metrics"), "list of application metrics"},
-		{path.Join(pathPrefix, "-/reload"), "reload configuration"},
+		// api links are relative since they can be used by external clients
+		// such as Grafana and proxied via vmselect.
+		{"api/v1/rules", "list all loaded groups and rules"},
+		{"api/v1/alerts", "list all active alerts"},
+		{"api/v1/groupID/alertID/status", "get alert status by ID"},
+
+		// system links
+		{"/flags", "command-line flags"},
+		{"/metrics", "list of application metrics"},
+		{"/-/reload", "reload configuration"},
 	}
 	navItems = []tpl.NavItem{
-		{Name: "vmalert", Url: path.Join(pathPrefix, "/")},
-		{Name: "Groups", Url: path.Join(pathPrefix, "groups")},
-		{Name: "Alerts", Url: path.Join(pathPrefix, "alerts")},
-		{Name: "Notifiers", Url: path.Join(pathPrefix, "notifiers")},
+		{Name: "vmalert", Url: "home"},
+		{Name: "Groups", Url: "groups"},
+		{Name: "Alerts", Url: "alerts"},
+		{Name: "Notifiers", Url: "notifiers"},
 		{Name: "Docs", Url: "https://docs.victoriametrics.com/vmalert.html"},
 	}
 }
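The comment in this hunk says the API links became relative so that external clients and proxies keep working. A small runnable example of why: a relative link resolves against whatever base path the page is currently served under, so one link works both on vmalert directly and behind any proxying prefix:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	rel, _ := url.Parse("api/v1/rules") // relative, like the links above
	for _, base := range []string{
		"http://vmalert:8880/vmalert/home",                      // vmalert directly
		"http://victoria:8428/vmalert/home",                     // single-node proxy
		"http://vmselect:8481/select/0/prometheus/vmalert/home", // cluster proxy (hypothetical host names)
	} {
		b, _ := url.Parse(base)
		// ResolveReference keeps the enclosing prefix intact.
		fmt.Println(b.ResolveReference(rel))
	}
}
```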
@@ -56,33 +49,50 @@ type requestHandler struct {
 	m *manager
 }

+var (
+	//go:embed static
+	staticFiles   embed.FS
+	staticHandler = http.FileServer(http.FS(staticFiles))
+	staticServer  = http.StripPrefix("/vmalert", staticHandler)
+)
+
 func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
 	once.Do(func() {
 		initLinks()
 	})

-	pathPrefix := httpserver.GetPathPrefix()
-	if pathPrefix == "" {
-		pathPrefix = "/"
+	if strings.HasPrefix(r.URL.Path, "/vmalert/static") {
+		staticServer.ServeHTTP(w, r)
+		return true
 	}

 	switch r.URL.Path {
-	case "/":
+	case "/", "/vmalert", "/vmalert/home":
 		if r.Method != "GET" {
 			return false
 		}
-		WriteWelcome(w)
+		WriteWelcome(w, r)
 		return true
-	case "/alerts":
-		WriteListAlerts(w, pathPrefix, rh.groupAlerts())
+	case "/vmalert/alerts":
+		WriteListAlerts(w, r, rh.groupAlerts())
 		return true
-	case "/groups", "/rules":
-		WriteListGroups(w, rh.groups())
+	case "/vmalert/groups":
+		WriteListGroups(w, r, rh.groups())
 		return true
-	case "/notifiers":
-		WriteListTargets(w, notifier.GetTargets())
+	case "/vmalert/notifiers":
+		WriteListTargets(w, r, notifier.GetTargets())
 		return true
-	case "/api/v1/rules":
+
+	// special cases for Grafana requests,
+	// served without `vmalert` prefix:
+	case "/rules":
+		// Grafana makes an extra request to `/rules`
+		// handler in addition to `/api/v1/rules` calls in alerts UI,
+		WriteListGroups(w, r, rh.groups())
+		return true
+
+	case "/vmalert/api/v1/rules", "/api/v1/rules":
+		// path used by Grafana for ng alerting
 		data, err := rh.listGroups()
 		if err != nil {
 			httpserver.Errorf(w, r, "%s", err)

@@ -91,7 +101,8 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
 		w.Header().Set("Content-Type", "application/json")
 		w.Write(data)
 		return true
-	case "/api/v1/alerts":
+	case "/vmalert/api/v1/alerts", "/api/v1/alerts":
+		// path used by Grafana for ng alerting
 		data, err := rh.listAlerts()
 		if err != nil {
 			httpserver.Errorf(w, r, "%s", err)

@@ -100,17 +111,14 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
 		w.Header().Set("Content-Type", "application/json")
 		w.Write(data)
 		return true
+
 	case "/-/reload":
 		logger.Infof("api config reload was called, sending sighup")
 		procutil.SelfSIGHUP()
 		w.WriteHeader(http.StatusOK)
 		return true
-	default:
-		if strings.HasPrefix(r.URL.Path, "/static") {
-			staticServer.ServeHTTP(w, r)
-			return true
-		}

+	default:
 		if !strings.HasSuffix(r.URL.Path, "/status") {
 			return false
 		}

@@ -133,7 +141,7 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
 	}

 	// <groupID>/<alertID>/status
-	WriteAlert(w, pathPrefix, alert)
+	WriteAlert(w, r, alert)
 	return true
 	}
 }
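This hunk moves static-file serving behind an explicit `/vmalert` route prefix: `http.StripPrefix` removes the prefix before the `embed.FS` file server resolves the path. The same wiring in isolation, assuming a local `static/` directory exists to embed:

```go
package main

import (
	"embed"
	"log"
	"net/http"
)

//go:embed static
var staticFiles embed.FS

func main() {
	staticHandler := http.FileServer(http.FS(staticFiles))
	// A request for /vmalert/static/css/bootstrap.min.css is rewritten to
	// /static/css/bootstrap.min.css, which the FS handler resolves to the
	// embedded file "static/css/bootstrap.min.css".
	staticServer := http.StripPrefix("/vmalert", staticHandler)
	http.Handle("/vmalert/static/", staticServer)
	log.Fatal(http.ListenAndServe(":8880", nil))
}
```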
@@ -4,14 +4,15 @@
 	"time"
 	"sort"
 	"path"
+	"net/http"

 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/tpl"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
 ) %}


-{% func Welcome() %}
-{%= tpl.Header("vmalert", navItems) %}
+{% func Welcome(r *http.Request) %}
+{%= tpl.Header(r, navItems, "vmalert") %}
 <p>
     API:<br>
     {% for _, p := range apiLinks %}

@@ -21,11 +22,11 @@
         <a href="{%s p %}">{%s p %}</a> - {%s doc %}<br/>
     {% endfor %}
 </p>
-{%= tpl.Footer() %}
+{%= tpl.Footer(r) %}
 {% endfunc %}

-{% func ListGroups(groups []APIGroup) %}
-{%= tpl.Header("Groups", navItems) %}
+{% func ListGroups(r *http.Request, groups []APIGroup) %}
+{%= tpl.Header(r, navItems, "Groups") %}
 {% if len(groups) > 0 %}
 {%code
 	rOk := make(map[string]int)

@@ -112,13 +113,13 @@
 </div>
 {% endif %}

-{%= tpl.Footer() %}
+{%= tpl.Footer(r) %}

 {% endfunc %}


-{% func ListAlerts(pathPrefix string, groupAlerts []GroupAlerts) %}
-{%= tpl.Header("Alerts", navItems) %}
+{% func ListAlerts(r *http.Request, groupAlerts []GroupAlerts) %}
+{%= tpl.Header(r, navItems, "Alerts") %}
 {% if len(groupAlerts) > 0 %}
     <a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
     <a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>

@@ -182,7 +183,7 @@
                 </td>
                 <td>{%s ar.Value %}</td>
                 <td>
-                    <a href="{%s path.Join(pathPrefix, g.ID, ar.ID, "status") %}">Details</a>
+                    <a href="{%s path.Join(g.ID, ar.ID, "status") %}">Details</a>
                 </td>
             </tr>
         {% endfor %}

@@ -199,12 +200,12 @@
 </div>
 {% endif %}

-{%= tpl.Footer() %}
+{%= tpl.Footer(r) %}

 {% endfunc %}

-{% func ListTargets(targets map[notifier.TargetType][]notifier.Target) %}
-{%= tpl.Header("Notifiers", navItems) %}
+{% func ListTargets(r *http.Request, targets map[notifier.TargetType][]notifier.Target) %}
+{%= tpl.Header(r, navItems, "Notifiers") %}
 {% if len(targets) > 0 %}
     <a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
     <a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>

@@ -255,12 +256,12 @@
 </div>
 {% endif %}

-{%= tpl.Footer() %}
+{%= tpl.Footer(r) %}

 {% endfunc %}

-{% func Alert(pathPrefix string, alert *APIAlert) %}
-{%= tpl.Header("", navItems) %}
+{% func Alert(r *http.Request, alert *APIAlert) %}
+{%= tpl.Header(r, navItems, "") %}
 {%code
 	var labelKeys []string
 	for k := range alert.Labels {

@@ -326,7 +327,7 @@
           Group
         </div>
         <div class="col">
-          <a target="_blank" href="{%s path.Join(pathPrefix,"groups") %}#group-{%s alert.GroupID %}">{%s alert.GroupID %}</a>
+          <a target="_blank" href="/groups#group-{%s alert.GroupID %}">{%s alert.GroupID %}</a>
        </div>
      </div>
    </div>

@@ -340,7 +341,7 @@
     </div>
   </div>
 </div>
-{%= tpl.Footer() %}
+{%= tpl.Footer(r) %}

 {% endfunc %}

File diff suppressed because it is too large.
@@ -1,6 +1,7 @@
 package graphite

 import (
+	"flag"
 	"fmt"
 	"net/http"
 	"regexp"

@@ -17,6 +18,8 @@ import (
 	"github.com/VictoriaMetrics/metrics"
 )

+var maxTagValueSuffixes = flag.Int("search.maxTagValueSuffixesPerSearch", 100e3, "The maximum number of tag value suffixes returned from /metrics/find")
+
 // MetricsFindHandler implements /metrics/find handler.
 //
 // See https://graphite-api.readthedocs.io/en/latest/api.html#metrics-find

@@ -219,7 +222,7 @@ func metricsFind(tr storage.TimeRange, label, qHead, qTail string, delimiter byt
 	n := strings.IndexAny(qTail, "*{[")
 	if n < 0 {
 		query := qHead + qTail
-		suffixes, err := netstorage.TagValueSuffixes(nil, tr, label, query, delimiter, deadline)
+		suffixes, err := netstorage.TagValueSuffixes(nil, tr, label, query, delimiter, *maxTagValueSuffixes, deadline)
 		if err != nil {
 			return nil, err
 		}

@@ -239,7 +242,7 @@ func metricsFind(tr storage.TimeRange, label, qHead, qTail string, delimiter byt
 	}
 	if n == len(qTail)-1 && strings.HasSuffix(qTail, "*") {
 		query := qHead + qTail[:len(qTail)-1]
-		suffixes, err := netstorage.TagValueSuffixes(nil, tr, label, query, delimiter, deadline)
+		suffixes, err := netstorage.TagValueSuffixes(nil, tr, label, query, delimiter, *maxTagValueSuffixes, deadline)
 		if err != nil {
 			return nil, err
 		}
@@ -34,7 +34,7 @@ var (
 		"limit is reached; see also -search.maxQueryDuration")
 	resetCacheAuthKey    = flag.String("search.resetCacheAuthKey", "", "Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call")
 	logSlowQueryDuration = flag.Duration("search.logSlowQueryDuration", 5*time.Second, "Log queries with execution time exceeding this value. Zero disables slow query logging")
-	vmalertProxyURL      = flag.String("vmalert.proxyURL", "", "Optional URL for proxying alerting API requests from Grafana. For example, if -vmalert.proxyURL is set to http://vmalert:8880 , then requests to /api/v1/rules are proxied to http://vmalert:8880/api/v1/rules")
+	vmalertProxyURL      = flag.String("vmalert.proxyURL", "", "Optional URL for proxying requests to vmalert. For example, if -vmalert.proxyURL=http://vmalert:8880 , then alerting API requests such as /api/v1/rules from Grafana will be proxied to http://vmalert:8880/api/v1/rules")
 )

 var slowQueries = metrics.NewCounter(`vm_slow_queries_total`)

@@ -211,6 +211,19 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 		fmt.Fprintf(w, "%s", `{}`)
 		return true
 	}
+
+	if strings.HasPrefix(path, "/vmalert") {
+		vmalertRequests.Inc()
+		if len(*vmalertProxyURL) == 0 {
+			w.WriteHeader(http.StatusBadRequest)
+			w.Header().Set("Content-Type", "application/json")
+			fmt.Fprintf(w, "%s", `{"status":"error","msg":"for accessing vmalert flag '-vmalert.proxyURL' must be configured"}`)
+			return true
+		}
+		proxyVMAlertRequests(w, r)
+		return true
+	}
+
 	switch path {
 	case "/api/v1/query":
 		queryRequests.Inc()

@@ -402,14 +415,24 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 		}
 		return true
 	case "/api/v1/rules", "/rules":
-		// Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#rules
 		rulesRequests.Inc()
-		mayProxyVMAlertRequests(w, r, `{"status":"success","data":{"groups":[]}}`)
+		if len(*vmalertProxyURL) > 0 {
+			proxyVMAlertRequests(w, r)
+			return true
+		}
+		// Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#rules
+		w.Header().Set("Content-Type", "application/json")
+		fmt.Fprint(w, `{"status":"success","data":{"groups":[]}}`)
 		return true
 	case "/api/v1/alerts", "/alerts":
-		// Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#alerts
 		alertsRequests.Inc()
-		mayProxyVMAlertRequests(w, r, `{"status":"success","data":{"alerts":[]}}`)
+		if len(*vmalertProxyURL) > 0 {
+			proxyVMAlertRequests(w, r)
+			return true
+		}
+		// Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#alerts
+		w.Header().Set("Content-Type", "application/json")
+		fmt.Fprint(w, `{"status":"success","data":{"alerts":[]}}`)
 		return true
 	case "/api/v1/metadata":
 		// Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#querying-metric-metadata

@@ -551,20 +574,16 @@ var (

 	graphiteFunctionsRequests = metrics.NewCounter(`vm_http_requests_total{path="/functions"}`)

-	rulesRequests  = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/rules"}`)
-	alertsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/alerts"}`)
+	vmalertRequests = metrics.NewCounter(`vm_http_requests_total{path="/vmalert"}`)
+	rulesRequests   = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/rules"}`)
+	alertsRequests  = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/alerts"}`)

 	metadataRequests       = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/metadata"}`)
 	buildInfoRequests      = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/buildinfo"}`)
 	queryExemplarsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/query_exemplars"}`)
 )

-func mayProxyVMAlertRequests(w http.ResponseWriter, r *http.Request, stubResponse string) {
-	if len(*vmalertProxyURL) == 0 {
-		// Return dumb placeholder for https://prometheus.io/docs/prometheus/latest/querying/api/#rules
-		w.Header().Set("Content-Type", "application/json")
-		fmt.Fprintf(w, "%s", stubResponse)
-		return
-	}
+func proxyVMAlertRequests(w http.ResponseWriter, r *http.Request) {
 	defer func() {
 		err := recover()
 		if err == nil || err == http.ErrAbortHandler {
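The tail of this hunk shows `proxyVMAlertRequests` recovering from panics and treating `http.ErrAbortHandler` as benign. That sentinel is the one `httputil.ReverseProxy` (and `net/http` handlers generally) panic with when the client disconnects mid-response, so it should be swallowed rather than logged as an error. A self-contained sketch of the pattern (not the exact VictoriaMetrics code):

```go
package main

import (
	"log"
	"net/http"
)

// withAbortRecovery swallows the client-disconnect sentinel while still
// surfacing genuine panics from the wrapped handler.
func withAbortRecovery(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {
			if err := recover(); err != nil && err != http.ErrAbortHandler {
				log.Printf("panic while serving %q: %v", r.URL.Path, err)
			}
		}()
		h.ServeHTTP(w, r)
	})
}

func main() {
	http.Handle("/", withAbortRecovery(http.NotFoundHandler()))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```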
@@ -25,11 +25,10 @@ import (
 )

 var (
-	maxTagKeysPerSearch          = flag.Int("search.maxTagKeys", 100e3, "The maximum number of tag keys returned from /api/v1/labels")
-	maxTagValuesPerSearch        = flag.Int("search.maxTagValues", 100e3, "The maximum number of tag values returned from /api/v1/label/<label_name>/values")
-	maxTagValueSuffixesPerSearch = flag.Int("search.maxTagValueSuffixesPerSearch", 100e3, "The maximum number of tag value suffixes returned from /metrics/find")
-	maxSamplesPerSeries          = flag.Int("search.maxSamplesPerSeries", 30e6, "The maximum number of raw samples a single query can scan per each time series. This option allows limiting memory usage")
-	maxSamplesPerQuery           = flag.Int("search.maxSamplesPerQuery", 1e9, "The maximum number of raw samples a single query can process across all time series. This protects from heavy queries, which select unexpectedly high number of raw samples. See also -search.maxSamplesPerSeries")
+	maxTagKeysPerSearch   = flag.Int("search.maxTagKeys", 100e3, "The maximum number of tag keys returned from /api/v1/labels")
+	maxTagValuesPerSearch = flag.Int("search.maxTagValues", 100e3, "The maximum number of tag values returned from /api/v1/label/<label_name>/values")
+	maxSamplesPerSeries   = flag.Int("search.maxSamplesPerSeries", 30e6, "The maximum number of raw samples a single query can scan per each time series. This option allows limiting memory usage")
+	maxSamplesPerQuery    = flag.Int("search.maxSamplesPerQuery", 1e9, "The maximum number of raw samples a single query can process across all time series. This protects from heavy queries, which select unexpectedly high number of raw samples. See also -search.maxSamplesPerSeries")
 )

 // Result is a single timeseries result.

@@ -639,15 +638,12 @@ func (sbh *sortBlocksHeap) Pop() interface{} {
 func DeleteSeries(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline searchutils.Deadline) (int, error) {
 	qt = qt.NewChild("delete series: %s", sq)
 	defer qt.Done()
-	tr := storage.TimeRange{
-		MinTimestamp: sq.MinTimestamp,
-		MaxTimestamp: sq.MaxTimestamp,
-	}
+	tr := sq.GetTimeRange()
 	tfss, err := setupTfss(qt, tr, sq.TagFilterss, sq.MaxMetrics, deadline)
 	if err != nil {
 		return 0, err
 	}
-	return vmstorage.DeleteMetrics(qt, tfss)
+	return vmstorage.DeleteSeries(qt, tfss)
 }

 // LabelNames returns label names matching the given sq until the given deadline.

@@ -660,10 +656,7 @@ func LabelNames(qt *querytracer.Tracer, sq *storage.SearchQuery, maxLabelNames i
 	if maxLabelNames > *maxTagKeysPerSearch || maxLabelNames <= 0 {
 		maxLabelNames = *maxTagKeysPerSearch
 	}
-	tr := storage.TimeRange{
-		MinTimestamp: sq.MinTimestamp,
-		MaxTimestamp: sq.MaxTimestamp,
-	}
+	tr := sq.GetTimeRange()
 	tfss, err := setupTfss(qt, tr, sq.TagFilterss, sq.MaxMetrics, deadline)
 	if err != nil {
 		return nil, err

@@ -745,10 +738,7 @@ func LabelValues(qt *querytracer.Tracer, labelName string, sq *storage.SearchQue
 	if maxLabelValues > *maxTagValuesPerSearch || maxLabelValues <= 0 {
 		maxLabelValues = *maxTagValuesPerSearch
 	}
-	tr := storage.TimeRange{
-		MinTimestamp: sq.MinTimestamp,
-		MaxTimestamp: sq.MaxTimestamp,
-	}
+	tr := sq.GetTimeRange()
 	tfss, err := setupTfss(qt, tr, sq.TagFilterss, sq.MaxMetrics, deadline)
 	if err != nil {
 		return nil, err

@@ -823,21 +813,21 @@ func GraphiteTagValues(qt *querytracer.Tracer, tagName, filter string, limit int
 // TagValueSuffixes returns tag value suffixes for the given tagKey and the given tagValuePrefix.
 //
 // It can be used for implementing https://graphite-api.readthedocs.io/en/latest/api.html#metrics-find
-func TagValueSuffixes(qt *querytracer.Tracer, tr storage.TimeRange, tagKey, tagValuePrefix string, delimiter byte, deadline searchutils.Deadline) ([]string, error) {
-	qt = qt.NewChild("get tag value suffixes for tagKey=%s, tagValuePrefix=%s, timeRange=%s", tagKey, tagValuePrefix, &tr)
+func TagValueSuffixes(qt *querytracer.Tracer, tr storage.TimeRange, tagKey, tagValuePrefix string, delimiter byte, maxSuffixes int, deadline searchutils.Deadline) ([]string, error) {
+	qt = qt.NewChild("get tag value suffixes for tagKey=%s, tagValuePrefix=%s, maxSuffixes=%d, timeRange=%s", tagKey, tagValuePrefix, maxSuffixes, &tr)
 	defer qt.Done()
 	if deadline.Exceeded() {
 		return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
 	}
-	suffixes, err := vmstorage.SearchTagValueSuffixes(qt, tr, []byte(tagKey), []byte(tagValuePrefix), delimiter, *maxTagValueSuffixesPerSearch, deadline.Deadline())
+	suffixes, err := vmstorage.SearchTagValueSuffixes(qt, tr, tagKey, tagValuePrefix, delimiter, maxSuffixes, deadline.Deadline())
 	if err != nil {
 		return nil, fmt.Errorf("error during search for suffixes for tagKey=%q, tagValuePrefix=%q, delimiter=%c on time range %s: %w",
 			tagKey, tagValuePrefix, delimiter, tr.String(), err)
 	}
-	if len(suffixes) >= *maxTagValueSuffixesPerSearch {
+	if len(suffixes) >= maxSuffixes {
 		return nil, fmt.Errorf("more than -search.maxTagValueSuffixesPerSearch=%d tag value suffixes found for tagKey=%q, tagValuePrefix=%q, delimiter=%c on time range %s; "+
 			"either narrow down the query or increase -search.maxTagValueSuffixesPerSearch command-line flag value",
-			*maxTagValueSuffixesPerSearch, tagKey, tagValuePrefix, delimiter, tr.String())
+			maxSuffixes, tagKey, tagValuePrefix, delimiter, tr.String())
 	}
 	return suffixes, nil
 }

@@ -851,10 +841,7 @@ func TSDBStatus(qt *querytracer.Tracer, sq *storage.SearchQuery, focusLabel stri
 	if deadline.Exceeded() {
 		return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
 	}
-	tr := storage.TimeRange{
-		MinTimestamp: sq.MinTimestamp,
-		MaxTimestamp: sq.MaxTimestamp,
-	}
+	tr := sq.GetTimeRange()
 	tfss, err := setupTfss(qt, tr, sq.TagFilterss, sq.MaxMetrics, deadline)
 	if err != nil {
 		return nil, err

@@ -908,10 +895,7 @@ func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
 	if deadline.Exceeded() {
 		return fmt.Errorf("timeout exceeded before starting data export: %s", deadline.String())
 	}
-	tr := storage.TimeRange{
-		MinTimestamp: sq.MinTimestamp,
-		MaxTimestamp: sq.MaxTimestamp,
-	}
+	tr := sq.GetTimeRange()
 	if err := vmstorage.CheckTimeRange(tr); err != nil {
 		return err
 	}

@@ -1024,10 +1008,7 @@ func SearchMetricNames(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline
 	}

 	// Setup search.
-	tr := storage.TimeRange{
-		MinTimestamp: sq.MinTimestamp,
-		MaxTimestamp: sq.MaxTimestamp,
-	}
+	tr := sq.GetTimeRange()
 	if err := vmstorage.CheckTimeRange(tr); err != nil {
 		return nil, err
 	}

@@ -1056,10 +1037,7 @@ func ProcessSearchQuery(qt *querytracer.Tracer, sq *storage.SearchQuery, deadlin
 	}

 	// Setup search.
-	tr := storage.TimeRange{
-		MinTimestamp: sq.MinTimestamp,
-		MaxTimestamp: sq.MaxTimestamp,
-	}
+	tr := sq.GetTimeRange()
 	if err := vmstorage.CheckTimeRange(tr); err != nil {
 		return nil, err
 	}
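Several hunks above fold the repeated `storage.TimeRange` literal into a `sq.GetTimeRange()` helper. The helper's behavior is implied directly by the code it replaces; a sketch with stub types (the real definitions live in `lib/storage`):

```go
package storage

// Stubs for illustration only.
type TimeRange struct {
	MinTimestamp int64
	MaxTimestamp int64
}

type SearchQuery struct {
	MinTimestamp int64
	MaxTimestamp int64
	// ... other fields elided ...
}

// GetTimeRange returns the time range covered by the search query —
// exactly the struct literal it replaces throughout netstorage.
func (sq *SearchQuery) GetTimeRange() TimeRange {
	return TimeRange{
		MinTimestamp: sq.MinTimestamp,
		MaxTimestamp: sq.MaxTimestamp,
	}
}
```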
@@ -3839,14 +3839,14 @@ func TestExecSuccess(t *testing.T) {
 	})
 	t.Run(`histogram_quantile(nan-bucket-count-some)`, func(t *testing.T) {
 		t.Parallel()
-		q := `histogram_quantile(0.6,
+		q := `round(histogram_quantile(0.6,
 			label_set(90, "foo", "bar", "le", "10")
 			or label_set(NaN, "foo", "bar", "le", "30")
 			or label_set(300, "foo", "bar", "le", "+Inf")
-		)`
+		),0.01)`
 		r := netstorage.Result{
 			MetricName: metricNameExpected,
-			Values:     []float64{30, 30, 30, 30, 30, 30},
+			Values:     []float64{18.57, 18.57, 18.57, 18.57, 18.57, 18.57},
 			Timestamps: timestampsExpected,
 		}
 		r.MetricName.Tags = []storage.Tag{{
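The new expected value can be reproduced by hand. Before the fix, the NaN in the `le="30"` bucket was overwritten with the previous bucket's value, giving cumulative counts (90, 90, 300); the target rank 0.6·300 = 180 then fell into the `(30, +Inf]` bucket, for which `histogram_quantile` returns the lower bound 30. After the fix the NaN is repaired from the next bucket, giving (90, 300, 300), so rank 180 falls into `(10, 30]` and interpolation gives:

$$
q = le_{lo} + (le_{hi} - le_{lo})\cdot\frac{\varphi\, c_{\infty} - c_{lo}}{c_{hi} - c_{lo}}
  = 10 + (30 - 10)\cdot\frac{0.6\cdot 300 - 90}{300 - 90} \approx 18.571,
$$

which `round(..., 0.01)` turns into 18.57.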
@@ -994,16 +994,32 @@ func groupLeTimeseries(tss []*timeseries) map[string][]leTimeseries {
 }

 func fixBrokenBuckets(i int, xss []leTimeseries) {
-	// Fix broken buckets.
-	// They are already sorted by le, so their values must be in ascending order,
+	// Buckets are already sorted by le, so their values must be in ascending order,
 	// since the next bucket includes all the previous buckets.
-	vPrev := float64(0)
-	for _, xs := range xss {
-		v := xs.ts.Values[i]
-		if v < vPrev || math.IsNaN(v) {
-			xs.ts.Values[i] = vPrev
+	//
+	// If the next bucket has lower value than the current bucket,
+	// then the current bucket must be substituted with the next bucket value.
+	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2819
+	if len(xss) < 2 {
+		return
+	}
+	for j := len(xss) - 1; j >= 0; j-- {
+		v := xss[j].ts.Values[i]
+		if !math.IsNaN(v) {
+			j++
+			for j < len(xss) {
+				xss[j].ts.Values[i] = v
+				j++
+			}
+			break
+		}
+	}
+	vNext := xss[len(xss)-1].ts.Values[i]
+	for j := len(xss) - 2; j >= 0; j-- {
+		v := xss[j].ts.Values[i]
+		if math.IsNaN(v) || v > vNext {
+			xss[j].ts.Values[i] = vNext
 		} else {
-			vPrev = v
+			vNext = v
 		}
 	}
 }
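A standalone transcription of the new `fixBrokenBuckets` logic over a plain `[]float64`, useful for tracing the backward pass by hand; it mirrors the hunk above but drops the `leTimeseries` wrapper:

```go
package main

import (
	"fmt"
	"math"
)

func fixBrokenBuckets(values []float64) {
	if len(values) < 2 {
		return
	}
	// Pass 1: fill trailing NaNs with the last non-NaN value.
	for j := len(values) - 1; j >= 0; j-- {
		if !math.IsNaN(values[j]) {
			for k := j + 1; k < len(values); k++ {
				values[k] = values[j]
			}
			break
		}
	}
	// Pass 2: walk backwards, clamping each bucket (and any NaN) to the
	// following bucket's value, so the cumulative counts stay non-decreasing.
	vNext := values[len(values)-1]
	for j := len(values) - 2; j >= 0; j-- {
		if v := values[j]; math.IsNaN(v) || v > vNext {
			values[j] = vNext
		} else {
			vNext = v
		}
	}
}

func main() {
	nan := math.NaN()
	vs := []float64{1, 5, 2, nan, 6, 3}
	fixBrokenBuckets(vs)
	fmt.Println(vs) // [1 2 2 3 3 3], matching a case in the new test file below
}
```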
app/vmselect/promql/transform_test.go (new file, 34 lines)
@@ -0,0 +1,34 @@
+package promql
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestFixBrokenBuckets(t *testing.T) {
+	f := func(values, expectedResult []float64) {
+		t.Helper()
+		xss := make([]leTimeseries, len(values))
+		for i, v := range values {
+			xss[i].ts = &timeseries{
+				Values: []float64{v},
+			}
+		}
+		fixBrokenBuckets(0, xss)
+		result := make([]float64, len(values))
+		for i, xs := range xss {
+			result[i] = xs.ts.Values[0]
+		}
+		if !reflect.DeepEqual(result, expectedResult) {
+			t.Fatalf("unexpected result for values=%v\ngot\n%v\nwant\n%v", values, result, expectedResult)
+		}
+	}
+	f(nil, []float64{})
+	f([]float64{1}, []float64{1})
+	f([]float64{1, 2}, []float64{1, 2})
+	f([]float64{2, 1}, []float64{1, 1})
+	f([]float64{1, 2, 3, nan, nan}, []float64{1, 2, 3, 3, 3})
+	f([]float64{5, 1, 2, 3, nan}, []float64{1, 1, 2, 3, 3})
+	f([]float64{1, 5, 2, nan, 6, 3}, []float64{1, 2, 2, 3, 3, 3})
+	f([]float64{5, 10, 4, 3}, []float64{3, 3, 3, 3})
+}
@@ -165,12 +165,12 @@ func RegisterMetricNames(qt *querytracer.Tracer, mrs []storage.MetricRow) error
 	return err
 }

-// DeleteMetrics deletes metrics matching tfss.
+// DeleteSeries deletes series matching tfss.
 //
-// Returns the number of deleted metrics.
-func DeleteMetrics(qt *querytracer.Tracer, tfss []*storage.TagFilters) (int, error) {
+// Returns the number of deleted series.
+func DeleteSeries(qt *querytracer.Tracer, tfss []*storage.TagFilters) (int, error) {
 	WG.Add(1)
-	n, err := Storage.DeleteMetrics(qt, tfss)
+	n, err := Storage.DeleteSeries(qt, tfss)
 	WG.Done()
 	return n, err
 }

@@ -203,7 +203,7 @@ func SearchLabelValuesWithFiltersOnTimeRange(qt *querytracer.Tracer, labelName s
 // SearchTagValueSuffixes returns all the tag value suffixes for the given tagKey and tagValuePrefix on the given tr.
 //
 // This allows implementing https://graphite-api.readthedocs.io/en/latest/api.html#metrics-find or similar APIs.
-func SearchTagValueSuffixes(qt *querytracer.Tracer, tr storage.TimeRange, tagKey, tagValuePrefix []byte, delimiter byte, maxTagValueSuffixes int, deadline uint64) ([]string, error) {
+func SearchTagValueSuffixes(qt *querytracer.Tracer, tr storage.TimeRange, tagKey, tagValuePrefix string, delimiter byte, maxTagValueSuffixes int, deadline uint64) ([]string, error) {
 	WG.Add(1)
 	suffixes, err := Storage.SearchTagValueSuffixes(qt, tr, tagKey, tagValuePrefix, delimiter, maxTagValueSuffixes, deadline)
 	WG.Done()
File diff suppressed because it is too large.
@@ -18,9 +18,11 @@ The following tip changes can be tested by building VictoriaMetrics components f
 **Update notes:** this release introduces backwards-incompatible changes to `vm_partial_results_total` metric by changing its labels to be consistent with `vm_requests_total` metric.
 If you use alerting rules or Grafana dashboards, which rely on this metric, then they must be updated. The official dashboards for VictoriaMetrics don't use this metric.

+* FEATURE: [single-node VictoriaMetrics](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html): allow accessing [vmalert's](https://docs.victoriametrics.com/vmalert.html) UI when `-vmalert.proxyURL` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#vmalert) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2825).
 * FEATURE: add `-search.setLookbackToStep` command-line flag, which enables InfluxDB-like gap filling during querying. See [these docs](https://docs.victoriametrics.com/guides/migrate-from-influx.html) for details.
 * FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add a UI for [query tracing](https://docs.victoriametrics.com/#query-tracing). It can be enabled by clicking the `enable query tracing` checkbox and re-running the query. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2703).
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add `-remoteWrite.headers` command-line option for specifying optional HTTP headers to send to the configured `-remoteWrite.url`. For example, `-remoteWrite.headers='Foo:Bar^^Baz:x'` would send `Foo: Bar` and `Baz: x` HTTP headers with every request to `-remoteWrite.url`. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2805).
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): push per-target `scrape_samples_limit` metric to the configured `-remoteWrite.url` if `sample_limit` option is set for this target in [scrape_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config). See [this feature request](https://github.com/VictoriaMetrics/operator/issues/497).
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add ability to specify additional HTTP headers to send to scrape targets via `headers` section in `scrape_configs`. This can be used when the scrape target requires custom authorization and authentication like in [this stackoverflow question](https://stackoverflow.com/questions/66032498/prometheus-scrape-metric-with-custom-header). For example, the following config instructs sending `My-Auth: top-secret` and `TenantID: FooBar` headers with each request to `http://host123:8080/metrics`:

 ```yaml

@@ -43,6 +45,7 @@ scrape_configs:
 * `vm_rows_read_per_series` - the number of raw samples read per queried series.
 * `vm_series_read_per_query` - the number of series read per query.

+* BUGFIX: properly register time series in per-day inverted index. Previously some series could miss registration in the per-day inverted index. This could result in missing time series during querying. The issue has been introduced in [v1.78.0](#v1780).
 * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): allow using `__name__` label (aka [metric name](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors)) in alerting annotations. For example:

 {% raw %}

@@ -53,6 +56,7 @@ scrape_configs:

 * BUGFIX: limit max memory occupied by the cache, which stores parsed regular expressions. Previously too long regular expressions passed in [MetricsQL queries](https://docs.victoriametrics.com/MetricsQL.html) could result in big amounts of used memory (e.g. multiple gigabytes). Now the max cache size for parsed regexps is limited to a few megabytes.
 * BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly handle partial counter resets when calculating [rate](https://docs.victoriametrics.com/MetricsQL.html#rate), [irate](https://docs.victoriametrics.com/MetricsQL.html#irate) and [increase](https://docs.victoriametrics.com/MetricsQL.html#increase) functions. Previously these functions could return zero values after partial counter resets until the counter increases to the last value before the partial counter reset. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2787).
+* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly calculate [histogram_quantile](https://docs.victoriametrics.com/MetricsQL.html#histogram_quantile) over Prometheus buckets with unexpected values. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2819).
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): make sure that [stale markers](https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers) are generated with the actual timestamp when an unsuccessful scrape occurs. This should prevent possible time series overlap on scrape target restarts in dynamic environments such as Kubernetes.
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly reload changed `-promscrape.config` file when `-promscrape.configCheckInterval` option is set. The changed config file wasn't reloaded in this case since [v1.69.0](#v1690). See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2786). Thanks to @ttyv for the fix.
 * BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): assume that the response is complete if `-search.denyPartialResponse` is enabled and up to `-replicationFactor - 1` `vmstorage` nodes are unavailable. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1767).

@@ -60,12 +64,11 @@ scrape_configs:

 ## [v1.78.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.78.0)

-**Warning (03-07-2022):** some users report issues with incomplete data returned from queries for cluster version.
-The problem is currently under investigation. This message will be updated as soon as the problem
-will be localized and solved. Meanwhile, we recommend postpone updating to 1.78.0.
-
 Released at 20-06-2022

+**Warning (03-07-2022):** VictoriaMetrics v1.78.0 contains a bug, which may result in missing time series during queries.
+It is recommended downgrading to [v1.77.2](#v1772) until the bugfix release.
+
 **Update notes:** this release introduces backwards-incompatible changes to communication protocol between `vmselect` and `vmstorage` nodes in cluster version of VictoriaMetrics because of added [query tracing](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#query-tracing), so `vmselect` and `vmstorage` nodes will experience communication errors and read requests to `vmselect` will fail until the upgrade is complete. These errors will stop after all the `vmselect` and `vmstorage` nodes are updated to the new release. It is safe to downgrade to previous releases.

 * SECURITY: add `-flagsAuthKey` command-line flag for protecting `/flags` endpoint from unauthorized access. Though this endpoint already hides values for command-line flags with `key` and `password` substrings in their names, other sensitive information could be exposed there. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2753).
@ -256,6 +256,9 @@ See [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting.html

Note that the `delete_series` handler should be used only in exceptional cases such as deletion of accidentally ingested incorrect time series. It shouldn't
be used on a regular basis, since it carries non-zero overhead.

- URL for accessing [vmalert's](https://docs.victoriametrics.com/vmalert.html) UI: `http://<vmselect>:8481/select/<accountID>/prometheus/vmalert/home`.
  This URL works only when the `-vmalert.proxyURL` flag is set. See more about vmalert [here](#vmalert).

- `vmstorage` nodes provide the following HTTP endpoints on the `8482` port:
  - `/internal/force_merge` - initiate [forced compactions](https://docs.victoriametrics.com/#forced-merge) on the given `vmstorage` node.
  - `/snapshot/create` - create [instant snapshot](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282),
@ -468,6 +471,17 @@ curl http://0.0.0.0:8480/debug/pprof/heap > mem.pprof

</div>

## vmalert

vmselect is capable of proxying requests to [vmalert](https://docs.victoriametrics.com/vmalert.html)
when the `-vmalert.proxyURL` flag is set. Use this feature for the following cases:
* for proxying requests from the [Grafana Alerting UI](https://grafana.com/docs/grafana/latest/alerting/);
* for accessing vmalert's UI through vmselect's Web interface.

To access vmalert's UI through vmselect, configure the `-vmalert.proxyURL` flag and visit
`http://<vmselect>:8481/select/<accountID>/prometheus/vmalert/home`.
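For intuition, the proxying enabled by `-vmalert.proxyURL` amounts to a reverse proxy sitting in front of vmalert. A minimal standalone sketch using only the Go standard library; the listen address, routes and target URL are illustrative, not vmselect's actual code:

```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	// Placeholder for the value passed via -vmalert.proxyURL.
	targetURL, err := url.Parse("http://vmalert:8880")
	if err != nil {
		log.Fatalf("cannot parse proxy URL: %s", err)
	}
	proxy := httputil.NewSingleHostReverseProxy(targetURL)
	// Forward vmalert UI and alerting API requests to the configured backend.
	http.Handle("/vmalert/", proxy)
	http.Handle("/api/v1/rules", proxy)
	log.Fatal(http.ListenAndServe(":8481", nil))
}
```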
## Community and contributions

We are open to third-party pull requests provided they follow the [KISS design principle](https://en.wikipedia.org/wiki/KISS_principle):

@ -821,7 +835,7 @@ Below is the output for `/path/to/vmselect -help`:
-version
Show VictoriaMetrics version
-vmalert.proxyURL string
Optional URL for proxying alerting API requests from Grafana. For example, if -vmalert.proxyURL is set to http://vmalert:8880 , then requests to /api/v1/rules are proxied to http://vmalert:8880/api/v1/rules
Optional URL for proxying requests to vmalert. For example, if -vmalert.proxyURL=http://vmalert:8880 , then alerting API requests such as /api/v1/rules from Grafana will be proxied to http://vmalert:8880/api/v1/rules
-vmstorageDialTimeout duration
Timeout for establishing RPC connections from vmselect to vmstorage (default 5s)
```
@ -1713,6 +1713,16 @@ and [vmrestore](https://docs.victoriametrics.com/vmrestore.html) tools.
We also provide the [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html) tool for enterprise subscribers.
Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).

## vmalert

A single-node VictoriaMetrics is capable of proxying requests to [vmalert](https://docs.victoriametrics.com/vmalert.html)
when the `-vmalert.proxyURL` flag is set. Use this feature for the following cases:
* for proxying requests from the [Grafana Alerting UI](https://grafana.com/docs/grafana/latest/alerting/);
* for accessing vmalert's UI through the single-node VictoriaMetrics Web interface.

To access vmalert's UI through single-node VictoriaMetrics, configure the `-vmalert.proxyURL` flag and visit
`http://<victoriametrics-addr>:8428/vmalert/home`.

## Benchmarks

Note that vendors (including VictoriaMetrics) are often biased when doing such tests. E.g. they try highlighting

@ -2181,5 +2191,5 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-version
Show VictoriaMetrics version
-vmalert.proxyURL string
Optional URL for proxying alerting API requests from Grafana. For example, if -vmalert.proxyURL is set to http://vmalert:8880 , then requests to /api/v1/rules are proxied to http://vmalert:8880/api/v1/rules
Optional URL for proxying requests to vmalert. For example, if -vmalert.proxyURL=http://vmalert:8880 , then alerting API requests such as /api/v1/rules from Grafana will be proxied to http://vmalert:8880/api/v1/rules
```
@ -1717,6 +1717,16 @@ and [vmrestore](https://docs.victoriametrics.com/vmrestore.html) tools.
We also provide the [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanager.html) tool for enterprise subscribers.
Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).

## vmalert

A single-node VictoriaMetrics is capable of proxying requests to [vmalert](https://docs.victoriametrics.com/vmalert.html)
when the `-vmalert.proxyURL` flag is set. Use this feature for the following cases:
* for proxying requests from the [Grafana Alerting UI](https://grafana.com/docs/grafana/latest/alerting/);
* for accessing vmalert's UI through the single-node VictoriaMetrics Web interface.

To access vmalert's UI through single-node VictoriaMetrics, configure the `-vmalert.proxyURL` flag and visit
`http://<victoriametrics-addr>:8428/vmalert/home`.

## Benchmarks

Note that vendors (including VictoriaMetrics) are often biased when doing such tests. E.g. they try highlighting

@ -2185,5 +2195,5 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
-version
Show VictoriaMetrics version
-vmalert.proxyURL string
Optional URL for proxying alerting API requests from Grafana. For example, if -vmalert.proxyURL is set to http://vmalert:8880 , then requests to /api/v1/rules are proxied to http://vmalert:8880/api/v1/rules
Optional URL for proxying requests to vmalert. For example, if -vmalert.proxyURL=http://vmalert:8880 , then alerting API requests such as /api/v1/rules from Grafana will be proxied to http://vmalert:8880/api/v1/rules
```
@ -4,21 +4,18 @@ sort: 2

# Additional Scrape Configuration

AdditionalScrapeConfigs allows specifying a key of a Secret containing
additional Prometheus scrape configurations, or defining scrape configuration at the CRD spec.
Scrape configurations specified
are appended to the configurations generated by the operator.
AdditionalScrapeConfigs is an additional way to add scrape targets in the VMAgent CRD.
There are two options for adding targets into VMAgent: inline configuration in the CRD, or defining it as a Kubernetes Secret.

Job configurations specified must have the form described in the official
[Prometheus documentation](
https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config).
As scrape configs are appended, the user is responsible for making sure they are
valid.
No validation happens during the creation of the configuration. However, you must validate job specs, and they must follow the job spec configuration.
Please check the official
[Prometheus documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) for reference.

## Creating an additional configuration inline at CRD
## Inline Additional Scrape Configuration in VMAgent CRD

You need to add the scrape configuration directly to the vmagent spec.inlineScrapeConfig. It is raw text in YAML format.
See the example below.

Add the needed scrape configuration directly to the vmagent spec.inlineScrapeConfig

```yaml
cat <<EOF | kubectl apply -f -
apiVersion: operator.victoriametrics.com/v1beta1

@ -38,14 +35,14 @@ spec:
EOF
```

NOTE: Do not use passwords and tokens with inlineScrapeConfig.
**Note**: Do not use passwords and tokens with inlineScrapeConfig; use a Secret instead.

## Creating an additional configuration with secret
## Define Additional Scrape Configuration as a Kubernetes Secret

First, you will need to create the additional configuration.
Below we are making a simple "prometheus" config. Name this
`prometheus-additional.yaml` or something similar.
You need to define a Kubernetes Secret with a key.

The key is `prometheus-additional.yaml` in the example below.

```yaml
cat <<EOF | kubectl apply -f -

@ -61,7 +58,7 @@ stringData:
EOF
```

Finally, reference this additional configuration in your `vmagent.yaml` CRD.
After that, you need to specify the secret's name and key in the VMAgent CRD in the `additionalScrapeConfigs` section

```yaml
cat <<EOF | kubectl apply -f -

@ -81,5 +78,5 @@ spec:
EOF
```

NOTE: Use only one secret for ALL additional scrape configurations.
**Note**: You can specify only one Secret in the VMAgent CRD configuration, so use it for all additional scrape configurations.
@ -81,16 +81,16 @@ type TLSConfig struct {
MinVersion string `yaml:"min_version,omitempty"`
}

// String returns human-readable representation of tlsConfig
func (tlsConfig *TLSConfig) String() string {
if tlsConfig == nil {
// String returns human-readable representation of tc
func (tc *TLSConfig) String() string {
if tc == nil {
return ""
}
caHash := xxhash.Sum64(tlsConfig.CA)
certHash := xxhash.Sum64(tlsConfig.Cert)
keyHash := xxhash.Sum64(tlsConfig.Key)
caHash := xxhash.Sum64(tc.CA)
certHash := xxhash.Sum64(tc.Cert)
keyHash := xxhash.Sum64(tc.Key)
return fmt.Sprintf("hash(ca)=%d, ca_file=%q, hash(cert)=%d, cert_file=%q, hash(key)=%d, key_file=%q, server_name=%q, insecure_skip_verify=%v, min_version=%q",
caHash, tlsConfig.CAFile, certHash, tlsConfig.CertFile, keyHash, tlsConfig.KeyFile, tlsConfig.ServerName, tlsConfig.InsecureSkipVerify, tlsConfig.MinVersion)
caHash, tc.CAFile, certHash, tc.CertFile, keyHash, tc.KeyFile, tc.ServerName, tc.InsecureSkipVerify, tc.MinVersion)
}

// Authorization represents generic authorization config.
@ -198,7 +198,11 @@ func newOAuth2ConfigInternal(baseDir string, o *OAuth2Config) (*oauth2ConfigInte
}
oi.cfg.ClientSecret = secret
}
ac, err := o.NewConfig(baseDir)
opts := &Options{
BaseDir: baseDir,
TLSConfig: o.TLSConfig,
}
ac, err := opts.NewConfig()
if err != nil {
return nil, fmt.Errorf("cannot initialize TLS config for OAuth2: %w", err)
}
@ -400,220 +404,325 @@ func (ac *Config) NewTLSConfig() *tls.Config {

// NewConfig creates auth config for the given hcc.
func (hcc *HTTPClientConfig) NewConfig(baseDir string) (*Config, error) {
return NewConfig(baseDir, hcc.Authorization, hcc.BasicAuth, hcc.BearerToken.String(), hcc.BearerTokenFile, hcc.OAuth2, hcc.TLSConfig, hcc.Headers)
opts := &Options{
BaseDir: baseDir,
Authorization: hcc.Authorization,
BasicAuth: hcc.BasicAuth,
BearerToken: hcc.BearerToken.String(),
BearerTokenFile: hcc.BearerTokenFile,
OAuth2: hcc.OAuth2,
TLSConfig: hcc.TLSConfig,
Headers: hcc.Headers,
}
return opts.NewConfig()
}

// NewConfig creates auth config for the given pcc.
func (pcc *ProxyClientConfig) NewConfig(baseDir string) (*Config, error) {
return NewConfig(baseDir, pcc.Authorization, pcc.BasicAuth, pcc.BearerToken.String(), pcc.BearerTokenFile, pcc.OAuth2, pcc.TLSConfig, pcc.Headers)
}

// NewConfig creates auth config for the given o.
func (o *OAuth2Config) NewConfig(baseDir string) (*Config, error) {
return NewConfig(baseDir, nil, nil, "", "", nil, o.TLSConfig, nil)
opts := &Options{
BaseDir: baseDir,
Authorization: pcc.Authorization,
BasicAuth: pcc.BasicAuth,
BearerToken: pcc.BearerToken.String(),
BearerTokenFile: pcc.BearerTokenFile,
OAuth2: pcc.OAuth2,
TLSConfig: pcc.TLSConfig,
Headers: pcc.Headers,
}
return opts.NewConfig()
}

// NewConfig creates auth config for the given ba.
func (ba *BasicAuthConfig) NewConfig(baseDir string) (*Config, error) {
return NewConfig(baseDir, nil, ba, "", "", nil, nil, nil)
opts := &Options{
BaseDir: baseDir,
BasicAuth: ba,
}
return opts.NewConfig()
}

// NewConfig creates auth config from the given args.
//
// headers must be in the form 'HeaderName: header value'
func NewConfig(baseDir string, az *Authorization, basicAuth *BasicAuthConfig, bearerToken, bearerTokenFile string, o *OAuth2Config, tlsConfig *TLSConfig, headers []string) (*Config, error) {
var getAuthHeader func() string
authDigest := ""
if az != nil {
azType := "Bearer"
if az.Type != "" {
azType = az.Type
}
if az.CredentialsFile != "" {
if az.Credentials != nil {
return nil, fmt.Errorf("both `credentials`=%q and `credentials_file`=%q are set", az.Credentials, az.CredentialsFile)
}
filePath := fs.GetFilepath(baseDir, az.CredentialsFile)
getAuthHeader = func() string {
token, err := readPasswordFromFile(filePath)
if err != nil {
logger.Errorf("cannot read credentials from `credentials_file`=%q: %s", az.CredentialsFile, err)
return ""
}
return azType + " " + token
}
authDigest = fmt.Sprintf("custom(type=%q, credsFile=%q)", az.Type, filePath)
} else {
getAuthHeader = func() string {
return azType + " " + az.Credentials.String()
}
authDigest = fmt.Sprintf("custom(type=%q, creds=%q)", az.Type, az.Credentials)
}
// Options contain options, which must be passed to NewConfig.
type Options struct {
// BaseDir is an optional path to a base directory for resolving
// relative filepaths in various config options.
//
// It is set to the current directory by default.
BaseDir string

// Authorization contains optional Authorization.
Authorization *Authorization

// BasicAuth contains optional BasicAuthConfig.
BasicAuth *BasicAuthConfig

// BearerToken contains optional bearer token.
BearerToken string

// BearerTokenFile contains optional path to a file with bearer token.
BearerTokenFile string

// OAuth2 contains optional OAuth2Config.
OAuth2 *OAuth2Config

// TLSConfig contains optional TLSConfig.
TLSConfig *TLSConfig

// Headers contains optional http request headers in the form 'Foo: bar'.
Headers []string
}

// NewConfig creates auth config from the given opts.
func (opts *Options) NewConfig() (*Config, error) {
baseDir := opts.BaseDir
if baseDir == "" {
baseDir = "."
}
if basicAuth != nil {
if getAuthHeader != nil {
return nil, fmt.Errorf("cannot use both `authorization` and `basic_auth`")
}
if basicAuth.Username == "" {
return nil, fmt.Errorf("missing `username` in `basic_auth` section")
}
if basicAuth.PasswordFile != "" {
if basicAuth.Password != nil {
return nil, fmt.Errorf("both `password`=%q and `password_file`=%q are set in `basic_auth` section", basicAuth.Password, basicAuth.PasswordFile)
}
filePath := fs.GetFilepath(baseDir, basicAuth.PasswordFile)
getAuthHeader = func() string {
password, err := readPasswordFromFile(filePath)
if err != nil {
logger.Errorf("cannot read password from `password_file`=%q set in `basic_auth` section: %s", basicAuth.PasswordFile, err)
return ""
}
// See https://en.wikipedia.org/wiki/Basic_access_authentication
token := basicAuth.Username + ":" + password
token64 := base64.StdEncoding.EncodeToString([]byte(token))
return "Basic " + token64
}
authDigest = fmt.Sprintf("basic(username=%q, passwordFile=%q)", basicAuth.Username, filePath)
} else {
getAuthHeader = func() string {
// See https://en.wikipedia.org/wiki/Basic_access_authentication
token := basicAuth.Username + ":" + basicAuth.Password.String()
token64 := base64.StdEncoding.EncodeToString([]byte(token))
return "Basic " + token64
}
authDigest = fmt.Sprintf("basic(username=%q, password=%q)", basicAuth.Username, basicAuth.Password)
}
}
if bearerTokenFile != "" {
if getAuthHeader != nil {
return nil, fmt.Errorf("cannot simultaneously use `authorization`, `basic_auth` and `bearer_token_file`")
}
if bearerToken != "" {
return nil, fmt.Errorf("both `bearer_token`=%q and `bearer_token_file`=%q are set", bearerToken, bearerTokenFile)
}
filePath := fs.GetFilepath(baseDir, bearerTokenFile)
getAuthHeader = func() string {
token, err := readPasswordFromFile(filePath)
if err != nil {
logger.Errorf("cannot read bearer token from `bearer_token_file`=%q: %s", bearerTokenFile, err)
return ""
}
return "Bearer " + token
}
authDigest = fmt.Sprintf("bearer(tokenFile=%q)", filePath)
}
if bearerToken != "" {
if getAuthHeader != nil {
return nil, fmt.Errorf("cannot simultaneously use `authorization`, `basic_auth` and `bearer_token`")
}
getAuthHeader = func() string {
return "Bearer " + bearerToken
}
authDigest = fmt.Sprintf("bearer(token=%q)", bearerToken)
}
if o != nil {
if getAuthHeader != nil {
return nil, fmt.Errorf("cannot simultaneously use `authorization`, `basic_auth, `bearer_token` and `ouath2`")
}
oi, err := newOAuth2ConfigInternal(baseDir, o)
if err != nil {
var actx authContext
if opts.Authorization != nil {
if err := actx.initFromAuthorization(baseDir, opts.Authorization); err != nil {
return nil, err
}
getAuthHeader = func() string {
ts, err := oi.getTokenSource()
if err != nil {
logger.Errorf("cannot get OAuth2 tokenSource: %s", err)
return ""
}
t, err := ts.Token()
if err != nil {
logger.Errorf("cannot get OAuth2 token: %s", err)
return ""
}
return t.Type() + " " + t.AccessToken
}
authDigest = fmt.Sprintf("oauth2(%s)", o.String())
}
var tlsRootCA *x509.CertPool
var getTLSCert func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
tlsCertDigest := ""
tlsServerName := ""
tlsInsecureSkipVerify := false
tlsMinVersion := uint16(0)
if tlsConfig != nil {
tlsServerName = tlsConfig.ServerName
tlsInsecureSkipVerify = tlsConfig.InsecureSkipVerify
if len(tlsConfig.Key) != 0 || len(tlsConfig.Cert) != 0 {
cert, err := tls.X509KeyPair(tlsConfig.Cert, tlsConfig.Key)
if err != nil {
return nil, fmt.Errorf("cannot load TLS certificate from the provided `cert` and `key` values: %w", err)
}
getTLSCert = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
return &cert, nil
}
h := xxhash.Sum64(tlsConfig.Key) ^ xxhash.Sum64(tlsConfig.Cert)
tlsCertDigest = fmt.Sprintf("digest(key+cert)=%d", h)
} else if tlsConfig.CertFile != "" || tlsConfig.KeyFile != "" {
getTLSCert = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
// Re-read TLS certificate from disk. This is needed for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1420
certPath := fs.GetFilepath(baseDir, tlsConfig.CertFile)
keyPath := fs.GetFilepath(baseDir, tlsConfig.KeyFile)
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
if err != nil {
return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %w", tlsConfig.CertFile, tlsConfig.KeyFile, err)
}
return &cert, nil
}
// Check whether the configured TLS cert can be loaded.
if _, err := getTLSCert(nil); err != nil {
return nil, err
}
tlsCertDigest = fmt.Sprintf("certFile=%q, keyFile=%q", tlsConfig.CertFile, tlsConfig.KeyFile)
if opts.BasicAuth != nil {
if actx.getAuthHeader != nil {
return nil, fmt.Errorf("cannot use both `authorization` and `basic_auth`")
}
if len(tlsConfig.CA) != 0 {
tlsRootCA = x509.NewCertPool()
if !tlsRootCA.AppendCertsFromPEM(tlsConfig.CA) {
return nil, fmt.Errorf("cannot parse data from `ca` value")
}
} else if tlsConfig.CAFile != "" {
path := fs.GetFilepath(baseDir, tlsConfig.CAFile)
data, err := fs.ReadFileOrHTTP(path)
if err != nil {
return nil, fmt.Errorf("cannot read `ca_file` %q: %w", tlsConfig.CAFile, err)
}
tlsRootCA = x509.NewCertPool()
if !tlsRootCA.AppendCertsFromPEM(data) {
return nil, fmt.Errorf("cannot parse data from `ca_file` %q", tlsConfig.CAFile)
}
}
if tlsConfig.MinVersion != "" {
v, err := parseTLSVersion(tlsConfig.MinVersion)
if err != nil {
return nil, fmt.Errorf("cannot parse `min_version`: %w", err)
}
tlsMinVersion = v
if err := actx.initFromBasicAuthConfig(baseDir, opts.BasicAuth); err != nil {
return nil, err
}
}
parsedHeaders, err := parseHeaders(headers)
if opts.BearerTokenFile != "" {
if actx.getAuthHeader != nil {
return nil, fmt.Errorf("cannot simultaneously use `authorization`, `basic_auth` and `bearer_token_file`")
}
if opts.BearerToken != "" {
return nil, fmt.Errorf("both `bearer_token`=%q and `bearer_token_file`=%q are set", opts.BearerToken, opts.BearerTokenFile)
}
if err := actx.initFromBearerTokenFile(baseDir, opts.BearerTokenFile); err != nil {
return nil, err
}
}
if opts.BearerToken != "" {
if actx.getAuthHeader != nil {
return nil, fmt.Errorf("cannot simultaneously use `authorization`, `basic_auth` and `bearer_token`")
}
if err := actx.initFromBearerToken(opts.BearerToken); err != nil {
return nil, err
}
}
if opts.OAuth2 != nil {
if actx.getAuthHeader != nil {
return nil, fmt.Errorf("cannot simultaneously use `authorization`, `basic_auth, `bearer_token` and `ouath2`")
}
if err := actx.initFromOAuth2Config(baseDir, opts.OAuth2); err != nil {
return nil, err
}
}
var tctx tlsContext
if opts.TLSConfig != nil {
if err := tctx.initFromTLSConfig(baseDir, opts.TLSConfig); err != nil {
return nil, err
}
}
headers, err := parseHeaders(opts.Headers)
if err != nil {
return nil, err
}
ac := &Config{
TLSRootCA: tlsRootCA,
TLSServerName: tlsServerName,
TLSInsecureSkipVerify: tlsInsecureSkipVerify,
TLSMinVersion: tlsMinVersion,
TLSRootCA: tctx.rootCA,
TLSServerName: tctx.serverName,
TLSInsecureSkipVerify: tctx.insecureSkipVerify,
TLSMinVersion: tctx.minVersion,

getTLSCert: getTLSCert,
tlsCertDigest: tlsCertDigest,
getTLSCert: tctx.getTLSCert,
tlsCertDigest: tctx.tlsCertDigest,

getAuthHeader: getAuthHeader,
headers: parsedHeaders,
authDigest: authDigest,
getAuthHeader: actx.getAuthHeader,
headers: headers,
authDigest: actx.authDigest,
}
return ac, nil
}
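A minimal usage sketch for the new `Options`-based API, assembled from the call sites elsewhere in this commit; the service-account file paths and the header value are taken from the kubernetes discovery and test diffs below:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
)

func main() {
	// Named fields replace the old positional NewConfig arguments;
	// unset fields simply keep their zero values.
	opts := &promauth.Options{
		BaseDir:         ".",
		BearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token",
		TLSConfig: &promauth.TLSConfig{
			CAFile: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
		},
		Headers: []string{"My-Auth: foo-Bar"},
	}
	ac, err := opts.NewConfig()
	if err != nil {
		fmt.Printf("cannot create auth config: %s\n", err)
		return
	}
	fmt.Printf("created auth config: %v\n", ac != nil)
}
```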
type authContext struct {
// getAuthHeader must return <value> for 'Authorization: <value>' http request header
getAuthHeader func() string

// authDigest must contain the digest for the used authorization
// The digest must be changed whenever the original config changes.
authDigest string
}

func (actx *authContext) initFromAuthorization(baseDir string, az *Authorization) error {
azType := "Bearer"
if az.Type != "" {
azType = az.Type
}
if az.CredentialsFile == "" {
actx.getAuthHeader = func() string {
return azType + " " + az.Credentials.String()
}
actx.authDigest = fmt.Sprintf("custom(type=%q, creds=%q)", az.Type, az.Credentials)
return nil
}
if az.Credentials != nil {
return fmt.Errorf("both `credentials`=%q and `credentials_file`=%q are set", az.Credentials, az.CredentialsFile)
}
filePath := fs.GetFilepath(baseDir, az.CredentialsFile)
actx.getAuthHeader = func() string {
token, err := readPasswordFromFile(filePath)
if err != nil {
logger.Errorf("cannot read credentials from `credentials_file`=%q: %s", az.CredentialsFile, err)
return ""
}
return azType + " " + token
}
actx.authDigest = fmt.Sprintf("custom(type=%q, credsFile=%q)", az.Type, filePath)
return nil
}

func (actx *authContext) initFromBasicAuthConfig(baseDir string, ba *BasicAuthConfig) error {
if ba.Username == "" {
return fmt.Errorf("missing `username` in `basic_auth` section")
}
if ba.PasswordFile == "" {
actx.getAuthHeader = func() string {
// See https://en.wikipedia.org/wiki/Basic_access_authentication
token := ba.Username + ":" + ba.Password.String()
token64 := base64.StdEncoding.EncodeToString([]byte(token))
return "Basic " + token64
}
actx.authDigest = fmt.Sprintf("basic(username=%q, password=%q)", ba.Username, ba.Password)
return nil
}
if ba.Password != nil {
return fmt.Errorf("both `password`=%q and `password_file`=%q are set in `basic_auth` section", ba.Password, ba.PasswordFile)
}
filePath := fs.GetFilepath(baseDir, ba.PasswordFile)
actx.getAuthHeader = func() string {
password, err := readPasswordFromFile(filePath)
if err != nil {
logger.Errorf("cannot read password from `password_file`=%q set in `basic_auth` section: %s", ba.PasswordFile, err)
return ""
}
// See https://en.wikipedia.org/wiki/Basic_access_authentication
token := ba.Username + ":" + password
token64 := base64.StdEncoding.EncodeToString([]byte(token))
return "Basic " + token64
}
actx.authDigest = fmt.Sprintf("basic(username=%q, passwordFile=%q)", ba.Username, filePath)
return nil
}
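As a concrete check of the Basic auth header construction in `initFromBasicAuthConfig`, using the `user`/`password` credentials that appear in the tests later in this commit:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// See https://en.wikipedia.org/wiki/Basic_access_authentication
	token := "user" + ":" + "password"
	token64 := base64.StdEncoding.EncodeToString([]byte(token))
	fmt.Println("Basic " + token64) // Basic dXNlcjpwYXNzd29yZA==
}
```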
func (actx *authContext) initFromBearerTokenFile(baseDir string, bearerTokenFile string) error {
filePath := fs.GetFilepath(baseDir, bearerTokenFile)
actx.getAuthHeader = func() string {
token, err := readPasswordFromFile(filePath)
if err != nil {
logger.Errorf("cannot read bearer token from `bearer_token_file`=%q: %s", bearerTokenFile, err)
return ""
}
return "Bearer " + token
}
actx.authDigest = fmt.Sprintf("bearer(tokenFile=%q)", filePath)
return nil
}

func (actx *authContext) initFromBearerToken(bearerToken string) error {
actx.getAuthHeader = func() string {
return "Bearer " + bearerToken
}
actx.authDigest = fmt.Sprintf("bearer(token=%q)", bearerToken)
return nil
}

func (actx *authContext) initFromOAuth2Config(baseDir string, o *OAuth2Config) error {
oi, err := newOAuth2ConfigInternal(baseDir, o)
if err != nil {
return err
}
actx.getAuthHeader = func() string {
ts, err := oi.getTokenSource()
if err != nil {
logger.Errorf("cannot get OAuth2 tokenSource: %s", err)
return ""
}
t, err := ts.Token()
if err != nil {
logger.Errorf("cannot get OAuth2 token: %s", err)
return ""
}
return t.Type() + " " + t.AccessToken
}
actx.authDigest = fmt.Sprintf("oauth2(%s)", o.String())
return nil
}

type tlsContext struct {
getTLSCert func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
tlsCertDigest string
rootCA *x509.CertPool
serverName string
insecureSkipVerify bool
minVersion uint16
}

func (tctx *tlsContext) initFromTLSConfig(baseDir string, tc *TLSConfig) error {
tctx.serverName = tc.ServerName
tctx.insecureSkipVerify = tc.InsecureSkipVerify
if len(tc.Key) != 0 || len(tc.Cert) != 0 {
cert, err := tls.X509KeyPair(tc.Cert, tc.Key)
if err != nil {
return fmt.Errorf("cannot load TLS certificate from the provided `cert` and `key` values: %w", err)
}
tctx.getTLSCert = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
return &cert, nil
}
h := xxhash.Sum64(tc.Key) ^ xxhash.Sum64(tc.Cert)
tctx.tlsCertDigest = fmt.Sprintf("digest(key+cert)=%d", h)
} else if tc.CertFile != "" || tc.KeyFile != "" {
tctx.getTLSCert = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) {
// Re-read TLS certificate from disk. This is needed for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1420
certPath := fs.GetFilepath(baseDir, tc.CertFile)
keyPath := fs.GetFilepath(baseDir, tc.KeyFile)
cert, err := tls.LoadX509KeyPair(certPath, keyPath)
if err != nil {
return nil, fmt.Errorf("cannot load TLS certificate from `cert_file`=%q, `key_file`=%q: %w", tc.CertFile, tc.KeyFile, err)
}
return &cert, nil
}
// Check whether the configured TLS cert can be loaded.
if _, err := tctx.getTLSCert(nil); err != nil {
return err
}
tctx.tlsCertDigest = fmt.Sprintf("certFile=%q, keyFile=%q", tc.CertFile, tc.KeyFile)
}
if len(tc.CA) != 0 {
tctx.rootCA = x509.NewCertPool()
if !tctx.rootCA.AppendCertsFromPEM(tc.CA) {
return fmt.Errorf("cannot parse data from `ca` value")
}
} else if tc.CAFile != "" {
path := fs.GetFilepath(baseDir, tc.CAFile)
data, err := fs.ReadFileOrHTTP(path)
if err != nil {
return fmt.Errorf("cannot read `ca_file` %q: %w", tc.CAFile, err)
}
tctx.rootCA = x509.NewCertPool()
if !tctx.rootCA.AppendCertsFromPEM(data) {
return fmt.Errorf("cannot parse data from `ca_file` %q", tc.CAFile)
}
}
if tc.MinVersion != "" {
v, err := parseTLSVersion(tc.MinVersion)
if err != nil {
return fmt.Errorf("cannot parse `min_version`: %w", err)
}
tctx.minVersion = v
}
return nil
}
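A usage sketch tying `tlsContext` back to the public API: the hunk header at the start of this fragment shows `func (ac *Config) NewTLSConfig() *tls.Config`, so a TLS-only `Options` value can be turned into a `*tls.Config` for an HTTP client. The file path and server name here are hypothetical:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
)

func main() {
	opts := &promauth.Options{
		BaseDir: ".",
		TLSConfig: &promauth.TLSConfig{
			// Hypothetical values for illustration only.
			CAFile:     "testdata/ca.pem",
			ServerName: "example.com",
			MinVersion: "TLS12",
		},
	}
	ac, err := opts.NewConfig()
	if err != nil {
		fmt.Printf("cannot create auth config: %s\n", err)
		return
	}
	// NewTLSConfig builds a *tls.Config from the parsed TLS options.
	tr := &http.Transport{TLSClientConfig: ac.NewTLSConfig()}
	_ = &http.Client{Transport: tr}
}
```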

func parseTLSVersion(s string) (uint16, error) {
switch strings.ToUpper(s) {
case "TLS13":
@ -9,25 +9,16 @@ import (
)

func TestNewConfig(t *testing.T) {
type args struct {
baseDir string
az *Authorization
basicAuth *BasicAuthConfig
bearerToken string
bearerTokenFile string
oauth *OAuth2Config
tlsConfig *TLSConfig
}
tests := []struct {
name string
args args
opts Options
wantErr bool
expectHeader string
}{
{
name: "OAuth2 config",
args: args{
oauth: &OAuth2Config{
opts: Options{
OAuth2: &OAuth2Config{
ClientID: "some-id",
ClientSecret: NewSecret("some-secret"),
TokenURL: "http://localhost:8511",

@ -37,8 +28,8 @@ func TestNewConfig(t *testing.T) {
},
{
name: "OAuth2 config with file",
args: args{
oauth: &OAuth2Config{
opts: Options{
OAuth2: &OAuth2Config{
ClientID: "some-id",
ClientSecretFile: "testdata/test_secretfile.txt",
TokenURL: "http://localhost:8511",

@ -48,8 +39,8 @@ func TestNewConfig(t *testing.T) {
},
{
name: "OAuth2 want err",
args: args{
oauth: &OAuth2Config{
opts: Options{
OAuth2: &OAuth2Config{
ClientID: "some-id",
ClientSecret: NewSecret("some-secret"),
ClientSecretFile: "testdata/test_secretfile.txt",

@ -60,8 +51,8 @@ func TestNewConfig(t *testing.T) {
},
{
name: "basic Auth config",
args: args{
basicAuth: &BasicAuthConfig{
opts: Options{
BasicAuth: &BasicAuthConfig{
Username: "user",
Password: NewSecret("password"),
},

@ -70,8 +61,8 @@ func TestNewConfig(t *testing.T) {
},
{
name: "basic Auth config with file",
args: args{
basicAuth: &BasicAuthConfig{
opts: Options{
BasicAuth: &BasicAuthConfig{
Username: "user",
PasswordFile: "testdata/test_secretfile.txt",
},

@ -80,8 +71,8 @@ func TestNewConfig(t *testing.T) {
},
{
name: "want Authorization",
args: args{
az: &Authorization{
opts: Options{
Authorization: &Authorization{
Type: "Bearer",
Credentials: NewSecret("Value"),
},

@ -90,16 +81,16 @@ func TestNewConfig(t *testing.T) {
},
{
name: "token file",
args: args{
bearerTokenFile: "testdata/test_secretfile.txt",
opts: Options{
BearerTokenFile: "testdata/test_secretfile.txt",
},
expectHeader: "Bearer secret-content",
},
{
name: "token with tls",
args: args{
bearerToken: "some-token",
tlsConfig: &TLSConfig{
opts: Options{
BearerToken: "some-token",
TLSConfig: &TLSConfig{
InsecureSkipVerify: true,
},
},

@ -108,7 +99,7 @@ func TestNewConfig(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if tt.args.oauth != nil {
if tt.opts.OAuth2 != nil {
r := http.NewServeMux()
r.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Content-Type", "application/json")

@ -116,9 +107,9 @@ func TestNewConfig(t *testing.T) {
})
mock := httptest.NewServer(r)
tt.args.oauth.TokenURL = mock.URL
tt.opts.OAuth2.TokenURL = mock.URL
}
got, err := NewConfig(tt.args.baseDir, tt.args.az, tt.args.basicAuth, tt.args.bearerToken, tt.args.bearerTokenFile, tt.args.oauth, tt.args.tlsConfig, nil)
got, err := tt.opts.NewConfig()
if (err != nil) != tt.wantErr {
t.Errorf("NewConfig() error = %v, wantErr %v", err, tt.wantErr)
return

@ -140,7 +131,6 @@ func TestNewConfig(t *testing.T) {
t.Fatalf("unexpected auth header from fasthttp request; got %q; want %q", ahb, tt.expectHeader)
}
}

})
}
}
@ -186,7 +176,10 @@ func TestConfigHeaders(t *testing.T) {
if err != nil {
t.Fatalf("cannot parse headers: %s", err)
}
c, err := NewConfig("", nil, nil, "", "", nil, nil, headers)
opts := Options{
Headers: headers,
}
c, err := opts.NewConfig()
if err != nil {
t.Fatalf("cannot create config: %s", err)
}
@ -1656,11 +1656,17 @@ scrape_configs:
},
})

ac, err := promauth.NewConfig(".", nil, nil, "", "", nil, nil, []string{"My-Auth: foo-Bar"})
opts := &promauth.Options{
Headers: []string{"My-Auth: foo-Bar"},
}
ac, err := opts.NewConfig()
if err != nil {
t.Fatalf("unexpected error when creating promauth.Config: %s", err)
}
proxyAC, err := promauth.NewConfig(".", nil, nil, "", "", nil, nil, []string{"Foo:bar"})
opts = &promauth.Options{
Headers: []string{"Foo:bar"},
}
proxyAC, err := opts.NewConfig()
if err != nil {
t.Fatalf("unexpected error when creating promauth.Config for proxy: %s", err)
}
@ -31,7 +31,16 @@ func newAPIConfig(sdc *SDConfig, baseDir string, swcFunc ScrapeWorkConstructorFu
if err != nil {
return nil, fmt.Errorf("cannot build kube config from the specified `kubeconfig_file` config option: %w", err)
}
acNew, err := promauth.NewConfig(".", nil, kc.basicAuth, kc.token, kc.tokenFile, cc.OAuth2, kc.tlsConfig, cc.Headers)
opts := &promauth.Options{
BaseDir: baseDir,
BasicAuth: kc.basicAuth,
BearerToken: kc.token,
BearerTokenFile: kc.tokenFile,
OAuth2: cc.OAuth2,
TLSConfig: kc.tlsConfig,
Headers: cc.Headers,
}
acNew, err := opts.NewConfig()
if err != nil {
return nil, fmt.Errorf("cannot initialize auth config from `kubeconfig_file: %q`: %w", sdc.KubeConfigFile, err)
}

@ -58,7 +67,14 @@ func newAPIConfig(sdc *SDConfig, baseDir string, swcFunc ScrapeWorkConstructorFu
tlsConfig := promauth.TLSConfig{
CAFile: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
}
acNew, err := promauth.NewConfig(".", nil, nil, "", "/var/run/secrets/kubernetes.io/serviceaccount/token", cc.OAuth2, &tlsConfig, cc.Headers)
opts := &promauth.Options{
BaseDir: baseDir,
BearerTokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token",
OAuth2: cc.OAuth2,
TLSConfig: &tlsConfig,
Headers: cc.Headers,
}
acNew, err := opts.NewConfig()
if err != nil {
return nil, fmt.Errorf("cannot initialize service account auth: %w; probably, `kubernetes_sd_config->api_server` is missing in Prometheus configs?", err)
}
@ -81,7 +81,11 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) {
port: sdc.Port,
}
if sdc.TLSConfig != nil {
ac, err := promauth.NewConfig(baseDir, nil, nil, "", "", nil, sdc.TLSConfig, nil)
opts := &promauth.Options{
BaseDir: baseDir,
TLSConfig: sdc.TLSConfig,
}
ac, err := opts.NewConfig()
if err != nil {
return nil, err
}
@ -482,6 +482,11 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
sw.addAutoTimeseries(wc, "scrape_samples_post_metric_relabeling", float64(samplesPostRelabeling), scrapeTimestamp)
sw.addAutoTimeseries(wc, "scrape_series_added", float64(seriesAdded), scrapeTimestamp)
sw.addAutoTimeseries(wc, "scrape_timeout_seconds", sw.Config.ScrapeTimeout.Seconds(), scrapeTimestamp)
if sw.Config.SampleLimit > 0 {
// Expose scrape_samples_limit metric if sample_limit config is set for the target.
// See https://github.com/VictoriaMetrics/operator/issues/497
sw.addAutoTimeseries(wc, "scrape_samples_limit", float64(sw.Config.SampleLimit), scrapeTimestamp)
}
sw.pushData(&wc.writeRequest)
sw.prevLabelsLen = len(wc.labels)
sw.prevBodyLen = len(bodyString)
@ -1038,7 +1038,7 @@ func (is *indexSearch) searchLabelValuesWithFiltersOnDate(qt *querytracer.Tracer
// This allows implementing https://graphite-api.readthedocs.io/en/latest/api.html#metrics-find or similar APIs.
//
// If it returns maxTagValueSuffixes suffixes, then it is likely that more than maxTagValueSuffixes suffixes are found.
func (db *indexDB) SearchTagValueSuffixes(qt *querytracer.Tracer, tr TimeRange, tagKey, tagValuePrefix []byte, delimiter byte, maxTagValueSuffixes int, deadline uint64) ([]string, error) {
func (db *indexDB) SearchTagValueSuffixes(qt *querytracer.Tracer, tr TimeRange, tagKey, tagValuePrefix string, delimiter byte, maxTagValueSuffixes int, deadline uint64) ([]string, error) {
qt = qt.NewChild("search tag value suffixes for timeRange=%s, tagKey=%q, tagValuePrefix=%q, delimiter=%c, maxTagValueSuffixes=%d",
&tr, tagKey, tagValuePrefix, delimiter, maxTagValueSuffixes)
defer qt.Done()

@ -1078,7 +1078,7 @@ func (db *indexDB) SearchTagValueSuffixes(qt *querytracer.Tracer, tr TimeRange,
return suffixes, nil
}

func (is *indexSearch) searchTagValueSuffixesForTimeRange(tvss map[string]struct{}, tr TimeRange, tagKey, tagValuePrefix []byte, delimiter byte, maxTagValueSuffixes int) error {
func (is *indexSearch) searchTagValueSuffixesForTimeRange(tvss map[string]struct{}, tr TimeRange, tagKey, tagValuePrefix string, delimiter byte, maxTagValueSuffixes int) error {
minDate := uint64(tr.MinTimestamp) / msecPerDay
maxDate := uint64(tr.MaxTimestamp-1) / msecPerDay
if minDate > maxDate || maxDate-minDate > maxDaysForPerDaySearch {

@ -1119,24 +1119,24 @@ func (is *indexSearch) searchTagValueSuffixesForTimeRange(tvss map[string]struct
return errGlobal
}

func (is *indexSearch) searchTagValueSuffixesAll(tvss map[string]struct{}, tagKey, tagValuePrefix []byte, delimiter byte, maxTagValueSuffixes int) error {
func (is *indexSearch) searchTagValueSuffixesAll(tvss map[string]struct{}, tagKey, tagValuePrefix string, delimiter byte, maxTagValueSuffixes int) error {
kb := &is.kb
nsPrefix := byte(nsPrefixTagToMetricIDs)
kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefix)
kb.B = marshalTagValue(kb.B, tagKey)
kb.B = marshalTagValue(kb.B, tagValuePrefix)
kb.B = marshalTagValue(kb.B, bytesutil.ToUnsafeBytes(tagKey))
kb.B = marshalTagValue(kb.B, bytesutil.ToUnsafeBytes(tagValuePrefix))
kb.B = kb.B[:len(kb.B)-1] // remove tagSeparatorChar from the end of kb.B
prefix := append([]byte(nil), kb.B...)
return is.searchTagValueSuffixesForPrefix(tvss, nsPrefix, prefix, len(tagValuePrefix), delimiter, maxTagValueSuffixes)
}

func (is *indexSearch) searchTagValueSuffixesForDate(tvss map[string]struct{}, date uint64, tagKey, tagValuePrefix []byte, delimiter byte, maxTagValueSuffixes int) error {
func (is *indexSearch) searchTagValueSuffixesForDate(tvss map[string]struct{}, date uint64, tagKey, tagValuePrefix string, delimiter byte, maxTagValueSuffixes int) error {
nsPrefix := byte(nsPrefixDateTagToMetricIDs)
kb := &is.kb
kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefix)
kb.B = encoding.MarshalUint64(kb.B, date)
kb.B = marshalTagValue(kb.B, tagKey)
kb.B = marshalTagValue(kb.B, tagValuePrefix)
kb.B = marshalTagValue(kb.B, bytesutil.ToUnsafeBytes(tagKey))
kb.B = marshalTagValue(kb.B, bytesutil.ToUnsafeBytes(tagValuePrefix))
kb.B = kb.B[:len(kb.B)-1] // remove tagSeparatorChar from the end of kb.B
prefix := append([]byte(nil), kb.B...)
return is.searchTagValueSuffixesForPrefix(tvss, nsPrefix, prefix, len(tagValuePrefix), delimiter, maxTagValueSuffixes)
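The `[]byte` to `string` parameter change above leans on `bytesutil.ToUnsafeBytes` to convert without copying. A self-contained sketch of the underlying idea using only the standard library (modern Go; the returned slice must be treated as read-only, since it aliases the string's immutable memory):

```go
package main

import (
	"fmt"
	"unsafe"
)

// toUnsafeBytes returns a byte slice that shares memory with s.
// The caller must not modify the returned slice; this mirrors what
// helpers like bytesutil.ToUnsafeBytes do to avoid an allocation.
func toUnsafeBytes(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	tagValuePrefix := "node_cpu"
	b := toUnsafeBytes(tagValuePrefix) // no copy, no allocation
	fmt.Printf("%q shares memory with the original string\n", b)
}
```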
@ -2724,6 +2724,7 @@ func (is *indexSearch) createPerDayIndexes(date, metricID uint64, mn *MetricName
if err := is.db.tb.AddItems(ii.Items); err != nil {
return fmt.Errorf("cannot add per-day entires for metricID %d: %w", metricID, err)
}
is.db.s.dateMetricIDCache.Set(date, metricID)
return nil
}
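The added `dateMetricIDCache.Set(date, metricID)` call records that per-day index entries now exist for the pair, so later rows don't recreate them. A toy illustration of such a membership cache, not VictoriaMetrics' actual `dateMetricIDCache`:

```go
package main

import (
	"fmt"
	"sync"
)

// dateMetricIDSet remembers which (date, metricID) pairs already have
// per-day index entries, so they aren't re-created on every ingested row.
type dateMetricIDSet struct {
	mu sync.Mutex
	m  map[[2]uint64]struct{}
}

func newDateMetricIDSet() *dateMetricIDSet {
	return &dateMetricIDSet{m: make(map[[2]uint64]struct{})}
}

// Set marks the (date, metricID) pair as indexed.
func (s *dateMetricIDSet) Set(date, metricID uint64) {
	s.mu.Lock()
	s.m[[2]uint64{date, metricID}] = struct{}{}
	s.mu.Unlock()
}

// Has reports whether the (date, metricID) pair was already indexed.
func (s *dateMetricIDSet) Has(date, metricID uint64) bool {
	s.mu.Lock()
	_, ok := s.m[[2]uint64{date, metricID}]
	s.mu.Unlock()
	return ok
}

func main() {
	c := newDateMetricIDSet()
	c.Set(19200, 42)
	fmt.Println(c.Has(19200, 42), c.Has(19200, 43)) // true false
}
```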
@ -2067,9 +2067,10 @@ func newTestStorage() *Storage {
s := &Storage{
cachePath: "test-storage-cache",

metricIDCache:   workingsetcache.New(1234),
metricNameCache: workingsetcache.New(1234),
tsidCache:       workingsetcache.New(1234),
metricIDCache:     workingsetcache.New(1234),
metricNameCache:   workingsetcache.New(1234),
tsidCache:         workingsetcache.New(1234),
dateMetricIDCache: newDateMetricIDCache(),
}
s.setDeletedMetricIDs(&uint64set.Set{})
return s
@ -236,6 +236,14 @@ type SearchQuery struct {
MaxMetrics int
}

// GetTimeRange returns time range for the given sq.
func (sq *SearchQuery) GetTimeRange() TimeRange {
return TimeRange{
MinTimestamp: sq.MinTimestamp,
MaxTimestamp: sq.MaxTimestamp,
}
}
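A small in-package usage sketch for the new helper, assuming the `NewSearchQuery` constructor shown just below; the timestamps are hypothetical millisecond values:

```go
package storage

import "fmt"

// exampleGetTimeRange shows extracting the time range from a query
// instead of copying MinTimestamp/MaxTimestamp by hand.
func exampleGetTimeRange() {
	sq := NewSearchQuery(1655000000000, 1655003600000, nil, 100)
	tr := sq.GetTimeRange()
	fmt.Printf("querying range %d..%d\n", tr.MinTimestamp, tr.MaxTimestamp)
}
```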

// NewSearchQuery creates new search query for the given args.
func NewSearchQuery(start, end int64, tagFilterss [][]TagFilter, maxMetrics int) *SearchQuery {
if maxMetrics <= 0 {
@ -18,6 +18,7 @@ import (
"unsafe"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/bloomfilter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
@ -1273,10 +1274,10 @@ func (s *Storage) prefetchMetricNames(qt *querytracer.Tracer, tsids []TSID, dead
// ErrDeadlineExceeded is returned when the request times out.
var ErrDeadlineExceeded = fmt.Errorf("deadline exceeded")

// DeleteMetrics deletes all the metrics matching the given tfss.
// DeleteSeries deletes all the series matching the given tfss.
//
// Returns the number of metrics deleted.
func (s *Storage) DeleteMetrics(qt *querytracer.Tracer, tfss []*TagFilters) (int, error) {
func (s *Storage) DeleteSeries(qt *querytracer.Tracer, tfss []*TagFilters) (int, error) {
deletedCount, err := s.idb().DeleteTSIDs(qt, tfss)
if err != nil {
return deletedCount, fmt.Errorf("cannot delete tsids: %w", err)

@ -1306,7 +1307,7 @@ func (s *Storage) SearchLabelValuesWithFiltersOnTimeRange(qt *querytracer.Tracer
// This allows implementing https://graphite-api.readthedocs.io/en/latest/api.html#metrics-find or similar APIs.
//
// If more than maxTagValueSuffixes suffixes are found, then only the first maxTagValueSuffixes suffixes are returned.
func (s *Storage) SearchTagValueSuffixes(qt *querytracer.Tracer, tr TimeRange, tagKey, tagValuePrefix []byte,
func (s *Storage) SearchTagValueSuffixes(qt *querytracer.Tracer, tr TimeRange, tagKey, tagValuePrefix string,
delimiter byte, maxTagValueSuffixes int, deadline uint64) ([]string, error) {
return s.idb().SearchTagValueSuffixes(qt, tr, tagKey, tagValuePrefix, delimiter, maxTagValueSuffixes, deadline)
}

@ -1364,7 +1365,7 @@ func (s *Storage) searchGraphitePaths(qt *querytracer.Tracer, tr TimeRange, qHea
if n < 0 {
// Verify that qHead matches a metric name.
qHead = append(qHead, qTail...)
suffixes, err := s.SearchTagValueSuffixes(qt, tr, nil, qHead, '.', 1, deadline)
suffixes, err := s.SearchTagValueSuffixes(qt, tr, "", bytesutil.ToUnsafeString(qHead), '.', 1, deadline)
if err != nil {
return nil, err
}

@ -1379,7 +1380,7 @@ func (s *Storage) searchGraphitePaths(qt *querytracer.Tracer, tr TimeRange, qHea
return []string{string(qHead)}, nil
}
qHead = append(qHead, qTail[:n]...)
suffixes, err := s.SearchTagValueSuffixes(qt, tr, nil, qHead, '.', maxPaths, deadline)
suffixes, err := s.SearchTagValueSuffixes(qt, tr, "", bytesutil.ToUnsafeString(qHead), '.', maxPaths, deadline)
if err != nil {
return nil, err
}

@ -1708,7 +1709,6 @@ func (s *Storage) RegisterMetricNames(qt *querytracer.Tracer, mrs []MetricRow) e
}
genTSID.generation = idb.generation
s.putTSIDToCache(&genTSID, mr.MetricNameRaw)
s.dateMetricIDCache.Set(date, genTSID.TSID.MetricID)
}
return nil
}

@ -1781,7 +1781,7 @@ func (s *Storage) add(rows []rawRow, dstMrs []*MetricRow, mrs []MetricRow, preci
// Fast path - the TSID for the given MetricNameRaw has been found in cache and isn't deleted.
// There is no need in checking whether r.TSID.MetricID is deleted, since tsidCache doesn't
// contain MetricName->TSID entries for deleted time series.
// See Storage.DeleteMetrics code for details.
// See Storage.DeleteSeries code for details.
prevTSID = r.TSID
prevMetricNameRaw = mr.MetricNameRaw

@ -1797,7 +1797,6 @@ func (s *Storage) add(rows []rawRow, dstMrs []*MetricRow, mrs []MetricRow, preci
if created {
genTSID.generation = idb.generation
s.putTSIDToCache(&genTSID, mr.MetricNameRaw)
s.dateMetricIDCache.Set(date, genTSID.TSID.MetricID)
}
}
continue

@ -1860,7 +1859,6 @@ func (s *Storage) add(rows []rawRow, dstMrs []*MetricRow, mrs []MetricRow, preci
genTSID.generation = idb.generation
genTSID.TSID = r.TSID
s.putTSIDToCache(&genTSID, mr.MetricNameRaw)
s.dateMetricIDCache.Set(date, genTSID.TSID.MetricID)

prevTSID = r.TSID
prevMetricNameRaw = mr.MetricNameRaw
@ -495,8 +495,8 @@ func testStorageRandTimestamps(s *Storage) error {
return nil
}

func TestStorageDeleteMetrics(t *testing.T) {
path := "TestStorageDeleteMetrics"
func TestStorageDeleteSeries(t *testing.T) {
path := "TestStorageDeleteSeries"
s, err := OpenStorage(path, 0, 0, 0)
if err != nil {
t.Fatalf("cannot open storage: %s", err)

@ -513,7 +513,7 @@ func TestStorageDeleteMetrics(t *testing.T) {

t.Run("serial", func(t *testing.T) {
for i := 0; i < 3; i++ {
if err = testStorageDeleteMetrics(s, 0); err != nil {
if err = testStorageDeleteSeries(s, 0); err != nil {
t.Fatalf("unexpected error on iteration %d: %s", i, err)
}

@ -533,7 +533,7 @@ func TestStorageDeleteMetrics(t *testing.T) {
go func(workerNum int) {
var err error
for j := 0; j < 2; j++ {
err = testStorageDeleteMetrics(s, workerNum)
err = testStorageDeleteSeries(s, workerNum)
if err != nil {
break
}

@ -568,7 +568,7 @@ func TestStorageDeleteMetrics(t *testing.T) {
}
}

func testStorageDeleteMetrics(s *Storage, workerNum int) error {
func testStorageDeleteSeries(s *Storage, workerNum int) error {
const rowsPerMetric = 100
const metricsCount = 30

@ -654,7 +654,7 @@ func testStorageDeleteMetrics(s *Storage, workerNum int) error {
if n := metricBlocksCount(tfs); n == 0 {
return fmt.Errorf("expecting non-zero number of metric blocks for tfs=%s", tfs)
}
deletedCount, err := s.DeleteMetrics(nil, []*TagFilters{tfs})
deletedCount, err := s.DeleteSeries(nil, []*TagFilters{tfs})
if err != nil {
return fmt.Errorf("cannot delete metrics: %w", err)
}

@ -662,11 +662,11 @@ func testStorageDeleteMetrics(s *Storage, workerNum int) error {
return fmt.Errorf("expecting non-zero number of deleted metrics on iteration %d", i)
}
if n := metricBlocksCount(tfs); n != 0 {
return fmt.Errorf("expecting zero metric blocks after DeleteMetrics call for tfs=%s; got %d blocks", tfs, n)
return fmt.Errorf("expecting zero metric blocks after DeleteSeries call for tfs=%s; got %d blocks", tfs, n)
}

// Try deleting empty tfss
deletedCount, err = s.DeleteMetrics(nil, nil)
deletedCount, err = s.DeleteSeries(nil, nil)
if err != nil {
return fmt.Errorf("cannot delete empty tfss: %w", err)
}