mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2025-01-10 15:14:09 +00:00

commit ac58ab9664
Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files
69 changed files with 1118 additions and 15197 deletions
@@ -596,7 +596,7 @@ Additionally, the `action: graphite` relabeling rules usually work much faster t
 `vmagent` and [single-node VictoriaMetrics](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html)
 provide the following tools for debugging target-level and metric-level relabeling:

-- Target-level relabeling (e.g. `relabel_configs` section at [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs))
+- Target-level debugging (e.g. `relabel_configs` section at [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs))
 can be performed by navigating to the `http://vmagent:8429/targets` page (`http://victoriametrics:8428/targets` page for single-node VictoriaMetrics)
 and clicking the `debug` link at the target which must be debugged.
 The opened page will show step-by-step results for the actual relabeling rules applied to the target labels.
@@ -607,7 +607,7 @@ provide the following tools for debugging target-level and metric-level relabeling:
 and click the `debug` link there. The opened page will show step-by-step results for the actual relabeling rules,
 which result in the target drop.

-- Metric-level relabeling (e.g. `metric_relabel_configs` section at [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs)
+- Metric-level debugging (e.g. `metric_relabel_configs` section at [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs)
 and all the relabeling, which can be set up via `-relabelConfig`, `-remoteWrite.relabelConfig` and `-remoteWrite.urlRelabelConfig`
 command-line flags) can be performed by navigating to the `http://vmagent:8429/metric-relabel-debug` page
 (`http://victoriametrics:8428/metric-relabel-debug` page for single-node VictoriaMetrics)
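For context, the `metric-relabel-debug` page described above can also be driven programmatically. Below is a minimal Go sketch of such a request; the `metric` and `relabel_configs` query-parameter names are assumptions for illustration rather than a confirmed API, and the endpoint returns the same step-by-step report shown in the UI.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Relabeling rules and a sample metric to debug; the parameter names are assumed.
	params := url.Values{}
	params.Set("relabel_configs", "- action: drop\n  source_labels: [env]\n  regex: dev")
	params.Set("metric", `foo{env="dev",instance="host1"}`)

	// The same endpoint exists on vmagent (:8429) and single-node VictoriaMetrics (:8428).
	resp, err := http.Get("http://vmagent:8429/metric-relabel-debug?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // step-by-step relabeling results
}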
@@ -214,6 +214,7 @@ The following variables are available in templating:
 | $alertID or .AlertID | The current alert's ID generated by vmalert. | {% raw %}Link: vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}{% endraw %} |
 | $groupID or .GroupID | The current alert's group ID generated by vmalert. | {% raw %}Link: vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}{% endraw %} |
 | $expr or .Expr | Alert's expression. Can be used for generating links to Grafana or other systems. | {% raw %}/api/v1/query?query={{ $expr|queryEscape }}{% endraw %} |
+| $for or .For | Alert's configured for param. | {% raw %}Number of connections is too high for more than {{ .For }}{% endraw %} |
 | $externalLabels or .ExternalLabels | List of labels configured via `-external.label` command-line flag. | {% raw %}Issues with {{ $labels.instance }} (datacenter-{{ $externalLabels.dc }}){% endraw %} |
 | $externalURL or .ExternalURL | URL configured via `-external.url` command-line flag. Used for cases when vmalert is hidden behind proxy. | {% raw %}Visit {{ $externalURL }} for more details{% endraw %} |
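The new `$for` / `.For` variable added in the table above renders through Go's text/template, so a `time.Duration` value prints via its `String()` method (e.g. `5m0s`). A self-contained sketch, using a trimmed-down stand-in for vmalert's `AlertTplData` (the real struct appears in the diffs below):

package main

import (
	"os"
	"text/template"
	"time"
)

// Trimmed-down stand-in for vmalert's AlertTplData; only For is used here.
type AlertTplData struct {
	For time.Duration
}

func main() {
	tpl := template.Must(template.New("annotation").Parse(
		"Number of connections is too high for more than {{ .For }}"))
	// Prints: Number of connections is too high for more than 5m0s
	if err := tpl.Execute(os.Stdout, AlertTplData{For: 5 * time.Minute}); err != nil {
		panic(err)
	}
}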
@@ -456,6 +456,7 @@ func (ar *AlertingRule) newAlert(m datasource.Metric, ls *labelSet, start time.T
 		Value:    m.Values[0],
 		ActiveAt: start,
 		Expr:     ar.Expr,
+		For:      ar.For,
 	}
 	a.Annotations, err = a.ExecTemplate(qFn, ls.origin, ar.Annotations)
 	return a, err
@@ -45,6 +45,8 @@ type Alert struct {
 	ID uint64
 	// Restored is true if Alert was restored after restart
 	Restored bool
+	// For defines for how long Alert needs to be active to become StateFiring
+	For time.Duration
 }

 // AlertState type indicates the Alert state
@@ -80,6 +82,7 @@ type AlertTplData struct {
 	AlertID  uint64
 	GroupID  uint64
 	ActiveAt time.Time
+	For      time.Duration
 }

 var tplHeaders = []string{
@@ -91,6 +94,7 @@ var tplHeaders = []string{
 	"{{ $alertID := .AlertID }}",
 	"{{ $groupID := .GroupID }}",
 	"{{ $activeAt := .ActiveAt }}",
+	"{{ $for := .For }}",
 }

 // ExecTemplate executes the Alert template for given
@@ -98,7 +102,15 @@ var tplHeaders = []string{
 // Every alert could have a different datasource, so function
 // requires a queryFunction as an argument.
 func (a *Alert) ExecTemplate(q templates.QueryFn, labels, annotations map[string]string) (map[string]string, error) {
-	tplData := AlertTplData{Value: a.Value, Labels: labels, Expr: a.Expr, AlertID: a.ID, GroupID: a.GroupID, ActiveAt: a.ActiveAt}
+	tplData := AlertTplData{
+		Value:    a.Value,
+		Labels:   labels,
+		Expr:     a.Expr,
+		AlertID:  a.ID,
+		GroupID:  a.GroupID,
+		ActiveAt: a.ActiveAt,
+		For:      a.For,
+	}
 	tmpl, err := templates.GetWithFuncs(templates.FuncsWithQuery(q))
 	if err != nil {
 		return nil, fmt.Errorf("error getting a template: %w", err)
@@ -54,14 +54,15 @@ func TestAlert_ExecTemplate(t *testing.T) {
 				"job":      "staging",
 				"instance": "localhost",
 			},
+			For: 5 * time.Minute,
 		},
 		annotations: map[string]string{
 			"summary":     "Too high connection number for {{$labels.instance}} for job {{$labels.job}}",
-			"description": "It is {{ $value }} connections for {{$labels.instance}}",
+			"description": "It is {{ $value }} connections for {{$labels.instance}} for more than {{ .For }}",
 		},
 		expTpl: map[string]string{
 			"summary":     "Too high connection number for localhost for job staging",
-			"description": "It is 10000 connections for localhost",
+			"description": "It is 10000 connections for localhost for more than 5m0s",
 		},
 	},
 	{
@@ -152,7 +153,7 @@ func TestAlert_ExecTemplate(t *testing.T) {
 			},
 		},
 		{
-			name: "ActiveAt custome format",
+			name: "ActiveAt custom format",
 			alert: &Alert{
 				ActiveAt: time.Date(2022, 8, 19, 20, 34, 58, 651387237, time.UTC),
 			},
@@ -361,7 +361,7 @@ func templateFuncs() textTpl.FuncMap {
 			return fmt.Sprintf("%.4g%s", v, prefix), nil
 		},

-		// humanizeDuration converts given seconds to a human readable duration
+		// humanizeDuration converts given seconds to a human-readable duration
 		"humanizeDuration": func(i interface{}) (string, error) {
 			v, err := toFloat64(i)
 			if err != nil {
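For reference, a much-simplified sketch of what a `humanizeDuration`-style helper does: pick the largest fitting unit and format the value with `%.4g`, as in the surrounding code. vmalert's actual implementation handles negative values, sub-second units and mixed units, so treat this only as an illustration:

package main

import "fmt"

// humanizeDuration converts seconds to a compact human-readable string
// using the largest unit that fits (simplified illustration only).
func humanizeDuration(seconds float64) string {
	units := []struct {
		name string
		secs float64
	}{{"d", 86400}, {"h", 3600}, {"m", 60}, {"s", 1}}
	for _, u := range units {
		if seconds >= u.secs {
			return fmt.Sprintf("%.4g%s", seconds/u.secs, u.name)
		}
	}
	return fmt.Sprintf("%.4gs", seconds)
}

func main() {
	fmt.Println(humanizeDuration(90))   // "1.5m"
	fmt.Println(humanizeDuration(7200)) // "2h"
}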
@@ -21,7 +21,7 @@ creation of hourly, daily, weekly and monthly backups.

 * [GCS](https://cloud.google.com/storage/). Example: `gs://<bucket>/<path/to/backup>`
 * [S3](https://aws.amazon.com/s3/). Example: `s3://<bucket>/<path/to/backup>`
-* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `azblob://<bucket>/<path/to/backup>`
+* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `azblob://<container>/<path/to/backup>`
 * Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/) or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
 * Local filesystem. Example: `fs://</absolute/path/to/backup>`. Note that `vmbackup` prevents storing the backup into the directory pointed by the `-storageDataPath` command-line flag, since this directory should be managed solely by VictoriaMetrics or `vmstorage`.
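The supported destinations listed above are distinguished purely by the URL scheme of `-dst`. An illustrative Go sketch of that dispatch (not vmbackup's actual code):

package main

import (
	"fmt"
	"net/url"
)

// backendForDst maps a -dst URL to the storage backend it selects.
func backendForDst(dst string) (string, error) {
	u, err := url.Parse(dst)
	if err != nil {
		return "", err
	}
	switch u.Scheme {
	case "gs":
		return "Google Cloud Storage", nil
	case "s3":
		return "S3 or S3-compatible storage", nil
	case "azblob":
		return "Azure Blob Storage", nil
	case "fs":
		return "local filesystem", nil
	default:
		return "", fmt.Errorf("unsupported scheme %q in -dst=%q", u.Scheme, dst)
	}
}

func main() {
	backend, err := backendForDst("azblob://my-container/path/to/backup")
	if err != nil {
		panic(err)
	}
	fmt.Println(backend) // Azure Blob Storage
}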
@@ -187,7 +187,7 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
   -customS3Endpoint string
     	Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO). S3 is used if not set
   -dst string
-    	Where to put the backup on the remote storage. Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://bucket/path/to/backup or fs:///path/to/local/backup/dir
+    	Where to put the backup on the remote storage. Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://container/path/to/backup or fs:///path/to/local/backup/dir
     	-dst can point to the previous backup. In this case incremental backup is performed, i.e. only changed data is uploaded
   -enableTCP6
     	Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
@@ -30,7 +30,7 @@ var (
 	snapshotDeleteURL = flag.String("snapshot.deleteURL", "", "VictoriaMetrics delete snapshot url. Optional. Will be generated from -snapshot.createURL if not provided. "+
 		"All created snapshots will be automatically deleted. Example: http://victoriametrics:8428/snapshot/delete")
 	dst = flag.String("dst", "", "Where to put the backup on the remote storage. "+
-		"Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://bucket/path/to/backup or fs:///path/to/local/backup/dir\n"+
+		"Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://container/path/to/backup or fs:///path/to/local/backup/dir\n"+
 		"-dst can point to the previous backup. In this case incremental backup is performed, i.e. only changed data is uploaded")
 	origin      = flag.String("origin", "", "Optional origin directory on the remote storage with old backup for server-side copying when performing full backup. This speeds up full backups")
 	concurrency = flag.Int("concurrency", 10, "The number of concurrent workers. Higher concurrency may reduce backup duration")
@@ -19,7 +19,7 @@ Run the following command to restore backup from the given `-src` into the given
 `vmrestore` can restore backups from the following storage types:
 * [GCS](https://cloud.google.com/storage/). Example: `-src=gs://<bucket>/<path/to/backup>`
 * [S3](https://aws.amazon.com/s3/). Example: `-src=s3://<bucket>/<path/to/backup>`
-* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `-src=azblob://<bucket>/<path/to/backup>`
+* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `-src=azblob://<container>/<path/to/backup>`
 * Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/)
   or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
 * Local filesystem. Example: `-src=fs://</absolute/path/to/backup>`. Note that `vmbackup` prevents storing the backup
@@ -162,7 +162,7 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
   -skipBackupCompleteCheck
     	Whether to skip checking for 'backup complete' file in -src. This may be useful for restoring from old backups, which were created without 'backup complete' file
   -src string
-    	Source path with backup on the remote storage. Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://bucket/path/to/backup or fs:///path/to/local/backup
+    	Source path with backup on the remote storage. Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://container/path/to/backup or fs:///path/to/local/backup
   -storageDataPath string
     	Destination path where backup must be restored. VictoriaMetrics must be stopped when restoring from backup. -storageDataPath dir can be non-empty. In this case the contents of -storageDataPath dir is synchronized with -src contents, i.e. it works like 'rsync --delete' (default "victoria-metrics-data")
   -tls
@@ -20,7 +20,7 @@ import (
 var (
 	httpListenAddr = flag.String("httpListenAddr", ":8421", "TCP address for exporting metrics at /metrics page")
 	src            = flag.String("src", "", "Source path with backup on the remote storage. "+
-		"Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://bucket/path/to/backup or fs:///path/to/local/backup")
+		"Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://container/path/to/backup or fs:///path/to/local/backup")
 	storageDataPath = flag.String("storageDataPath", "victoria-metrics-data", "Destination path where backup must be restored. "+
 		"VictoriaMetrics must be stopped when restoring from backup. -storageDataPath dir can be non-empty. In this case the contents of -storageDataPath dir "+
 		"is synchronized with -src contents, i.e. it works like 'rsync --delete'")
@@ -1,12 +1,12 @@
 {
   "files": {
-    "main.css": "./static/css/main.89abca0f.css",
-    "main.js": "./static/js/main.c552245f.js",
+    "main.css": "./static/css/main.fdc77f08.css",
+    "main.js": "./static/js/main.2d332988.js",
     "static/js/27.c1ccfd29.chunk.js": "./static/js/27.c1ccfd29.chunk.js",
     "index.html": "./index.html"
   },
   "entrypoints": [
-    "static/css/main.89abca0f.css",
-    "static/js/main.c552245f.js"
+    "static/css/main.fdc77f08.css",
+    "static/js/main.2d332988.js"
   ]
 }
@@ -1 +1 @@
-<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="preconnect" href="https://fonts.googleapis.com"><link rel="preconnect" href="https://fonts.gstatic.com" crossorigin><link href="https://fonts.googleapis.com/css2?family=JetBrains+Mono&family=Lato:wght@300;400;700&display=swap" rel="stylesheet"><script src="./dashboards/index.js" type="module"></script><script defer="defer" src="./static/js/main.c552245f.js"></script><link href="./static/css/main.89abca0f.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="preconnect" href="https://fonts.googleapis.com"><link rel="preconnect" href="https://fonts.gstatic.com" crossorigin><link href="https://fonts.googleapis.com/css2?family=JetBrains+Mono&family=Lato:wght@300;400;700&display=swap" rel="stylesheet"><script src="./dashboards/index.js" type="module"></script><script defer="defer" src="./static/js/main.2d332988.js"></script><link href="./static/css/main.fdc77f08.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
File diff suppressed because one or more lines are too long

app/vmselect/vmui/static/css/main.fdc77f08.css (new file, 1 line)
File diff suppressed because one or more lines are too long

app/vmselect/vmui/static/js/main.2d332988.js (new file, 2 lines)
File diff suppressed because one or more lines are too long
@@ -7,7 +7,7 @@
 /*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */

 /**
- * @remix-run/router v1.0.3
+ * @remix-run/router v1.0.5
  *
  * Copyright (c) Remix Software Inc.
  *
@@ -18,7 +18,7 @@
  */

 /**
- * React Router v6.4.3
+ * React Router v6.4.5
  *
  * Copyright (c) Remix Software Inc.
  *
File diff suppressed because one or more lines are too long
@@ -562,8 +562,15 @@ func registerStorageMetrics(strg *storage.Storage) {
 	metrics.NewGauge(`vm_assisted_merges_total{type="storage/inmemory"}`, func() float64 {
 		return float64(tm().InmemoryAssistedMerges)
 	})
+	metrics.NewGauge(`vm_assisted_merges_total{type="storage/small"}`, func() float64 {
+		return float64(tm().SmallAssistedMerges)
+	})

 	metrics.NewGauge(`vm_assisted_merges_total{type="indexdb/inmemory"}`, func() float64 {
-		return float64(idbm().AssistedInmemoryMerges)
+		return float64(idbm().InmemoryAssistedMerges)
+	})
+	metrics.NewGauge(`vm_assisted_merges_total{type="indexdb/file"}`, func() float64 {
+		return float64(idbm().FileAssistedMerges)
 	})

 	metrics.NewGauge(`vm_indexdb_items_added_total`, func() float64 {
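The gauges above follow the callback pattern of the github.com/VictoriaMetrics/metrics package: the supplied function is evaluated on every /metrics scrape, so the exported value always reflects the live counter. A standalone sketch of the same pattern (the mergeCount counter is an illustrative stand-in for the tm()/idbm() accessors):

package main

import (
	"net/http"
	"sync/atomic"

	"github.com/VictoriaMetrics/metrics"
)

// mergeCount stands in for the real merge counters tracked by the storage layer.
var mergeCount uint64

func main() {
	// The callback runs on every scrape of the /metrics endpoint.
	metrics.NewGauge(`vm_assisted_merges_total{type="example"}`, func() float64 {
		return float64(atomic.LoadUint64(&mergeCount))
	})
	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		metrics.WritePrometheus(w, true)
	})
	_ = http.ListenAndServe(":8080", nil)
}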
app/vmui/packages/vmui/package-lock.json (generated, 15158 lines changed)
File diff suppressed because it is too large
@@ -2,7 +2,7 @@ import React, { FC, useState } from "preact/compat";
 import { HashRouter, Route, Routes } from "react-router-dom";
 import router from "./router";
 import AppContextProvider from "./contexts/AppContextProvider";
-import HomeLayout from "./components/Home/HomeLayout";
+import Layout from "./components/Layout/Layout";
 import CustomPanel from "./pages/CustomPanel";
 import DashboardsLayout from "./pages/PredefinedPanels";
 import CardinalityPanel from "./pages/CardinalityPanel";
@@ -28,7 +28,7 @@ const App: FC = () => {
       <Routes>
         <Route
           path={"/"}
-          element={<HomeLayout/>}
+          element={<Layout/>}
         >
           <Route
             path={router.home}
@@ -1,4 +1,4 @@
-import React, { FC, useRef } from "preact/compat";
+import React, { FC, useRef, useState } from "preact/compat";
 import { KeyboardEvent } from "react";
 import { ErrorTypes } from "../../../types";
 import TextField from "../../Main/TextField/TextField";
@@ -32,6 +32,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
   disabled = false
 }) => {

+  const [openAutocomplete, setOpenAutocomplete] = useState(false);
   const autocompleteAnchorEl = useRef<HTMLDivElement>(null);

   const handleSelect = (val: string) => {
@@ -59,7 +60,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
   }

   // execute query
-  if (enter && !shiftKey) {
+  if (enter && !shiftKey && !openAutocomplete) {
     onEnter();
   }
 };
@@ -84,6 +85,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
           options={options}
           anchor={autocompleteAnchorEl}
           onSelect={handleSelect}
+          onOpenAutocomplete={setOpenAutocomplete}
         />
       )}
     </div>;
@@ -1,56 +0,0 @@
-@use "src/styles/variables" as *;
-
-.vm-header {
-  display: flex;
-  align-items: center;
-  justify-content: flex-start;
-  padding: $padding-small $padding-medium;
-  gap: $padding-large;
-
-  &_app {
-    padding: $padding-small 0;
-  }
-
-  &-logo {
-    display: grid;
-    align-items: center;
-    justify-content: center;
-
-    &__icon {
-      position: relative;
-      display: flex;
-      align-items: center;
-      cursor: pointer;
-      width: 100%;
-      margin-bottom: 2px;
-    }
-
-    &__issue {
-      text-align: center;
-      font-size: 10px;
-      opacity: 0.4;
-      color: inherit;
-      text-decoration: underline;
-      transition: 0.2s opacity;
-      white-space: nowrap;
-      cursor: pointer;
-
-      &:hover {
-        opacity: 0.8;
-      }
-    }
-  }
-
-  &-nav {
-    font-size: $font-size-small;
-    font-weight: 600;
-  }
-
-  &__settings {
-    display: flex;
-    align-items: center;
-    justify-content: flex-end;
-    gap: $padding-small;
-    flex-grow: 1;
-  }
-}
@@ -0,0 +1,33 @@
+import React, { FC } from "preact/compat";
+import dayjs from "dayjs";
+import "./style.scss";
+import { LogoIcon } from "../../Main/Icons";
+
+const Footer: FC = () => {
+  const copyrightYears = `2019-${dayjs().format("YYYY")}`;
+
+  return <footer className="vm-footer">
+    <a
+      className="vm-footer__link vm-footer__website"
+      target="_blank"
+      href="https://victoriametrics.com/"
+      rel="noreferrer"
+    >
+      <LogoIcon/>
+      victoriametrics.com
+    </a>
+    <a
+      className="vm-footer__link"
+      target="_blank"
+      href="https://github.com/VictoriaMetrics/VictoriaMetrics/issues/new"
+      rel="noreferrer"
+    >
+      create an issue
+    </a>
+    <div className="vm-footer__copyright">
+      © {copyrightYears} VictoriaMetrics
+    </div>
+  </footer>;
+};
+
+export default Footer;
@@ -0,0 +1,34 @@
+@use "src/styles/variables" as *;
+
+.vm-footer {
+  display: flex;
+  align-items: center;
+  justify-content: center;
+  padding: $padding-medium;
+  gap: $padding-large;
+  border-top: $border-divider;
+  color: $color-text-secondary;
+
+  &__website {
+    display: grid;
+    grid-template-columns: 12px auto;
+    align-items: center;
+    justify-content: center;
+    gap: 6px;
+  }
+
+  &__link {
+    transition: color 200ms ease;
+    cursor: pointer;
+
+    &:hover {
+      color: $color-primary;
+      text-decoration: underline;
+    }
+  }
+
+  &__copyright {
+    text-align: right;
+    flex-grow: 1;
+  }
+}
@@ -1,17 +1,17 @@
 import React, { FC, useMemo, useState } from "preact/compat";
-import { ExecutionControls } from "../Configurators/TimeRangeSettings/ExecutionControls/ExecutionControls";
-import { setQueryStringWithoutPageReload } from "../../utils/query-string";
-import { TimeSelector } from "../Configurators/TimeRangeSettings/TimeSelector/TimeSelector";
-import GlobalSettings from "../Configurators/GlobalSettings/GlobalSettings";
+import { ExecutionControls } from "../../Configurators/TimeRangeSettings/ExecutionControls/ExecutionControls";
+import { setQueryStringWithoutPageReload } from "../../../utils/query-string";
+import { TimeSelector } from "../../Configurators/TimeRangeSettings/TimeSelector/TimeSelector";
+import GlobalSettings from "../../Configurators/GlobalSettings/GlobalSettings";
 import { useLocation, useNavigate } from "react-router-dom";
-import router, { RouterOptions, routerOptions } from "../../router";
+import router, { RouterOptions, routerOptions } from "../../../router";
 import { useEffect } from "react";
-import ShortcutKeys from "../Main/ShortcutKeys/ShortcutKeys";
-import { getAppModeEnable, getAppModeParams } from "../../utils/app-mode";
-import CardinalityDatePicker from "../Configurators/CardinalityDatePicker/CardinalityDatePicker";
-import { LogoIcon } from "../Main/Icons";
-import { getCssVariable } from "../../utils/theme";
-import Tabs from "../Main/Tabs/Tabs";
+import ShortcutKeys from "../../Main/ShortcutKeys/ShortcutKeys";
+import { getAppModeEnable, getAppModeParams } from "../../../utils/app-mode";
+import CardinalityDatePicker from "../../Configurators/CardinalityDatePicker/CardinalityDatePicker";
+import { LogoFullIcon } from "../../Main/Icons";
+import { getCssVariable } from "../../../utils/theme";
+import Tabs from "../../Main/Tabs/Tabs";
 import "./style.scss";
 import classNames from "classnames";
@@ -84,23 +84,11 @@ const Header: FC = () => {
     >
       {!appModeEnable && (
         <div
-          className="vm-header-logo"
+          className="vm-header__logo"
+          onClick={onClickLogo}
           style={{ color }}
         >
-          <div
-            className="vm-header-logo__icon"
-            onClick={onClickLogo}
-          >
-            <LogoIcon/>
-          </div>
-          <a
-            className="vm-header-logo__issue"
-            target="_blank"
-            href="https://github.com/VictoriaMetrics/VictoriaMetrics/issues/new"
-            rel="noreferrer"
-          >
-            create an issue
-          </a>
+          <LogoFullIcon/>
         </div>
       )}
       <div className="vm-header-nav">
@@ -0,0 +1,37 @@
+@use "../../../styles/variables" as *;
+
+.vm-header {
+  display: flex;
+  align-items: center;
+  justify-content: flex-start;
+  padding: $padding-small $padding-medium;
+  gap: $padding-large;
+
+  &_app {
+    padding: $padding-small 0;
+  }
+
+  &__logo {
+    position: relative;
+    display: flex;
+    align-items: center;
+    justify-content: center;
+    cursor: pointer;
+    width: 100%;
+    max-width: 65px;
+    margin-bottom: 2px;
+  }
+
+  &-nav {
+    font-size: $font-size-small;
+    font-weight: 600;
+  }
+
+  &__settings {
+    display: flex;
+    align-items: center;
+    justify-content: flex-end;
+    gap: $padding-small;
+    flex-grow: 1;
+  }
+}
@@ -1,11 +1,12 @@
-import Header from "../Header/Header";
+import Header from "./Header/Header";
 import React, { FC } from "preact/compat";
 import { Outlet } from "react-router-dom";
 import "./style.scss";
 import { getAppModeEnable } from "../../utils/app-mode";
 import classNames from "classnames";
+import Footer from "./Footer/Footer";

-const HomeLayout: FC = () => {
+const Layout: FC = () => {
   const appModeEnable = getAppModeEnable();

   return <section className="vm-container">
@@ -18,7 +19,8 @@ const HomeLayout: FC = () => {
     >
       <Outlet/>
     </div>
+    {!appModeEnable && <Footer/>}
   </section>;
 };

-export default HomeLayout;
+export default Layout;
@@ -10,7 +10,8 @@ interface AutocompleteProps {
   anchor: Ref<HTMLElement>
   disabled?: boolean
   maxWords?: number
-  onSelect: (val: string) => void
+  onSelect: (val: string) => void,
+  onOpenAutocomplete?: (val: boolean) => void
 }

 const Autocomplete: FC<AutocompleteProps> = ({
@@ -20,6 +21,7 @@ const Autocomplete: FC<AutocompleteProps> = ({
   disabled,
   maxWords = 1,
   onSelect,
+  onOpenAutocomplete
 }) => {
   const wrapperEl = useRef<HTMLDivElement>(null);
@@ -99,6 +101,10 @@ const Autocomplete: FC<AutocompleteProps> = ({
     setFocusOption(-1);
   }, [foundOptions]);

+  useEffect(() => {
+    onOpenAutocomplete && onOpenAutocomplete(openAutocomplete);
+  }, [openAutocomplete]);
+
   useClickOutside(wrapperEl, handleCloseAutocomplete);

   return (
@@ -64,10 +64,6 @@ $checkbox-border-radius: $border-radius-small;
       color: $color-white;
       transform: scale(0);
       transition: transform 100ms ease-in-out;
-
-      svg {
-        width: 100%;
-      }
     }
   }
@@ -49,10 +49,6 @@
     justify-content: center;
     width: 14px;
     height: 14px;
-
-    svg {
-      width: 100%;
-    }
   }
 }
@@ -1,17 +1,26 @@
 import React from "react";

-export const LogoIcon = () => (
+export const LogoFullIcon = () => (
   <svg
     viewBox="0 0 74 24"
     fill="currentColor"
   >
     <path
-      d="M6.11771 9.47563C6.4774 9.7554 6.91935 9.90875 7.37507 9.9119H7.42685C7.9076 9.90451 8.38836 9.71964 8.67681 9.46823C10.1856 8.18898 14.5568 4.18115 14.5568 4.18115C15.7254 3.09415 12.4637 2.00716 7.42685 1.99977H7.36768C2.33084 2.00716 -0.930893 3.09415 0.237711 4.18115C0.237711 4.18115 4.60888 8.18898 6.11771 9.47563ZM8.67681 11.6422C8.31807 11.9246 7.87603 12.0806 7.41945 12.0859H7.37507C6.91849 12.0806 6.47645 11.9246 6.11771 11.6422C5.08224 10.7549 1.38413 7.41995 0.00103198 6.14809V8.07806C0.00103198 8.2925 0.0823905 8.57349 0.222919 8.70659L0.293358 8.77097L0.293386 8.77099C1.33788 9.72556 4.83907 12.9253 6.11771 14.0159C6.47645 14.2983 6.91849 14.4543 7.37507 14.4595H7.41945C7.9076 14.4447 8.38096 14.2599 8.67681 14.0159C9.98594 12.9067 13.6249 9.57175 14.5642 8.70659C14.7121 8.57349 14.7861 8.2925 14.7861 8.07806V6.14809C12.7662 7.99781 10.7297 9.82926 8.67681 11.6422ZM7.41945 16.6261C7.87517 16.623 8.31712 16.4696 8.67681 16.1898C10.7298 14.3744 12.7663 12.5405 14.7861 10.6883V12.6257C14.7861 12.8327 14.7121 13.1137 14.5642 13.2468C13.6249 14.1194 9.98594 17.4469 8.67681 18.5561C8.38096 18.8075 7.9076 18.9924 7.41945 18.9998H7.37507C6.91935 18.9966 6.4774 18.8433 6.11771 18.5635C4.91431 17.5371 1.74223 14.6362 0.502336 13.5023C0.3934 13.4027 0.299379 13.3167 0.222919 13.2468C0.0823905 13.1137 0.00103198 12.8327 0.00103198 12.6257V10.6883C1.38413 11.9528 5.08224 15.2951 6.11771 16.1825C6.47645 16.4649 6.91849 16.6209 7.37507 16.6261H7.41945Z"
+      d="M6.11767 10.4759C6.47736 10.7556 6.91931 10.909 7.37503 10.9121H7.42681C7.90756 10.9047 8.38832 10.7199 8.67677 10.4685C10.1856 9.18921 14.5568 5.18138 14.5568 5.18138C15.7254 4.09438 12.4637 3.00739 7.42681 3H7.36764C2.3308 3.00739 -0.930935 4.09438 0.237669 5.18138C0.237669 5.18138 4.60884 9.18921 6.11767 10.4759ZM8.67677 12.6424C8.31803 12.9248 7.87599 13.0808 7.41941 13.0861H7.37503C6.91845 13.0808 6.47641 12.9248 6.11767 12.6424C5.0822 11.7551 1.38409 8.42018 0.000989555 7.14832V9.07829C0.000989555 9.29273 0.0823481 9.57372 0.222877 9.70682L0.293316 9.7712L0.293344 9.77122C1.33784 10.7258 4.83903 13.9255 6.11767 15.0161C6.47641 15.2985 6.91845 15.4545 7.37503 15.4597H7.41941C7.90756 15.4449 8.38092 15.2601 8.67677 15.0161C9.9859 13.9069 13.6249 10.572 14.5642 9.70682C14.7121 9.57372 14.7861 9.29273 14.7861 9.07829V7.14832C12.7662 8.99804 10.7297 10.8295 8.67677 12.6424ZM7.41941 17.6263C7.87513 17.6232 8.31708 17.4698 8.67677 17.19C10.7298 15.3746 12.7663 13.5407 14.7861 11.6885V13.6259C14.7861 13.8329 14.7121 14.1139 14.5642 14.247C13.6249 15.1196 9.9859 18.4471 8.67677 19.5563C8.38092 19.8077 7.90756 19.9926 7.41941 20H7.37503C6.91931 19.9968 6.47736 19.8435 6.11767 19.5637C4.91427 18.5373 1.74219 15.6364 0.502294 14.5025C0.393358 14.4029 0.299337 14.3169 0.222877 14.247C0.0823481 14.1139 0.000989555 13.8329 0.000989555 13.6259V11.6885C1.38409 12.953 5.0822 16.2953 6.11767 17.1827C6.47641 17.4651 6.91845 17.6211 7.37503 17.6263H7.41941Z"
-      fill="currentColor"
     />
     <path
-      d="M35 3.54L29.16 18H26.73L20.89 3.54H23.05C23.2833 3.54 23.4733 3.59667 23.62 3.71C23.7667 3.82333 23.8767 3.97 23.95 4.15L27.36 12.97C27.4733 13.2567 27.58 13.5733 27.68 13.92C27.7867 14.26 27.8867 14.6167 27.98 14.99C28.06 14.6167 28.1467 14.26 28.24 13.92C28.3333 13.5733 28.4367 13.2567 28.55 12.97L31.94 4.15C31.9933 3.99667 32.0967 3.85667 32.25 3.73C32.41 3.60333 32.6033 3.54 32.83 3.54H35ZM52.1767 3.54V18H49.8067V8.66C49.8067 8.28667 49.8267 7.88333 49.8667 7.45L45.4967 15.66C45.2901 16.0533 44.9734 16.25 44.5467 16.25H44.1667C43.7401 16.25 43.4234 16.0533 43.2167 15.66L38.7967 7.42C38.8167 7.64 38.8334 7.85667 38.8467 8.07C38.8601 8.28333 38.8667 8.48 38.8667 8.66V18H36.4967V3.54H38.5267C38.6467 3.54 38.7501 3.54333 38.8367 3.55C38.9234 3.55667 39.0001 3.57333 39.0667 3.6C39.1401 3.62667 39.2034 3.67 39.2567 3.73C39.3167 3.79 39.3734 3.87 39.4267 3.97L43.7567 12C43.8701 12.2133 43.9734 12.4333 44.0667 12.66C44.1667 12.8867 44.2634 13.12 44.3567 13.36C44.4501 13.1133 44.5467 12.8767 44.6467 12.65C44.7467 12.4167 44.8534 12.1933 44.9667 11.98L49.2367 3.97C49.2901 3.87 49.3467 3.79 49.4067 3.73C49.4667 3.67 49.5301 3.62667 49.5967 3.6C49.6701 3.57333 49.7501 3.55667 49.8367 3.55C49.9234 3.54333 50.0267 3.54 50.1467 3.54H52.1767ZM61.063 17.27C61.743 17.27 62.3496 17.1533 62.883 16.92C63.423 16.68 63.8796 16.35 64.253 15.93C64.6263 15.51 64.9096 15.0167 65.103 14.45C65.303 13.8767 65.403 13.26 65.403 12.6V3.85H66.423V12.6C66.423 13.38 66.2996 14.11 66.053 14.79C65.8063 15.4633 65.4496 16.0533 64.983 16.56C64.523 17.06 63.9596 17.4533 63.293 17.74C62.633 18.0267 61.8896 18.17 61.063 18.17C60.2363 18.17 59.4896 18.0267 58.823 17.74C58.163 17.4533 57.5996 17.06 57.133 16.56C56.673 16.0533 56.3196 15.4633 56.073 14.79C55.8263 14.11 55.703 13.38 55.703 12.6V3.85H56.733V12.59C56.733 13.25 56.8296 13.8667 57.023 14.44C57.223 15.0067 57.5063 15.5 57.873 15.92C58.2463 16.34 58.6996 16.67 59.233 16.91C59.773 17.15 60.383 17.27 61.063 17.27ZM71.4442 18H70.4142V3.85H71.4442V18Z"
+      d="M34.9996 5L29.1596 19.46H26.7296L20.8896 5H23.0496C23.2829 5 23.4729 5.05667 23.6196 5.17C23.7663 5.28333 23.8763 5.43 23.9496 5.61L27.3596 14.43C27.4729 14.7167 27.5796 15.0333 27.6796 15.38C27.7863 15.72 27.8863 16.0767 27.9796 16.45C28.0596 16.0767 28.1463 15.72 28.2396 15.38C28.3329 15.0333 28.4363 14.7167 28.5496 14.43L31.9396 5.61C31.9929 5.45667 32.0963 5.31667 32.2496 5.19C32.4096 5.06333 32.603 5 32.8297 5H34.9996ZM52.1763 5V19.46H49.8064V10.12C49.8064 9.74667 49.8263 9.34333 49.8663 8.91L45.4963 17.12C45.2897 17.5133 44.973 17.71 44.5463 17.71H44.1663C43.7397 17.71 43.4231 17.5133 43.2164 17.12L38.7963 8.88C38.8163 9.1 38.833 9.31667 38.8463 9.53C38.8597 9.74333 38.8663 9.94 38.8663 10.12V19.46H36.4963V5H38.5263C38.6463 5 38.7497 5.00333 38.8363 5.01C38.923 5.01667 38.9997 5.03333 39.0663 5.06C39.1397 5.08667 39.203 5.13 39.2563 5.19C39.3163 5.25 39.373 5.33 39.4263 5.43L43.7563 13.46C43.8697 13.6733 43.973 13.8933 44.0663 14.12C44.1663 14.3467 44.263 14.58 44.3563 14.82C44.4497 14.5733 44.5464 14.3367 44.6464 14.11C44.7464 13.8767 44.8531 13.6533 44.9664 13.44L49.2363 5.43C49.2897 5.33 49.3463 5.25 49.4063 5.19C49.4663 5.13 49.5297 5.08667 49.5963 5.06C49.6697 5.03333 49.7497 5.01667 49.8363 5.01C49.923 5.00333 50.0264 5 50.1464 5H52.1763ZM61.0626 18.73C61.7426 18.73 62.3492 18.6133 62.8826 18.38C63.4226 18.14 63.8792 17.81 64.2526 17.39C64.6259 16.97 64.9092 16.4767 65.1026 15.91C65.3026 15.3367 65.4026 14.72 65.4026 14.06V5.31H66.4226V14.06C66.4226 14.84 66.2993 15.57 66.0527 16.25C65.806 16.9233 65.4493 17.5133 64.9827 18.02C64.5227 18.52 63.9592 18.9133 63.2926 19.2C62.6326 19.4867 61.8892 19.63 61.0626 19.63C60.2359 19.63 59.4893 19.4867 58.8227 19.2C58.1627 18.9133 57.5992 18.52 57.1326 18.02C56.6726 17.5133 56.3193 16.9233 56.0727 16.25C55.826 15.57 55.7026 14.84 55.7026 14.06V5.31H56.7327V14.05C56.7327 14.71 56.8292 15.3267 57.0226 15.9C57.2226 16.4667 57.506 16.96 57.8727 17.38C58.246 17.8 58.6993 18.13 59.2327 18.37C59.7727 18.61 60.3826 18.73 61.0626 18.73ZM71.4438 19.46H70.4138V5.31H71.4438V19.46Z"
+    />
+  </svg>
+);
+
+export const LogoIcon = () => (
+  <svg
+    viewBox="0 0 15 17"
     fill="currentColor"
+  >
+    <path
+      d="M6.11767 7.47586C6.47736 7.75563 6.91931 7.90898 7.37503 7.91213H7.42681C7.90756 7.90474 8.38832 7.71987 8.67677 7.46846C10.1856 6.18921 14.5568 2.18138 14.5568 2.18138C15.7254 1.09438 12.4637 0.00739 7.42681 0H7.36764C2.3308 0.00739 -0.930935 1.09438 0.237669 2.18138C0.237669 2.18138 4.60884 6.18921 6.11767 7.47586ZM8.67677 9.64243C8.31803 9.92483 7.87599 10.0808 7.41941 10.0861H7.37503C6.91845 10.0808 6.47641 9.92483 6.11767 9.64243C5.0822 8.75513 1.38409 5.42018 0.000989555 4.14832V6.07829C0.000989555 6.29273 0.0823481 6.57372 0.222877 6.70682L0.293316 6.7712L0.293344 6.77122C1.33784 7.72579 4.83903 10.9255 6.11767 12.0161C6.47641 12.2985 6.91845 12.4545 7.37503 12.4597H7.41941C7.90756 12.4449 8.38092 12.2601 8.67677 12.0161C9.9859 10.9069 13.6249 7.57198 14.5642 6.70682C14.7121 6.57372 14.7861 6.29273 14.7861 6.07829V4.14832C12.7662 5.99804 10.7297 7.82949 8.67677 9.64243ZM7.41941 14.6263C7.87513 14.6232 8.31708 14.4698 8.67677 14.19C10.7298 12.3746 12.7663 10.5407 14.7861 8.68853V10.6259C14.7861 10.8329 14.7121 11.1139 14.5642 11.247C13.6249 12.1196 9.9859 15.4471 8.67677 16.5563C8.38092 16.8077 7.90756 16.9926 7.41941 17H7.37503C6.91931 16.9968 6.47736 16.8435 6.11767 16.5637C4.91427 15.5373 1.74219 12.6364 0.502294 11.5025C0.393358 11.4029 0.299337 11.3169 0.222877 11.247C0.0823481 11.1139 0.000989555 10.8329 0.000989555 10.6259V8.68853C1.38409 9.95303 5.0822 13.2953 6.11767 14.1827C6.47641 14.4651 6.91845 14.6211 7.37503 14.6263H7.41941Z"
     />
   </svg>
 );
@@ -40,11 +40,6 @@ $padding-modal: 22px;
       box-sizing: content-box;
      color: $color-white;
       cursor: pointer;
-
-      svg {
-        width: 100%;
-        height: auto;
-      }
     }
   }
 }
@@ -3,6 +3,7 @@ import { ReactNode, useEffect } from "react";
 import "./style.scss";
 import classNames from "classnames";
 import { getCssVariable } from "../../../utils/theme";
+import useResize from "../../../hooks/useResize";

 interface TabsProps {
   activeItem: string
@@ -19,6 +20,7 @@ const Tabs: FC<TabsProps> = ({
   onChange,
   indicatorPlacement = "bottom"
 }) => {
+  const windowSize = useResize(document.body);
   const activeNavRef = useRef<HTMLDivElement>(null);
   const [indicatorPosition, setIndicatorPosition] = useState({ left: 0, width: 0, bottom: 0 });

@@ -32,7 +34,7 @@ const Tabs: FC<TabsProps> = ({
       const positionTop = indicatorPlacement === "top";
       setIndicatorPosition({ left, width, bottom: positionTop ? height - 2 : 0 });
     }
-  }, [activeItem, activeNavRef, items]);
+  }, [windowSize, activeItem, activeNavRef, items]);

   return <div className="vm-tabs">
     {items.map(item => (
@@ -113,11 +113,6 @@
     height: 100%;
     position: absolute;
     color: $color-text-secondary;
-
-    svg {
-      width: 100%;
-      height: auto;
-    }
   }

   &__icon-end {
@@ -1,9 +1,8 @@
-import { FC } from "react";
+import React, { FC } from "react";
 import EnhancedTable from "../Table/Table";
 import TableCells from "../Table/TableCells/TableCells";
 import BarChart from "../../../components/Chart/BarChart/BarChart";
 import { barOptions } from "../../../components/Chart/BarChart/consts";
-import React, { SyntheticEvent } from "react";
 import { Data, HeadCell } from "../Table/types";
 import { MutableRef } from "preact/hooks";
 import Tabs from "../../../components/Main/Tabs/Tabs";
@@ -15,7 +14,7 @@ interface MetricsProperties {
   rows: Data[];
   activeTab: number;
   onChange: (newValue: string, tabId: string) => void;
-  onActionClick: (e: SyntheticEvent) => void;
+  onActionClick: (name: string) => void;
   tabs: string[];
   chartContainer: MutableRef<HTMLDivElement> | undefined;
   totalSeries: number,
@@ -1,4 +1,3 @@
-import { SyntheticEvent } from "react";
 import React, { FC } from "preact/compat";
 import { Data } from "../types";
 import LineProgress from "../../../../components/Main/LineProgress/LineProgress";
@@ -9,12 +8,16 @@ import Tooltip from "../../../../components/Main/Tooltip/Tooltip";
 interface CardinalityTableCells {
   row: Data,
   totalSeries: number;
-  onActionClick: (e: SyntheticEvent) => void;
+  onActionClick: (name: string) => void;
 }

 const TableCells: FC<CardinalityTableCells> = ({ row, totalSeries, onActionClick }) => {
   const progress = totalSeries > 0 ? row.value / totalSeries * 100 : -1;

+  const handleActionClick = () => {
+    onActionClick(row.name);
+  };
+
   return <>
     <td
       className="vm-table-cell"
@@ -45,7 +48,7 @@ const TableCells: FC<CardinalityTableCells> = ({ row, totalSeries, onActionClick
         <Button
           variant="text"
           size="small"
-          onClick={onActionClick}
+          onClick={handleActionClick}
         >
           <PlayCircleOutlineIcon/>
         </Button>
@@ -1,5 +1,4 @@
 import React, { FC, useState } from "react";
-import { SyntheticEvent } from "react";
 import { useFetchQuery } from "./hooks/useCardinalityFetch";
 import { queryUpdater } from "./helpers";
 import { Data } from "./Table/types";
@@ -60,8 +59,7 @@ const Index: FC = () => {
     setTab({ ...stateTabs, [tabId]: +newValue });
   };

-  const handleFilterClick = (key: string) => (e: SyntheticEvent) => {
-    const name = e.currentTarget.id;
+  const handleFilterClick = (key: string) => (name: string) => {
     const query = queryUpdater[key](focusLabel, name);
     setQuery(query);
     setQueryHistory(prev => [...prev, query]);
@@ -45,3 +45,7 @@ input[type=number]::-webkit-outer-spin-button {
   bottom: $padding-global;
   z-index: 999;
 }
+
+svg {
+  width: 100%;
+}
@@ -69,6 +69,19 @@
       "iconColor": "red",
       "name": "alerts",
       "titleFormat": "{{alertname}}"
+    },
+    {
+      "datasource": {
+        "type": "prometheus",
+        "uid": "$ds"
+      },
+      "enable": true,
+      "expr": "sum(vm_app_version{job=~\"$job\"}) by(short_version) unless (sum(vm_app_version{job=~\"$job\"} offset 20m) by(short_version))",
+      "hide": true,
+      "iconColor": "dark-blue",
+      "name": "version change",
+      "textFormat": "{{short_version}}",
+      "titleFormat": "Version change"
     }
   ]
 },
@@ -69,6 +69,19 @@
       "iconColor": "red",
       "name": "alerts",
       "titleFormat": "{{alertname}}"
+    },
+    {
+      "datasource": {
+        "type": "prometheus",
+        "uid": "$ds"
+      },
+      "enable": true,
+      "expr": "sum(vm_app_version{job=~\"$job\"}) by(short_version) unless (sum(vm_app_version{job=~\"$job\"} offset 20m) by(short_version))",
+      "hide": true,
+      "iconColor": "dark-blue",
+      "name": "version",
+      "textFormat": "{{short_version}}",
+      "titleFormat": "Version change"
     }
   ]
 },
@@ -52,6 +52,19 @@
       "type": "dashboard"
     },
     "type": "dashboard"
+    },
+    {
+      "datasource": {
+        "type": "prometheus",
+        "uid": "$ds"
+      },
+      "enable": true,
+      "expr": "sum(vm_app_version{job=~\"$job\"}) by(short_version) unless (sum(vm_app_version{job=~\"$job\"} offset 20m) by(short_version))",
+      "hide": true,
+      "iconColor": "dark-blue",
+      "name": "version",
+      "textFormat": "{{short_version}}",
+      "titleFormat": "Version change"
     }
   ]
 },
@@ -16,6 +16,16 @@ The following tip changes can be tested by building VictoriaMetrics components f
 ## tip

+
+## [v1.85.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.85.1)
+
+* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): support `$for` or `.For` template variables in alert's annotations. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3246).
+
+* BUGFIX: [DataDog protocol parser](https://docs.victoriametrics.com/#how-to-send-data-from-datadog-agent): do not re-use `host` and `device` fields from the previously parsed messages if these fields are missing in the currently parsed message. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3432).
+* BUGFIX: reduce CPU usage when regex-based relabeling rules are applied to more than 100K unique Graphite metrics. The issue was introduced in [v1.82.0](https://docs.victoriametrics.com/CHANGELOG.html#v1820).
+* BUGFIX: do not block [merges](https://docs.victoriametrics.com/#storage) of small parts by merges of big parts on hosts with a small number of CPU cores. This issue could result in a growing number of `storage/small` parts while a big merge is in progress. This, in turn, could increase CPU and memory usage during querying, since queries need to inspect a bigger number of small parts. The issue was introduced in [v1.85.0](https://docs.victoriametrics.com/CHANGELOG.html#v1850).
+* BUGFIX: [vmbackup](https://docs.victoriametrics.com/vmbackup.html): fix the `The source request body for synchronous copy is too large and exceeds the maximum permissible limit (256MB)` error when performing backups to Azure blob storage. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3477).
+
+
 ## [v1.85.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.85.0)

 Released at 11-12-2022
@@ -287,7 +287,13 @@ See [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting.html

 ## Readonly mode

-`vmstorage` nodes automatically switch to readonly mode when the directory pointed by `-storageDataPath` contains less than `-storage.minFreeDiskSpaceBytes` of free space. `vminsert` nodes stop sending data to such nodes and start re-routing the data to the remaining `vmstorage` nodes.
+`vmstorage` nodes automatically switch to readonly mode when the directory pointed by `-storageDataPath`
+contains less than `-storage.minFreeDiskSpaceBytes` of free space. `vminsert` nodes stop sending data to such nodes
+and start re-routing the data to the remaining `vmstorage` nodes.
+
+`vmstorage` sets the `vm_storage_is_read_only` metric at `http://vmstorage:8482/metrics` to `1` when it enters read-only mode.
+The metric is set to `0` when the `vmstorage` isn't in read-only mode.

 ## URL format
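For readers wiring up monitoring around this change, the flag can be read straight off the metrics endpoint. A minimal sketch, assuming the default `vmstorage` port and plain Prometheus text exposition; `isReadOnly` is a hypothetical helper, not part of VictoriaMetrics, and production setups would typically alert on the metric via vmalert instead:

```go
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

// isReadOnly reports whether the given vmstorage /metrics page exposes
// vm_storage_is_read_only with value 1.
func isReadOnly(metricsURL string) (bool, error) {
	resp, err := http.Get(metricsURL)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		line := sc.Text()
		// HELP/TYPE lines start with '#', so a plain prefix match is enough here.
		if strings.HasPrefix(line, "vm_storage_is_read_only") {
			fields := strings.Fields(line)
			return fields[len(fields)-1] == "1", nil
		}
	}
	return false, sc.Err()
}

func main() {
	ro, err := isReadOnly("http://vmstorage:8482/metrics")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("vmstorage read-only:", ro)
}
```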
@@ -44,6 +44,7 @@ plus the following additional features:
 - [mTLS for cluster components](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection).
 - [Kafka integration](https://docs.victoriametrics.com/vmagent.html#kafka-integration).
 - [Multitenant support in vmalert](https://docs.victoriametrics.com/vmalert.html#multitenancy).
+- [Anomaly Detection Service](https://docs.victoriametrics.com/vmanomaly.html)

 On top of this, enterprise package of VictoriaMetrics includes the following important Enterprise features:
@@ -600,7 +600,7 @@ Additionally, the `action: graphite` relabeling rules usually work much faster t
 `vmagent` and [single-node VictoriaMetrics](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html)
 provide the following tools for debugging target-level and metric-level relabeling:

-- Target-level relabeling (e.g. `relabel_configs` section at [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs))
+- Target-level debugging (e.g. `relabel_configs` section at [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs))
   can be performed by navigating to `http://vmagent:8429/targets` page (`http://victoriametrics:8428/targets` page for single-node VictoriaMetrics)
   and clicking the `debug` link at the target, which must be debugged.
   The opened page will show step-by-step results for the actual relabeling rules applied to the target labels.
@@ -611,7 +611,7 @@ provide the following tools for debugging target-level and metric-level relabeli
   and click the `debug` link there. The opened page will show step-by-step results for the actual relabeling rules,
   which result in the target drop.

-- Metric-level relabeling (e.g. `metric_relabel_configs` section at [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs)
+- Metric-level debugging (e.g. `metric_relabel_configs` section at [scrape_configs](https://docs.victoriametrics.com/sd_configs.html#scrape_configs)
   and all the relabeling, which can be set up via `-relabelConfig`, `-remoteWrite.relabelConfig` and `-remoteWrite.urlRelabelConfig`
   command-line flags) can be performed by navigating to `http://vmagent:8429/metric-relabel-debug` page
   (`http://victoriametrics:8428/metric-relabel-debug` page for single-node VictoriaMetrics)
@@ -218,6 +218,7 @@ The following variables are available in templating:
 | $alertID or .AlertID | The current alert's ID generated by vmalert. | {% raw %}Link: vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}{% endraw %} |
 | $groupID or .GroupID | The current alert's group ID generated by vmalert. | {% raw %}Link: vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}{% endraw %} |
 | $expr or .Expr | Alert's expression. Can be used for generating links to Grafana or other systems. | {% raw %}/api/v1/query?query={{ $expr|queryEscape }}{% endraw %} |
+| $for or .For | Alert's configured `for` param. | {% raw %}Number of connections is too high for more than {{ .For }}{% endraw %} |
 | $externalLabels or .ExternalLabels | List of labels configured via `-external.label` command-line flag. | {% raw %}Issues with {{ $labels.instance }} (datacenter-{{ $externalLabels.dc }}){% endraw %} |
 | $externalURL or .ExternalURL | URL configured via `-external.url` command-line flag. Used for cases when vmalert is hidden behind proxy. | {% raw %}Visit {{ $externalURL }} for more details{% endraw %} |
@@ -25,7 +25,7 @@ creation of hourly, daily, weekly and monthly backups.

 * [GCS](https://cloud.google.com/storage/). Example: `gs://<bucket>/<path/to/backup>`
 * [S3](https://aws.amazon.com/s3/). Example: `s3://<bucket>/<path/to/backup>`
-* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `azblob://<bucket>/<path/to/backup>`
+* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `azblob://<container>/<path/to/backup>`
 * Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/) or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
 * Local filesystem. Example: `fs://</absolute/path/to/backup>`. Note that `vmbackup` prevents from storing the backup into the directory pointed by `-storageDataPath` command-line flag, since this directory should be managed solely by VictoriaMetrics or `vmstorage`.
@@ -191,7 +191,7 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
   -customS3Endpoint string
     Custom S3 endpoint for use with S3-compatible storages (e.g. MinIO). S3 is used if not set
   -dst string
-    Where to put the backup on the remote storage. Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://bucket/path/to/backup or fs:///path/to/local/backup/dir
+    Where to put the backup on the remote storage. Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://container/path/to/backup or fs:///path/to/local/backup/dir
     -dst can point to the previous backup. In this case incremental backup is performed, i.e. only changed data is uploaded
   -enableTCP6
     Whether to enable IPv6 for listening and dialing. By default only IPv4 TCP and UDP is used
@@ -23,7 +23,7 @@ Run the following command to restore backup from the given `-src` into the given
 `vmrestore` can restore backups from the following storage types:
 * [GCS](https://cloud.google.com/storage/). Example: `-src=gs://<bucket>/<path/to/backup>`
 * [S3](https://aws.amazon.com/s3/). Example: `-src=s3://<bucket>/<path/to/backup>`
-* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `-src=azblob://<bucket>/<path/to/backup>`
+* [Azure Blob Storage](https://azure.microsoft.com/en-us/products/storage/blobs/). Example: `-src=azblob://<container>/<path/to/backup>`
 * Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/)
   or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
 * Local filesystem. Example: `-src=fs://</absolute/path/to/backup>`. Note that `vmbackup` prevents from storing the backup
@@ -166,7 +166,7 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
   -skipBackupCompleteCheck
     Whether to skip checking for 'backup complete' file in -src. This may be useful for restoring from old backups, which were created without 'backup complete' file
   -src string
-    Source path with backup on the remote storage. Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://bucket/path/to/backup or fs:///path/to/local/backup
+    Source path with backup on the remote storage. Example: gs://bucket/path/to/backup, s3://bucket/path/to/backup, azblob://container/path/to/backup or fs:///path/to/local/backup
   -storageDataPath string
     Destination path where backup must be restored. VictoriaMetrics must be stopped when restoring from backup. -storageDataPath dir can be non-empty. In this case the contents of -storageDataPath dir is synchronized with -src contents, i.e. it works like 'rsync --delete' (default "victoria-metrics-data")
   -tls
go.mod (10 changed lines)

@@ -30,8 +30,8 @@ require (
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-runewidth v0.0.14 // indirect
 	github.com/oklog/ulid v1.3.1
-	github.com/prometheus/common v0.38.0 // indirect
-	github.com/prometheus/prometheus v0.40.6
+	github.com/prometheus/common v0.39.0 // indirect
+	github.com/prometheus/prometheus v0.40.7
 	github.com/urfave/cli/v2 v2.23.7
 	github.com/valyala/fastjson v1.6.3
 	github.com/valyala/fastrand v1.1.0
@@ -50,10 +50,10 @@ require (
 	cloud.google.com/go/compute v1.14.0 // indirect
 	cloud.google.com/go/compute/metadata v0.2.2 // indirect
 	cloud.google.com/go/iam v0.8.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
-	github.com/aws/aws-sdk-go v1.44.157 // indirect
+	github.com/aws/aws-sdk-go v1.44.160 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
 	github.com/aws/aws-sdk-go-v2/credentials v1.13.4 // indirect
 	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 // indirect
@@ -107,7 +107,7 @@ require (
 	go.opentelemetry.io/otel/trace v1.11.2 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/goleak v1.2.0 // indirect
-	golang.org/x/exp v0.0.0-20221208152030-732eee02a75a // indirect
+	golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 // indirect
 	golang.org/x/sync v0.1.0 // indirect
 	golang.org/x/text v0.5.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
go.sum (20 changed lines)

@@ -46,8 +46,8 @@ github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9Eb
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0 h1:sVW/AFBTGyJxDaMYlq0ct3jUXTtj12tQ6zE2GZUgVQw=
 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0/go.mod h1:uGG2W01BaETf0Ozp+QxxKJdMBNRWPdstHG0Fmdwn1/U=
 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 h1:Oj853U9kG+RLTCQXpjvOnrv0WaZHxgmZz1TlLywgOPY=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 h1:+5VZ72z0Qan5Bog5C+ZkgSqUbeVUd9wgtHOrIKuc5b8=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1 h1:YvQv9Mz6T8oR5ypQOL6erY0Z5t71ak1uHV4QFokCOZk=
 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.6.1/go.mod h1:c6WvOhtmjNUWbLfOG1qxM/q0SPvQNSVJvolm+C52dIU=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
@@ -89,8 +89,8 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
 github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.44.157 h1:JVBPpEWC8+yA7CbfAuTl/ZFFlHS3yoqWFqxFyTCISwg=
-github.com/aws/aws-sdk-go v1.44.157/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.160 h1:F41sWUel1CJ69ezoBGCg8sDyu9kyeKEpwmDrLXbCuyA=
+github.com/aws/aws-sdk-go v1.44.160/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-sdk-go-v2 v1.17.2 h1:r0yRZInwiPBNpQ4aDy/Ssh3ROWsGtKDwar2JS8Lm+N8=
 github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
@@ -387,8 +387,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
 github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/common v0.38.0 h1:VTQitp6mXTdUoCmDMugDVOJ1opi6ADftKfp/yeqTR/E=
-github.com/prometheus/common v0.38.0/go.mod h1:MBXfmBQZrK5XpbCkjofnXs96LD2QQ7fEq4C0xjC/yec=
+github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
+github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
 github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
 github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -397,8 +397,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
 github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/prometheus/prometheus v0.40.6 h1:JP2Wbm4HJI9OlWbOzCGRL3zlOXFdSzC0TttI09+EodM=
-github.com/prometheus/prometheus v0.40.6/go.mod h1:nO+vI0cJo1ezp2DPGw5NEnTlYHGRpBFrqE4zb9O0g0U=
+github.com/prometheus/prometheus v0.40.7 h1:cYtp4YrR9M99YpTUfXbei/HjIJJ+En23NKsTCeZ2U2w=
+github.com/prometheus/prometheus v0.40.7/go.mod h1:nO+vI0cJo1ezp2DPGw5NEnTlYHGRpBFrqE4zb9O0g0U=
 github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
|
||||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||||
golang.org/x/exp v0.0.0-20221208152030-732eee02a75a h1:4iLhBPcpqFmylhnkbY3W0ONLUYYkDAW9xMFLfxgsvCw=
|
golang.org/x/exp v0.0.0-20221212164502-fae10dda9338 h1:OvjRkcNHnf6/W5FZXSxODbxwD+X7fspczG7Jn/xQVD4=
|
||||||
golang.org/x/exp v0.0.0-20221208152030-732eee02a75a/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
golang.org/x/exp v0.0.0-20221212164502-fae10dda9338/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||||
|
|
|
@@ -178,12 +178,35 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
 		return fmt.Errorf("failed to generate SAS token of src %q: %w", p.Path, err)
 	}

-	// Hotfix for SDK issue: https://github.com/Azure/azure-sdk-for-go/issues/19245
-	t = strings.Replace(t, "/?", "?", -1)
 	ctx := context.Background()
-	_, err = dbc.CopyFromURL(ctx, t, &blob.CopyFromURLOptions{})
+
+	// In order to support copy of files larger than 256MB, we need to use the async copy.
+	// Ref: https://learn.microsoft.com/en-us/rest/api/storageservices/copy-blob-from-url
+	_, err = dbc.StartCopyFromURL(ctx, t, &blob.StartCopyFromURLOptions{})
 	if err != nil {
-		return fmt.Errorf("cannot copy %q from %s to %s: %w", p.Path, src, fs, err)
+		return fmt.Errorf("cannot start async copy %q from %s to %s: %w", p.Path, src, fs, err)
+	}
+
+	var copyStatus *blob.CopyStatusType
+	var copyStatusDescription *string
+	for {
+		r, err := dbc.GetProperties(ctx, nil)
+		if err != nil {
+			return fmt.Errorf("failed to check copy status, cannot get properties of %q at %s: %w", p.Path, fs, err)
+		}
+
+		// After the copy is finished, the status changes to success/failed/aborted.
+		// Ref: https://learn.microsoft.com/en-us/rest/api/storageservices/get-blob-properties#response-headers - x-ms-copy-status
+		if *r.CopyStatus != blob.CopyStatusTypePending {
+			copyStatus = r.CopyStatus
+			copyStatusDescription = r.CopyStatusDescription
+			break
+		}
+		time.Sleep(5 * time.Second)
+	}
+
+	if *copyStatus != blob.CopyStatusTypeSuccess {
+		return fmt.Errorf("copy of %q from %s to %s failed: expected status %q, received %q (description: %q)", p.Path, src, fs, blob.CopyStatusTypeSuccess, *copyStatus, *copyStatusDescription)
 	}

 	return nil
@@ -290,12 +313,12 @@ func (fs *FS) HasFile(filePath string) (bool, error) {

 	ctx := context.Background()
 	_, err := bc.GetProperties(ctx, nil)
-	logger.Errorf("GetProperties(%q) returned %s", bc.URL(), err)
 	var azerr *azcore.ResponseError
 	if errors.As(err, &azerr) {
 		if azerr.ErrorCode == storageErrorCodeBlobNotFound {
 			return false, nil
 		}
+		logger.Errorf("GetProperties(%q) returned %s", bc.URL(), err)
 		return false, fmt.Errorf("unexpected error when obtaining properties for %q at %s (remote path %q): %w", filePath, fs, bc.URL(), err)
 	}
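Restating the new copy flow in isolation may help: start a server-side copy, then poll the destination blob's properties until the copy leaves the `Pending` state. A condensed sketch against the same SDK package (`github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob`); `copyBlobAsync` is a hypothetical wrapper written for illustration, with error wrapping trimmed relative to the real code above:

```go
package azblobexample

import (
	"context"
	"fmt"
	"time"

	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// copyBlobAsync starts a server-side copy of srcURL into dst and blocks
// until the copy reaches a terminal state (success/failed/aborted).
func copyBlobAsync(ctx context.Context, dst *blob.Client, srcURL string) error {
	if _, err := dst.StartCopyFromURL(ctx, srcURL, nil); err != nil {
		return fmt.Errorf("cannot start async copy: %w", err)
	}
	for {
		props, err := dst.GetProperties(ctx, nil)
		if err != nil {
			return fmt.Errorf("cannot check copy status: %w", err)
		}
		if *props.CopyStatus == blob.CopyStatusTypePending {
			// The copy is still running server-side; poll again later.
			time.Sleep(5 * time.Second)
			continue
		}
		if *props.CopyStatus != blob.CopyStatusTypeSuccess {
			return fmt.Errorf("copy failed with status %q", *props.CopyStatus)
		}
		return nil
	}
}
```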
@@ -4,6 +4,8 @@ import (
 	"strings"
 	"sync"
 	"sync/atomic"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 )

 // FastStringMatcher implements fast matcher for strings.
@@ -11,44 +13,68 @@ import (
 // It caches string match results and returns them back on the next calls
 // without calling the matchFunc, which may be expensive.
 type FastStringMatcher struct {
-	m    atomic.Value
-	mLen uint64
+	lastCleanupTime uint64
+
+	m sync.Map

 	matchFunc func(s string) bool
 }

+type fsmEntry struct {
+	lastAccessTime uint64
+	ok             bool
+}
+
 // NewFastStringMatcher creates new matcher, which applies matchFunc to strings passed to Match()
 //
 // matchFunc must return the same result for the same input.
 func NewFastStringMatcher(matchFunc func(s string) bool) *FastStringMatcher {
-	var fsm FastStringMatcher
-	fsm.m.Store(&sync.Map{})
-	fsm.matchFunc = matchFunc
-	return &fsm
+	return &FastStringMatcher{
+		lastCleanupTime: fasttime.UnixTimestamp(),
+		matchFunc:       matchFunc,
+	}
 }

 // Match applies matchFunc to s and returns the result.
 func (fsm *FastStringMatcher) Match(s string) bool {
-	m := fsm.m.Load().(*sync.Map)
-	v, ok := m.Load(s)
+	ct := fasttime.UnixTimestamp()
+	v, ok := fsm.m.Load(s)
 	if ok {
 		// Fast path - s match result is found in the cache.
-		bp := v.(*bool)
-		return *bp
+		e := v.(*fsmEntry)
+		if atomic.LoadUint64(&e.lastAccessTime)+10 < ct {
+			// Reduce the frequency of e.lastAccessTime update to once per 10 seconds
+			// in order to improve the fast path speed on systems with many CPU cores.
+			atomic.StoreUint64(&e.lastAccessTime, ct)
+		}
+		return e.ok
 	}
 	// Slow path - run matchFunc for s and store the result in the cache.
 	b := fsm.matchFunc(s)
-	bp := &b
+	e := &fsmEntry{
+		lastAccessTime: ct,
+		ok:             b,
+	}
 	// Make a copy of s in order to limit memory usage to the s length,
 	// since the s may point to bigger string.
 	// This also protects from the case when s contains unsafe string, which points to a temporary byte slice.
 	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3227
 	s = strings.Clone(s)
-	m.Store(s, bp)
-	n := atomic.AddUint64(&fsm.mLen, 1)
-	if n > 100e3 {
-		atomic.StoreUint64(&fsm.mLen, 0)
-		fsm.m.Store(&sync.Map{})
+	fsm.m.Store(s, e)
+
+	if atomic.LoadUint64(&fsm.lastCleanupTime)+61 < ct {
+		// Perform a global cleanup for fsm.m by removing items, which weren't accessed
+		// during the last 5 minutes.
+		atomic.StoreUint64(&fsm.lastCleanupTime, ct)
+		m := &fsm.m
+		m.Range(func(k, v interface{}) bool {
+			e := v.(*fsmEntry)
+			if atomic.LoadUint64(&e.lastAccessTime)+5*60 < ct {
+				m.Delete(k)
+			}
+			return true
+		})
 	}
+
 	return b
 }
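For context, the matcher is typically wrapped around an expensive predicate such as a compiled regexp, so repeated calls for the same value hit the cache instead of re-running the regexp. A minimal usage sketch (the regexp here is illustrative, not taken from the codebase):

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)

func main() {
	re := regexp.MustCompile(`^node_(cpu|memory)_.+`)
	// matchFunc runs at most once per distinct input; later calls for the
	// same string are served from the internal cache.
	fsm := bytesutil.NewFastStringMatcher(re.MatchString)
	fmt.Println(fsm.Match("node_cpu_seconds_total")) // true; runs the regexp
	fmt.Println(fsm.Match("node_cpu_seconds_total")) // true; cache hit
	fmt.Println(fsm.Match("up"))                     // false
}
```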
@@ -4,6 +4,8 @@ import (
 	"strings"
 	"sync"
 	"sync/atomic"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 )

 // FastStringTransformer implements fast transformer for strings.
@@ -11,30 +13,41 @@ import (
 // It caches transformed strings and returns them back on the next calls
 // without calling the transformFunc, which may be expensive.
 type FastStringTransformer struct {
-	m    atomic.Value
-	mLen uint64
+	lastCleanupTime uint64
+
+	m sync.Map

 	transformFunc func(s string) string
 }

+type fstEntry struct {
+	lastAccessTime uint64
+	s              string
+}
+
 // NewFastStringTransformer creates new transformer, which applies transformFunc to strings passed to Transform()
 //
 // transformFunc must return the same result for the same input.
 func NewFastStringTransformer(transformFunc func(s string) string) *FastStringTransformer {
-	var fst FastStringTransformer
-	fst.m.Store(&sync.Map{})
-	fst.transformFunc = transformFunc
-	return &fst
+	return &FastStringTransformer{
+		lastCleanupTime: fasttime.UnixTimestamp(),
+		transformFunc:   transformFunc,
+	}
 }

 // Transform applies transformFunc to s and returns the result.
 func (fst *FastStringTransformer) Transform(s string) string {
-	m := fst.m.Load().(*sync.Map)
-	v, ok := m.Load(s)
+	ct := fasttime.UnixTimestamp()
+	v, ok := fst.m.Load(s)
 	if ok {
 		// Fast path - the transformed s is found in the cache.
-		sp := v.(*string)
-		return *sp
+		e := v.(*fstEntry)
+		if atomic.LoadUint64(&e.lastAccessTime)+10 < ct {
+			// Reduce the frequency of e.lastAccessTime update to once per 10 seconds
+			// in order to improve the fast path speed on systems with many CPU cores.
+			atomic.StoreUint64(&e.lastAccessTime, ct)
+		}
+		return e.s
 	}
 	// Slow path - transform s and store it in the cache.
 	sTransformed := fst.transformFunc(s)
@@ -48,12 +61,25 @@ func (fst *FastStringTransformer) Transform(s string) string {
 		// which, in turn, can point to bigger string.
 		sTransformed = s
 	}
-	sp := &sTransformed
-	m.Store(s, sp)
-	n := atomic.AddUint64(&fst.mLen, 1)
-	if n > 100e3 {
-		atomic.StoreUint64(&fst.mLen, 0)
-		fst.m.Store(&sync.Map{})
+	e := &fstEntry{
+		lastAccessTime: ct,
+		s:              sTransformed,
 	}
+	fst.m.Store(s, e)
+
+	if atomic.LoadUint64(&fst.lastCleanupTime)+61 < ct {
+		// Perform a global cleanup for fst.m by removing items, which weren't accessed
+		// during the last 5 minutes.
+		atomic.StoreUint64(&fst.lastCleanupTime, ct)
+		m := &fst.m
+		m.Range(func(k, v interface{}) bool {
+			e := v.(*fstEntry)
+			if atomic.LoadUint64(&e.lastAccessTime)+5*60 < ct {
+				m.Delete(k)
+			}
+			return true
+		})
+	}

 	return sTransformed
 }
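Usage mirrors the matcher: wrap a pure string-to-string function and let repeated inputs hit the cache. A small sketch using `strings.ToLower` as the transform (chosen for illustration only):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)

func main() {
	// transformFunc runs once per distinct input; subsequent calls
	// return the cached result without re-running the transform.
	fst := bytesutil.NewFastStringTransformer(strings.ToLower)
	fmt.Println(fst.Transform("Node_CPU_Total")) // "node_cpu_total"; runs the transform
	fmt.Println(fst.Transform("Node_CPU_Total")) // "node_cpu_total"; cache hit
}
```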
@@ -1,35 +1,58 @@
 package bytesutil

 import (
+	"strings"
 	"sync"
 	"sync/atomic"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 )

 // InternString returns interned s.
 //
 // This may be needed for reducing the amounts of allocated memory.
 func InternString(s string) string {
-	m := internStringsMap.Load().(*sync.Map)
-	if v, ok := m.Load(s); ok {
-		sp := v.(*string)
-		return *sp
+	ct := fasttime.UnixTimestamp()
+	if v, ok := internStringsMap.Load(s); ok {
+		e := v.(*ismEntry)
+		if atomic.LoadUint64(&e.lastAccessTime)+10 < ct {
+			// Reduce the frequency of e.lastAccessTime update to once per 10 seconds
+			// in order to improve the fast path speed on systems with many CPU cores.
+			atomic.StoreUint64(&e.lastAccessTime, ct)
+		}
+		return e.s
 	}
 	// Make a new copy for s in order to remove references from possible bigger string s refers to.
-	sCopy := string(append([]byte{}, s...))
-	m.Store(sCopy, &sCopy)
-	n := atomic.AddUint64(&internStringsMapLen, 1)
-	if n > 100e3 {
-		atomic.StoreUint64(&internStringsMapLen, 0)
-		internStringsMap.Store(&sync.Map{})
+	sCopy := strings.Clone(s)
+	e := &ismEntry{
+		lastAccessTime: ct,
+		s:              sCopy,
 	}
+	internStringsMap.Store(sCopy, e)
+
+	if atomic.LoadUint64(&internStringsMapLastCleanupTime)+61 < ct {
+		// Perform a global cleanup for internStringsMap by removing items, which weren't accessed
+		// during the last 5 minutes.
+		atomic.StoreUint64(&internStringsMapLastCleanupTime, ct)
+		m := &internStringsMap
+		m.Range(func(k, v interface{}) bool {
+			e := v.(*ismEntry)
+			if atomic.LoadUint64(&e.lastAccessTime)+5*60 < ct {
+				m.Delete(k)
+			}
+			return true
+		})
+	}
+
 	return sCopy
 }

-var (
-	internStringsMap    atomic.Value
-	internStringsMapLen uint64
-)
-
-func init() {
-	internStringsMap.Store(&sync.Map{})
-}
+type ismEntry struct {
+	lastAccessTime uint64
+	s              string
+}
+
+var (
+	internStringsMap                sync.Map
+	internStringsMapLastCleanupTime uint64
+)
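The point of `InternString` is that equal values end up sharing one detached copy instead of each pinning its (possibly large) parent buffer. A small sketch of that effect:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)

func main() {
	payload := `metric{instance="host-1"} 42` // imagine a large scrape buffer
	label := payload[17:23]                   // "host-1", still referencing payload
	a := bytesutil.InternString(label)
	b := bytesutil.InternString("host-1")
	// a == b is trivially true by value; the win is that both now share
	// a single backing copy detached from payload, so the big buffer
	// can be garbage-collected while the label value stays alive.
	fmt.Println(a == b, a) // true host-1
}
```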
@@ -25,8 +25,17 @@ import (
 // maxInmemoryParts is the maximum number of inmemory parts in the table.
 //
 // This number may be reached when the insertion pace outreaches merger pace.
+// If this number is reached, then assisted merges are performed
+// during data ingestion.
 const maxInmemoryParts = 64

+// maxFileParts is the maximum number of file parts in the table.
+//
+// This number may be reached when the insertion pace outreaches merger pace.
+// If this number is reached, then assisted merges are performed
+// during data ingestion.
+const maxFileParts = 64
+
 // Default number of parts to merge at once.
 //
 // This number has been obtained empirically - it gives the lowest possible overhead.
@@ -98,7 +107,8 @@ type Table struct {
 	inmemoryItemsMerged uint64
 	fileItemsMerged     uint64

-	assistedInmemoryMerges uint64
+	inmemoryAssistedMerges uint64
+	fileAssistedMerges     uint64

 	itemsAdded          uint64
 	itemsAddedSizeBytes uint64
@@ -419,7 +429,8 @@ type TableMetrics struct {
 	InmemoryItemsMerged uint64
 	FileItemsMerged     uint64

-	AssistedInmemoryMerges uint64
+	InmemoryAssistedMerges uint64
+	FileAssistedMerges     uint64

 	ItemsAdded          uint64
 	ItemsAddedSizeBytes uint64
@@ -469,7 +480,8 @@ func (tb *Table) UpdateMetrics(m *TableMetrics) {
 	m.InmemoryItemsMerged += atomic.LoadUint64(&tb.inmemoryItemsMerged)
 	m.FileItemsMerged += atomic.LoadUint64(&tb.fileItemsMerged)

-	m.AssistedInmemoryMerges += atomic.LoadUint64(&tb.assistedInmemoryMerges)
+	m.InmemoryAssistedMerges += atomic.LoadUint64(&tb.inmemoryAssistedMerges)
+	m.FileAssistedMerges += atomic.LoadUint64(&tb.fileAssistedMerges)

 	m.ItemsAdded += atomic.LoadUint64(&tb.itemsAdded)
 	m.ItemsAddedSizeBytes += atomic.LoadUint64(&tb.itemsAddedSizeBytes)
@@ -739,9 +751,8 @@ func (tb *Table) flushBlocksToParts(ibs []*inmemoryBlock, isFinal bool) {

 	flushConcurrencyCh <- struct{}{}
 	tb.assistedMergeForInmemoryParts()
+	tb.assistedMergeForFileParts()
 	<-flushConcurrencyCh
-	// There is no need in assited merge for file parts,
-	// since the bottleneck is possible only at inmemory parts.

 	if tb.flushCallback != nil {
 		if isFinal {
@@ -765,10 +776,10 @@ func (tb *Table) assistedMergeForInmemoryParts() {

 		// Prioritize assisted merges over searches.
 		storagepacelimiter.Search.Inc()
+		atomic.AddUint64(&tb.inmemoryAssistedMerges, 1)
 		err := tb.mergeInmemoryParts()
 		storagepacelimiter.Search.Dec()
 		if err == nil {
-			atomic.AddUint64(&tb.assistedInmemoryMerges, 1)
 			continue
 		}
 		if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) {
@@ -778,6 +789,30 @@
 	}
 }

+func (tb *Table) assistedMergeForFileParts() {
+	for {
+		tb.partsLock.Lock()
+		ok := getNotInMergePartsCount(tb.fileParts) < maxFileParts
+		tb.partsLock.Unlock()
+		if ok {
+			return
+		}
+
+		// Prioritize assisted merges over searches.
+		storagepacelimiter.Search.Inc()
+		atomic.AddUint64(&tb.fileAssistedMerges, 1)
+		err := tb.mergeExistingParts(false)
+		storagepacelimiter.Search.Dec()
+		if err == nil {
+			continue
+		}
+		if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) || errors.Is(err, errReadOnlyMode) {
+			return
+		}
+		logger.Panicf("FATAL: cannot assist with merging file parts: %s", err)
+	}
+}
+
 func getNotInMergePartsCount(pws []*partWrapper) int {
 	n := 0
 	for _, pw := range pws {
@@ -866,7 +901,10 @@ func newPartWrapperFromInmemoryPart(mp *inmemoryPart, flushToDiskDeadline time.T
 }

 func (tb *Table) startMergeWorkers() {
-	for i := 0; i < cap(mergeWorkersLimitCh); i++ {
+	// Start a merge worker per available CPU core.
+	// The actual number of concurrent merges is limited inside mergeWorker() below.
+	workersCount := cgroup.AvailableCPUs()
+	for i := 0; i < workersCount; i++ {
 		tb.wg.Add(1)
 		go func() {
 			tb.mergeWorker()
@@ -940,6 +978,8 @@ func (tb *Table) mergeWorker() {
 	isFinal := false
 	t := time.NewTimer(sleepTime)
 	for {
+		// Limit the number of concurrent calls to mergeExistingParts, since the total number of merge workers
+		// across tables may exceed the cap(mergeWorkersLimitCh).
 		mergeWorkersLimitCh <- struct{}{}
 		err := tb.mergeExistingParts(isFinal)
 		<-mergeWorkersLimitCh
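Both assisted-merge helpers follow the same backpressure shape: the ingestion path checks the count of unmerged parts under the lock and, while it sits at the cap, performs merges itself instead of piling up more parts. A toy sketch of that loop in isolation (names and types are illustrative, not the actual API):

```go
package main

import "fmt"

// assistUntilBelow mimics the assisted-merge backpressure loop: the producer
// performs "merge" work itself while the number of pending parts is at the cap,
// so ingestion slows down instead of letting parts accumulate unboundedly.
func assistUntilBelow(pending *int, max int, mergeOnce func(*int)) {
	for *pending >= max {
		mergeOnce(pending)
	}
}

func main() {
	parts := 70
	assistUntilBelow(&parts, 64, func(p *int) {
		*p-- // each "merge" folds two parts into one
		fmt.Println("assisted merge, parts now:", *p)
	})
}
```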
@@ -28,7 +28,15 @@ type Request struct {
 }

 func (req *Request) reset() {
-	req.Series = req.Series[:0]
+	// recursively reset all the fields in req in order to avoid field value
+	// re-use in json.Unmarshal() when the corresponding field is missing
+	// in the unmarshaled JSON.
+	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3432
+	series := req.Series
+	for i := range series {
+		series[i].reset()
+	}
+	req.Series = series[:0]
 }

 // Unmarshal unmarshals DataDog /api/v1/series request body from b to req.
@@ -59,23 +67,42 @@ func (req *Request) Unmarshal(b []byte) error {
 //
 // See https://docs.datadoghq.com/api/latest/metrics/#submit-metrics
 type Series struct {
+	Metric string `json:"metric"`
 	Host   string `json:"host"`

-	// Do not decode Interval, since it isn't used by VictoriaMetrics
-	// Interval int64 `json:"interval"`
-
-	Metric string   `json:"metric"`
-	Points []Point  `json:"points"`
-	Tags   []string `json:"tags"`
 	// The device field does not appear in the datadog docs, but datadog-agent does use it.
 	// Datadog agent (v7 at least), removes the tag "device" and adds it as its own field. Why? That I don't know!
 	// https://github.com/DataDog/datadog-agent/blob/0ada7a97fed6727838a6f4d9c87123d2aafde735/pkg/metrics/series.go#L84-L105
 	Device string `json:"device"`

+	// Do not decode Interval, since it isn't used by VictoriaMetrics
+	// Interval int64 `json:"interval"`
+
+	Points []Point  `json:"points"`
+	Tags   []string `json:"tags"`
+
 	// Do not decode Type, since it isn't used by VictoriaMetrics
 	// Type string `json:"type"`
 }

+func (s *Series) reset() {
+	s.Metric = ""
+	s.Host = ""
+	s.Device = ""
+
+	points := s.Points
+	for i := range points {
+		points[i] = Point{}
+	}
+	s.Points = points[:0]
+
+	tags := s.Tags
+	for i := range tags {
+		tags[i] = ""
+	}
+	s.Tags = tags[:0]
+}
+
 // Point represents a point from DataDog POST request to /api/v1/series
 type Point [2]float64
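The recursive reset exists because `encoding/json` leaves struct fields untouched when the corresponding key is absent from the input, so a reused `Request` silently keeps values from the previous message. A standalone demonstration of that behavior:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type series struct {
	Metric string `json:"metric"`
	Host   string `json:"host"`
}

func main() {
	s := series{Metric: "prev-metric", Host: "prev-host"}
	// "host" is missing from the new message...
	if err := json.Unmarshal([]byte(`{"metric":"system.load.1"}`), &s); err != nil {
		panic(err)
	}
	// ...so the stale value survives unless the struct is reset first.
	fmt.Printf("%+v\n", s) // {Metric:system.load.1 Host:prev-host}
}
```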
@@ -22,6 +22,43 @@ func TestSplitTag(t *testing.T) {
 	f(":bar", "", "bar")
 }

+func TestRequestUnmarshalMissingHost(t *testing.T) {
+	// This tests https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3432
+	req := Request{
+		Series: []Series{{
+			Host:   "prev-host",
+			Device: "prev-device",
+		}},
+	}
+	data := `
+{
+  "series": [
+    {
+      "metric": "system.load.1",
+      "points": [[
+        1575317847,
+        0.5
+      ]]
+    }
+  ]
+}`
+	if err := req.Unmarshal([]byte(data)); err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	reqExpected := Request{
+		Series: []Series{{
+			Metric: "system.load.1",
+			Points: []Point{{
+				1575317847,
+				0.5,
+			}},
+		}},
+	}
+	if !reflect.DeepEqual(&req, &reqExpected) {
+		t.Fatalf("unexpected request parsed;\ngot\n%+v\nwant\n%+v", req, reqExpected)
+	}
+}
+
 func TestRequestUnmarshalFailure(t *testing.T) {
 	f := func(s string) {
 		t.Helper()
@ -31,8 +31,15 @@ import (
|
||||||
const maxBigPartSize = 1e12
|
const maxBigPartSize = 1e12
|
||||||
|
|
||||||
// The maximum number of inmemory parts in the partition.
|
// The maximum number of inmemory parts in the partition.
|
||||||
|
//
|
||||||
|
// If the number of inmemory parts reaches this value, then assisted merge runs during data ingestion.
|
||||||
const maxInmemoryPartsPerPartition = 32
|
const maxInmemoryPartsPerPartition = 32
|
||||||
|
|
||||||
|
// The maximum number of small parts in the partition.
|
||||||
|
//
|
||||||
|
// If the number of small parts reaches this value, then assisted merge runs during data ingestion.
|
||||||
|
const maxSmallPartsPerPartition = 64
|
||||||
|
|
||||||
// Default number of parts to merge at once.
|
// Default number of parts to merge at once.
|
||||||
//
|
//
|
||||||
// This number has been obtained empirically - it gives the lowest possible overhead.
|
// This number has been obtained empirically - it gives the lowest possible overhead.
|
||||||
|
@ -112,6 +119,7 @@ type partition struct {
|
||||||
bigRowsDeleted uint64
|
bigRowsDeleted uint64
|
||||||
|
|
||||||
inmemoryAssistedMerges uint64
|
inmemoryAssistedMerges uint64
|
||||||
|
smallAssistedMerges uint64
|
||||||
|
|
||||||
mergeNeedFreeDiskSpace uint64
|
mergeNeedFreeDiskSpace uint64
|
||||||
|
|
||||||
|
@ -338,6 +346,7 @@ type partitionMetrics struct {
|
||||||
BigPartsRefCount uint64
|
BigPartsRefCount uint64
|
||||||
|
|
||||||
InmemoryAssistedMerges uint64
|
InmemoryAssistedMerges uint64
|
||||||
|
SmallAssistedMerges uint64
|
||||||
|
|
||||||
MergeNeedFreeDiskSpace uint64
|
MergeNeedFreeDiskSpace uint64
|
||||||
}
|
}
|
||||||
|
@ -404,6 +413,7 @@ func (pt *partition) UpdateMetrics(m *partitionMetrics) {
|
||||||
m.BigRowsDeleted += atomic.LoadUint64(&pt.bigRowsDeleted)
|
m.BigRowsDeleted += atomic.LoadUint64(&pt.bigRowsDeleted)
|
||||||
|
|
||||||
m.InmemoryAssistedMerges += atomic.LoadUint64(&pt.inmemoryAssistedMerges)
|
m.InmemoryAssistedMerges += atomic.LoadUint64(&pt.inmemoryAssistedMerges)
|
||||||
|
m.SmallAssistedMerges += atomic.LoadUint64(&pt.smallAssistedMerges)
|
||||||
|
|
||||||
m.MergeNeedFreeDiskSpace += atomic.LoadUint64(&pt.mergeNeedFreeDiskSpace)
|
m.MergeNeedFreeDiskSpace += atomic.LoadUint64(&pt.mergeNeedFreeDiskSpace)
|
||||||
}
|
}
|
||||||
|
@@ -576,6 +586,7 @@ func (pt *partition) flushRowsToParts(rows []rawRow) {

 	flushConcurrencyCh <- struct{}{}
 	pt.assistedMergeForInmemoryParts()
+	pt.assistedMergeForSmallParts()
 	<-flushConcurrencyCh
 	// There is no need in assisted merges for small and big parts,
 	// since the bottleneck is possible only at inmemory parts.
@@ -597,10 +608,10 @@ func (pt *partition) assistedMergeForInmemoryParts() {
 		// Assist with merging inmemory parts.
 		// Prioritize assisted merges over searches.
 		storagepacelimiter.Search.Inc()
+		atomic.AddUint64(&pt.inmemoryAssistedMerges, 1)
 		err := pt.mergeInmemoryParts()
 		storagepacelimiter.Search.Dec()
 		if err == nil {
-			atomic.AddUint64(&pt.inmemoryAssistedMerges, 1)
 			continue
 		}
 		if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) {
@@ -610,6 +621,33 @@ func (pt *partition) assistedMergeForInmemoryParts() {
 	}
 }

+func (pt *partition) assistedMergeForSmallParts() {
+	for {
+		pt.partsLock.Lock()
+		ok := getNotInMergePartsCount(pt.smallParts) < maxSmallPartsPerPartition
+		pt.partsLock.Unlock()
+		if ok {
+			return
+		}
+
+		// There are too many unmerged small parts.
+		// This usually means that the app cannot keep up with the data ingestion rate.
+		// Assist with merging small parts.
+		// Prioritize assisted merges over searches.
+		storagepacelimiter.Search.Inc()
+		atomic.AddUint64(&pt.smallAssistedMerges, 1)
+		err := pt.mergeExistingParts(false)
+		storagepacelimiter.Search.Dec()
+		if err == nil {
+			continue
+		}
+		if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) || errors.Is(err, errReadOnlyMode) {
+			return
+		}
+		logger.Panicf("FATAL: cannot merge small parts: %s", err)
+	}
+}
+
 func getNotInMergePartsCount(pws []*partWrapper) int {
 	n := 0
 	for _, pw := range pws {
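Both assisted-merge helpers implement the same backpressure idea: once unmerged parts pile up past a limit, the goroutine that is ingesting data merges parts itself instead of producing more. A minimal self-contained sketch of that loop, with illustrative stand-in names (`partition`, `mergeOnce`, `maxSmallParts` are not the real VictoriaMetrics API):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNothingToMerge = errors.New("nothing to merge")

type partition struct {
	mu    sync.Mutex
	parts int // stand-in for the number of unmerged small parts
}

const maxSmallParts = 64

// mergeOnce merges a batch of parts; here it just shrinks the counter.
func (pt *partition) mergeOnce() error {
	pt.mu.Lock()
	defer pt.mu.Unlock()
	if pt.parts < 2 {
		return errNothingToMerge
	}
	pt.parts-- // two parts merged into one
	return nil
}

// assistedMerge mirrors the shape of assistedMergeForSmallParts: loop until
// the part count drops below the limit or there is nothing left to merge.
func (pt *partition) assistedMerge() {
	for {
		pt.mu.Lock()
		ok := pt.parts < maxSmallParts
		pt.mu.Unlock()
		if ok {
			return
		}
		if err := pt.mergeOnce(); err != nil {
			return
		}
	}
}

func main() {
	pt := &partition{parts: 100}
	pt.assistedMerge()
	fmt.Println("parts after assisted merge:", pt.parts) // 63
}
```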
@@ -981,7 +1019,10 @@ func SetMergeWorkersCount(n int) {
 }

 func (pt *partition) startMergeWorkers() {
-	for i := 0; i < cap(mergeWorkersLimitCh); i++ {
+	// Start a merge worker per available CPU core.
+	// The actual number of concurrent merges is limited inside mergeWorker() below.
+	workersCount := cgroup.AvailableCPUs()
+	for i := 0; i < workersCount; i++ {
 		pt.wg.Add(1)
 		go func() {
 			pt.mergeWorker()
@@ -1001,7 +1042,8 @@ func (pt *partition) mergeWorker() {
 	isFinal := false
 	t := time.NewTimer(sleepTime)
 	for {
-		// Limit the number of concurrent calls to mergeExistingParts, cine the number of merge
+		// Limit the number of concurrent calls to mergeExistingParts, since the total number of merge workers
+		// across partitions may exceed the cap(mergeWorkersLimitCh).
 		mergeWorkersLimitCh <- struct{}{}
 		err := pt.mergeExistingParts(isFinal)
 		<-mergeWorkersLimitCh
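The rewritten startup logic relies on a buffered channel used as a semaphore: one worker is started per CPU, but `mergeWorkersLimitCh` bounds how many merges actually run at once across all partitions. A minimal sketch of the pattern (the channel capacity and the worker body here are illustrative):

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

// Capacity of the channel = maximum number of concurrent merges.
var mergeWorkersLimitCh = make(chan struct{}, 4)

func doMerge(id int) {
	mergeWorkersLimitCh <- struct{}{}        // acquire a slot; blocks when 4 merges run
	defer func() { <-mergeWorkersLimitCh }() // release the slot
	fmt.Printf("worker %d merging\n", id)
}

func main() {
	var wg sync.WaitGroup
	// One worker per CPU, as in startMergeWorkers above.
	for i := 0; i < runtime.NumCPU(); i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			doMerge(id)
		}(i)
	}
	wg.Wait()
}
```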
@@ -1092,10 +1134,10 @@ func (pt *partition) getMaxBigPartSize() uint64 {

 func getMaxOutBytes(path string, workersCount int) uint64 {
 	n := fs.MustGetFreeSpace(path)
-	// Do not substract freeDiskSpaceLimitBytes from n before calculating the maxOutBytes,
+	// Do not subtract freeDiskSpaceLimitBytes from n before calculating the maxOutBytes,
 	// since this will result in sub-optimal merges - e.g. many small parts will be left unmerged.

-	// Divide free space by the max number concurrent merges.
+	// Divide free space by the max number of concurrent merges.
 	maxOutBytes := n / uint64(workersCount)
 	if maxOutBytes > maxBigPartSize {
 		maxOutBytes = maxBigPartSize
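The getMaxOutBytes change is simple arithmetic: split the free disk space evenly across concurrent merge workers and clamp the per-merge budget to maxBigPartSize. A small sketch with a hard-coded free-space value (the real code queries the filesystem via fs.MustGetFreeSpace):

```go
package main

import "fmt"

const maxBigPartSize = 1e12

func getMaxOutBytes(freeSpace uint64, workersCount int) uint64 {
	// Divide free space by the max number of concurrent merges,
	// then clamp to the maximum big part size.
	maxOutBytes := freeSpace / uint64(workersCount)
	if maxOutBytes > maxBigPartSize {
		maxOutBytes = maxBigPartSize
	}
	return maxOutBytes
}

func main() {
	// 8 TiB free, 16 workers -> 512 GiB per merge (below the 1e12 clamp).
	fmt.Println(getMaxOutBytes(8<<40, 16))
}
```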
152 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go generated vendored

@@ -3015,6 +3015,9 @@ var awsPartition = partition{
 			},
 			Deprecated: boxedTrue,
 		},
+		endpointKey{
+			Region: "sa-east-1",
+		}: endpoint{},
 		endpointKey{
 			Region: "us-east-1",
 		}: endpoint{},

@@ -12296,6 +12299,9 @@ var awsPartition = partition{
 		endpointKey{
 			Region: "eu-central-1",
 		}: endpoint{},
+		endpointKey{
+			Region: "eu-central-2",
+		}: endpoint{},
 		endpointKey{
 			Region: "eu-north-1",
 		}: endpoint{},

@@ -14369,6 +14375,15 @@ var awsPartition = partition{
 			},
 			Deprecated: boxedTrue,
 		},
+		endpointKey{
+			Region: "ap-southeast-4-fips",
+		}: endpoint{
+			Hostname: "kms-fips.ap-southeast-4.amazonaws.com",
+			CredentialScope: credentialScope{
+				Region: "ap-southeast-4",
+			},
+			Deprecated: boxedTrue,
+		},
 		endpointKey{
 			Region: "ca-central-1",
 		}: endpoint{},

@@ -15615,6 +15630,10 @@ var awsPartition = partition{
 		endpointKey{
 			Region: "ca-central-1",
 		}: endpoint{},
+		endpointKey{
+			Region:  "ca-central-1",
+			Variant: fipsVariant,
+		}: endpoint{},
 		endpointKey{
 			Region: "eu-central-1",
 		}: endpoint{},

@@ -15627,15 +15646,41 @@ var awsPartition = partition{
 		endpointKey{
 			Region: "eu-west-3",
 		}: endpoint{},
+		endpointKey{
+			Region: "fips-ca-central-1",
+		}: endpoint{
+			Deprecated: boxedTrue,
+		},
+		endpointKey{
+			Region: "fips-us-east-1",
+		}: endpoint{
+			Deprecated: boxedTrue,
+		},
+		endpointKey{
+			Region: "fips-us-west-2",
+		}: endpoint{
+			Deprecated: boxedTrue,
+		},
 		endpointKey{
 			Region: "sa-east-1",
 		}: endpoint{},
 		endpointKey{
 			Region: "us-east-1",
 		}: endpoint{},
+		endpointKey{
+			Region:  "us-east-1",
+			Variant: fipsVariant,
+		}: endpoint{},
 		endpointKey{
 			Region: "us-west-2",
 		}: endpoint{},
+		endpointKey{
+			Region:  "us-west-2",
+			Variant: fipsVariant,
+		}: endpoint{},
 	},
 },
 "machinelearning": service{

@@ -16515,6 +16560,76 @@ var awsPartition = partition{
 		}: endpoint{},
 	},
 },
+"metrics.sagemaker": service{
+	Endpoints: serviceEndpoints{
+		endpointKey{
+			Region: "af-south-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "ap-east-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "ap-northeast-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "ap-northeast-2",
+		}: endpoint{},
+		endpointKey{
+			Region: "ap-northeast-3",
+		}: endpoint{},
+		endpointKey{
+			Region: "ap-south-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "ap-southeast-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "ap-southeast-2",
+		}: endpoint{},
+		endpointKey{
+			Region: "ap-southeast-3",
+		}: endpoint{},
+		endpointKey{
+			Region: "ca-central-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "eu-central-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "eu-north-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "eu-south-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "eu-west-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "eu-west-2",
+		}: endpoint{},
+		endpointKey{
+			Region: "me-central-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "me-south-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "sa-east-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "us-east-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "us-east-2",
+		}: endpoint{},
+		endpointKey{
+			Region: "us-west-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "us-west-2",
+		}: endpoint{},
+	},
+},
 "mgh": service{
 	Endpoints: serviceEndpoints{
 		endpointKey{

@@ -17010,6 +17125,9 @@ var awsPartition = partition{
 			},
 			Deprecated: boxedTrue,
 		},
+		endpointKey{
+			Region: "me-central-1",
+		}: endpoint{},
 		endpointKey{
 			Region: "me-south-1",
 		}: endpoint{},

@@ -18967,6 +19085,9 @@ var awsPartition = partition{
 		endpointKey{
 			Region: "ap-south-1",
 		}: endpoint{},
+		endpointKey{
+			Region: "ap-south-2",
+		}: endpoint{},
 		endpointKey{
 			Region: "ap-southeast-1",
 		}: endpoint{},

@@ -28513,6 +28634,16 @@ var awscnPartition = partition{
 		}: endpoint{},
 	},
 },
+"metrics.sagemaker": service{
+	Endpoints: serviceEndpoints{
+		endpointKey{
+			Region: "cn-north-1",
+		}: endpoint{},
+		endpointKey{
+			Region: "cn-northwest-1",
+		}: endpoint{},
+	},
+},
 "monitoring": service{
 	Defaults: endpointDefaults{
 		defaultKey{}: endpoint{

@@ -32280,6 +32411,13 @@ var awsusgovPartition = partition{
 		}: endpoint{},
 	},
 },
+"metrics.sagemaker": service{
+	Endpoints: serviceEndpoints{
+		endpointKey{
+			Region: "us-gov-west-1",
+		}: endpoint{},
+	},
+},
 "models.lex": service{
 	Defaults: endpointDefaults{
 		defaultKey{}: endpoint{

@@ -35001,6 +35139,13 @@ var awsisoPartition = partition{
 		}: endpoint{},
 	},
 },
+"metrics.sagemaker": service{
+	Endpoints: serviceEndpoints{
+		endpointKey{
+			Region: "us-iso-east-1",
+		}: endpoint{},
+	},
+},
 "monitoring": service{
 	Endpoints: serviceEndpoints{
 		endpointKey{

@@ -35655,6 +35800,13 @@ var awsisobPartition = partition{
 		}: endpoint{},
 	},
 },
+"metrics.sagemaker": service{
+	Endpoints: serviceEndpoints{
+		endpointKey{
+			Region: "us-isob-east-1",
+		}: endpoint{},
+	},
+},
 "monitoring": service{
 	Endpoints: serviceEndpoints{
 		endpointKey{
2 vendor/github.com/aws/aws-sdk-go/aws/version.go generated vendored

@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"

 // SDKVersion is the version of this SDK
-const SDKVersion = "1.44.157"
+const SDKVersion = "1.44.160"
26 vendor/github.com/prometheus/common/config/config.go generated vendored

@@ -18,6 +18,7 @@ package config

 import (
 	"encoding/json"
+	"net/http"
 	"path/filepath"
 )

@@ -34,7 +35,7 @@ func (s Secret) MarshalYAML() (interface{}, error) {
 	return nil, nil
 }

-//UnmarshalYAML implements the yaml.Unmarshaler interface for Secrets.
+// UnmarshalYAML implements the yaml.Unmarshaler interface for Secrets.
 func (s *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	type plain Secret
 	return unmarshal((*plain)(s))

@@ -48,6 +49,29 @@ func (s Secret) MarshalJSON() ([]byte, error) {
 	return json.Marshal(secretToken)
 }

+type Header map[string][]Secret
+
+func (h *Header) HTTPHeader() http.Header {
+	if h == nil || *h == nil {
+		return nil
+	}
+
+	header := make(http.Header)
+
+	for name, values := range *h {
+		var s []string
+		if values != nil {
+			s = make([]string, 0, len(values))
+			for _, value := range values {
+				s = append(s, string(value))
+			}
+		}
+		header[name] = s
+	}
+
+	return header
+}
+
 // DirectorySetter is a config type that contains file paths that may
 // be relative to the file containing the config.
 type DirectorySetter interface {
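The new `Header` type above keeps header values as `Secret` so they are masked when marshaled, and converts them to a plain `http.Header` only right before use. A standalone sketch, re-declaring `Secret` and `Header` locally so it runs outside the vendored package:

```go
package main

import (
	"fmt"
	"net/http"
)

// Local re-declarations mirroring the vendored code above.
type Secret string

type Header map[string][]Secret

// HTTPHeader converts the secret-valued map into a plain http.Header.
func (h *Header) HTTPHeader() http.Header {
	if h == nil || *h == nil {
		return nil
	}
	header := make(http.Header)
	for name, values := range *h {
		var s []string
		if values != nil {
			s = make([]string, 0, len(values))
			for _, value := range values {
				s = append(s, string(value))
			}
		}
		header[name] = s
	}
	return header
}

func main() {
	h := Header{"Proxy-Authorization": {Secret("Basic dXNlcjpwYXNz")}}
	fmt.Println(h.HTTPHeader().Get("Proxy-Authorization"))
}
```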
12 vendor/github.com/prometheus/common/config/http_config.go generated vendored

@@ -289,6 +289,11 @@ type HTTPClientConfig struct {
 	BearerTokenFile string `yaml:"bearer_token_file,omitempty" json:"bearer_token_file,omitempty"`
 	// HTTP proxy server to use to connect to the targets.
 	ProxyURL URL `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"`
+	// ProxyConnectHeader optionally specifies headers to send to
+	// proxies during CONNECT requests. Assume that at least _some_ of
+	// these headers are going to contain secrets and use Secret as the
+	// value type instead of string.
+	ProxyConnectHeader Header `yaml:"proxy_connect_header,omitempty" json:"proxy_connect_header,omitempty"`
 	// TLSConfig to use to connect to the targets.
 	TLSConfig TLSConfig `yaml:"tls_config,omitempty" json:"tls_config,omitempty"`
 	// FollowRedirects specifies whether the client should follow HTTP 3xx redirects.

@@ -314,7 +319,8 @@ func (c *HTTPClientConfig) SetDirectory(dir string) {
 }

 // Validate validates the HTTPClientConfig to check only one of BearerToken,
-// BasicAuth and BearerTokenFile is configured.
+// BasicAuth and BearerTokenFile is configured. It also validates that ProxyURL
+// is set if ProxyConnectHeader is set.
 func (c *HTTPClientConfig) Validate() error {
 	// Backwards compatibility with the bearer_token field.
 	if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 {

@@ -372,6 +378,9 @@ func (c *HTTPClientConfig) Validate() error {
 			return fmt.Errorf("at most one of oauth2 client_secret & client_secret_file must be configured")
 		}
 	}
+	if len(c.ProxyConnectHeader) > 0 && (c.ProxyURL.URL == nil || c.ProxyURL.String() == "") {
+		return fmt.Errorf("if proxy_connect_header is configured proxy_url must also be configured")
+	}
 	return nil
 }

@@ -500,6 +509,7 @@ func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string, optFuncs ...HT
 		// It is applied on request. So we leave out any timings here.
 		var rt http.RoundTripper = &http.Transport{
 			Proxy:               http.ProxyURL(cfg.ProxyURL.URL),
+			ProxyConnectHeader:  cfg.ProxyConnectHeader.HTTPHeader(),
 			MaxIdleConns:        20000,
 			MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801
 			DisableKeepAlives:   !opts.keepAlivesEnabled,
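Downstream, `ProxyConnectHeader.HTTPHeader()` feeds `http.Transport.ProxyConnectHeader`, which the standard library sends to the proxy during CONNECT requests. A minimal sketch using `net/http` directly; the proxy URL and the credentials here are placeholders:

```go
package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	proxyURL, err := url.Parse("http://proxy.example.com:3128") // placeholder proxy
	if err != nil {
		panic(err)
	}
	tr := &http.Transport{
		Proxy: http.ProxyURL(proxyURL),
		// Sent to the proxy during CONNECT; this is what the vendored
		// config plumbing above ultimately populates.
		ProxyConnectHeader: http.Header{
			"Proxy-Authorization": {"Basic dXNlcjpwYXNz"}, // placeholder credentials
		},
	}
	client := &http.Client{Transport: tr}
	fmt.Printf("client configured with transport %T\n", client.Transport)
}
```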
2 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go generated vendored

@@ -35,8 +35,6 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
 */
 package goautoneg
12 vendor/github.com/prometheus/common/version/info.go generated vendored

@@ -31,6 +31,8 @@ var (
 	BuildUser string
 	BuildDate string
 	GoVersion = runtime.Version()
+	GoOS      = runtime.GOOS
+	GoArch    = runtime.GOARCH
 )

 // NewCollector returns a collector that exports metrics about current version

@@ -41,7 +43,7 @@ func NewCollector(program string) prometheus.Collector {
 			Namespace: program,
 			Name:      "build_info",
 			Help: fmt.Sprintf(
-				"A metric with a constant '1' value labeled by version, revision, branch, and goversion from which %s was built.",
+				"A metric with a constant '1' value labeled by version, revision, branch, goversion from which %s was built, and the goos and goarch for the build.",
 				program,
 			),
 			ConstLabels: prometheus.Labels{

@@ -49,6 +51,8 @@ func NewCollector(program string) prometheus.Collector {
 				"revision":  getRevision(),
 				"branch":    Branch,
 				"goversion": GoVersion,
+				"goos":      GoOS,
+				"goarch":    GoArch,
 			},
 		},
 		func() float64 { return 1 },

@@ -74,7 +78,7 @@ func Print(program string) string {
 		"buildUser": BuildUser,
 		"buildDate": BuildDate,
 		"goVersion": GoVersion,
-		"platform":  runtime.GOOS + "/" + runtime.GOARCH,
+		"platform":  GoOS + "/" + GoArch,
 	}
 	t := template.Must(template.New("version").Parse(versionInfoTmpl))

@@ -90,7 +94,7 @@ func Info() string {
 	return fmt.Sprintf("(version=%s, branch=%s, revision=%s)", Version, Branch, getRevision())
 }

-// BuildContext returns goVersion, buildUser and buildDate information.
+// BuildContext returns goVersion, platform, buildUser and buildDate information.
 func BuildContext() string {
-	return fmt.Sprintf("(go=%s, user=%s, date=%s)", GoVersion, BuildUser, BuildDate)
+	return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s)", GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate)
 }
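With `GoOS`/`GoArch` exported, the build context string now carries the platform next to the Go version. A small sketch reproducing the new `BuildContext` output with stand-in build variables (in real builds these are normally injected via `-ldflags "-X ..."`):

```go
package main

import (
	"fmt"
	"runtime"
)

// Stand-ins for the package-level variables in version/info.go.
var (
	GoVersion = runtime.Version()
	GoOS      = runtime.GOOS
	GoArch    = runtime.GOARCH
	BuildUser = "builder@host"       // placeholder, set via -ldflags in real builds
	BuildDate = "20221213-00:00:00"  // placeholder
)

// BuildContext mirrors the updated function: platform sits next to the Go version.
func BuildContext() string {
	return fmt.Sprintf("(go=%s, platform=%s, user=%s, date=%s)",
		GoVersion, GoOS+"/"+GoArch, BuildUser, BuildDate)
}

func main() {
	fmt.Println(BuildContext())
	// e.g. (go=go1.19.4, platform=linux/amd64, user=builder@host, date=20221213-00:00:00)
}
```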
39 vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go generated vendored

@@ -176,7 +176,7 @@ func newHistogramIterator(b []byte) *histogramIterator {
 }

 func (c *HistogramChunk) iterator(it Iterator) *histogramIterator {
-	// This commet is copied from XORChunk.iterator:
+	// This comment is copied from XORChunk.iterator:
 	// Should iterators guarantee to act on a copy of the data so it doesn't lock append?
 	// When using striped locks to guard access to chunks, probably yes.
 	// Could only copy data if the chunk is not completed yet.

@@ -651,7 +651,7 @@ func (it *histogramIterator) Reset(b []byte) {
 	}

 	it.pBucketsDelta = it.pBucketsDelta[:0]
-	it.pBucketsDelta = it.pBucketsDelta[:0]
+	it.nBucketsDelta = it.nBucketsDelta[:0]

 	it.sum = 0
 	it.leading = 0

@@ -677,36 +677,17 @@ func (it *histogramIterator) Next() ValueType {
 		it.zThreshold = zeroThreshold
 		it.pSpans, it.nSpans = posSpans, negSpans
 		numPBuckets, numNBuckets := countSpans(posSpans), countSpans(negSpans)
-		// Allocate bucket slices as needed, recycling existing slices
-		// in case this iterator was reset and already has slices of a
-		// sufficient capacity.
+		// The code below recycles existing slices in case this iterator
+		// was reset and already has slices of a sufficient capacity.
 		if numPBuckets > 0 {
-			if cap(it.pBuckets) < numPBuckets {
-				it.pBuckets = make([]int64, numPBuckets)
-				// If cap(it.pBuckets) isn't sufficient, neither is the cap of the others.
-				it.pBucketsDelta = make([]int64, numPBuckets)
-				it.pFloatBuckets = make([]float64, numPBuckets)
-			} else {
-				for i := 0; i < numPBuckets; i++ {
-					it.pBuckets = append(it.pBuckets, 0)
-					it.pBucketsDelta = append(it.pBucketsDelta, 0)
-					it.pFloatBuckets = append(it.pFloatBuckets, 0)
-				}
-			}
+			it.pBuckets = append(it.pBuckets, make([]int64, numPBuckets)...)
+			it.pBucketsDelta = append(it.pBucketsDelta, make([]int64, numPBuckets)...)
+			it.pFloatBuckets = append(it.pFloatBuckets, make([]float64, numPBuckets)...)
 		}
 		if numNBuckets > 0 {
-			if cap(it.nBuckets) < numNBuckets {
-				it.nBuckets = make([]int64, numNBuckets)
-				// If cap(it.nBuckets) isn't sufficient, neither is the cap of the others.
-				it.nBucketsDelta = make([]int64, numNBuckets)
-				it.nFloatBuckets = make([]float64, numNBuckets)
-			} else {
-				for i := 0; i < numNBuckets; i++ {
-					it.nBuckets = append(it.nBuckets, 0)
-					it.nBucketsDelta = append(it.nBucketsDelta, 0)
-					it.pFloatBuckets = append(it.pFloatBuckets, 0)
-				}
-			}
+			it.nBuckets = append(it.nBuckets, make([]int64, numNBuckets)...)
+			it.nBucketsDelta = append(it.nBucketsDelta, make([]int64, numNBuckets)...)
+			it.nFloatBuckets = append(it.nFloatBuckets, make([]float64, numNBuckets)...)
 		}

 		// Now read the actual data.
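The rewritten allocation leans on one idiom: after `Reset` truncates a slice with `s = s[:0]`, `append(s, make([]T, n)...)` grows it back to `n` zeroed elements while keeping the old backing array whenever its capacity suffices, so the slow path and fast path collapse into a single line. A tiny demonstration:

```go
package main

import "fmt"

func main() {
	// Pretend this slice survived from a previous iteration with capacity 8.
	buckets := make([]int64, 0, 8)

	// Truncate on Reset, keeping the capacity.
	buckets = buckets[:0]

	// Grow back to the needed size; the backing array is reused
	// because cap(buckets) >= numBuckets, so no reallocation happens.
	numBuckets := 5
	buckets = append(buckets, make([]int64, numBuckets)...)

	fmt.Println(len(buckets), cap(buckets)) // 5 8
}
```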
10 vendor/modules.txt vendored

@@ -38,7 +38,7 @@ github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime
 github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming
 github.com/Azure/azure-sdk-for-go/sdk/azcore/to
 github.com/Azure/azure-sdk-for-go/sdk/azcore/tracing
-# github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1
+# github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2
 ## explicit; go 1.18
 github.com/Azure/azure-sdk-for-go/sdk/internal/diag
 github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo

@@ -80,7 +80,7 @@ github.com/VividCortex/ewma
 # github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
 ## explicit; go 1.15
 github.com/alecthomas/units
-# github.com/aws/aws-sdk-go v1.44.157
+# github.com/aws/aws-sdk-go v1.44.160
 ## explicit; go 1.11
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/awserr

@@ -383,7 +383,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint
 # github.com/prometheus/client_model v0.3.0
 ## explicit; go 1.9
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.38.0
+# github.com/prometheus/common v0.39.0
 ## explicit; go 1.17
 github.com/prometheus/common/config
 github.com/prometheus/common/expfmt

@@ -398,7 +398,7 @@ github.com/prometheus/common/sigv4
 github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/fs
 github.com/prometheus/procfs/internal/util
-# github.com/prometheus/prometheus v0.40.6
+# github.com/prometheus/prometheus v0.40.7
 ## explicit; go 1.18
 github.com/prometheus/prometheus/config
 github.com/prometheus/prometheus/discovery

@@ -527,7 +527,7 @@ go.uber.org/atomic
 ## explicit; go 1.18
 go.uber.org/goleak
 go.uber.org/goleak/internal/stack
-# golang.org/x/exp v0.0.0-20221208152030-732eee02a75a
+# golang.org/x/exp v0.0.0-20221212164502-fae10dda9338
 ## explicit; go 1.18
 golang.org/x/exp/constraints
 golang.org/x/exp/slices