Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

This commit is contained in:
Aliaksandr Valialkin 2023-09-22 12:33:27 +02:00
commit 1590ddecba
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
19 changed files with 399 additions and 182 deletions

View file

@ -65,7 +65,7 @@ jobs:
make ${{ matrix.scenario}} make ${{ matrix.scenario}}
- name: Publish coverage - name: Publish coverage
uses: codecov/codecov-action@v4 uses: codecov/codecov-action@v3
with: with:
file: ./coverage.txt file: ./coverage.txt

View file

@ -1,6 +1,7 @@
package main package main
import ( import (
"bytes"
"encoding/base64" "encoding/base64"
"flag" "flag"
"fmt" "fmt"
@ -290,6 +291,13 @@ func (sp *SrcPath) MarshalYAML() (interface{}, error) {
return sp.sOriginal, nil return sp.sOriginal, nil
} }
var (
configReloads = metrics.NewCounter(`vmauth_config_last_reload_total`)
configReloadErrors = metrics.NewCounter(`vmauth_config_last_reload_errors_total`)
configSuccess = metrics.NewCounter(`vmauth_config_last_reload_successful`)
configTimestamp = metrics.NewCounter(`vmauth_config_last_reload_success_timestamp_seconds`)
)
func initAuthConfig() { func initAuthConfig() {
if len(*authConfigPath) == 0 { if len(*authConfigPath) == 0 {
logger.Fatalf("missing required `-auth.config` command-line flag") logger.Fatalf("missing required `-auth.config` command-line flag")
@ -300,11 +308,14 @@ func initAuthConfig() {
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1240 // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1240
sighupCh := procutil.NewSighupChan() sighupCh := procutil.NewSighupChan()
err := loadAuthConfig() _, err := loadAuthConfig()
if err != nil { if err != nil {
logger.Fatalf("cannot load auth config: %s", err) logger.Fatalf("cannot load auth config: %s", err)
} }
configSuccess.Set(1)
configTimestamp.Set(fasttime.UnixTimestamp())
stopCh = make(chan struct{}) stopCh = make(chan struct{})
authConfigWG.Add(1) authConfigWG.Add(1)
go func() { go func() {
@ -327,52 +338,75 @@ func authConfigReloader(sighupCh <-chan os.Signal) {
refreshCh = ticker.C refreshCh = ticker.C
} }
updateFn := func() {
configReloads.Inc()
updated, err := loadAuthConfig()
if err != nil {
logger.Errorf("failed to load auth config; using the last successfully loaded config; error: %s", err)
configSuccess.Set(0)
configReloadErrors.Inc()
return
}
configSuccess.Set(1)
if updated {
configTimestamp.Set(fasttime.UnixTimestamp())
}
}
for { for {
select { select {
case <-stopCh: case <-stopCh:
return return
case <-refreshCh: case <-refreshCh:
procutil.SelfSIGHUP() updateFn()
case <-sighupCh: case <-sighupCh:
logger.Infof("SIGHUP received; loading -auth.config=%q", *authConfigPath) logger.Infof("SIGHUP received; loading -auth.config=%q", *authConfigPath)
err := loadAuthConfig() updateFn()
if err != nil {
logger.Errorf("failed to load auth config; using the last successfully loaded config; error: %s", err)
continue
}
} }
} }
} }
// authConfigData stores the yaml definition for this config.
// authConfigData needs to be updated each time authConfig is updated.
var authConfigData atomic.Pointer[[]byte]
var authConfig atomic.Pointer[AuthConfig] var authConfig atomic.Pointer[AuthConfig]
var authUsers atomic.Pointer[map[string]*UserInfo] var authUsers atomic.Pointer[map[string]*UserInfo]
var authConfigWG sync.WaitGroup var authConfigWG sync.WaitGroup
var stopCh chan struct{} var stopCh chan struct{}
func loadAuthConfig() error { // loadAuthConfig loads and applies the config from *authConfigPath.
ac, err := readAuthConfig(*authConfigPath) // It returns bool value to identify if new config was applied.
// The config can be not applied if there is a parsing error
// or if there are no changes to the current authConfig.
func loadAuthConfig() (bool, error) {
data, err := fs.ReadFileOrHTTP(*authConfigPath)
if err != nil { if err != nil {
return fmt.Errorf("failed to load -auth.config=%q: %s", *authConfigPath, err) return false, fmt.Errorf("failed to read -auth.config=%q: %w", *authConfigPath, err)
}
oldData := authConfigData.Load()
if oldData != nil && bytes.Equal(data, *oldData) {
// there are no updates in the config - skip reloading.
return false, nil
}
ac, err := parseAuthConfig(data)
if err != nil {
return false, fmt.Errorf("failed to parse -auth.config=%q: %w", *authConfigPath, err)
} }
m, err := parseAuthConfigUsers(ac) m, err := parseAuthConfigUsers(ac)
if err != nil { if err != nil {
return fmt.Errorf("failed to parse users from -auth.config=%q: %s", *authConfigPath, err) return false, fmt.Errorf("failed to parse users from -auth.config=%q: %w", *authConfigPath, err)
} }
logger.Infof("loaded information about %d users from -auth.config=%q", len(m), *authConfigPath) logger.Infof("loaded information about %d users from -auth.config=%q", len(m), *authConfigPath)
authConfig.Store(ac) authConfig.Store(ac)
authConfigData.Store(&data)
authUsers.Store(&m) authUsers.Store(&m)
return nil return true, nil
}
func readAuthConfig(path string) (*AuthConfig, error) {
data, err := fs.ReadFileOrHTTP(path)
if err != nil {
return nil, err
}
return parseAuthConfig(data)
} }
func parseAuthConfig(data []byte) (*AuthConfig, error) { func parseAuthConfig(data []byte) (*AuthConfig, error) {

View file

@ -143,11 +143,14 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
## Advanced usage ## Advanced usage
* Obtaining credentials from a file.
### Providing credentials as a file
Obtaining credentials from a file.
Add flag `-credsFilePath=/etc/credentials` with the following content: Add flag `-credsFilePath=/etc/credentials` with the following content:
for s3 (aws, minio or other s3 compatible storages): - for S3 (AWS, MinIO or other S3 compatible storages):
```console ```console
[default] [default]
@ -155,7 +158,7 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
aws_secret_access_key=thesecretaccesskeyvalue aws_secret_access_key=thesecretaccesskeyvalue
``` ```
for gce cloud storage: - for GCP cloud storage:
```json ```json
{ {
@ -171,24 +174,99 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/service-account-email" "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/service-account-email"
} }
``` ```
* Obtaining credentials from env variables.
### Providing credentials via env variables
Obtaining credentials from env variables.
- For AWS S3 compatible storages set env variable `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. - For AWS S3 compatible storages set env variable `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
Also you can set env variable `AWS_SHARED_CREDENTIALS_FILE` with path to credentials file. Also you can set env variable `AWS_SHARED_CREDENTIALS_FILE` with path to credentials file.
- For GCE cloud storage set env variable `GOOGLE_APPLICATION_CREDENTIALS` with path to credentials file. - For GCE cloud storage set env variable `GOOGLE_APPLICATION_CREDENTIALS` with path to credentials file.
- For Azure storage either set env variables `AZURE_STORAGE_ACCOUNT_NAME` and `AZURE_STORAGE_ACCOUNT_KEY`, or `AZURE_STORAGE_ACCOUNT_CONNECTION_STRING`. - For Azure storage either set env variables `AZURE_STORAGE_ACCOUNT_NAME` and `AZURE_STORAGE_ACCOUNT_KEY`, or `AZURE_STORAGE_ACCOUNT_CONNECTION_STRING`.
* Usage with s3 custom url endpoint. It is possible to use `vmbackup` with s3 compatible storages like minio, cloudian, etc. Please, note that `vmbackup` will use credentials provided by cloud providers metadata service [when applicable](https://docs.victoriametrics.com/vmbackup.html#using-cloud-providers-metadata-service).
### Using cloud providers metadata service
`vmbackup` and `vmbackupmanager` will automatically use cloud providers metadata service in order to obtain credentials if they are running in cloud environment
and credentials are not explicitly provided via flags or env variables.
### Providing credentials in Kubernetes
The simplest way to provide credentials in Kubernetes is to use [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/)
and inject them into the pod as environment variables. For example, the following secret can be used for AWS S3 credentials:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: vmbackup-credentials
data:
access_key: key
secret_key: secret
```
And then it can be injected into the pod as environment variables:
```yaml
...
env:
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: access_key
name: vmbackup-credentials
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: secret_key
name: vmbackup-credentials
...
```
A more secure way is to use IAM roles to provide tokens for pods instead of managing credentials manually.
For AWS deployments it will be required to configure [IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html).
In order to use IAM roles for service accounts with `vmbackup` or `vmbackupmanager` it is required to create ServiceAccount with IAM role mapping:
```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: monitoring-backups
annotations:
eks.amazonaws.com/role-arn: arn:aws:iam::{ACCOUNT_ID}:role/{ROLE_NAME}
```
And [configure pod to use service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/).
After this `vmbackup` and `vmbackupmanager` will automatically use IAM role for service account in order to obtain credentials.
For GCP deployments it will be required to configure [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity).
In order to use Workload Identity with `vmbackup` or `vmbackupmanager` it is required to create ServiceAccount with Workload Identity annotation:
```yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: monitoring-backups
annotations:
iam.gke.io/gcp-service-account: {sa_name}@{project_name}.iam.gserviceaccount.com
```
And [configure pod to use service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/).
After this `vmbackup` and `vmbackupmanager` will automatically use Workload Identity for service account in order to obtain credentials.
### Using custom S3 endpoint
Usage with s3 custom url endpoint. It is possible to use `vmbackup` with s3 compatible storages like minio, cloudian, etc.
You have to add a custom url endpoint via flag: You have to add a custom url endpoint via flag:
- for MinIO
```console ```console
# for minio
-customS3Endpoint=http://localhost:9000 -customS3Endpoint=http://localhost:9000
```
# for aws gov region - for aws gov region
```console
-customS3Endpoint=https://s3-fips.us-gov-west-1.amazonaws.com -customS3Endpoint=https://s3-fips.us-gov-west-1.amazonaws.com
``` ```
* Run `vmbackup -help` in order to see all the available options: ### Command-line flags
Run `vmbackup -help` in order to see all the available options:
```console ```console
-concurrency int -concurrency int

View file

@ -110,6 +110,9 @@ The result on the GCS bucket
<img alt="latest folder" src="vmbackupmanager_latest_folder.png"> <img alt="latest folder" src="vmbackupmanager_latest_folder.png">
Please, see [vmbackup docs](https://docs.victoriametrics.com/vmbackup.html#advanced-usage) for more examples of authentication with different
storage types.
## Backup Retention Policy ## Backup Retention Policy
Backup retention policy is controlled by: Backup retention policy is controlled by:

View file

@ -76,7 +76,7 @@
"uid": "$ds" "uid": "$ds"
}, },
"enable": true, "enable": true,
"expr": "sum(vm_app_version{job=~\"$job\"}) by(short_version) unless (sum(vm_app_version{job=~\"$job\"} offset 20m) by(short_version))", "expr": "sum(vm_app_version{job=~\"$job\", instance=~\"$instance\"}) by(short_version) unless (sum(vm_app_version{job=~\"$job\", instance=~\"$instance\"} offset 20m) by(short_version))",
"hide": true, "hide": true,
"iconColor": "dark-blue", "iconColor": "dark-blue",
"name": "version", "name": "version",

View file

@ -77,7 +77,7 @@
"uid": "$ds" "uid": "$ds"
}, },
"enable": true, "enable": true,
"expr": "sum(vm_app_version{job=~\"$job\"}) by(short_version) unless (sum(vm_app_version{job=~\"$job\"} offset 20m) by(short_version))", "expr": "sum(vm_app_version{job=~\"$job\", instance=~\"$instance\"}) by(short_version) unless (sum(vm_app_version{job=~\"$job\", instance=~\"$instance\"} offset 20m) by(short_version))",
"hide": true, "hide": true,
"iconColor": "dark-blue", "iconColor": "dark-blue",
"name": "version", "name": "version",

View file

@ -78,6 +78,7 @@ See also [case studies](https://docs.victoriametrics.com/CaseStudies.html).
* [VictoriaMetrics: an overview and its use instead of Prometheus](https://rtfm.co.ua/en/victoriametrics-an-overview-and-its-use-instead-of-prometheus/) * [VictoriaMetrics: an overview and its use instead of Prometheus](https://rtfm.co.ua/en/victoriametrics-an-overview-and-its-use-instead-of-prometheus/)
* [VictoriaMetrics: deploying a Kubernetes monitoring stack](https://rtfm.co.ua/en/victoriametrics-deploying-a-kubernetes-monitoring-stack/) * [VictoriaMetrics: deploying a Kubernetes monitoring stack](https://rtfm.co.ua/en/victoriametrics-deploying-a-kubernetes-monitoring-stack/)
* [Better, Faster, Cheaper: How Grammarly Improved Monitoring by Over 10x with VictoriaMetrics](https://www.grammarly.com/blog/engineering/monitoring-with-victoriametrics/) * [Better, Faster, Cheaper: How Grammarly Improved Monitoring by Over 10x with VictoriaMetrics](https://www.grammarly.com/blog/engineering/monitoring-with-victoriametrics/)
* [VictoriaMetrics, a stress-free Prometheus Remote Storage for 1 Billion metrics](https://medium.com/criteo-engineering/victoriametrics-a-prometheus-remote-storage-solution-57081a3d8e61)
## Our articles ## Our articles

View file

@ -38,20 +38,31 @@ The following `tip` changes can be tested by building VictoriaMetrics components
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): improve accessibility score to 100 according to [Google's Lighthouse](https://developer.chrome.com/docs/lighthouse/accessibility/) tests. * FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): improve accessibility score to 100 according to [Google's Lighthouse](https://developer.chrome.com/docs/lighthouse/accessibility/) tests.
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): organize `min`, `max`, `median` values on the chart legend and tooltips for better visibility. * FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): organize `min`, `max`, `median` values on the chart legend and tooltips for better visibility.
* FEATURE: dashboards: provide copies of Grafana dashboards alternated with VictoriaMetrics datasource at [dashboards/vm](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/dashboards/vm). * FEATURE: dashboards: provide copies of Grafana dashboards alternated with VictoriaMetrics datasource at [dashboards/vm](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/dashboards/vm).
* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): added ability to set, override and clear request and response headers on a per-user and per-path basis. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4825) and [these docs](https://docs.victoriametrics.com/vmauth.html#auth-config) for details. * FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): added ability to set, override and clear request and response headers on a per-user and per-path basis. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4825) and [these docs](https://docs.victoriametrics.com/vmauth.html#auth-config) for details.
* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): add ability to retry requests to the [remaining backends](https://docs.victoriametrics.com/vmauth.html#load-balancing) if they return response status codes specified in the `retry_status_codes` list. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4893). * FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): add ability to retry requests to the [remaining backends](https://docs.victoriametrics.com/vmauth.html#load-balancing) if they return response status codes specified in the `retry_status_codes` list. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4893).
* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): expose metrics `vmauth_config_last_reload_*` for tracking the state of config reloads, similarly to vmagent/vmalert components.
* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): do not print logs like `SIGHUP received...` once per configured `-configCheckInterval` cmd-line flag. This log will be printed only if config reload was invoked manually.
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `eval_offset` attribute for [Groups](https://docs.victoriametrics.com/vmalert.html#groups). If specified, Group will be evaluated at the exact time offset on the range of [0...evaluationInterval]. The setting might be useful for cron-like rules which must be evaluated at specific moments of time. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3409) for details. * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `eval_offset` attribute for [Groups](https://docs.victoriametrics.com/vmalert.html#groups). If specified, Group will be evaluated at the exact time offset on the range of [0...evaluationInterval]. The setting might be useful for cron-like rules which must be evaluated at specific moments of time. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3409) for details.
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): validate [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) function names in alerting and recording rules when `vmalert` runs with `-dryRun` command-line flag. Previously it was allowed to use unknown (aka invalid) MetricsQL function names there. For example, `foo()` was counted as a valid query. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4933). * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): validate [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) function names in alerting and recording rules when `vmalert` runs with `-dryRun` command-line flag. Previously it was allowed to use unknown (aka invalid) MetricsQL function names there. For example, `foo()` was counted as a valid query. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4933).
* FEATURE: limit the length of string params in log messages to 500 chars. Longer string params are replaced with the `first_250_chars..last_250_chars`. This prevents from too long log lines, which can be emitted by VictoriaMetrics components. * FEATURE: limit the length of string params in log messages to 500 chars. Longer string params are replaced with the `first_250_chars..last_250_chars`. This prevents from too long log lines, which can be emitted by VictoriaMetrics components.
* BUGFIX: [storage](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html): prevent from livelock when [forced merge](https://docs.victoriametrics.com/#forced-merge) is called under high data ingestion. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4987).
* BUGFIX: [Graphite Render API](https://docs.victoriametrics.com/#graphite-render-api-usage): correctly return `null` instead of `Inf` in JSON query responses. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3783).
* BUGFIX: [Official Grafana dashboards for VictoriaMetrics](https://grafana.com/orgs/victoriametrics): fix display of ingested rows rate for `Samples ingested/s` and `Samples rate` panels for vmagent's dashboard. Previously, not all ingested protocols were accounted in these panels. An extra panel `Rows rate` was added to `Ingestion` section to display the split for rows ingested rate by protocol. * BUGFIX: [Official Grafana dashboards for VictoriaMetrics](https://grafana.com/orgs/victoriametrics): fix display of ingested rows rate for `Samples ingested/s` and `Samples rate` panels for vmagent's dashboard. Previously, not all ingested protocols were accounted in these panels. An extra panel `Rows rate` was added to `Ingestion` section to display the split for rows ingested rate by protocol.
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix the bug causing render looping when switching to heatmap. * BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix the bug causing render looping when switching to heatmap.
* BUGFIX: [vmbackup](https://docs.victoriametrics.com/vmbackup.html): properly copy `parts.json` files inside `<-storageDataPath>/{data,indexdb}` folders during [incremental backups](https://docs.victoriametrics.com/vmbackup.html#incremental-backups). Previously the new `parts.json` could be skipped during incremental backups, which could lead to inability to restore from the backup. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5005). This issue has been introduced in [v1.90.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.90.0).
* BUGFIX: [VictoriaMetrics enterprise](https://docs.victoriametrics.com/enterprise.html) validate `-dedup.minScrapeInterval` value and `-downsampling.period` intervals are multiples of each other. See [these docs](https://docs.victoriametrics.com/#downsampling). * BUGFIX: [VictoriaMetrics enterprise](https://docs.victoriametrics.com/enterprise.html) validate `-dedup.minScrapeInterval` value and `-downsampling.period` intervals are multiples of each other. See [these docs](https://docs.victoriametrics.com/#downsampling).
* BUGFIX: [vmbackup](https://docs.victoriametrics.com/vmbackup.html): properly copy `appliedRetention.txt` files inside `<-storageDataPath>/{data}` folders during [incremental backups](https://docs.victoriametrics.com/vmbackup.html#incremental-backups). Previously the new `appliedRetention.txt` could be skipped during incremental backups, which could lead to increased load on storage after restoring from backup. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5005).
## [v1.93.5](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.93.5)
Released at 2023-09-19
**v1.93.x is a line of LTS releases (e.g. long-time support). It contains important up-to-date bugfixes.
The v1.93.x line will be supported for at least 12 months since [v1.93.0](https://docs.victoriametrics.com/CHANGELOG.html#v1930) release**
* BUGFIX: [storage](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html): prevent from livelock when [forced merge](https://docs.victoriametrics.com/#forced-merge) is called under high data ingestion. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4987).
* BUGFIX: [Graphite Render API](https://docs.victoriametrics.com/#graphite-render-api-usage): correctly return `null` instead of `Inf` in JSON query responses. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3783).
* BUGFIX: [vmbackup](https://docs.victoriametrics.com/vmbackup.html): properly copy `parts.json` files inside `<-storageDataPath>/{data,indexdb}` folders during [incremental backups](https://docs.victoriametrics.com/vmbackup.html#incremental-backups). Previously the new `parts.json` could be skipped during incremental backups, which could lead to inability to restore from the backup. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5005). This issue has been introduced in [v1.90.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.90.0).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly close connections to Kubernetes API server after the change in `selectors` or `namespaces` sections of [kubernetes_sd_configs](https://docs.victoriametrics.com/sd_configs.html#kubernetes_sd_configs). Previously `vmagent` could continue polling Kubernetes API server with the old `selectors` or `namespaces` configs additionally to polling new configs. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4850). * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly close connections to Kubernetes API server after the change in `selectors` or `namespaces` sections of [kubernetes_sd_configs](https://docs.victoriametrics.com/sd_configs.html#kubernetes_sd_configs). Previously `vmagent` could continue polling Kubernetes API server with the old `selectors` or `namespaces` configs additionally to polling new configs. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4850).
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth.html): prevent configuration reloading if there were no changes in config. This improves memory usage when `-configCheckInterval` cmd-line flag is configured and config has extensive list of regexp expressions requiring additional memory on parsing.
## [v1.93.4](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.93.4) ## [v1.93.4](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.93.4)

View file

@ -10,7 +10,7 @@ aliases:
- /operator/vars.html - /operator/vars.html
--- ---
# Auto Generated vars for package config # Auto Generated vars for package config
updated at Wed Sep 13 14:05:24 UTC 2023 updated at Thu Sep 21 10:01:40 UTC 2023
| variable name | variable default value | variable required | variable description |
@ -20,7 +20,7 @@ aliases:
| VM_CUSTOMCONFIGRELOADERIMAGE | victoriametrics/operator:config-reloader-v0.32.0 | false | - | | VM_CUSTOMCONFIGRELOADERIMAGE | victoriametrics/operator:config-reloader-v0.32.0 | false | - |
| VM_PSPAUTOCREATEENABLED | false | false | - | | VM_PSPAUTOCREATEENABLED | false | false | - |
| VM_VMALERTDEFAULT_IMAGE | victoriametrics/vmalert | false | - | | VM_VMALERTDEFAULT_IMAGE | victoriametrics/vmalert | false | - |
| VM_VMALERTDEFAULT_VERSION | v1.93.4 | false | - | | VM_VMALERTDEFAULT_VERSION | v1.93.5 | false | - |
| VM_VMALERTDEFAULT_PORT | 8080 | false | - | | VM_VMALERTDEFAULT_PORT | 8080 | false | - |
| VM_VMALERTDEFAULT_USEDEFAULTRESOURCES | true | false | - | | VM_VMALERTDEFAULT_USEDEFAULTRESOURCES | true | false | - |
| VM_VMALERTDEFAULT_RESOURCE_LIMIT_MEM | 500Mi | false | - | | VM_VMALERTDEFAULT_RESOURCE_LIMIT_MEM | 500Mi | false | - |
@ -31,7 +31,7 @@ aliases:
| VM_VMALERTDEFAULT_CONFIGRELOADERMEMORY | 25Mi | false | - | | VM_VMALERTDEFAULT_CONFIGRELOADERMEMORY | 25Mi | false | - |
| VM_VMALERTDEFAULT_CONFIGRELOADIMAGE | jimmidyson/configmap-reload:v0.3.0 | false | - | | VM_VMALERTDEFAULT_CONFIGRELOADIMAGE | jimmidyson/configmap-reload:v0.3.0 | false | - |
| VM_VMAGENTDEFAULT_IMAGE | victoriametrics/vmagent | false | - | | VM_VMAGENTDEFAULT_IMAGE | victoriametrics/vmagent | false | - |
| VM_VMAGENTDEFAULT_VERSION | v1.93.4 | false | - | | VM_VMAGENTDEFAULT_VERSION | v1.93.5 | false | - |
| VM_VMAGENTDEFAULT_CONFIGRELOADIMAGE | quay.io/prometheus-operator/prometheus-config-reloader:v0.68.0 | false | - | | VM_VMAGENTDEFAULT_CONFIGRELOADIMAGE | quay.io/prometheus-operator/prometheus-config-reloader:v0.68.0 | false | - |
| VM_VMAGENTDEFAULT_PORT | 8429 | false | - | | VM_VMAGENTDEFAULT_PORT | 8429 | false | - |
| VM_VMAGENTDEFAULT_USEDEFAULTRESOURCES | true | false | - | | VM_VMAGENTDEFAULT_USEDEFAULTRESOURCES | true | false | - |
@ -42,7 +42,7 @@ aliases:
| VM_VMAGENTDEFAULT_CONFIGRELOADERCPU | 100m | false | - | | VM_VMAGENTDEFAULT_CONFIGRELOADERCPU | 100m | false | - |
| VM_VMAGENTDEFAULT_CONFIGRELOADERMEMORY | 25Mi | false | - | | VM_VMAGENTDEFAULT_CONFIGRELOADERMEMORY | 25Mi | false | - |
| VM_VMSINGLEDEFAULT_IMAGE | victoriametrics/victoria-metrics | false | - | | VM_VMSINGLEDEFAULT_IMAGE | victoriametrics/victoria-metrics | false | - |
| VM_VMSINGLEDEFAULT_VERSION | v1.93.4 | false | - | | VM_VMSINGLEDEFAULT_VERSION | v1.93.5 | false | - |
| VM_VMSINGLEDEFAULT_PORT | 8429 | false | - | | VM_VMSINGLEDEFAULT_PORT | 8429 | false | - |
| VM_VMSINGLEDEFAULT_USEDEFAULTRESOURCES | true | false | - | | VM_VMSINGLEDEFAULT_USEDEFAULTRESOURCES | true | false | - |
| VM_VMSINGLEDEFAULT_RESOURCE_LIMIT_MEM | 1500Mi | false | - | | VM_VMSINGLEDEFAULT_RESOURCE_LIMIT_MEM | 1500Mi | false | - |
@ -53,14 +53,14 @@ aliases:
| VM_VMSINGLEDEFAULT_CONFIGRELOADERMEMORY | 25Mi | false | - | | VM_VMSINGLEDEFAULT_CONFIGRELOADERMEMORY | 25Mi | false | - |
| VM_VMCLUSTERDEFAULT_USEDEFAULTRESOURCES | true | false | - | | VM_VMCLUSTERDEFAULT_USEDEFAULTRESOURCES | true | false | - |
| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_IMAGE | victoriametrics/vmselect | false | - | | VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_IMAGE | victoriametrics/vmselect | false | - |
| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_VERSION | v1.93.4-cluster | false | - | | VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_VERSION | v1.93.5-cluster | false | - |
| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_PORT | 8481 | false | - | | VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_PORT | 8481 | false | - |
| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_LIMIT_MEM | 1000Mi | false | - | | VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_LIMIT_MEM | 1000Mi | false | - |
| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_LIMIT_CPU | 500m | false | - | | VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_LIMIT_CPU | 500m | false | - |
| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_REQUEST_MEM | 500Mi | false | - | | VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_REQUEST_MEM | 500Mi | false | - |
| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_REQUEST_CPU | 100m | false | - | | VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_REQUEST_CPU | 100m | false | - |
| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_IMAGE | victoriametrics/vmstorage | false | - | | VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_IMAGE | victoriametrics/vmstorage | false | - |
| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_VERSION | v1.93.4-cluster | false | - | | VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_VERSION | v1.93.5-cluster | false | - |
| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_VMINSERTPORT | 8400 | false | - | | VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_VMINSERTPORT | 8400 | false | - |
| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_VMSELECTPORT | 8401 | false | - | | VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_VMSELECTPORT | 8401 | false | - |
| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_PORT | 8482 | false | - | | VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_PORT | 8482 | false | - |
@ -69,7 +69,7 @@ aliases:
| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_RESOURCE_REQUEST_MEM | 500Mi | false | - | | VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_RESOURCE_REQUEST_MEM | 500Mi | false | - |
| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_RESOURCE_REQUEST_CPU | 250m | false | - | | VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_RESOURCE_REQUEST_CPU | 250m | false | - |
| VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_IMAGE | victoriametrics/vminsert | false | - | | VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_IMAGE | victoriametrics/vminsert | false | - |
| VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_VERSION | v1.93.4-cluster | false | - | | VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_VERSION | v1.93.5-cluster | false | - |
| VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_PORT | 8480 | false | - | | VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_PORT | 8480 | false | - |
| VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_RESOURCE_LIMIT_MEM | 500Mi | false | - | | VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_RESOURCE_LIMIT_MEM | 500Mi | false | - |
| VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_RESOURCE_LIMIT_CPU | 500m | false | - | | VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_RESOURCE_LIMIT_CPU | 500m | false | - |
@ -88,7 +88,7 @@ aliases:
| VM_VMALERTMANAGER_RESOURCE_REQUEST_CPU | 30m | false | - | | VM_VMALERTMANAGER_RESOURCE_REQUEST_CPU | 30m | false | - |
| VM_DISABLESELFSERVICESCRAPECREATION | false | false | - | | VM_DISABLESELFSERVICESCRAPECREATION | false | false | - |
| VM_VMBACKUP_IMAGE | victoriametrics/vmbackupmanager | false | - | | VM_VMBACKUP_IMAGE | victoriametrics/vmbackupmanager | false | - |
| VM_VMBACKUP_VERSION | v1.93.4-enterprise | false | - | | VM_VMBACKUP_VERSION | v1.93.5-enterprise | false | - |
| VM_VMBACKUP_PORT | 8300 | false | - | | VM_VMBACKUP_PORT | 8300 | false | - |
| VM_VMBACKUP_USEDEFAULTRESOURCES | true | false | - | | VM_VMBACKUP_USEDEFAULTRESOURCES | true | false | - |
| VM_VMBACKUP_RESOURCE_LIMIT_MEM | 500Mi | false | - | | VM_VMBACKUP_RESOURCE_LIMIT_MEM | 500Mi | false | - |
@ -97,7 +97,7 @@ aliases:
| VM_VMBACKUP_RESOURCE_REQUEST_CPU | 150m | false | - | | VM_VMBACKUP_RESOURCE_REQUEST_CPU | 150m | false | - |
| VM_VMBACKUP_LOGLEVEL | INFO | false | - | | VM_VMBACKUP_LOGLEVEL | INFO | false | - |
| VM_VMAUTHDEFAULT_IMAGE | victoriametrics/vmauth | false | - | | VM_VMAUTHDEFAULT_IMAGE | victoriametrics/vmauth | false | - |
| VM_VMAUTHDEFAULT_VERSION | v1.93.4 | false | - | | VM_VMAUTHDEFAULT_VERSION | v1.93.5 | false | - |
| VM_VMAUTHDEFAULT_CONFIGRELOADIMAGE | quay.io/prometheus-operator/prometheus-config-reloader:v0.68.0 | false | - | | VM_VMAUTHDEFAULT_CONFIGRELOADIMAGE | quay.io/prometheus-operator/prometheus-config-reloader:v0.68.0 | false | - |
| VM_VMAUTHDEFAULT_PORT | 8427 | false | - | | VM_VMAUTHDEFAULT_PORT | 8427 | false | - |
| VM_VMAUTHDEFAULT_USEDEFAULTRESOURCES | true | false | - | | VM_VMAUTHDEFAULT_USEDEFAULTRESOURCES | true | false | - |

View file

@ -154,11 +154,14 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
## Advanced usage ## Advanced usage
* Obtaining credentials from a file.
### Providing credentials as a file
Obtaining credentials from a file.
Add flag `-credsFilePath=/etc/credentials` with the following content: Add flag `-credsFilePath=/etc/credentials` with the following content:
for s3 (aws, minio or other s3 compatible storages): - for S3 (AWS, MinIO or other S3 compatible storages):
```console ```console
[default] [default]
@ -166,7 +169,7 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
aws_secret_access_key=thesecretaccesskeyvalue aws_secret_access_key=thesecretaccesskeyvalue
``` ```
for gce cloud storage: - for GCP cloud storage:
```json ```json
{ {
@ -182,24 +185,99 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/service-account-email" "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/service-account-email"
} }
``` ```
* Obtaining credentials from env variables.
### Providing credentials via env variables
Obtaining credentials from env variables.
- For AWS S3 compatible storages set env variable `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`. - For AWS S3 compatible storages set env variable `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
Also you can set env variable `AWS_SHARED_CREDENTIALS_FILE` with path to credentials file. Also you can set env variable `AWS_SHARED_CREDENTIALS_FILE` with path to credentials file.
- For GCE cloud storage set env variable `GOOGLE_APPLICATION_CREDENTIALS` with path to credentials file. - For GCE cloud storage set env variable `GOOGLE_APPLICATION_CREDENTIALS` with path to credentials file.
- For Azure storage either set env variables `AZURE_STORAGE_ACCOUNT_NAME` and `AZURE_STORAGE_ACCOUNT_KEY`, or `AZURE_STORAGE_ACCOUNT_CONNECTION_STRING`. - For Azure storage either set env variables `AZURE_STORAGE_ACCOUNT_NAME` and `AZURE_STORAGE_ACCOUNT_KEY`, or `AZURE_STORAGE_ACCOUNT_CONNECTION_STRING`.
* Usage with s3 custom url endpoint. It is possible to use `vmbackup` with s3 compatible storages like minio, cloudian, etc. Please, note that `vmbackup` will use credentials provided by cloud providers metadata service [when applicable](https://docs.victoriametrics.com/vmbackup.html#using-cloud-providers-metadata-service).
### Using cloud providers metadata service
`vmbackup` and `vmbackupmanager` will automatically use cloud providers metadata service in order to obtain credentials if they are running in cloud environment
and credentials are not explicitly provided via flags or env variables.
### Providing credentials in Kubernetes
The simplest way to provide credentials in Kubernetes is to use [Secrets](https://kubernetes.io/docs/concepts/configuration/secret/)
and inject them into the pod as environment variables. For example, the following secret can be used for AWS S3 credentials:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: vmbackup-credentials
data:
access_key: key
secret_key: secret
```
And then it can be injected into the pod as environment variables:
```yaml
...
env:
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: access_key
name: vmbackup-credentials
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: secret_key
name: vmbackup-credentials
...
```
A more secure way is to use IAM roles to provide tokens for pods instead of managing credentials manually.
For AWS deployments it will be required to configure [IAM roles for service accounts](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html).
In order to use IAM roles for service accounts with `vmbackup` or `vmbackupmanager` it is required to create ServiceAccount with IAM role mapping:
```yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: monitoring-backups
annotations:
eks.amazonaws.com/role-arn: arn:aws:iam::{ACCOUNT_ID}:role/{ROLE_NAME}
```
And [configure pod to use service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/).
After this `vmbackup` and `vmbackupmanager` will automatically use IAM role for service account in order to obtain credentials.
For GCP deployments it will be required to configure [Workload Identity](https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity).
In order to use Workload Identity with `vmbackup` or `vmbackupmanager` it is required to create ServiceAccount with Workload Identity annotation:
```yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: monitoring-backups
annotations:
iam.gke.io/gcp-service-account: {sa_name}@{project_name}.iam.gserviceaccount.com
```
And [configure pod to use service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/).
After this `vmbackup` and `vmbackupmanager` will automatically use Workload Identity for servicpe account in order to obtain credentials.
### Using custom S3 endpoint
Usage with s3 custom url endpoint. It is possible to use `vmbackup` with s3 compatible storages like minio, cloudian, etc.
You have to add a custom url endpoint via flag: You have to add a custom url endpoint via flag:
- for MinIO
```console ```console
# for minio
-customS3Endpoint=http://localhost:9000 -customS3Endpoint=http://localhost:9000
```
# for aws gov region - for aws gov region
```console
-customS3Endpoint=https://s3-fips.us-gov-west-1.amazonaws.com -customS3Endpoint=https://s3-fips.us-gov-west-1.amazonaws.com
``` ```
* Run `vmbackup -help` in order to see all the available options: ### Command-line flags
Run `vmbackup -help` in order to see all the available options:
```console ```console
-concurrency int -concurrency int

View file

@ -121,6 +121,9 @@ The result on the GCS bucket
<img alt="latest folder" src="vmbackupmanager_latest_folder.png"> <img alt="latest folder" src="vmbackupmanager_latest_folder.png">
Please, see [vmbackup docs](https://docs.victoriametrics.com/vmbackup.html#advanced-usage) for more examples of authentication with different
storage types.
## Backup Retention Policy ## Backup Retention Policy
Backup retention policy is controlled by: Backup retention policy is controlled by:

View file

@ -40,9 +40,11 @@ type Part struct {
// key returns a string, which uniquely identifies p. // key returns a string, which uniquely identifies p.
func (p *Part) key() string { func (p *Part) key() string {
if strings.HasSuffix(p.Path, "/parts.json") { if strings.HasSuffix(p.Path, "/parts.json") ||
// parts.json file contents changes over time, so it must have an unique key in order strings.HasSuffix(p.Path, "/appliedRetention.txt") {
// to always copy it during backup, restore and server-side copy. // parts.json and appliedRetention.txt files contents changes over time,
// so it must have an unique key in order to always copy it during
// backup, restore and server-side copy.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5005 // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5005
id := atomic.AddUint64(&uniqueKeyID, 1) id := atomic.AddUint64(&uniqueKeyID, 1)
return fmt.Sprintf("unique-%016X", id) return fmt.Sprintf("unique-%016X", id)

View file

@ -149,7 +149,18 @@ func mustOpenDatadb(pt *partition, path string, flushInterval time.Duration) *da
pws := make([]*partWrapper, len(partNames)) pws := make([]*partWrapper, len(partNames))
for i, partName := range partNames { for i, partName := range partNames {
// Make sure the partName exists on disk.
// If it is missing, then manual action from the user is needed,
// since this is unexpected state, which cannot occur under normal operation,
// including unclean shutdown.
partPath := filepath.Join(path, partName) partPath := filepath.Join(path, partName)
if !fs.IsPathExist(partPath) {
partsFile := filepath.Join(path, partsFilename)
logger.Panicf("FATAL: part %q is listed in %q, but is missing on disk; "+
"ensure %q contents is not corrupted; remove %q to rebuild its' content from the list of existing parts",
partPath, partsFile, partsFile, partsFile)
}
p := mustOpenFilePart(pt, partPath) p := mustOpenFilePart(pt, partPath)
pws[i] = newPartWrapper(p, nil, time.Time{}) pws[i] = newPartWrapper(p, nil, time.Time{})
} }

View file

@ -1349,6 +1349,18 @@ func mustOpenParts(path string) []*partWrapper {
des := fs.MustReadDir(path) des := fs.MustReadDir(path)
m := make(map[string]struct{}, len(partNames)) m := make(map[string]struct{}, len(partNames))
for _, partName := range partNames { for _, partName := range partNames {
// Make sure the partName exists on disk.
// If it is missing, then manual action from the user is needed,
// since this is unexpected state, which cannot occur under normal operation,
// including unclean shutdown.
partPath := filepath.Join(path, partName)
if !fs.IsPathExist(partPath) {
partsFile := filepath.Join(path, partsFilename)
logger.Panicf("FATAL: part %q is listed in %q, but is missing on disk; "+
"ensure %q contents is not corrupted; remove %q to rebuild its' content from the list of existing parts",
partPath, partsFile, partsFile, partsFile)
}
m[partName] = struct{}{} m[partName] = struct{}{}
} }
for _, de := range des { for _, de := range des {

View file

@ -668,7 +668,8 @@ func (is *indexSearch) searchLabelNamesWithFiltersOnDate(qt *querytracer.Tracer,
// This would help https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2978 // This would help https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2978
metricIDs := filter.AppendTo(nil) metricIDs := filter.AppendTo(nil)
qt.Printf("sort %d metricIDs", len(metricIDs)) qt.Printf("sort %d metricIDs", len(metricIDs))
return is.getLabelNamesForMetricIDs(qt, metricIDs, lns, maxLabelNames) is.getLabelNamesForMetricIDs(qt, metricIDs, lns, maxLabelNames)
return nil
} }
var prevLabelName []byte var prevLabelName []byte
ts := &is.ts ts := &is.ts
@ -732,39 +733,34 @@ func (is *indexSearch) searchLabelNamesWithFiltersOnDate(qt *querytracer.Tracer,
return nil return nil
} }
func (is *indexSearch) getLabelNamesForMetricIDs(qt *querytracer.Tracer, metricIDs []uint64, lns map[string]struct{}, maxLabelNames int) error { func (is *indexSearch) getLabelNamesForMetricIDs(qt *querytracer.Tracer, metricIDs []uint64, lns map[string]struct{}, maxLabelNames int) {
lns["__name__"] = struct{}{} lns["__name__"] = struct{}{}
var mn MetricName var mn MetricName
foundLabelNames := 0 foundLabelNames := 0
var buf []byte var buf []byte
for _, metricID := range metricIDs { for _, metricID := range metricIDs {
var err error var ok bool
buf, err = is.searchMetricNameWithCache(buf[:0], metricID) buf, ok = is.searchMetricNameWithCache(buf[:0], metricID)
if err != nil { if !ok {
if err == io.EOF {
// It is likely the metricID->metricName entry didn't propagate to inverted index yet. // It is likely the metricID->metricName entry didn't propagate to inverted index yet.
// Skip this metricID for now. // Skip this metricID for now.
continue continue
} }
return fmt.Errorf("cannot find metricName by metricID %d: %w", metricID, err)
}
if err := mn.Unmarshal(buf); err != nil { if err := mn.Unmarshal(buf); err != nil {
return fmt.Errorf("cannot unmarshal metricName %q: %w", buf, err) logger.Panicf("FATAL: cannot unmarshal metricName %q: %w", buf, err)
} }
for _, tag := range mn.Tags { for _, tag := range mn.Tags {
_, ok := lns[string(tag.Key)] if _, ok := lns[string(tag.Key)]; !ok {
if !ok {
foundLabelNames++ foundLabelNames++
lns[string(tag.Key)] = struct{}{} lns[string(tag.Key)] = struct{}{}
if len(lns) >= maxLabelNames { if len(lns) >= maxLabelNames {
qt.Printf("hit the limit on the number of unique label names: %d", maxLabelNames) qt.Printf("hit the limit on the number of unique label names: %d", maxLabelNames)
return nil return
} }
} }
} }
} }
qt.Printf("get %d distinct label names from %d metricIDs", foundLabelNames, len(metricIDs)) qt.Printf("get %d distinct label names from %d metricIDs", foundLabelNames, len(metricIDs))
return nil
} }
// SearchLabelValuesWithFiltersOnTimeRange returns label values for the given labelName, tfss and tr. // SearchLabelValuesWithFiltersOnTimeRange returns label values for the given labelName, tfss and tr.
@ -868,7 +864,8 @@ func (is *indexSearch) searchLabelValuesWithFiltersOnDate(qt *querytracer.Tracer
// This would help https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2978 // This would help https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2978
metricIDs := filter.AppendTo(nil) metricIDs := filter.AppendTo(nil)
qt.Printf("sort %d metricIDs", len(metricIDs)) qt.Printf("sort %d metricIDs", len(metricIDs))
return is.getLabelValuesForMetricIDs(qt, lvs, labelName, metricIDs, maxLabelValues) is.getLabelValuesForMetricIDs(qt, lvs, labelName, metricIDs, maxLabelValues)
return nil
} }
if labelName == "__name__" { if labelName == "__name__" {
// __name__ label is encoded as empty string in indexdb. // __name__ label is encoded as empty string in indexdb.
@ -927,7 +924,7 @@ func (is *indexSearch) searchLabelValuesWithFiltersOnDate(qt *querytracer.Tracer
return nil return nil
} }
func (is *indexSearch) getLabelValuesForMetricIDs(qt *querytracer.Tracer, lvs map[string]struct{}, labelName string, metricIDs []uint64, maxLabelValues int) error { func (is *indexSearch) getLabelValuesForMetricIDs(qt *querytracer.Tracer, lvs map[string]struct{}, labelName string, metricIDs []uint64, maxLabelValues int) {
if labelName == "" { if labelName == "" {
labelName = "__name__" labelName = "__name__"
} }
@ -935,32 +932,27 @@ func (is *indexSearch) getLabelValuesForMetricIDs(qt *querytracer.Tracer, lvs ma
foundLabelValues := 0 foundLabelValues := 0
var buf []byte var buf []byte
for _, metricID := range metricIDs { for _, metricID := range metricIDs {
var err error var ok bool
buf, err = is.searchMetricNameWithCache(buf[:0], metricID) buf, ok = is.searchMetricNameWithCache(buf[:0], metricID)
if err != nil { if !ok {
if err == io.EOF {
// It is likely the metricID->metricName entry didn't propagate to inverted index yet. // It is likely the metricID->metricName entry didn't propagate to inverted index yet.
// Skip this metricID for now. // Skip this metricID for now.
continue continue
} }
return fmt.Errorf("cannot find metricName by metricID %d: %w", metricID, err)
}
if err := mn.Unmarshal(buf); err != nil { if err := mn.Unmarshal(buf); err != nil {
return fmt.Errorf("cannot unmarshal metricName %q: %w", buf, err) logger.Panicf("FATAL: cannot unmarshal metricName %q: %s", buf, err)
} }
tagValue := mn.GetTagValue(labelName) tagValue := mn.GetTagValue(labelName)
_, ok := lvs[string(tagValue)] if _, ok := lvs[string(tagValue)]; !ok {
if !ok {
foundLabelValues++ foundLabelValues++
lvs[string(tagValue)] = struct{}{} lvs[string(tagValue)] = struct{}{}
if len(lvs) >= maxLabelValues { if len(lvs) >= maxLabelValues {
qt.Printf("hit the limit on the number of unique label values for label %q: %d", labelName, maxLabelValues) qt.Printf("hit the limit on the number of unique label values for label %q: %d", labelName, maxLabelValues)
return nil return
} }
} }
} }
qt.Printf("get %d distinct values for label %q from %d metricIDs", foundLabelValues, labelName, len(metricIDs)) qt.Printf("get %d distinct values for label %q from %d metricIDs", foundLabelValues, labelName, len(metricIDs))
return nil
} }
// SearchTagValueSuffixes returns all the tag value suffixes for the given tagKey and tagValuePrefix on the given tr. // SearchTagValueSuffixes returns all the tag value suffixes for the given tagKey and tagValuePrefix on the given tr.
@ -1442,38 +1434,35 @@ func (th *topHeap) Pop() interface{} {
// searchMetricNameWithCache appends metric name for the given metricID to dst // searchMetricNameWithCache appends metric name for the given metricID to dst
// and returns the result. // and returns the result.
func (db *indexDB) searchMetricNameWithCache(dst []byte, metricID uint64) ([]byte, error) { func (db *indexDB) searchMetricNameWithCache(dst []byte, metricID uint64) ([]byte, bool) {
metricName := db.getMetricNameFromCache(dst, metricID) metricName := db.getMetricNameFromCache(dst, metricID)
if len(metricName) > len(dst) { if len(metricName) > len(dst) {
return metricName, nil return metricName, true
} }
is := db.getIndexSearch(noDeadline) is := db.getIndexSearch(noDeadline)
var err error var ok bool
dst, err = is.searchMetricName(dst, metricID) dst, ok = is.searchMetricName(dst, metricID)
db.putIndexSearch(is) db.putIndexSearch(is)
if err == nil { if ok {
// There is no need in verifying whether the given metricID is deleted, // There is no need in verifying whether the given metricID is deleted,
// since the filtering must be performed before calling this func. // since the filtering must be performed before calling this func.
db.putMetricNameToCache(metricID, dst) db.putMetricNameToCache(metricID, dst)
return dst, nil return dst, true
}
if err != io.EOF {
return dst, err
} }
// Try searching in the external indexDB. // Try searching in the external indexDB.
if db.doExtDB(func(extDB *indexDB) { if db.doExtDB(func(extDB *indexDB) {
is := extDB.getIndexSearch(noDeadline) is := extDB.getIndexSearch(noDeadline)
dst, err = is.searchMetricName(dst, metricID) dst, ok = is.searchMetricName(dst, metricID)
extDB.putIndexSearch(is) extDB.putIndexSearch(is)
if err == nil { if ok {
// There is no need in verifying whether the given metricID is deleted, // There is no need in verifying whether the given metricID is deleted,
// since the filtering must be performed before calling this func. // since the filtering must be performed before calling this func.
extDB.putMetricNameToCache(metricID, dst) extDB.putMetricNameToCache(metricID, dst)
} }
}) { }) && ok {
return dst, err return dst, true
} }
// Cannot find MetricName for the given metricID. This may be the case // Cannot find MetricName for the given metricID. This may be the case
@ -1484,7 +1473,7 @@ func (db *indexDB) searchMetricNameWithCache(dst []byte, metricID uint64) ([]byt
// Mark the metricID as deleted, so it will be created again when new data point // Mark the metricID as deleted, so it will be created again when new data point
// for the given time series will arrive. // for the given time series will arrive.
db.deleteMetricIDs([]uint64{metricID}) db.deleteMetricIDs([]uint64{metricID})
return dst, io.EOF return dst, false
} }
// DeleteTSIDs marks as deleted all the TSIDs matching the given tfss. // DeleteTSIDs marks as deleted all the TSIDs matching the given tfss.
@ -1820,36 +1809,36 @@ func (is *indexSearch) getTSIDByMetricNameNoExtDB(dst *TSID, metricName []byte,
return false return false
} }
func (is *indexSearch) searchMetricNameWithCache(dst []byte, metricID uint64) ([]byte, error) { func (is *indexSearch) searchMetricNameWithCache(dst []byte, metricID uint64) ([]byte, bool) {
metricName := is.db.getMetricNameFromCache(dst, metricID) metricName := is.db.getMetricNameFromCache(dst, metricID)
if len(metricName) > len(dst) { if len(metricName) > len(dst) {
return metricName, nil return metricName, true
} }
var err error var ok bool
dst, err = is.searchMetricName(dst, metricID) dst, ok = is.searchMetricName(dst, metricID)
if err == nil { if ok {
// There is no need in verifying whether the given metricID is deleted, // There is no need in verifying whether the given metricID is deleted,
// since the filtering must be performed before calling this func. // since the filtering must be performed before calling this func.
is.db.putMetricNameToCache(metricID, dst) is.db.putMetricNameToCache(metricID, dst)
return dst, nil return dst, true
} }
return dst, err return dst, false
} }
func (is *indexSearch) searchMetricName(dst []byte, metricID uint64) ([]byte, error) { func (is *indexSearch) searchMetricName(dst []byte, metricID uint64) ([]byte, bool) {
ts := &is.ts ts := &is.ts
kb := &is.kb kb := &is.kb
kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixMetricIDToMetricName) kb.B = is.marshalCommonPrefix(kb.B[:0], nsPrefixMetricIDToMetricName)
kb.B = encoding.MarshalUint64(kb.B, metricID) kb.B = encoding.MarshalUint64(kb.B, metricID)
if err := ts.FirstItemWithPrefix(kb.B); err != nil { if err := ts.FirstItemWithPrefix(kb.B); err != nil {
if err == io.EOF { if err == io.EOF {
return dst, err return dst, false
} }
return dst, fmt.Errorf("error when searching metricName by metricID; searchPrefix %q: %w", kb.B, err) logger.Panicf("FATAL: error when searching metricName by metricID; searchPrefix %q: %w", kb.B, err)
} }
v := ts.Item[len(kb.B):] v := ts.Item[len(kb.B):]
dst = append(dst, v...) dst = append(dst, v...)
return dst, nil return dst, true
} }
func (is *indexSearch) containsTimeRange(tr TimeRange) (bool, error) { func (is *indexSearch) containsTimeRange(tr TimeRange) (bool, error) {
@ -1928,18 +1917,15 @@ func (is *indexSearch) updateMetricIDsByMetricNameMatch(qt *querytracer.Tracer,
return err return err
} }
} }
var err error var ok bool
metricName.B, err = is.searchMetricNameWithCache(metricName.B[:0], metricID) metricName.B, ok = is.searchMetricNameWithCache(metricName.B[:0], metricID)
if err != nil { if !ok {
if err == io.EOF {
// It is likely the metricID->metricName entry didn't propagate to inverted index yet. // It is likely the metricID->metricName entry didn't propagate to inverted index yet.
// Skip this metricID for now. // Skip this metricID for now.
continue continue
} }
return fmt.Errorf("cannot find metricName by metricID %d: %w", metricID, err)
}
if err := mn.Unmarshal(metricName.B); err != nil { if err := mn.Unmarshal(metricName.B); err != nil {
return fmt.Errorf("cannot unmarshal metricName %q: %w", metricName.B, err) logger.Panicf("FATAL: cannot unmarshal metricName %q: %s", metricName.B, err)
} }
// Match the mn against tfs. // Match the mn against tfs.

View file

@ -3,7 +3,6 @@ package storage
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"io"
"math/rand" "math/rand"
"os" "os"
"reflect" "reflect"
@ -655,19 +654,19 @@ func testIndexDBCheckTSIDByName(db *indexDB, mns []MetricName, tsids []TSID, isC
} }
// Search for metric name for the given metricID. // Search for metric name for the given metricID.
var err error var ok bool
metricNameCopy, err = db.searchMetricNameWithCache(metricNameCopy[:0], genTSID.TSID.MetricID) metricNameCopy, ok = db.searchMetricNameWithCache(metricNameCopy[:0], genTSID.TSID.MetricID)
if err != nil { if !ok {
return fmt.Errorf("error in searchMetricNameWithCache for metricID=%d; i=%d: %w", genTSID.TSID.MetricID, i, err) return fmt.Errorf("cannot find metricName for metricID=%d; i=%d", genTSID.TSID.MetricID, i)
} }
if !bytes.Equal(metricName, metricNameCopy) { if !bytes.Equal(metricName, metricNameCopy) {
return fmt.Errorf("unexpected mn for metricID=%d;\ngot\n%q\nwant\n%q", genTSID.TSID.MetricID, metricNameCopy, metricName) return fmt.Errorf("unexpected mn for metricID=%d;\ngot\n%q\nwant\n%q", genTSID.TSID.MetricID, metricNameCopy, metricName)
} }
// Try searching metric name for non-existent MetricID. // Try searching metric name for non-existent MetricID.
buf, err := db.searchMetricNameWithCache(nil, 1) buf, found := db.searchMetricNameWithCache(nil, 1)
if err != io.EOF { if found {
return fmt.Errorf("expecting io.EOF error when searching for non-existing metricID; got %v", err) return fmt.Errorf("unexpected metricName found for non-existing metricID; got %X", buf)
} }
if len(buf) > 0 { if len(buf) > 0 {
return fmt.Errorf("expecting empty buf when searching for non-existent metricID; got %X", buf) return fmt.Errorf("expecting empty buf when searching for non-existent metricID; got %X", buf)

View file

@ -1779,6 +1779,18 @@ func mustOpenParts(path string, partNames []string) []*partWrapper {
des := fs.MustReadDir(path) des := fs.MustReadDir(path)
m := make(map[string]struct{}, len(partNames)) m := make(map[string]struct{}, len(partNames))
for _, partName := range partNames { for _, partName := range partNames {
// Make sure the partName exists on disk.
// If it is missing, then manual action from the user is needed,
// since this is unexpected state, which cannot occur under normal operation,
// including unclean shutdown.
partPath := filepath.Join(path, partName)
if !fs.IsPathExist(partPath) {
partsFile := filepath.Join(path, partsFilename)
logger.Panicf("FATAL: part %q is listed in %q, but is missing on disk; "+
"ensure %q contents is not corrupted; remove %q to rebuild its' content from the list of existing parts",
partPath, partsFile, partsFile, partsFile)
}
m[partName] = struct{}{} m[partName] = struct{}{}
} }
for _, de := range des { for _, de := range des {

View file

@ -211,17 +211,13 @@ func (s *Search) NextMetricBlock() bool {
// Skip the block, since it contains only data outside the configured retention. // Skip the block, since it contains only data outside the configured retention.
continue continue
} }
var err error var ok bool
s.MetricBlockRef.MetricName, err = s.idb.searchMetricNameWithCache(s.MetricBlockRef.MetricName[:0], tsid.MetricID) s.MetricBlockRef.MetricName, ok = s.idb.searchMetricNameWithCache(s.MetricBlockRef.MetricName[:0], tsid.MetricID)
if err != nil { if !ok {
if err == io.EOF {
// Skip missing metricName for tsid.MetricID. // Skip missing metricName for tsid.MetricID.
// It should be automatically fixed. See indexDB.searchMetricNameWithCache for details. // It should be automatically fixed. See indexDB.searchMetricNameWithCache for details.
continue continue
} }
s.err = err
return false
}
s.prevMetricID = tsid.MetricID s.prevMetricID = tsid.MetricID
} }
s.MetricBlockRef.BlockRef = s.ts.BlockRef s.MetricBlockRef.BlockRef = s.ts.BlockRef

View file

@ -1114,16 +1114,13 @@ func (s *Storage) SearchMetricNames(qt *querytracer.Tracer, tfss []*TagFilters,
return nil, err return nil, err
} }
} }
var err error var ok bool
metricName, err = idb.searchMetricNameWithCache(metricName[:0], metricID) metricName, ok = idb.searchMetricNameWithCache(metricName[:0], metricID)
if err != nil { if !ok {
if err == io.EOF {
// Skip missing metricName for metricID. // Skip missing metricName for metricID.
// It should be automatically fixed. See indexDB.searchMetricName for details. // It should be automatically fixed. See indexDB.searchMetricName for details.
continue continue
} }
return nil, fmt.Errorf("error when searching metricName for metricID=%d: %w", metricID, err)
}
if _, ok := metricNamesSeen[string(metricName)]; ok { if _, ok := metricNamesSeen[string(metricName)]; ok {
// The given metric name was already seen; skip it // The given metric name was already seen; skip it
continue continue
@ -1175,14 +1172,12 @@ func (s *Storage) prefetchMetricNames(qt *querytracer.Tracer, srcMetricIDs []uin
return err return err
} }
} }
metricName, err = is.searchMetricNameWithCache(metricName[:0], metricID) var ok bool
if err != nil { metricName, ok = is.searchMetricNameWithCache(metricName[:0], metricID)
if err == io.EOF { if !ok {
missingMetricIDs = append(missingMetricIDs, metricID) missingMetricIDs = append(missingMetricIDs, metricID)
continue continue
} }
return fmt.Errorf("error in pre-fetching metricName for metricID=%d: %w", metricID, err)
}
} }
idb.doExtDB(func(extDB *indexDB) { idb.doExtDB(func(extDB *indexDB) {
is := extDB.getIndexSearch(deadline) is := extDB.getIndexSearch(deadline)
@ -1193,11 +1188,7 @@ func (s *Storage) prefetchMetricNames(qt *querytracer.Tracer, srcMetricIDs []uin
return return
} }
} }
metricName, err = is.searchMetricNameWithCache(metricName[:0], metricID) metricName, _ = is.searchMetricNameWithCache(metricName[:0], metricID)
if err != nil && err != io.EOF {
err = fmt.Errorf("error in pre-fetching metricName for metricID=%d in extDB: %w", metricID, err)
return
}
} }
}) })
if err != nil && err != io.EOF { if err != nil && err != io.EOF {