mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2025-01-30 15:22:07 +00:00)

Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Commit a2e224593e - 366 changed files with 42543 additions and 3177 deletions

.github/workflows/check-licenses.yml (vendored, 2 changes)
@@ -17,7 +17,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@main
         with:
-          go-version: 1.20.4
+          go-version: 1.20.5
         id: go
       - name: Code checkout
         uses: actions/checkout@master

.github/workflows/codeql-analysis.yml (vendored, 2 changes)
@@ -57,7 +57,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.20.4
+          go-version: 1.20.5
           check-latest: true
           cache: true
         if: ${{ matrix.language == 'go' }}

.github/workflows/main.yml (vendored, 6 changes)
@@ -32,7 +32,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.20.4
+          go-version: 1.20.5
           check-latest: true
           cache: true

@@ -56,7 +56,7 @@ jobs:
       - name: Setup Go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.20.4
+          go-version: 1.20.5
           check-latest: true
           cache: true

@@ -81,7 +81,7 @@ jobs:
         id: go
         uses: actions/setup-go@v4
         with:
-          go-version: 1.20.4
+          go-version: 1.20.5
           check-latest: true
           cache: true

.github/workflows/update-sandbox.yml (vendored, new file, 77 lines)
@@ -0,0 +1,77 @@
name: sandbox-release
on:
  release:
    types: [published]
permissions:
  contents: write
jobs:
  deploy-sandbox:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
        with:
          repository: VictoriaMetrics/ops
          ref: master
          token: ${{ secrets.VM_BOT_GH_TOKEN }}

      - name: Import GPG key
        id: import-gpg
        uses: crazy-max/ghaction-import-gpg@v5
        with:
          gpg_private_key: ${{ secrets.VM_BOT_GPG_PRIVATE_KEY }}
          passphrase: ${{ secrets.VM_BOT_PASSPHRASE }}
          git_user_signingkey: true
          git_commit_gpgsign: true

      - name: update image tag
        uses: fjogeleit/yaml-update-action@main
        with:
          valueFile: 'gcp-test/sandbox/manifests/benchmark-vm/vmcluster.yaml'
          commitChange: false
          createPR: false
          changes: |
            {
              "gcp-test/sandbox/manifests/benchmark-vm/vmcluster.yaml": {
                "spec.vminsert.image.tag": "${{ github.event.release.tag_name }}-enterprise-cluster",
                "spec.vmselect.image.tag": "${{ github.event.release.tag_name }}-enterprise-cluster",
                "spec.vmstorage.image.tag": "${{ github.event.release.tag_name }}-enterprise-cluster"
              },
              "gcp-test/sandbox/manifests/benchmark-vm/vmsingle.yaml": {
                "spec.image.tag": "${{ github.event.release.tag_name }}-enterprise"
              },
              "gcp-test/sandbox/manifests/monitoring/monitoring-vmagent.yaml": {
                "spec.image.tag": "${{ github.event.release.tag_name }}"
              },
              "gcp-test/sandbox/manifests/monitoring/monitoring-vmcluster.yaml": {
                "spec.vminsert.image.tag": "${{ github.event.release.tag_name }}-enterprise-cluster",
                "spec.vmselect.image.tag": "${{ github.event.release.tag_name }}-enterprise-cluster",
                "spec.vmstorage.image.tag": "${{ github.event.release.tag_name }}-enterprise-cluster"
              },
              "gcp-test/sandbox/manifests/monitoring/vmalert.yaml": {
                "spec.image.tag": "${{ github.event.release.tag_name }}-enterprise"
              }
            }

      - name: commit changes
        run: |
          git config --global user.name "${{ steps.import-gpg.outputs.email }}"
          git config --global user.email "${{ steps.import-gpg.outputs.email }}"
          git add .
          git commit -S -m "Deploy image tag ${RELEASE_TAG} to sandbox"
        env:
          RELEASE_TAG: ${{ github.event.release.tag_name }}

      - name: Create Pull Request
        uses: peter-evans/create-pull-request@v5
        with:
          author: ${{ github.actor }} <${{ github.actor }}@users.noreply.github.com>
          branch: release-automation
          token: ${{ secrets.VM_BOT_GH_TOKEN }}
          delete-branch: true
          title: "release ${{ github.event.release.tag_name }}"
          body: |
            Release [${{ github.event.release.tag_name }}](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/${{ github.event.release.tag_name }}) to sandbox

            > Auto-generated by `Github Actions Bot`

Makefile (106 changes)
@@ -21,6 +21,7 @@ include package/release/Makefile

 all: \
 	victoria-metrics-prod \
+	victoria-logs-prod \
 	vmagent-prod \
 	vmalert-prod \
 	vmauth-prod \
@@ -31,8 +32,9 @@ all: \
 clean:
 	rm -rf bin/*

-publish: docker-scan \
+publish: package-base \
 	publish-victoria-metrics \
+	publish-victoria-logs \
 	publish-vmagent \
 	publish-vmalert \
 	publish-vmauth \
@@ -42,6 +44,7 @@ publish: docker-scan \

 package: \
 	package-victoria-metrics \
+	package-victoria-logs \
 	package-vmagent \
 	package-vmalert \
 	package-vmauth \
@@ -178,6 +181,7 @@ publish-release:

 release: \
 	release-victoria-metrics \
+	release-victoria-logs \
 	release-vmutils

 release-victoria-metrics: \
@@ -191,7 +195,6 @@ release-victoria-metrics: \
 	release-victoria-metrics-openbsd-amd64 \
 	release-victoria-metrics-windows-amd64

-# adds i386 arch
 release-victoria-metrics-linux-386:
 	GOOS=linux GOARCH=386 $(MAKE) release-victoria-metrics-goos-goarch

@@ -238,6 +241,63 @@ release-victoria-metrics-windows-goarch: victoria-metrics-windows-$(GOARCH)-prod
 	cd bin && rm -rf \
 		victoria-metrics-windows-$(GOARCH)-prod.exe

+release-victoria-logs: \
+	release-victoria-logs-linux-386 \
+	release-victoria-logs-linux-amd64 \
+	release-victoria-logs-linux-arm \
+	release-victoria-logs-linux-arm64 \
+	release-victoria-logs-darwin-amd64 \
+	release-victoria-logs-darwin-arm64 \
+	release-victoria-logs-freebsd-amd64 \
+	release-victoria-logs-openbsd-amd64 \
+	release-victoria-logs-windows-amd64
+
+release-victoria-logs-linux-386:
+	GOOS=linux GOARCH=386 $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-linux-amd64:
+	GOOS=linux GOARCH=amd64 $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-linux-arm:
+	GOOS=linux GOARCH=arm $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-linux-arm64:
+	GOOS=linux GOARCH=arm64 $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-darwin-amd64:
+	GOOS=darwin GOARCH=amd64 $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-darwin-arm64:
+	GOOS=darwin GOARCH=arm64 $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-freebsd-amd64:
+	GOOS=freebsd GOARCH=amd64 $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-openbsd-amd64:
+	GOOS=openbsd GOARCH=amd64 $(MAKE) release-victoria-logs-goos-goarch
+
+release-victoria-logs-windows-amd64:
+	GOARCH=amd64 $(MAKE) release-victoria-logs-windows-goarch
+
+release-victoria-logs-goos-goarch: victoria-logs-$(GOOS)-$(GOARCH)-prod
+	cd bin && \
+		tar --transform="flags=r;s|-$(GOOS)-$(GOARCH)||" -czf victoria-logs-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
+			victoria-logs-$(GOOS)-$(GOARCH)-prod \
+		&& sha256sum victoria-logs-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
+			victoria-logs-$(GOOS)-$(GOARCH)-prod \
+			| sed s/-$(GOOS)-$(GOARCH)-prod/-prod/ > victoria-logs-$(GOOS)-$(GOARCH)-$(PKG_TAG)_checksums.txt
+	cd bin && rm -rf victoria-logs-$(GOOS)-$(GOARCH)-prod
+
+release-victoria-logs-windows-goarch: victoria-logs-windows-$(GOARCH)-prod
+	cd bin && \
+		zip victoria-logs-windows-$(GOARCH)-$(PKG_TAG).zip \
+			victoria-logs-windows-$(GOARCH)-prod.exe \
+		&& sha256sum victoria-logs-windows-$(GOARCH)-$(PKG_TAG).zip \
+			victoria-logs-windows-$(GOARCH)-prod.exe \
+			> victoria-logs-windows-$(GOARCH)-$(PKG_TAG)_checksums.txt
+	cd bin && rm -rf \
+		victoria-logs-windows-$(GOARCH)-prod.exe
+
 release-vmutils: \
 	release-vmutils-linux-386 \
 	release-vmutils-linux-amd64 \
@@ -418,27 +478,39 @@ check-licenses: install-wwhrd
 	wwhrd check -f .wwhrd.yml

 copy-docs:
-	echo '' > ${DST}
+# The 'printf' function is used instead of 'echo' or 'echo -e' to handle line breaks (e.g. '\n') in the same way on different operating systems (MacOS/Ubuntu Linux/Arch Linux) and their shells (bash/sh/zsh/fish).
+# For details, see https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4548#issue-1782796419 and https://stackoverflow.com/questions/8467424/echo-newline-in-bash-prints-literal-n
+	echo "---" > ${DST}
 	@if [ ${ORDER} -ne 0 ]; then \
-		echo "---\nsort: ${ORDER}\n---\n" > ${DST}; \
+		echo "sort: ${ORDER}" >> ${DST}; \
+		echo "weight: ${ORDER}" >> ${DST}; \
+		printf "menu:\n  docs:\n    parent: 'victoriametrics'\n    weight: ${ORDER}\n" >> ${DST}; \
 	fi
+
+	echo "title: ${TITLE}" >> ${DST}
+	@if [ ${OLD_URL} ]; then \
+		printf "aliases:\n  - ${OLD_URL}\n" >> ${DST}; \
+	fi
+	echo "---" >> ${DST}
 	cat ${SRC} >> ${DST}
 	sed -i='.tmp' 's/<img src=\"docs\//<img src=\"/' ${DST}
 	rm -rf docs/*.tmp

-# Copies docs for all components and adds the order tag.
-# For ORDER=0 it adds no order tag.
+# Copies docs for all components and adds the order/weight tag, title, menu position and alias with the backward compatible link for the old site.
+# For ORDER=0 it adds no order tag/weight tag.
+# FOR OLD_URL - relative link, used for backward compatibility with the link from documentation based on GitHub pages (old one)
+# FOR OLD_URL='' it adds no alias, it should be empty for every new page, don't change it for already existing links.
 # Images starting with <img src="docs/ are replaced with <img src="
-# Cluster docs are supposed to be ordered as 9th.
+# Cluster docs are supposed to be ordered as 2nd.
 # The rest of docs is ordered manually.
 docs-sync:
-	SRC=README.md DST=docs/README.md ORDER=0 $(MAKE) copy-docs
-	SRC=README.md DST=docs/Single-server-VictoriaMetrics.md ORDER=1 $(MAKE) copy-docs
-	SRC=app/vmagent/README.md DST=docs/vmagent.md ORDER=3 $(MAKE) copy-docs
-	SRC=app/vmalert/README.md DST=docs/vmalert.md ORDER=4 $(MAKE) copy-docs
-	SRC=app/vmauth/README.md DST=docs/vmauth.md ORDER=5 $(MAKE) copy-docs
-	SRC=app/vmbackup/README.md DST=docs/vmbackup.md ORDER=6 $(MAKE) copy-docs
-	SRC=app/vmrestore/README.md DST=docs/vmrestore.md ORDER=7 $(MAKE) copy-docs
-	SRC=app/vmctl/README.md DST=docs/vmctl.md ORDER=8 $(MAKE) copy-docs
-	SRC=app/vmgateway/README.md DST=docs/vmgateway.md ORDER=9 $(MAKE) copy-docs
-	SRC=app/vmbackupmanager/README.md DST=docs/vmbackupmanager.md ORDER=10 $(MAKE) copy-docs
+	SRC=README.md DST=docs/README.md OLD_URL='' ORDER=0 TITLE=VictoriaMetrics $(MAKE) copy-docs
+	SRC=README.md DST=docs/Single-server-VictoriaMetrics.md OLD_URL='/Single-server-VictoriaMetrics.html' TITLE=VictoriaMetrics ORDER=1 $(MAKE) copy-docs
+	SRC=app/vmagent/README.md DST=docs/vmagent.md OLD_URL='/vmagent.html' ORDER=3 TITLE=vmagent $(MAKE) copy-docs
+	SRC=app/vmalert/README.md DST=docs/vmalert.md OLD_URL='/vmalert.html' ORDER=4 TITLE=vmalert $(MAKE) copy-docs
+	SRC=app/vmauth/README.md DST=docs/vmauth.md OLD_URL='/vmauth.html' ORDER=5 TITLE=vmauth $(MAKE) copy-docs
+	SRC=app/vmbackup/README.md DST=docs/vmbackup.md OLD_URL='/vmbackup.html' ORDER=6 TITLE=vmbackup $(MAKE) copy-docs
+	SRC=app/vmrestore/README.md DST=docs/vmrestore.md OLD_URL='/vmrestore.html' ORDER=7 TITLE=vmrestore $(MAKE) copy-docs
+	SRC=app/vmctl/README.md DST=docs/vmctl.md OLD_URL='/vmctl.html' ORDER=8 TITLE=vmctl $(MAKE) copy-docs
+	SRC=app/vmgateway/README.md DST=docs/vmgateway.md OLD_URL='/vmgateway.html' ORDER=9 TITLE=vmgateway $(MAKE) copy-docs
+	SRC=app/vmbackupmanager/README.md DST=docs/vmbackupmanager.md OLD_URL='/vmbackupmanager.html' ORDER=10 TITLE=vmbackupmanager $(MAKE) copy-docs

README.md (105 changes)
@@ -16,14 +16,16 @@ VictoriaMetrics is a fast, cost-effective and scalable monitoring solution and t
 VictoriaMetrics is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
 [Docker images](https://hub.docker.com/r/victoriametrics/victoria-metrics/), [Snap packages](https://snapcraft.io/victoriametrics)
 and [source code](https://github.com/VictoriaMetrics/VictoriaMetrics).
 Just download [the latest version of VictoriaMetrics](https://docs.victoriametrics.com/CHANGELOG.html)
 and follow [these instructions](https://docs.victoriametrics.com/Quick-Start.html).

 The cluster version of VictoriaMetrics is available [here](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html).

 Learn more about [key concepts](https://docs.victoriametrics.com/keyConcepts.html) of VictoriaMetrics and follow the
 [quick start guide](https://docs.victoriametrics.com/Quick-Start.html) for a better experience.

+There is also a user-friendly database for logs - [VictoriaLogs](https://docs.victoriametrics.com/VictoriaLogs/).
+
 If you have questions about VictoriaMetrics, then feel free to ask them at the [VictoriaMetrics community Slack chat](https://slack.victoriametrics.com/).

 [Contact us](mailto:info@victoriametrics.com) if you need enterprise support for VictoriaMetrics.
 See [features available in enterprise package](https://docs.victoriametrics.com/enterprise.html).
 Enterprise binaries can be downloaded and evaluated for free
@@ -116,6 +118,7 @@ Case studies:
 * [MHI Vestas Offshore Wind](https://docs.victoriametrics.com/CaseStudies.html#mhi-vestas-offshore-wind)
 * [Razorpay](https://docs.victoriametrics.com/CaseStudies.html#razorpay)
 * [Percona](https://docs.victoriametrics.com/CaseStudies.html#percona)
+* [Roblox](https://docs.victoriametrics.com/CaseStudies.html#roblox)
 * [Sensedia](https://docs.victoriametrics.com/CaseStudies.html#sensedia)
 * [Smarkets](https://docs.victoriametrics.com/CaseStudies.html#smarkets)
 * [Synthesio](https://docs.victoriametrics.com/CaseStudies.html#synthesio)
@@ -147,7 +150,7 @@ VictoriaMetrics can also be installed via these installation methods:
 The following command-line flags are used the most:

 * `-storageDataPath` - VictoriaMetrics stores all the data in this directory. Default path is `victoria-metrics-data` in the current working directory.
-* `-retentionPeriod` - retention for stored data. Older data is automatically deleted. Default retention is 1 month. See [the Retention section](#retention) for more details.
+* `-retentionPeriod` - retention for stored data. Older data is automatically deleted. Default retention is 1 month. The minimum retention period is 24h or 1d. See [the Retention section](#retention) for more details.

 Other flags have good enough default values, so set them only if you really need to change them. Pass `-help` to see [all the available flags with description and default values](#list-of-command-line-flags).
@@ -528,8 +531,10 @@ and stream plain InfluxDB line protocol data to the configured TCP and/or UDP ad

 VictoriaMetrics performs the following transformations to the ingested InfluxDB data:

-* [db query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db` label value
-  unless `db` tag exists in the InfluxDB line. The `db` label name can be overridden via `-influxDBLabel` command-line flag.
+* [db query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db`
+  [label](https://docs.victoriametrics.com/keyConcepts.html#labels) value unless `db` tag exists in the InfluxDB line.
+  The `db` label name can be overridden via `-influxDBLabel` command-line flag. If more strict data isolation is required,
+  read more about multi-tenancy [here](https://docs.victoriametrics.com/keyConcepts.html#multi-tenancy).
 * Field names are mapped to time series names prefixed with `{measurement}{separator}` value, where `{separator}` equals to `_` by default. It can be changed with `-influxMeasurementFieldSeparator` command-line flag. See also `-influxSkipSingleField` command-line flag. If `{measurement}` is empty or if `-influxSkipMeasurement` command-line flag is set, then time series names correspond to field names.
 * Field values are mapped to time series values.
 * Tags are mapped to Prometheus labels as-is.
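
To make the naming rule above concrete, here is a minimal standalone Go sketch (not part of this commit; the helper name and the sample values are made up for illustration):

```go
package main

import "fmt"

// metricName mimics the naming rule described above: the field name is
// prefixed with "{measurement}{separator}" unless the measurement is empty
// (or measurement skipping is enabled), in which case the field name is used as-is.
func metricName(measurement, field, separator string, skipMeasurement bool) string {
	if measurement == "" || skipMeasurement {
		return field
	}
	return measurement + separator + field
}

func main() {
	// The InfluxDB line `foo,tag1=value1 field1=12` would produce
	// the time series foo_field1{tag1="value1"} with value 12.
	fmt.Println(metricName("foo", "field1", "_", false)) // foo_field1
	fmt.Println(metricName("", "field1", "_", false))    // field1
}
```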
@@ -821,6 +826,7 @@ in [query APIs](https://docs.victoriametrics.com/#prometheus-querying-api-usage)
 in [export APIs](https://docs.victoriametrics.com/#how-to-export-time-series).

 - Unix timestamps in seconds with optional milliseconds after the point. For example, `1562529662.678`.
+- Unix timestamps in milliseconds. For example, `1562529662678`.
 - [RFC3339](https://www.ietf.org/rfc/rfc3339.txt). For example, `2022-03-29T01:02:03Z` or `2022-03-29T01:02:03+02:30`.
 - Partial RFC3339. Examples: `2022`, `2022-03`, `2022-03-29`, `2022-03-29T01`, `2022-03-29T01:02`, `2022-03-29T01:02:03`.
   The partial RFC3339 time is in UTC timezone by default. It is possible to specify the timezone by adding `+hh:mm` or `-hh:mm` suffix to the partial time.
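
The following standalone Go sketch (illustrative only, not VictoriaMetrics code) shows one way such timestamp flavors can be told apart; the `1e12` cutoff between seconds and milliseconds is an assumption made for the example, and partial RFC3339 parsing is omitted:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseTime accepts several of the timestamp flavors listed above
// and converts each to a time.Time.
func parseTime(s string) (time.Time, error) {
	// Try full RFC3339 first, e.g. "2022-03-29T01:02:03Z".
	if t, err := time.Parse(time.RFC3339, s); err == nil {
		return t, nil
	}
	// Numeric timestamps: Unix seconds with optional fractional part,
	// e.g. "1562529662.678", or Unix milliseconds, e.g. "1562529662678".
	if f, err := strconv.ParseFloat(s, 64); err == nil {
		if f > 1e12 {
			// Values this large are assumed to be milliseconds.
			return time.UnixMilli(int64(f)), nil
		}
		sec := int64(f)
		return time.Unix(sec, int64((f-float64(sec))*1e9)), nil
	}
	return time.Time{}, fmt.Errorf("unsupported timestamp %q", s)
}

func main() {
	for _, s := range []string{"1562529662.678", "1562529662678", "2022-03-29T01:02:03Z"} {
		t, err := parseTime(s)
		fmt.Println(t.UTC(), err)
	}
}
```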
@@ -1039,7 +1045,7 @@ VictoriaMetrics provides the following handlers for exporting data:
 Send a request to `http://<victoriametrics-addr>:8428/api/v1/export?match[]=<timeseries_selector_for_export>`,
 where `<timeseries_selector_for_export>` may contain any [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors)
 for metrics to export. Use `{__name__!=""}` selector for fetching all the time series.
-The response would contain all the data for the selected time series in [JSON streaming format](https://en.wikipedia.org/wiki/JSON_streaming#Line-delimited_JSON).
+The response would contain all the data for the selected time series in [JSON streaming format](http://ndjson.org/).
 Each JSON line contains samples for a single time series. An example output:

```json
{"metric":{"__name__":"up","job":"node_exporter","instance":"localhost:9100"},"values":[0,0,0],"timestamps":[1549891472010,1549891487724,1549891503438]}
```
@@ -1159,6 +1165,13 @@ Additionally, VictoriaMetrics can accept metrics via the following popular data
 * `/api/v1/import/prometheus` for importing data in Prometheus exposition format and in [Pushgateway format](https://github.com/prometheus/pushgateway#url).
   See [these docs](#how-to-import-data-in-prometheus-exposition-format) for details.

+Please note, most of the ingestion APIs (except [Prometheus remote_write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write))
+are optimized for performance and process data in a streaming fashion.
+This means that a client can transfer an unlimited amount of data through an open connection. Because of this, import APIs
+may not return parsing errors to the client, since the data stream is expected not to be interrupted.
+Instead, look for parsing errors on the server side (VictoriaMetrics single-node or vminsert) or
+check for changes in the `vm_rows_invalid_total` metric (exported by the server side).
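
As a concrete illustration of that server-side check, here is a standalone Go sketch (not part of this commit; the address assumes a default single-node setup) that scrapes `/metrics` and prints the `vm_rows_invalid_total` counters:

```go
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

// main fetches the /metrics endpoint and prints the vm_rows_invalid_total
// counters, which grow when ingested lines fail to parse on the server side.
func main() {
	resp, err := http.Get("http://localhost:8428/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		if strings.HasPrefix(sc.Text(), "vm_rows_invalid_total") {
			fmt.Println(sc.Text())
		}
	}
	if err := sc.Err(); err != nil {
		panic(err)
	}
}
```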

### How to import data in JSON line format

Example for importing data obtained via [/api/v1/export](#how-to-export-data-in-json-line-format):
@@ -1464,22 +1477,37 @@ with the enabled de-duplication. See [this section](#deduplication) for details.

 ## Deduplication

-VictoriaMetrics leaves a single raw sample with the biggest timestamp per each `-dedup.minScrapeInterval` discrete interval
-if `-dedup.minScrapeInterval` is set to positive duration. For example, `-dedup.minScrapeInterval=60s` would leave a single
-raw sample with the biggest timestamp per each discrete 60s interval.
+VictoriaMetrics leaves a single [raw sample](https://docs.victoriametrics.com/keyConcepts.html#raw-samples)
+with the biggest [timestamp](https://en.wikipedia.org/wiki/Unix_time) for each [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series)
+per each `-dedup.minScrapeInterval` discrete interval if `-dedup.minScrapeInterval` is set to a positive duration.
+For example, `-dedup.minScrapeInterval=60s` would leave a single raw sample with the biggest timestamp per each discrete
+`60s` interval.
+This aligns with the [staleness rules in Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness).

-If multiple raw samples have the same biggest timestamp on the given `-dedup.minScrapeInterval` discrete interval, then the sample with the biggest value is left.
+If multiple raw samples have **the same timestamp** on the given `-dedup.minScrapeInterval` discrete interval,
+then the sample with **the biggest value** is kept.

-The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. So it is safe to use deduplication and downsampling simultaneously.
+Please note, [labels](https://docs.victoriametrics.com/keyConcepts.html#labels) of raw samples should be identical
+in order to be deduplicated. For example, this is why an [HA pair of vmagents](https://docs.victoriametrics.com/vmagent.html#high-availability)
+needs to be identically configured.

-The recommended value for `-dedup.minScrapeInterval` must equal to `scrape_interval` config from Prometheus configs. It is recommended to have a single `scrape_interval` across all the scrape targets. See [this article](https://www.robustperception.io/keep-it-simple-scrape_interval-id) for details.
+The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled.
+So it is safe to use deduplication and downsampling simultaneously.

-The de-duplication reduces disk space usage if multiple identically configured [vmagent](https://docs.victoriametrics.com/vmagent.html) or Prometheus instances in HA pair
-write data to the same VictoriaMetrics instance. These vmagent or Prometheus instances must have identical
-`external_labels` section in their configs, so they write data to the same time series. See also [how to set up multiple vmagent instances for scraping the same targets](https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets).
+The recommended value for `-dedup.minScrapeInterval` must equal the `scrape_interval` from the Prometheus configs.
+It is recommended to have a single `scrape_interval` across all the scrape targets.
+See [this article](https://www.robustperception.io/keep-it-simple-scrape_interval-id) for details.

-It is recommended passing different `-promscrape.cluster.name` values to HA pairs of `vmagent` instances, so the de-duplication consistently leaves samples for one `vmagent` instance and removes duplicate samples from other `vmagent` instances. See [these docs](https://docs.victoriametrics.com/vmagent.html#high-availability) for details.
+The de-duplication reduces disk space usage if multiple **identically configured** [vmagent](https://docs.victoriametrics.com/vmagent.html)
+or Prometheus instances in an HA pair write data to the same VictoriaMetrics instance.
+These vmagent or Prometheus instances must have an **identical** `external_labels` section in their configs,
+so they write data to the same time series.
+See also [how to set up multiple vmagent instances for scraping the same targets](https://docs.victoriametrics.com/vmagent.html#scraping-big-number-of-targets).
+
+It is recommended to pass different `-promscrape.cluster.name` values to each distinct HA pair of `vmagent` instances,
+so the de-duplication consistently leaves samples for one `vmagent` instance and removes duplicate samples
+from other `vmagent` instances.
+See [these docs](https://docs.victoriametrics.com/vmagent.html#high-availability) for details.
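
To make the selection rule concrete, here is a small self-contained Go sketch (illustrative, not the actual storage code; real interval alignment may differ) that applies the described logic to a single series, keeping one sample per discrete interval and breaking timestamp ties by the bigger value:

```go
package main

import "fmt"

// Sample is a raw sample of a single time series.
type Sample struct {
	TimestampMs int64 // Unix timestamp in milliseconds
	Value       float64
}

// deduplicate keeps one sample per intervalMs: the one with the biggest
// timestamp; among equal timestamps, the one with the biggest value wins.
// The input must be sorted by timestamp.
func deduplicate(samples []Sample, intervalMs int64) []Sample {
	var out []Sample
	for _, s := range samples {
		if len(out) > 0 && out[len(out)-1].TimestampMs/intervalMs == s.TimestampMs/intervalMs {
			last := &out[len(out)-1]
			// Same interval: the later timestamp wins; equal timestamps keep the bigger value.
			if s.TimestampMs > last.TimestampMs || (s.TimestampMs == last.TimestampMs && s.Value > last.Value) {
				*last = s
			}
			continue
		}
		out = append(out, s)
	}
	return out
}

func main() {
	samples := []Sample{{1000, 1}, {30000, 2}, {30000, 5}, {61000, 3}}
	// With -dedup.minScrapeInterval=60s only {30000, 5} and {61000, 3} survive.
	fmt.Println(deduplicate(samples, 60_000))
}
```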

 ## Storage
@@ -1527,7 +1555,13 @@ occurs in the middle of writing the `part` to disk - such incompletely written `
 are automatically deleted on the next VictoriaMetrics start.

 The same applies to the merge process - `parts` are either fully merged into a new `part` or fail to merge,
-leaving the source `parts` untouched.
+leaving the source `parts` untouched. However, due to hardware issues, data on disk may be corrupted regardless of
+the VictoriaMetrics process. VictoriaMetrics can detect corruption while decompressing, decoding or sanity-checking
+the data blocks, but **it cannot fix the corrupted data**. Data parts that fail to load on startup need to be deleted
+or restored from backups. This is why it is recommended to perform
+[regular backups](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#backups).
+
+VictoriaMetrics doesn't use checksums for stored data blocks. See why [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3011).

 VictoriaMetrics doesn't merge parts if their summary size exceeds free disk space.
 This prevents potential out-of-disk-space errors during merge.
@@ -1546,19 +1580,22 @@ See also [how to work with snapshots](#how-to-work-with-snapshots).

 ## Retention

-Retention is configured with the `-retentionPeriod` command-line flag, which takes a number followed by a time unit character - `h(ours)`, `d(ays)`, `w(eeks)`, `y(ears)`. If the time unit is not specified, a month is assumed. For instance, `-retentionPeriod=3` means that the data will be stored for 3 months and then deleted. The default retention period is one month.
+Retention is configured with the `-retentionPeriod` command-line flag, which takes a number followed by a time unit
+character - `h(ours)`, `d(ays)`, `w(eeks)`, `y(ears)`. If the time unit is not specified, a month is assumed.
+For instance, `-retentionPeriod=3` means that the data will be stored for 3 months and then deleted.
+The default retention period is one month. The **minimum retention** period is 24h or 1d.

 Data is split in per-month partitions inside `<-storageDataPath>/data/{small,big}` folders.
-Data partitions outside the configured retention are deleted on the first day of the new month.
-Each partition consists of one or more data parts. Data parts outside the configured retention are eventually deleted during
-[background merge](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
+**Data partitions** outside the configured retention are deleted **on the first day of the new month**.
+Each partition consists of one or more **data parts**. Data parts outside the configured retention
+are **eventually deleted** during [background merge](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
+The time range covered by a data part is **not limited by the retention period unit**. One data part can cover hours or days of
+data. Hence, a data part can be deleted only **when it is fully outside the configured retention**.
+See more about partitions and parts [here](#storage).

 The maximum disk space usage for a given `-retentionPeriod` is going to be (`-retentionPeriod` + 1) months.
 For example, if `-retentionPeriod` is set to 1, data for January is deleted on March 1st.

-Please note, the time range covered by data part is not limited by retention period unit. Hence, data part may contain data
-for multiple days and will be deleted only when fully outside the configured retention.
-
 It is safe to extend `-retentionPeriod` on existing data. If `-retentionPeriod` is set to a lower
 value than before, then data outside the configured period will be eventually deleted.
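
As a rough illustration of the `(-retentionPeriod + 1)` months bound above, this sketch (not VictoriaMetrics code; the helper name is made up) computes the first day on which a monthly partition gets dropped:

```go
package main

import (
	"fmt"
	"time"
)

// partitionDeletionDate returns the first day of the month on which a
// per-month partition falls fully outside the retention and gets deleted:
// the partition for month M is dropped on the first day of month
// M + retentionMonths + 1.
func partitionDeletionDate(partitionMonth time.Time, retentionMonths int) time.Time {
	y, m, _ := partitionMonth.Date()
	return time.Date(y, m+time.Month(retentionMonths)+1, 1, 0, 0, 0, 0, time.UTC)
}

func main() {
	jan := time.Date(2023, time.January, 1, 0, 0, 0, 0, time.UTC)
	// With -retentionPeriod=1 the January partition is deleted on March 1st.
	fmt.Println(partitionDeletionDate(jan, 1)) // 2023-03-01 00:00:00 +0000 UTC
}
```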
@@ -1623,6 +1660,10 @@ Retention filters can be evaluated for free by downloading and using enterprise

 Downsampling is applied independently per each time series. It can reduce disk space usage and improve query performance if it is applied to time series with a big number of samples per series. Downsampling doesn't improve query performance if the database contains a big number of time series with a small number of samples per series (aka [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate)), since downsampling doesn't reduce the number of time series. So the majority of time is spent on searching for the matching time series. It is possible to use recording rules in [vmalert](https://docs.victoriametrics.com/vmalert.html) in order to reduce the number of time series. See [these docs](https://docs.victoriametrics.com/vmalert.html#downsampling-and-aggregation-via-vmalert).

+Downsampling happens during [background merges](https://docs.victoriametrics.com/#storage)
+and can't be performed if there is not enough free disk space or if vmstorage
+is in [read-only mode](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#readonly-mode).
+
 The downsampling can be evaluated for free by downloading and using enterprise binaries from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).

 ## Multi-tenancy
@@ -2020,8 +2061,7 @@ It is recommended disabling query cache with `-search.disableCache` command-line
 historical data with timestamps from the past, since the cache assumes that the data is written with
 the current timestamps. The query cache can be enabled after the backfilling is complete.

-An alternative solution is to query the `/internal/resetRollupResultCache` url after the backfilling is complete. This will reset
-the query cache, which could contain incomplete data cached during the backfilling.
+An alternative solution is to query the [/internal/resetRollupResultCache](https://docs.victoriametrics.com/url-examples.html#internalresetRollupResultCache) handler after the backfilling is complete. This will reset the query cache, which could contain incomplete data cached during the backfilling.

 Yet another solution is to increase the `-search.cacheTimestampOffset` flag value in order to disable caching
 for data with timestamps close to the current time. Single-node VictoriaMetrics automatically resets response
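
A minimal sketch of that cache-reset call (assuming a single-node instance listening on the default localhost:8428) could look like this:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// resetRollupResultCache asks a VictoriaMetrics instance to drop its
// rollup result cache, e.g. right after a backfilling job finishes.
func resetRollupResultCache(baseURL string) error {
	resp, err := http.Get(baseURL + "/internal/resetRollupResultCache")
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if _, err := io.Copy(io.Discard, resp.Body); err != nil {
		return err
	}
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := resetRollupResultCache("http://localhost:8428"); err != nil {
		panic(err)
	}
	fmt.Println("rollup result cache has been reset")
}
```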
@@ -2128,7 +2168,6 @@ Feel free asking any questions regarding VictoriaMetrics:
 * [reddit](https://www.reddit.com/r/VictoriaMetrics/)
 * [telegram-en](https://t.me/VictoriaMetrics_en)
 * [telegram-ru](https://t.me/VictoriaMetrics_ru1)
-* [articles and talks about VictoriaMetrics in Russian](https://github.com/denisgolius/victoriametrics-ru-links)
 * [google groups](https://groups.google.com/forum/#!forum/victorametrics-users)

 If you like VictoriaMetrics and want to contribute, then we need the following:
@@ -2209,7 +2248,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -denyQueryTracing
     	Whether to disable the ability to trace queries. See https://docs.victoriametrics.com/#query-tracing
   -downsampling.period array
-    	Comma-separated downsampling periods in the format 'offset:period'. For example, '30d:10m' instructs to leave a single sample per 10 minutes for samples older than 30 days. See https://docs.victoriametrics.com/#downsampling for details. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
+    	Comma-separated downsampling periods in the format 'offset:period'. For example, '30d:10m' instructs to leave a single sample per 10 minutes for samples older than 30 days. When setting multiple downsampling periods, it is necessary for the periods to be multiples of each other. See https://docs.victoriametrics.com/#downsampling for details. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
     	Supports an array of values separated by comma or specified via multiple flags.
   -dryRun
     	Whether to check config files without running VictoriaMetrics. The following config files are checked: -promscrape.config, -relabelConfig and -streamAggr.config. Unknown config entries aren't allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse=false command-line flag
@@ -2256,7 +2295,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
   -httpListenAddr string
     	TCP address to listen for http connections. See also -httpListenAddr.useProxyProtocol (default ":8428")
   -httpListenAddr.useProxyProtocol
-    	Whether to use proxy protocol for connections accepted at -httpListenAddr . See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt
+    	Whether to use proxy protocol for connections accepted at -httpListenAddr . See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing
   -import.maxLineLen size
     	The maximum length in bytes of a single line accepted by /api/v1/import; the line length can be limited with 'max_rows_per_line' query arg passed to /api/v1/export
     	Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 104857600)
@@ -2367,6 +2406,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
     	Wait time used by Consul service discovery. Default value is used if not set
   -promscrape.consulSDCheckInterval duration
     	Interval for checking for changes in Consul. This works only if consul_sd_configs is configured in '-promscrape.config' file. See https://docs.victoriametrics.com/sd_configs.html#consul_sd_configs for details (default 30s)
+  -promscrape.consulagentSDCheckInterval duration
+    	Interval for checking for changes in Consul Agent. This works only if consulagent_sd_configs is configured in '-promscrape.config' file. See https://docs.victoriametrics.com/sd_configs.html#consulagent_sd_configs for details (default 30s)
   -promscrape.digitaloceanSDCheckInterval duration
     	Interval for checking for changes in digital ocean. This works only if digitalocean_sd_configs is configured in '-promscrape.config' file. See https://docs.victoriametrics.com/sd_configs.html#digitalocean_sd_configs for details (default 1m0s)
   -promscrape.disableCompression
@@ -2446,7 +2487,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
     	Retention filter in the format 'filter:retention'. For example, '{env="dev"}:3d' configures the retention for time series with env="dev" label to 3 days. See https://docs.victoriametrics.com/#retention-filters for details. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
     	Supports an array of values separated by comma or specified via multiple flags.
   -retentionPeriod value
-    	Data with timestamps outside the retentionPeriod is automatically deleted. See also -retentionFilter
+    	Data with timestamps outside the retentionPeriod is automatically deleted. The minimum retentionPeriod is 24h or 1d. See also -retentionFilter
     	The following optional suffixes are supported: h (hour), d (day), w (week), y (year). If suffix isn't set, then the duration is counted in months (default 1)
   -retentionTimezoneOffset duration
     	The offset for performing indexdb rotation. If set to 0, then the indexdb rotation is performed at 4am UTC time per each -retentionPeriod. If set to 2h, then the indexdb rotation is performed at 4am EET time (the timezone with +2h offset)
@@ -2477,6 +2518,10 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
     	The maximum number of time series, which can be returned from /federate. This option allows limiting memory usage (default 1000000)
   -search.maxGraphiteSeries int
     	The maximum number of time series, which can be scanned during queries to Graphite Render API. See https://docs.victoriametrics.com/#graphite-render-api-usage (default 300000)
+  -search.maxGraphiteTagKeys int
+    	The maximum number of tag keys returned from Graphite API, which returns tags. See https://docs.victoriametrics.com/#graphite-tags-api-usage (default 100000)
+  -search.maxGraphiteTagValues int
+    	The maximum number of tag values returned from Graphite API, which returns tag values. See https://docs.victoriametrics.com/#graphite-tags-api-usage (default 100000)
   -search.maxLookback duration
     	Synonym to -search.lookback-delta from Prometheus. The value is dynamically detected from interval between time series datapoints if not set. It can be overridden on per-query basis via max_lookback arg. See also '-search.maxStalenessInterval' flag, which has the same meaning due to historical reasons
   -search.maxMemoryPerQuery size

app/victoria-logs/Makefile (new file, 103 lines)
@@ -0,0 +1,103 @@
# All these commands must run from repository root.

victoria-logs:
	APP_NAME=victoria-logs $(MAKE) app-local

victoria-logs-race:
	APP_NAME=victoria-logs RACE=-race $(MAKE) app-local

victoria-logs-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker

victoria-logs-pure-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-pure

victoria-logs-linux-amd64-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-linux-amd64

victoria-logs-linux-arm-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-linux-arm

victoria-logs-linux-arm64-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-linux-arm64

victoria-logs-linux-ppc64le-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-linux-ppc64le

victoria-logs-linux-386-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-linux-386

victoria-logs-darwin-amd64-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-darwin-amd64

victoria-logs-darwin-arm64-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-darwin-arm64

victoria-logs-freebsd-amd64-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-freebsd-amd64

victoria-logs-openbsd-amd64-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-openbsd-amd64

victoria-logs-windows-amd64-prod:
	APP_NAME=victoria-logs $(MAKE) app-via-docker-windows-amd64

package-victoria-logs:
	APP_NAME=victoria-logs $(MAKE) package-via-docker

package-victoria-logs-pure:
	APP_NAME=victoria-logs $(MAKE) package-via-docker-pure

package-victoria-logs-amd64:
	APP_NAME=victoria-logs $(MAKE) package-via-docker-amd64

package-victoria-logs-arm:
	APP_NAME=victoria-logs $(MAKE) package-via-docker-arm

package-victoria-logs-arm64:
	APP_NAME=victoria-logs $(MAKE) package-via-docker-arm64

package-victoria-logs-ppc64le:
	APP_NAME=victoria-logs $(MAKE) package-via-docker-ppc64le

package-victoria-logs-386:
	APP_NAME=victoria-logs $(MAKE) package-via-docker-386

publish-victoria-logs:
	APP_NAME=victoria-logs $(MAKE) publish-via-docker

victoria-logs-linux-amd64:
	APP_NAME=victoria-logs CGO_ENABLED=1 GOOS=linux GOARCH=amd64 $(MAKE) app-local-goos-goarch

victoria-logs-linux-arm:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-local-goos-goarch

victoria-logs-linux-arm64:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-local-goos-goarch

victoria-logs-linux-ppc64le:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-local-goos-goarch

victoria-logs-linux-s390x:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=s390x $(MAKE) app-local-goos-goarch

victoria-logs-linux-386:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=linux GOARCH=386 $(MAKE) app-local-goos-goarch

victoria-logs-darwin-amd64:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 $(MAKE) app-local-goos-goarch

victoria-logs-darwin-arm64:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 $(MAKE) app-local-goos-goarch

victoria-logs-freebsd-amd64:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

victoria-logs-openbsd-amd64:
	APP_NAME=victoria-logs CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch

victoria-logs-windows-amd64:
	GOARCH=amd64 APP_NAME=victoria-logs $(MAKE) app-local-windows-goarch

victoria-logs-pure:
	APP_NAME=victoria-logs $(MAKE) app-local-pure

app/victoria-logs/deployment/Dockerfile (new file, 8 lines)
@@ -0,0 +1,8 @@
ARG base_image
FROM $base_image

EXPOSE 8428

ENTRYPOINT ["/victoria-logs-prod"]
ARG src_binary
COPY $src_binary ./victoria-logs-prod

app/victoria-logs/main.go (new file, 103 lines)
@@ -0,0 +1,103 @@
package main

import (
	"flag"
	"fmt"
	"net/http"
	"os"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlselect"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/pushmetrics"
)

var (
	httpListenAddr   = flag.String("httpListenAddr", ":9428", "TCP address to listen for http connections. See also -httpListenAddr.useProxyProtocol")
	useProxyProtocol = flag.Bool("httpListenAddr.useProxyProtocol", false, "Whether to use proxy protocol for connections accepted at -httpListenAddr . "+
		"See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . "+
		"With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing")
	gogc = flag.Int("gogc", 100, "GOGC to use. See https://tip.golang.org/doc/gc-guide")
)

func main() {
	// Write flags and help message to stdout, since it is easier to grep or pipe.
	flag.CommandLine.SetOutput(os.Stdout)
	flag.Usage = usage
	envflag.Parse()
	cgroup.SetGOGC(*gogc)
	buildinfo.Init()
	logger.Init()
	pushmetrics.Init()

	logger.Infof("starting VictoriaLogs at %q...", *httpListenAddr)
	startTime := time.Now()

	vlstorage.Init()
	vlselect.Init()
	vlinsert.Init()

	go httpserver.Serve(*httpListenAddr, *useProxyProtocol, requestHandler)
	logger.Infof("started VictoriaLogs in %.3f seconds; see https://docs.victoriametrics.com/VictoriaLogs/", time.Since(startTime).Seconds())

	sig := procutil.WaitForSigterm()
	logger.Infof("received signal %s", sig)

	logger.Infof("gracefully shutting down webservice at %q", *httpListenAddr)
	startTime = time.Now()
	if err := httpserver.Stop(*httpListenAddr); err != nil {
		logger.Fatalf("cannot stop the webservice: %s", err)
	}
	logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds())

	vlinsert.Stop()
	vlselect.Stop()
	vlstorage.Stop()

	fs.MustStopDirRemover()

	logger.Infof("the VictoriaLogs has been stopped in %.3f seconds", time.Since(startTime).Seconds())
}

func requestHandler(w http.ResponseWriter, r *http.Request) bool {
	if r.URL.Path == "/" {
		if r.Method != http.MethodGet {
			return false
		}
		w.Header().Add("Content-Type", "text/html; charset=utf-8")
		fmt.Fprintf(w, "<h2>Single-node VictoriaLogs</h2></br>")
		fmt.Fprintf(w, "See docs at <a href='https://docs.victoriametrics.com/VictoriaLogs/'>https://docs.victoriametrics.com/VictoriaLogs/</a></br>")
		fmt.Fprintf(w, "Useful endpoints:</br>")
		httpserver.WriteAPIHelp(w, [][2]string{
			{"select/vmui", "Web UI for VictoriaLogs"},
			{"metrics", "available service metrics"},
			{"flags", "command-line flags"},
		})
		return true
	}
	if vlinsert.RequestHandler(w, r) {
		return true
	}
	if vlselect.RequestHandler(w, r) {
		return true
	}
	return false
}

func usage() {
	const s = `
victoria-logs is a log management and analytics service.

See the docs at https://docs.victoriametrics.com/VictoriaLogs/
`
	flagutil.Usage(s)
}

app/victoria-logs/multiarch/Dockerfile (new file, 12 lines)
@@ -0,0 +1,12 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
ARG certs_image
ARG root_image
FROM $certs_image as certs
RUN apk update && apk upgrade && apk --update --no-cache add ca-certificates

FROM $root_image
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
EXPOSE 8428
ENTRYPOINT ["/victoria-logs-prod"]
ARG TARGETARCH
COPY victoria-logs-linux-${TARGETARCH}-prod ./victoria-logs-prod

app/vlinsert/elasticsearch/bulk_response.qtpl (new file, 20 lines)
@@ -0,0 +1,20 @@
{% stripspace %}

{% func BulkResponse(n int, tookMs int64) %}
{
	"took":{%dl tookMs %},
	"errors":false,
	"items":[
		{% for i := 0; i < n; i++ %}
			{
				"create":{
					"status":201
				}
			}
			{% if i+1 < n %},{% endif %}
		{% endfor %}
	]
}
{% endfunc %}

{% endstripspace %}
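
For reference, a hedged sketch of a caller of the generated API from bulk_response.qtpl.go (shown next): with `stripspace`, `BulkResponse(2, 5)` renders as compact JSON on one line. The import path here is assumed from the repository layout:

```go
package main

import (
	"fmt"

	// Assumed import path for the generated template package shown below.
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/elasticsearch"
)

func main() {
	// BulkResponse(2, 5) reports two successfully created items and a took time of 5ms:
	// {"took":5,"errors":false,"items":[{"create":{"status":201}},{"create":{"status":201}}]}
	fmt.Println(elasticsearch.BulkResponse(2, 5))
}
```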

app/vlinsert/elasticsearch/bulk_response.qtpl.go (new file, 69 lines)
@@ -0,0 +1,69 @@
// Code generated by qtc from "bulk_response.qtpl". DO NOT EDIT.
// See https://github.com/valyala/quicktemplate for details.

//line app/vlinsert/elasticsearch/bulk_response.qtpl:3
package elasticsearch

//line app/vlinsert/elasticsearch/bulk_response.qtpl:3
import (
	qtio422016 "io"

	qt422016 "github.com/valyala/quicktemplate"
)

//line app/vlinsert/elasticsearch/bulk_response.qtpl:3
var (
	_ = qtio422016.Copy
	_ = qt422016.AcquireByteBuffer
)

//line app/vlinsert/elasticsearch/bulk_response.qtpl:3
func StreamBulkResponse(qw422016 *qt422016.Writer, n int, tookMs int64) {
//line app/vlinsert/elasticsearch/bulk_response.qtpl:3
	qw422016.N().S(`{"took":`)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:5
	qw422016.N().DL(tookMs)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:5
	qw422016.N().S(`,"errors":false,"items":[`)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:8
	for i := 0; i < n; i++ {
//line app/vlinsert/elasticsearch/bulk_response.qtpl:8
		qw422016.N().S(`{"create":{"status":201}}`)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:14
		if i+1 < n {
//line app/vlinsert/elasticsearch/bulk_response.qtpl:14
			qw422016.N().S(`,`)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:14
		}
//line app/vlinsert/elasticsearch/bulk_response.qtpl:15
	}
//line app/vlinsert/elasticsearch/bulk_response.qtpl:15
	qw422016.N().S(`]}`)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
}

//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
func WriteBulkResponse(qq422016 qtio422016.Writer, n int, tookMs int64) {
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	StreamBulkResponse(qw422016, n, tookMs)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	qt422016.ReleaseWriter(qw422016)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
}

//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
func BulkResponse(n int, tookMs int64) string {
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	WriteBulkResponse(qb422016, n, tookMs)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	qs422016 := string(qb422016.B)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
	return qs422016
//line app/vlinsert/elasticsearch/bulk_response.qtpl:18
}

app/vlinsert/elasticsearch/elasticsearch.go (new file, 256 lines)
@ -0,0 +1,256 @@
|
|||
package elasticsearch
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bufferedwriter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logjson"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
// RequestHandler processes ElasticSearch insert requests
|
||||
func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
|
||||
w.Header().Add("Content-Type", "application/json")
|
||||
// This header is needed for Logstash
|
||||
w.Header().Set("X-Elastic-Product", "Elasticsearch")
|
||||
|
||||
if strings.HasPrefix(path, "/_ilm/policy") {
|
||||
// Return fake response for ElasticSearch ilm request.
|
||||
fmt.Fprintf(w, `{}`)
|
||||
return true
|
||||
}
|
||||
if strings.HasPrefix(path, "/_index_template") {
|
||||
// Return fake response for ElasticSearch index template request.
|
||||
fmt.Fprintf(w, `{}`)
|
||||
return true
|
||||
}
|
||||
	if strings.HasPrefix(path, "/_ingest") {
		// Return fake response for ElasticSearch ingest pipeline request.
		// See: https://www.elastic.co/guide/en/elasticsearch/reference/8.8/put-pipeline-api.html
		fmt.Fprintf(w, `{}`)
		return true
	}
	if strings.HasPrefix(path, "/_nodes") {
		// Return fake response for ElasticSearch nodes discovery request.
		// See: https://www.elastic.co/guide/en/elasticsearch/reference/8.8/cluster.html
		fmt.Fprintf(w, `{}`)
		return true
	}
	switch path {
	case "/":
		switch r.Method {
		case http.MethodGet:
			// Return fake response for ElasticSearch ping request.
			// See the latest available version for ElasticSearch at https://github.com/elastic/elasticsearch/releases
			fmt.Fprintf(w, `{
				"version": {
					"number": "8.8.0"
				}
			}`)
		case http.MethodHead:
			// Return empty response for Logstash ping request.
		}

		return true
	case "/_license":
		// Return fake response for ElasticSearch license request.
		fmt.Fprintf(w, `{
			"license": {
				"uid": "cbff45e7-c553-41f7-ae4f-9205eabd80xx",
				"type": "oss",
				"status": "active",
				"expiry_date_in_millis" : 4000000000000
			}
		}`)
		return true
	case "/_bulk":
		startTime := time.Now()
		bulkRequestsTotal.Inc()

		cp, err := insertutils.GetCommonParams(r)
		if err != nil {
			httpserver.Errorf(w, r, "%s", err)
			return true
		}
		lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields)
		processLogMessage := cp.GetProcessLogMessageFunc(lr)
		isGzip := r.Header.Get("Content-Encoding") == "gzip"
		n, err := readBulkRequest(r.Body, isGzip, cp.TimeField, cp.MsgField, processLogMessage)
		if err != nil {
			logger.Warnf("cannot decode log message #%d in /_bulk request: %s", n, err)
			return true
		}
		vlstorage.MustAddRows(lr)
		logstorage.PutLogRows(lr)

		tookMs := time.Since(startTime).Milliseconds()
		bw := bufferedwriter.Get(w)
		defer bufferedwriter.Put(bw)
		WriteBulkResponse(bw, n, tookMs)
		_ = bw.Flush()
		return true
	default:
		return false
	}
}

var (
	bulkRequestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/elasticsearch/_bulk"}`)
)

func readBulkRequest(r io.Reader, isGzip bool, timeField, msgField string,
	processLogMessage func(timestamp int64, fields []logstorage.Field),
) (int, error) {
	// See https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html

	if isGzip {
		zr, err := common.GetGzipReader(r)
		if err != nil {
			return 0, fmt.Errorf("cannot read gzipped _bulk request: %w", err)
		}
		defer common.PutGzipReader(zr)
		r = zr
	}

	wcr := writeconcurrencylimiter.GetReader(r)
	defer writeconcurrencylimiter.PutReader(wcr)

	lb := lineBufferPool.Get()
	defer lineBufferPool.Put(lb)

	lb.B = bytesutil.ResizeNoCopyNoOverallocate(lb.B, insertutils.MaxLineSizeBytes.IntN())
	sc := bufio.NewScanner(wcr)
	sc.Buffer(lb.B, len(lb.B))

	n := 0
	nCheckpoint := 0
	for {
		ok, err := readBulkLine(sc, timeField, msgField, processLogMessage)
		wcr.DecConcurrency()
		if err != nil || !ok {
			rowsIngestedTotal.Add(n - nCheckpoint)
			return n, err
		}
		n++
		if batchSize := n - nCheckpoint; batchSize >= 1000 {
			rowsIngestedTotal.Add(batchSize)
			nCheckpoint = n
		}
	}
}

var lineBufferPool bytesutil.ByteBufferPool

var rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="elasticsearch_bulk"}`)

func readBulkLine(sc *bufio.Scanner, timeField, msgField string,
	processLogMessage func(timestamp int64, fields []logstorage.Field),
) (bool, error) {
	var line []byte

	// Read the command, must be "create" or "index"
	for len(line) == 0 {
		if !sc.Scan() {
			if err := sc.Err(); err != nil {
				if errors.Is(err, bufio.ErrTooLong) {
					return false, fmt.Errorf(`cannot read "create" or "index" command, since its size exceeds -insert.maxLineSizeBytes=%d`,
						insertutils.MaxLineSizeBytes.IntN())
				}
				return false, err
			}
			return false, nil
		}
		line = sc.Bytes()
	}
	lineStr := bytesutil.ToUnsafeString(line)
	if !strings.Contains(lineStr, `"create"`) && !strings.Contains(lineStr, `"index"`) {
		return false, fmt.Errorf(`unexpected command %q; expecting "create" or "index"`, line)
	}

	// Decode log message
	if !sc.Scan() {
		if err := sc.Err(); err != nil {
			if errors.Is(err, bufio.ErrTooLong) {
				return false, fmt.Errorf("cannot read log message, since its size exceeds -insert.maxLineSizeBytes=%d", insertutils.MaxLineSizeBytes.IntN())
			}
			return false, err
		}
		return false, fmt.Errorf(`missing log message after the "create" or "index" command`)
	}
	line = sc.Bytes()
	p := logjson.GetParser()
	if err := p.ParseLogMessage(line); err != nil {
		return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
	}

	timestamp, err := extractTimestampFromFields(timeField, p.Fields)
	if err != nil {
		return false, fmt.Errorf("cannot parse timestamp: %w", err)
	}
	p.RenameField(msgField, "_msg")
	processLogMessage(timestamp, p.Fields)
	logjson.PutParser(p)
	return true, nil
}

func extractTimestampFromFields(timeField string, fields []logstorage.Field) (int64, error) {
	for i := range fields {
		f := &fields[i]
		if f.Name != timeField {
			continue
		}
		timestamp, err := parseElasticsearchTimestamp(f.Value)
		if err != nil {
			return 0, err
		}
		f.Value = ""
		return timestamp, nil
	}
	return time.Now().UnixNano(), nil
}

func parseElasticsearchTimestamp(s string) (int64, error) {
	if len(s) < len("YYYY-MM-DD") || s[len("YYYY")] != '-' {
		// Try parsing timestamp in milliseconds
		n, err := strconv.ParseInt(s, 10, 64)
		if err != nil {
			return 0, fmt.Errorf("cannot parse timestamp in milliseconds from %q: %w", s, err)
		}
		if n > int64(math.MaxInt64)/1e6 {
			return 0, fmt.Errorf("too big timestamp in milliseconds: %d; mustn't exceed %d", n, int64(math.MaxInt64)/1e6)
		}
		if n < int64(math.MinInt64)/1e6 {
			return 0, fmt.Errorf("too small timestamp in milliseconds: %d; must be bigger than %d", n, int64(math.MinInt64)/1e6)
		}
		n *= 1e6
		return n, nil
	}
	if len(s) == len("YYYY-MM-DD") {
		t, err := time.Parse("2006-01-02", s)
		if err != nil {
			return 0, fmt.Errorf("cannot parse date %q: %w", s, err)
		}
		return t.UnixNano(), nil
	}
	t, err := time.Parse(time.RFC3339, s)
	if err != nil {
		return 0, fmt.Errorf("cannot parse timestamp %q: %w", s, err)
	}
	return t.UnixNano(), nil
}
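
For orientation, the `_bulk` handler above consumes Elasticsearch's newline-delimited bulk format: a `{"create":{...}}` or `{"index":{...}}` command line followed by one JSON log entry, and timestamps may be Unix milliseconds, a `YYYY-MM-DD` date, or an RFC3339 string per parseElasticsearchTimestamp. A minimal client sketch; the listen address localhost:9428 is an assumption and is not pinned down by this diff, while the path and query args come from the handlers above:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Each entry is a command line plus a document line (Elasticsearch bulk format).
	// The second entry uses a milliseconds timestamp to exercise the numeric branch.
	body := `{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:11.735Z","message":"foobar"}
{"index":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"1686026892735","message":"baz"}
`
	// _time_field/_msg_field are read by insertutils.GetCommonParams; host/port are assumed.
	url := "http://localhost:9428/insert/elasticsearch/_bulk?_time_field=@timestamp&_msg_field=message"
	resp, err := http.Post(url, "application/json", strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // the handler replies with the WriteBulkResponse JSON body
}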

129  app/vlinsert/elasticsearch/elasticsearch_test.go  Normal file
@@ -0,0 +1,129 @@
package elasticsearch

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"reflect"
	"strings"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

func TestReadBulkRequestFailure(t *testing.T) {
	f := func(data string) {
		t.Helper()

		processLogMessage := func(timestamp int64, fields []logstorage.Field) {
			t.Fatalf("unexpected call to processLogMessage with timestamp=%d, fields=%s", timestamp, fields)
		}

		r := bytes.NewBufferString(data)
		rows, err := readBulkRequest(r, false, "_time", "_msg", processLogMessage)
		if err == nil {
			t.Fatalf("expecting non-empty error")
		}
		if rows != 0 {
			t.Fatalf("unexpected non-zero rows=%d", rows)
		}
	}
	f("foobar")
	f(`{}`)
	f(`{"create":{}}`)
	f(`{"creat":{}}
{}`)
	f(`{"create":{}}
foobar`)
}

func TestReadBulkRequestSuccess(t *testing.T) {
	f := func(data, timeField, msgField string, rowsExpected int, timestampsExpected []int64, resultExpected string) {
		t.Helper()

		var timestamps []int64
		var result string
		processLogMessage := func(timestamp int64, fields []logstorage.Field) {
			timestamps = append(timestamps, timestamp)

			a := make([]string, len(fields))
			for i, f := range fields {
				a[i] = fmt.Sprintf("%q:%q", f.Name, f.Value)
			}
			s := "{" + strings.Join(a, ",") + "}\n"
			result += s
		}

		// Read the request without compression
		r := bytes.NewBufferString(data)
		rows, err := readBulkRequest(r, false, timeField, msgField, processLogMessage)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if rows != rowsExpected {
			t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
		}

		if !reflect.DeepEqual(timestamps, timestampsExpected) {
			t.Fatalf("unexpected timestamps;\ngot\n%d\nwant\n%d", timestamps, timestampsExpected)
		}
		if result != resultExpected {
			t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
		}

		// Read the request with compression
		timestamps = nil
		result = ""
		compressedData := compressData(data)
		r = bytes.NewBufferString(compressedData)
		rows, err = readBulkRequest(r, true, timeField, msgField, processLogMessage)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if rows != rowsExpected {
			t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
		}

		if !reflect.DeepEqual(timestamps, timestampsExpected) {
			t.Fatalf("unexpected timestamps;\ngot\n%d\nwant\n%d", timestamps, timestampsExpected)
		}
		if result != resultExpected {
			t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
		}
	}

	// Verify an empty data
	f("", "_time", "_msg", 0, nil, "")
	f("\n", "_time", "_msg", 0, nil, "")
	f("\n\n", "_time", "_msg", 0, nil, "")

	// Verify non-empty data
	data := `{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:12.735Z","message":"baz"}
{"index":{"_index":"filebeat-8.8.0"}}
{"message":"xyz","@timestamp":"2023-06-06T04:48:13.735Z","x":"y"}
`
	timeField := "@timestamp"
	msgField := "message"
	rowsExpected := 3
	timestampsExpected := []int64{1686026891735000000, 1686026892735000000, 1686026893735000000}
	resultExpected := `{"@timestamp":"","log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
{"@timestamp":"","_msg":"baz"}
{"_msg":"xyz","@timestamp":"","x":"y"}
`
	f(data, timeField, msgField, rowsExpected, timestampsExpected, resultExpected)
}

func compressData(s string) string {
	var bb bytes.Buffer
	zw := gzip.NewWriter(&bb)
	if _, err := zw.Write([]byte(s)); err != nil {
		panic(fmt.Errorf("unexpected error when compressing data: %s", err))
	}
	if err := zw.Close(); err != nil {
		panic(fmt.Errorf("unexpected error when closing gzip writer: %s", err))
	}
	return bb.String()
}

50  app/vlinsert/elasticsearch/elasticsearch_timing_test.go  Normal file
@@ -0,0 +1,50 @@
package elasticsearch

import (
	"bytes"
	"fmt"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

func BenchmarkReadBulkRequest(b *testing.B) {
	b.Run("gzip:off", func(b *testing.B) {
		benchmarkReadBulkRequest(b, false)
	})
	b.Run("gzip:on", func(b *testing.B) {
		benchmarkReadBulkRequest(b, true)
	})
}

func benchmarkReadBulkRequest(b *testing.B, isGzip bool) {
	data := `{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
{"create":{"_index":"filebeat-8.8.0"}}
{"@timestamp":"2023-06-06T04:48:12.735Z","message":"baz"}
{"create":{"_index":"filebeat-8.8.0"}}
{"message":"xyz","@timestamp":"2023-06-06T04:48:13.735Z","x":"y"}
`
	if isGzip {
		data = compressData(data)
	}
	dataBytes := bytesutil.ToUnsafeBytes(data)

	timeField := "@timestamp"
	msgField := "message"
	processLogMessage := func(timestamp int64, fields []logstorage.Field) {}

	b.ReportAllocs()
	b.SetBytes(int64(len(data)))
	b.RunParallel(func(pb *testing.PB) {
		r := &bytes.Reader{}
		for pb.Next() {
			r.Reset(dataBytes)
			_, err := readBulkRequest(r, isGzip, timeField, msgField, processLogMessage)
			if err != nil {
				panic(fmt.Errorf("unexpected error: %s", err))
			}
		}
	})
}

91  app/vlinsert/insertutils/common_params.go  Normal file
@@ -0,0 +1,91 @@
package insertutils

import (
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/metrics"
)

// CommonParams contains common HTTP parameters used by log ingestion APIs.
//
// See https://docs.victoriametrics.com/VictoriaLogs/data-ingestion/#http-parameters
type CommonParams struct {
	TenantID     logstorage.TenantID
	TimeField    string
	MsgField     string
	StreamFields []string
	IgnoreFields []string

	Debug           bool
	DebugRequestURI string
	DebugRemoteAddr string
}

// GetCommonParams returns CommonParams from r.
func GetCommonParams(r *http.Request) (*CommonParams, error) {
	// Extract tenantID
	tenantID, err := logstorage.GetTenantIDFromRequest(r)
	if err != nil {
		return nil, err
	}

	// Extract time field name from _time_field query arg
	var timeField = "_time"
	if tf := r.FormValue("_time_field"); tf != "" {
		timeField = tf
	}

	// Extract message field name from _msg_field query arg
	var msgField = ""
	if msgf := r.FormValue("_msg_field"); msgf != "" {
		msgField = msgf
	}

	streamFields := httputils.GetArray(r, "_stream_fields")
	ignoreFields := httputils.GetArray(r, "ignore_fields")

	debug := httputils.GetBool(r, "debug")
	debugRequestURI := ""
	debugRemoteAddr := ""
	if debug {
		debugRequestURI = httpserver.GetRequestURI(r)
		debugRemoteAddr = httpserver.GetQuotedRemoteAddr(r)
	}

	cp := &CommonParams{
		TenantID:        tenantID,
		TimeField:       timeField,
		MsgField:        msgField,
		StreamFields:    streamFields,
		IgnoreFields:    ignoreFields,
		Debug:           debug,
		DebugRequestURI: debugRequestURI,
		DebugRemoteAddr: debugRemoteAddr,
	}
	return cp, nil
}

// GetProcessLogMessageFunc returns a function, which adds parsed log messages to lr.
func (cp *CommonParams) GetProcessLogMessageFunc(lr *logstorage.LogRows) func(timestamp int64, fields []logstorage.Field) {
	return func(timestamp int64, fields []logstorage.Field) {
		lr.MustAdd(cp.TenantID, timestamp, fields)
		if cp.Debug {
			s := lr.GetRowString(0)
			lr.ResetKeepSettings()
			logger.Infof("remoteAddr=%s; requestURI=%s; ignoring log entry because of `debug` query arg: %s", cp.DebugRemoteAddr, cp.DebugRequestURI, s)
			rowsDroppedTotal.Inc()
			return
		}
		if lr.NeedFlush() {
			vlstorage.MustAddRows(lr)
			lr.ResetKeepSettings()
		}
	}
}

var rowsDroppedTotal = metrics.NewCounter(`vl_rows_dropped_total{reason="debug"}`)
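
In other words, every per-request ingestion setting travels as a query arg. A hedged sketch of a URL wired to each field GetCommonParams fills in; the host and port are assumptions, the arg names come from the code above:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Hypothetical ingestion URL; each query arg maps to a CommonParams field.
	u := url.URL{
		Scheme: "http",
		Host:   "localhost:9428", // assumed listen address
		Path:   "/insert/jsonline",
	}
	q := u.Query()
	q.Set("_time_field", "@timestamp")  // -> cp.TimeField
	q.Set("_msg_field", "message")      // -> cp.MsgField
	q.Set("_stream_fields", "host,app") // -> cp.StreamFields
	q.Set("ignore_fields", "password")  // -> cp.IgnoreFields
	q.Set("debug", "1")                 // -> cp.Debug: log and drop entries instead of storing them
	u.RawQuery = q.Encode()
	fmt.Println(u.String())
}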

10  app/vlinsert/insertutils/flags.go  Normal file
@@ -0,0 +1,10 @@
package insertutils

import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
)

var (
	// MaxLineSizeBytes is the maximum length of a single line for /insert/* handlers
	MaxLineSizeBytes = flagutil.NewBytes("insert.maxLineSizeBytes", 256*1024, "The maximum size of a single line, which can be read by /insert/* handlers")
)

141  app/vlinsert/jsonline/jsonline.go  Normal file
@@ -0,0 +1,141 @@
package jsonline

import (
	"bufio"
	"errors"
	"fmt"
	"net/http"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logjson"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
	"github.com/VictoriaMetrics/metrics"
)

// RequestHandler processes jsonline insert requests
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
	w.Header().Add("Content-Type", "application/json")

	if r.Method != "POST" {
		w.WriteHeader(http.StatusMethodNotAllowed)
		return true
	}

	requestsTotal.Inc()

	cp, err := insertutils.GetCommonParams(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return true
	}
	lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields)
	processLogMessage := cp.GetProcessLogMessageFunc(lr)

	reader := r.Body
	if r.Header.Get("Content-Encoding") == "gzip" {
		zr, err := common.GetGzipReader(reader)
		if err != nil {
			logger.Errorf("cannot read gzipped jsonline request: %s", err)
			return true
		}
		defer common.PutGzipReader(zr)
		reader = zr
	}

	wcr := writeconcurrencylimiter.GetReader(reader)
	defer writeconcurrencylimiter.PutReader(wcr)

	lb := lineBufferPool.Get()
	defer lineBufferPool.Put(lb)

	lb.B = bytesutil.ResizeNoCopyNoOverallocate(lb.B, insertutils.MaxLineSizeBytes.IntN())
	sc := bufio.NewScanner(wcr)
	sc.Buffer(lb.B, len(lb.B))

	n := 0
	for {
		ok, err := readLine(sc, cp.TimeField, cp.MsgField, processLogMessage)
		wcr.DecConcurrency()
		if err != nil {
			logger.Errorf("cannot read line #%d in /jsonline request: %s", n, err)
			break
		}
		if !ok {
			break
		}
		n++
		rowsIngestedTotal.Inc()
	}

	vlstorage.MustAddRows(lr)
	logstorage.PutLogRows(lr)

	return true
}

func readLine(sc *bufio.Scanner, timeField, msgField string, processLogMessage func(timestamp int64, fields []logstorage.Field)) (bool, error) {
	var line []byte
	for len(line) == 0 {
		if !sc.Scan() {
			if err := sc.Err(); err != nil {
				if errors.Is(err, bufio.ErrTooLong) {
					return false, fmt.Errorf(`cannot read json line, since its size exceeds -insert.maxLineSizeBytes=%d`, insertutils.MaxLineSizeBytes.IntN())
				}
				return false, err
			}
			return false, nil
		}
		line = sc.Bytes()
	}

	p := logjson.GetParser()
	if err := p.ParseLogMessage(line); err != nil {
		return false, fmt.Errorf("cannot parse json-encoded log entry: %w", err)
	}
	timestamp, err := extractTimestampFromFields(timeField, p.Fields)
	if err != nil {
		return false, fmt.Errorf("cannot parse timestamp: %w", err)
	}
	p.RenameField(msgField, "_msg")
	processLogMessage(timestamp, p.Fields)
	logjson.PutParser(p)
	return true, nil
}

func extractTimestampFromFields(timeField string, fields []logstorage.Field) (int64, error) {
	for i := range fields {
		f := &fields[i]
		if f.Name != timeField {
			continue
		}
		timestamp, err := parseISO8601Timestamp(f.Value)
		if err != nil {
			return 0, err
		}
		f.Value = ""
		return timestamp, nil
	}
	return time.Now().UnixNano(), nil
}

func parseISO8601Timestamp(s string) (int64, error) {
	t, err := time.Parse(time.RFC3339, s)
	if err != nil {
		return 0, fmt.Errorf("cannot parse timestamp %q: %w", s, err)
	}
	return t.UnixNano(), nil
}

var lineBufferPool bytesutil.ByteBufferPool

var (
	requestsTotal     = metrics.NewCounter(`vl_http_requests_total{path="/insert/jsonline"}`)
	rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="jsonline"}`)
)
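
Putting the handler above to work: the body is one JSON object per line, and a gzip Content-Encoding header triggers the decompression branch. A sketch under the same assumed address as the earlier examples:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"net/http"
)

func main() {
	lines := `{"@timestamp":"2023-06-06T04:48:11.735Z","message":"foobar"}
{"@timestamp":"2023-06-06T04:48:12.735Z","message":"baz"}
`
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte(lines)); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Address and port are assumptions; path and query args come from the code above.
	req, err := http.NewRequest("POST", "http://localhost:9428/insert/jsonline?_time_field=@timestamp&_msg_field=message", &buf)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Encoding", "gzip") // exercises the gzip branch in RequestHandler
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}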

70  app/vlinsert/jsonline/jsonline_test.go  Normal file
@@ -0,0 +1,70 @@
package jsonline

import (
	"bufio"
	"bytes"
	"fmt"
	"reflect"
	"strings"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

func TestReadLineSuccess(t *testing.T) {
	f := func(data, timeField, msgField string, rowsExpected int, timestampsExpected []int64, resultExpected string) {
		t.Helper()

		var timestamps []int64
		var result string
		processLogMessage := func(timestamp int64, fields []logstorage.Field) {
			timestamps = append(timestamps, timestamp)

			a := make([]string, len(fields))
			for i, f := range fields {
				a[i] = fmt.Sprintf("%q:%q", f.Name, f.Value)
			}
			s := "{" + strings.Join(a, ",") + "}\n"
			result += s
		}

		// Read the request without compression
		r := bytes.NewBufferString(data)
		sc := bufio.NewScanner(r)
		rows := 0
		for {
			ok, err := readLine(sc, timeField, msgField, processLogMessage)
			if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}
			if !ok {
				break
			}
			rows++
		}
		if rows != rowsExpected {
			t.Fatalf("unexpected rows read; got %d; want %d", rows, rowsExpected)
		}

		if !reflect.DeepEqual(timestamps, timestampsExpected) {
			t.Fatalf("unexpected timestamps;\ngot\n%d\nwant\n%d", timestamps, timestampsExpected)
		}
		if result != resultExpected {
			t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, resultExpected)
		}
	}

	// Verify non-empty data
	data := `{"@timestamp":"2023-06-06T04:48:11.735Z","log":{"offset":71770,"file":{"path":"/var/log/auth.log"}},"message":"foobar"}
{"@timestamp":"2023-06-06T04:48:12.735Z","message":"baz"}
{"message":"xyz","@timestamp":"2023-06-06T04:48:13.735Z","x":"y"}
`
	timeField := "@timestamp"
	msgField := "message"
	rowsExpected := 3
	timestampsExpected := []int64{1686026891735000000, 1686026892735000000, 1686026893735000000}
	resultExpected := `{"@timestamp":"","log.offset":"71770","log.file.path":"/var/log/auth.log","_msg":"foobar"}
{"@timestamp":"","_msg":"baz"}
{"_msg":"xyz","@timestamp":"","x":"y"}
`
	f(data, timeField, msgField, rowsExpected, timestampsExpected, resultExpected)
}

39  app/vlinsert/main.go  Normal file
@@ -0,0 +1,39 @@
package vlinsert

import (
	"net/http"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/elasticsearch"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/jsonline"
)

// Init initializes vlinsert
func Init() {
}

// Stop stops vlinsert
func Stop() {
}

// RequestHandler handles insert requests for VictoriaLogs
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
	path := r.URL.Path
	if !strings.HasPrefix(path, "/insert/") {
		// Skip requests, which do not start with /insert/, since these aren't our requests.
		return false
	}
	path = strings.TrimPrefix(path, "/insert")
	path = strings.ReplaceAll(path, "//", "/")

	if path == "/jsonline" {
		return jsonline.RequestHandler(w, r)
	}
	switch {
	case strings.HasPrefix(path, "/elasticsearch/"):
		path = strings.TrimPrefix(path, "/elasticsearch")
		return elasticsearch.RequestHandler(path, w, r)
	default:
		return false
	}
}
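
The double-slash normalization above means `/insert//jsonline` and `/insert/jsonline` route identically; a standalone sketch of those two lines in isolation:

package main

import (
	"fmt"
	"strings"
)

// Mirrors the normalization in RequestHandler: strip the /insert prefix and
// collapse doubled slashes before routing.
func normalize(path string) string {
	path = strings.TrimPrefix(path, "/insert")
	return strings.ReplaceAll(path, "//", "/")
}

func main() {
	fmt.Println(normalize("/insert//elasticsearch/_bulk")) // /elasticsearch/_bulk
	fmt.Println(normalize("/insert/jsonline"))             // /jsonline
}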

56  app/vlselect/logsql/logsql.go  Normal file
@@ -0,0 +1,56 @@
package logsql

import (
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

var (
	maxSortBufferSize = flagutil.NewBytes("select.maxSortBufferSize", 1024*1024, "Query results from /select/logsql/query are automatically sorted by _time "+
		"if their summary size doesn't exceed this value; otherwise query results are streamed in the response without sorting; "+
		"too big value for this flag may result in high memory usage, since the sorting is performed in memory")
)

// ProcessQueryRequest handles /select/logsql/query request
func ProcessQueryRequest(w http.ResponseWriter, r *http.Request, stopCh <-chan struct{}) {
	// Extract tenantID
	tenantID, err := logstorage.GetTenantIDFromRequest(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	qStr := r.FormValue("query")
	q, err := logstorage.ParseQuery(qStr)
	if err != nil {
		httpserver.Errorf(w, r, "cannot parse query [%s]: %s", qStr, err)
		return
	}
	w.Header().Set("Content-Type", "application/stream+json; charset=utf-8")

	sw := getSortWriter()
	sw.Init(w, maxSortBufferSize.IntN())
	tenantIDs := []logstorage.TenantID{tenantID}
	vlstorage.RunQuery(tenantIDs, q, stopCh, func(columns []logstorage.BlockColumn) {
		if len(columns) == 0 {
			return
		}
		rowsCount := len(columns[0].Values)

		bb := blockResultPool.Get()
		for rowIdx := 0; rowIdx < rowsCount; rowIdx++ {
			WriteJSONRow(bb, columns, rowIdx)
		}
		sw.MustWrite(bb.B)
		blockResultPool.Put(bb)
	})
	sw.FinalFlush()
	putSortWriter(sw)
}

var blockResultPool bytesutil.ByteBufferPool
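
A client-side sketch of hitting this endpoint; the address is an assumption, the `query` form value is the LogsQL expression that ParseQuery receives (a plain word filter here), and the response is a stream of JSON lines produced by WriteJSONRow:

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Assumed listen address; endpoint and form value match ProcessQueryRequest above.
	resp, err := http.PostForm("http://localhost:9428/select/logsql/query",
		url.Values{"query": {"error"}})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", body) // one JSON object per matching log entry
}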

41  app/vlselect/logsql/query_response.qtpl  Normal file
@@ -0,0 +1,41 @@
{% import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
) %}

{% stripspace %}

// JSONRow creates JSON row from the given fields.
{% func JSONRow(columns []logstorage.BlockColumn, rowIdx int) %}
{
	{% code c := &columns[0] %}
	{%q= c.Name %}:{%q= c.Values[rowIdx] %}
	{% code columns = columns[1:] %}
	{% for colIdx := range columns %}
		{% code c := &columns[colIdx] %}
		,{%q= c.Name %}:{%q= c.Values[rowIdx] %}
	{% endfor %}
}{% newline %}
{% endfunc %}

// JSONRows prints formatted rows
{% func JSONRows(rows [][]logstorage.Field) %}
	{% if len(rows) == 0 %}
		{% return %}
	{% endif %}
	{% for _, fields := range rows %}
		{
		{% if len(fields) > 0 %}
			{% code
				f := fields[0]
				fields = fields[1:]
			%}
			{%q= f.Name %}:{%q= f.Value %}
			{% for _, f := range fields %}
				,{%q= f.Name %}:{%q= f.Value %}
			{% endfor %}
		{% endif %}
		}{% newline %}
	{% endfor %}
{% endfunc %}

{% endstripspace %}

166  app/vlselect/logsql/query_response.qtpl.go  Normal file
@@ -0,0 +1,166 @@
// Code generated by qtc from "query_response.qtpl". DO NOT EDIT.
// See https://github.com/valyala/quicktemplate for details.

//line app/vlselect/logsql/query_response.qtpl:1
package logsql

//line app/vlselect/logsql/query_response.qtpl:1
import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

// JSONRow creates JSON row from the given fields.

//line app/vlselect/logsql/query_response.qtpl:8
import (
	qtio422016 "io"

	qt422016 "github.com/valyala/quicktemplate"
)

//line app/vlselect/logsql/query_response.qtpl:8
var (
	_ = qtio422016.Copy
	_ = qt422016.AcquireByteBuffer
)

//line app/vlselect/logsql/query_response.qtpl:8
func StreamJSONRow(qw422016 *qt422016.Writer, columns []logstorage.BlockColumn, rowIdx int) {
//line app/vlselect/logsql/query_response.qtpl:8
	qw422016.N().S(`{`)
//line app/vlselect/logsql/query_response.qtpl:10
	c := &columns[0]

//line app/vlselect/logsql/query_response.qtpl:11
	qw422016.N().Q(c.Name)
//line app/vlselect/logsql/query_response.qtpl:11
	qw422016.N().S(`:`)
//line app/vlselect/logsql/query_response.qtpl:11
	qw422016.N().Q(c.Values[rowIdx])
//line app/vlselect/logsql/query_response.qtpl:12
	columns = columns[1:]

//line app/vlselect/logsql/query_response.qtpl:13
	for colIdx := range columns {
//line app/vlselect/logsql/query_response.qtpl:14
		c := &columns[colIdx]

//line app/vlselect/logsql/query_response.qtpl:14
		qw422016.N().S(`,`)
//line app/vlselect/logsql/query_response.qtpl:15
		qw422016.N().Q(c.Name)
//line app/vlselect/logsql/query_response.qtpl:15
		qw422016.N().S(`:`)
//line app/vlselect/logsql/query_response.qtpl:15
		qw422016.N().Q(c.Values[rowIdx])
//line app/vlselect/logsql/query_response.qtpl:16
	}
//line app/vlselect/logsql/query_response.qtpl:16
	qw422016.N().S(`}`)
//line app/vlselect/logsql/query_response.qtpl:17
	qw422016.N().S(`
`)
//line app/vlselect/logsql/query_response.qtpl:18
}

//line app/vlselect/logsql/query_response.qtpl:18
func WriteJSONRow(qq422016 qtio422016.Writer, columns []logstorage.BlockColumn, rowIdx int) {
//line app/vlselect/logsql/query_response.qtpl:18
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/query_response.qtpl:18
	StreamJSONRow(qw422016, columns, rowIdx)
//line app/vlselect/logsql/query_response.qtpl:18
	qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/query_response.qtpl:18
}

//line app/vlselect/logsql/query_response.qtpl:18
func JSONRow(columns []logstorage.BlockColumn, rowIdx int) string {
//line app/vlselect/logsql/query_response.qtpl:18
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/query_response.qtpl:18
	WriteJSONRow(qb422016, columns, rowIdx)
//line app/vlselect/logsql/query_response.qtpl:18
	qs422016 := string(qb422016.B)
//line app/vlselect/logsql/query_response.qtpl:18
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/query_response.qtpl:18
	return qs422016
//line app/vlselect/logsql/query_response.qtpl:18
}

// JSONRows prints formatted rows

//line app/vlselect/logsql/query_response.qtpl:21
func StreamJSONRows(qw422016 *qt422016.Writer, rows [][]logstorage.Field) {
//line app/vlselect/logsql/query_response.qtpl:22
	if len(rows) == 0 {
//line app/vlselect/logsql/query_response.qtpl:23
		return
//line app/vlselect/logsql/query_response.qtpl:24
	}
//line app/vlselect/logsql/query_response.qtpl:25
	for _, fields := range rows {
//line app/vlselect/logsql/query_response.qtpl:25
		qw422016.N().S(`{`)
//line app/vlselect/logsql/query_response.qtpl:27
		if len(fields) > 0 {
//line app/vlselect/logsql/query_response.qtpl:29
			f := fields[0]
			fields = fields[1:]

//line app/vlselect/logsql/query_response.qtpl:32
			qw422016.N().Q(f.Name)
//line app/vlselect/logsql/query_response.qtpl:32
			qw422016.N().S(`:`)
//line app/vlselect/logsql/query_response.qtpl:32
			qw422016.N().Q(f.Value)
//line app/vlselect/logsql/query_response.qtpl:33
			for _, f := range fields {
//line app/vlselect/logsql/query_response.qtpl:33
				qw422016.N().S(`,`)
//line app/vlselect/logsql/query_response.qtpl:34
				qw422016.N().Q(f.Name)
//line app/vlselect/logsql/query_response.qtpl:34
				qw422016.N().S(`:`)
//line app/vlselect/logsql/query_response.qtpl:34
				qw422016.N().Q(f.Value)
//line app/vlselect/logsql/query_response.qtpl:35
			}
//line app/vlselect/logsql/query_response.qtpl:36
		}
//line app/vlselect/logsql/query_response.qtpl:36
		qw422016.N().S(`}`)
//line app/vlselect/logsql/query_response.qtpl:37
		qw422016.N().S(`
`)
//line app/vlselect/logsql/query_response.qtpl:38
	}
//line app/vlselect/logsql/query_response.qtpl:39
}

//line app/vlselect/logsql/query_response.qtpl:39
func WriteJSONRows(qq422016 qtio422016.Writer, rows [][]logstorage.Field) {
//line app/vlselect/logsql/query_response.qtpl:39
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vlselect/logsql/query_response.qtpl:39
	StreamJSONRows(qw422016, rows)
//line app/vlselect/logsql/query_response.qtpl:39
	qt422016.ReleaseWriter(qw422016)
//line app/vlselect/logsql/query_response.qtpl:39
}

//line app/vlselect/logsql/query_response.qtpl:39
func JSONRows(rows [][]logstorage.Field) string {
//line app/vlselect/logsql/query_response.qtpl:39
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vlselect/logsql/query_response.qtpl:39
	WriteJSONRows(qb422016, rows)
//line app/vlselect/logsql/query_response.qtpl:39
	qs422016 := string(qb422016.B)
//line app/vlselect/logsql/query_response.qtpl:39
	qt422016.ReleaseByteBuffer(qb422016)
//line app/vlselect/logsql/query_response.qtpl:39
	return qs422016
//line app/vlselect/logsql/query_response.qtpl:39
}

225  app/vlselect/logsql/sort_writer.go  Normal file
@@ -0,0 +1,225 @@
package logsql

import (
	"bytes"
	"io"
	"sort"
	"sync"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logjson"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

func getSortWriter() *sortWriter {
	v := sortWriterPool.Get()
	if v == nil {
		return &sortWriter{}
	}
	return v.(*sortWriter)
}

func putSortWriter(sw *sortWriter) {
	sw.reset()
	sortWriterPool.Put(sw)
}

var sortWriterPool sync.Pool

// sortWriter expects JSON line stream to be written to it.
//
// It buffers the incoming data until its size reaches maxBufLen.
// Then it streams the buffered data and all the incoming data to w.
//
// The FinalFlush() must be called when all the data is written.
// If the buf isn't empty at FinalFlush() call, then the buffered data
// is sorted by _time field.
type sortWriter struct {
	mu         sync.Mutex
	w          io.Writer
	maxBufLen  int
	buf        []byte
	bufFlushed bool

	hasErr bool
}

func (sw *sortWriter) reset() {
	sw.w = nil
	sw.maxBufLen = 0
	sw.buf = sw.buf[:0]
	sw.bufFlushed = false
	sw.hasErr = false
}

func (sw *sortWriter) Init(w io.Writer, maxBufLen int) {
	sw.reset()

	sw.w = w
	sw.maxBufLen = maxBufLen
}

func (sw *sortWriter) MustWrite(p []byte) {
	sw.mu.Lock()
	defer sw.mu.Unlock()

	if sw.hasErr {
		return
	}

	if sw.bufFlushed {
		if _, err := sw.w.Write(p); err != nil {
			sw.hasErr = true
		}
		return
	}
	if len(sw.buf)+len(p) < sw.maxBufLen {
		sw.buf = append(sw.buf, p...)
		return
	}
	sw.bufFlushed = true
	if len(sw.buf) > 0 {
		if _, err := sw.w.Write(sw.buf); err != nil {
			sw.hasErr = true
			return
		}
		sw.buf = sw.buf[:0]
	}
	if _, err := sw.w.Write(p); err != nil {
		sw.hasErr = true
	}
}

func (sw *sortWriter) FinalFlush() {
	if sw.hasErr || sw.bufFlushed {
		return
	}
	rs := getRowsSorter()
	rs.parseRows(sw.buf)
	rs.sort()
	WriteJSONRows(sw.w, rs.rows)
	putRowsSorter(rs)
}

func getRowsSorter() *rowsSorter {
	v := rowsSorterPool.Get()
	if v == nil {
		return &rowsSorter{}
	}
	return v.(*rowsSorter)
}

func putRowsSorter(rs *rowsSorter) {
	rs.reset()
	rowsSorterPool.Put(rs)
}

var rowsSorterPool sync.Pool

type rowsSorter struct {
	buf       []byte
	fieldsBuf []logstorage.Field
	rows      [][]logstorage.Field
	times     []string
}

func (rs *rowsSorter) reset() {
	rs.buf = rs.buf[:0]

	fieldsBuf := rs.fieldsBuf
	for i := range fieldsBuf {
		fieldsBuf[i].Reset()
	}
	rs.fieldsBuf = fieldsBuf[:0]

	rows := rs.rows
	for i := range rows {
		rows[i] = nil
	}
	rs.rows = rows[:0]

	times := rs.times
	for i := range times {
		times[i] = ""
	}
	rs.times = times[:0]
}

func (rs *rowsSorter) parseRows(src []byte) {
	rs.reset()

	buf := rs.buf
	fieldsBuf := rs.fieldsBuf
	rows := rs.rows
	times := rs.times

	p := logjson.GetParser()
	for len(src) > 0 {
		var line []byte
		n := bytes.IndexByte(src, '\n')
		if n < 0 {
			line = src
			src = nil
		} else {
			line = src[:n]
			src = src[n+1:]
		}
		if len(line) == 0 {
			continue
		}

		if err := p.ParseLogMessage(line); err != nil {
			logger.Panicf("BUG: unexpected invalid JSON line: %s", err)
		}

		timeValue := ""
		fieldsBufLen := len(fieldsBuf)
		for _, f := range p.Fields {
			bufLen := len(buf)
			buf = append(buf, f.Name...)
			name := bytesutil.ToUnsafeString(buf[bufLen:])

			bufLen = len(buf)
			buf = append(buf, f.Value...)
			value := bytesutil.ToUnsafeString(buf[bufLen:])

			fieldsBuf = append(fieldsBuf, logstorage.Field{
				Name:  name,
				Value: value,
			})

			if name == "_time" {
				timeValue = value
			}
		}
		rows = append(rows, fieldsBuf[fieldsBufLen:])
		times = append(times, timeValue)
	}
	logjson.PutParser(p)

	rs.buf = buf
	rs.fieldsBuf = fieldsBuf
	rs.rows = rows
	rs.times = times
}

func (rs *rowsSorter) Len() int {
	return len(rs.rows)
}

func (rs *rowsSorter) Less(i, j int) bool {
	times := rs.times
	return times[i] < times[j]
}

func (rs *rowsSorter) Swap(i, j int) {
	times := rs.times
	rows := rs.rows
	times[i], times[j] = times[j], times[i]
	rows[i], rows[j] = rows[j], rows[i]
}

func (rs *rowsSorter) sort() {
	sort.Sort(rs)
}
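
One subtlety worth calling out: rowsSorter.Less compares the raw `_time` strings byte-wise rather than parsing them, which yields chronological order only when all values share a single format and timezone (e.g. RFC3339 with a trailing Z). A tiny self-contained check of that assumption:

package main

import "fmt"

func main() {
	a := "2023-06-06T04:48:11.735Z"
	b := "2023-06-06T04:48:12.735Z"
	// For equally formatted RFC3339 strings, byte-wise order equals time order.
	fmt.Println(a < b) // true
}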

39  app/vlselect/logsql/sort_writer_test.go  Normal file
@@ -0,0 +1,39 @@
package logsql

import (
	"bytes"
	"strings"
	"testing"
)

func TestSortWriter(t *testing.T) {
	f := func(maxBufLen int, data string, expectedResult string) {
		t.Helper()

		var bb bytes.Buffer
		sw := getSortWriter()
		sw.Init(&bb, maxBufLen)

		for _, s := range strings.Split(data, "\n") {
			sw.MustWrite([]byte(s + "\n"))
		}
		sw.FinalFlush()
		putSortWriter(sw)

		result := bb.String()
		if result != expectedResult {
			t.Fatalf("unexpected result;\ngot\n%s\nwant\n%s", result, expectedResult)
		}
	}

	f(100, "", "")
	f(100, "{}", "{}\n")

	data := `{"_time":"def","_msg":"xxx"}
{"_time":"abc","_msg":"foo"}`
	resultExpected := `{"_time":"abc","_msg":"foo"}
{"_time":"def","_msg":"xxx"}
`
	f(100, data, resultExpected)
	f(10, data, data+"\n")
}

162  app/vlselect/main.go  Normal file
@@ -0,0 +1,162 @@
package vlselect

import (
	"embed"
	"flag"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlselect/logsql"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
	"github.com/VictoriaMetrics/metrics"
)

var (
	maxConcurrentRequests = flag.Int("search.maxConcurrentRequests", getDefaultMaxConcurrentRequests(), "The maximum number of concurrent search requests. "+
		"It shouldn't be high, since a single request can saturate all the CPU cores, while many concurrently executed requests may require high amounts of memory. "+
		"See also -search.maxQueueDuration")
	maxQueueDuration = flag.Duration("search.maxQueueDuration", 10*time.Second, "The maximum time the search request waits for execution when -search.maxConcurrentRequests "+
		"limit is reached; see also -search.maxQueryDuration")
	maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum duration for query execution")
)

func getDefaultMaxConcurrentRequests() int {
	n := cgroup.AvailableCPUs()
	if n <= 4 {
		n *= 2
	}
	if n > 16 {
		// A single request can saturate all the CPU cores, so there is no sense
		// in allowing higher number of concurrent requests - they will just contend
		// for unavailable CPU time.
		n = 16
	}
	return n
}

// Init initializes vlselect
func Init() {
	concurrencyLimitCh = make(chan struct{}, *maxConcurrentRequests)
}

// Stop stops vlselect
func Stop() {
}

var concurrencyLimitCh chan struct{}

var (
	concurrencyLimitReached = metrics.NewCounter(`vl_concurrent_select_limit_reached_total`)
	concurrencyLimitTimeout = metrics.NewCounter(`vl_concurrent_select_limit_timeout_total`)

	_ = metrics.NewGauge(`vl_concurrent_select_capacity`, func() float64 {
		return float64(cap(concurrencyLimitCh))
	})
	_ = metrics.NewGauge(`vl_concurrent_select_current`, func() float64 {
		return float64(len(concurrencyLimitCh))
	})
)

//go:embed vmui
var vmuiFiles embed.FS

var vmuiFileServer = http.FileServer(http.FS(vmuiFiles))

// RequestHandler handles select requests for VictoriaLogs
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
	path := r.URL.Path
	if !strings.HasPrefix(path, "/select/") {
		// Skip requests, which do not start with /select/, since these aren't our requests.
		return false
	}
	path = strings.TrimPrefix(path, "/select")
	path = strings.ReplaceAll(path, "//", "/")

	if path == "/vmui" {
		// VMUI access via incomplete url without `/` in the end. Redirect to complete url.
		// Use relative redirect, since the hostname and path prefix may be incorrect if VictoriaMetrics
		// is hidden behind vmauth or similar proxy.
		_ = r.ParseForm()
		newURL := "vmui/?" + r.Form.Encode()
		httpserver.Redirect(w, newURL)
		return true
	}
	if strings.HasPrefix(path, "/vmui/") {
		r.URL.Path = path
		vmuiFileServer.ServeHTTP(w, r)
		return true
	}

	// Limit the number of concurrent queries, which can consume big amounts of CPU.
	startTime := time.Now()
	stopCh := r.Context().Done()
	select {
	case concurrencyLimitCh <- struct{}{}:
		defer func() { <-concurrencyLimitCh }()
	default:
		// Sleep for a while until giving up. This should resolve short bursts in requests.
		concurrencyLimitReached.Inc()
		d := getMaxQueryDuration(r)
		if d > *maxQueueDuration {
			d = *maxQueueDuration
		}
		t := timerpool.Get(d)
		select {
		case concurrencyLimitCh <- struct{}{}:
			timerpool.Put(t)
			defer func() { <-concurrencyLimitCh }()
		case <-stopCh:
			timerpool.Put(t)
			remoteAddr := httpserver.GetQuotedRemoteAddr(r)
			requestURI := httpserver.GetRequestURI(r)
			logger.Infof("client has cancelled the request after %.3f seconds: remoteAddr=%s, requestURI: %q",
				time.Since(startTime).Seconds(), remoteAddr, requestURI)
			return true
		case <-t.C:
			timerpool.Put(t)
			concurrencyLimitTimeout.Inc()
			err := &httpserver.ErrorWithStatusCode{
				Err: fmt.Errorf("couldn't start executing the request in %.3f seconds, since -search.maxConcurrentRequests=%d concurrent requests "+
					"are executed. Possible solutions: to reduce query load; to add more compute resources to the server; "+
					"to increase -search.maxQueueDuration=%s; to increase -search.maxQueryDuration; to increase -search.maxConcurrentRequests",
					d.Seconds(), *maxConcurrentRequests, maxQueueDuration),
				StatusCode: http.StatusServiceUnavailable,
			}
			httpserver.Errorf(w, r, "%s", err)
			return true
		}
	}

	switch {
	case path == "/logsql/query":
		logsqlQueryRequests.Inc()
		httpserver.EnableCORS(w, r)
		logsql.ProcessQueryRequest(w, r, stopCh)
		return true
	default:
		return false
	}
}

// getMaxQueryDuration returns the maximum duration for query from r.
func getMaxQueryDuration(r *http.Request) time.Duration {
	dms, err := httputils.GetDuration(r, "timeout", 0)
	if err != nil {
		dms = 0
	}
	d := time.Duration(dms) * time.Millisecond
	if d <= 0 || d > *maxQueryDuration {
		d = *maxQueryDuration
	}
	return d
}

var (
	logsqlQueryRequests = metrics.NewCounter(`vl_http_requests_total{path="/select/logsql/query"}`)
)

BIN  app/vlselect/vmui/apple-touch-icon.png  Normal file (14 KiB)
Binary file not shown.

14  app/vlselect/vmui/asset-manifest.json  Normal file
@@ -0,0 +1,14 @@
{
  "files": {
    "main.css": "./static/css/main.f5cb3747.css",
    "main.js": "./static/js/main.46d11611.js",
    "static/js/27.c1ccfd29.chunk.js": "./static/js/27.c1ccfd29.chunk.js",
    "static/media/Lato-Regular.ttf": "./static/media/Lato-Regular.d714fec1633b69a9c2e9.ttf",
    "static/media/Lato-Bold.ttf": "./static/media/Lato-Bold.32360ba4b57802daa4d6.ttf",
    "index.html": "./index.html"
  },
  "entrypoints": [
    "static/css/main.f5cb3747.css",
    "static/js/main.46d11611.js"
  ]
}

BIN  app/vlselect/vmui/favicon-32x32.png  Normal file (1.6 KiB)
Binary file not shown.

BIN  app/vlselect/vmui/favicon.ico  Normal file (15 KiB)
Binary file not shown.

1  app/vlselect/vmui/index.html  Normal file
@@ -0,0 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=no"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.46d11611.js"></script><link href="./static/css/main.f5cb3747.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

20  app/vlselect/vmui/manifest.json  Normal file
@@ -0,0 +1,20 @@
{
  "short_name": "Victoria Metrics UI",
  "name": "Victoria Metrics UI is a metric explorer for Victoria Metrics",
  "icons": [
    {
      "src": "favicon-32x32.png",
      "sizes": "32x32",
      "type": "image/png"
    },
    {
      "src": "apple-touch-icon.png",
      "type": "image/png",
      "sizes": "192x192"
    }
  ],
  "start_url": ".",
  "display": "standalone",
  "theme_color": "#000000",
  "background_color": "#ffffff"
}

BIN  app/vlselect/vmui/preview.jpg  Normal file (67 KiB)
Binary file not shown.

3  app/vlselect/vmui/robots.txt  Normal file
@@ -0,0 +1,3 @@
# https://www.robotstxt.org/robotstxt.html
User-agent: *
Disallow:

1  app/vlselect/vmui/static/css/main.f5cb3747.css  Normal file
File diff suppressed because one or more lines are too long

1  app/vlselect/vmui/static/js/27.c1ccfd29.chunk.js  Normal file
@@ -0,0 +1 @@
"use strict";(self.webpackChunkvmui=self.webpackChunkvmui||[]).push([[27],{27:function(e,t,n){n.r(t),n.d(t,{getCLS:function(){return y},getFCP:function(){return g},getFID:function(){return C},getLCP:function(){return P},getTTFB:function(){return D}});var i,r,a,o,u=function(e,t){return{name:e,value:void 0===t?-1:t,delta:0,entries:[],id:"v2-".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12)}},c=function(e,t){try{if(PerformanceObserver.supportedEntryTypes.includes(e)){if("first-input"===e&&!("PerformanceEventTiming"in self))return;var n=new PerformanceObserver((function(e){return e.getEntries().map(t)}));return n.observe({type:e,buffered:!0}),n}}catch(e){}},f=function(e,t){var n=function n(i){"pagehide"!==i.type&&"hidden"!==document.visibilityState||(e(i),t&&(removeEventListener("visibilitychange",n,!0),removeEventListener("pagehide",n,!0)))};addEventListener("visibilitychange",n,!0),addEventListener("pagehide",n,!0)},s=function(e){addEventListener("pageshow",(function(t){t.persisted&&e(t)}),!0)},m=function(e,t,n){var i;return function(r){t.value>=0&&(r||n)&&(t.delta=t.value-(i||0),(t.delta||void 0===i)&&(i=t.value,e(t)))}},v=-1,p=function(){return"hidden"===document.visibilityState?0:1/0},d=function(){f((function(e){var t=e.timeStamp;v=t}),!0)},l=function(){return v<0&&(v=p(),d(),s((function(){setTimeout((function(){v=p(),d()}),0)}))),{get firstHiddenTime(){return v}}},g=function(e,t){var n,i=l(),r=u("FCP"),a=function(e){"first-contentful-paint"===e.name&&(f&&f.disconnect(),e.startTime<i.firstHiddenTime&&(r.value=e.startTime,r.entries.push(e),n(!0)))},o=window.performance&&performance.getEntriesByName&&performance.getEntriesByName("first-contentful-paint")[0],f=o?null:c("paint",a);(o||f)&&(n=m(e,r,t),o&&a(o),s((function(i){r=u("FCP"),n=m(e,r,t),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,n(!0)}))}))})))},h=!1,T=-1,y=function(e,t){h||(g((function(e){T=e.value})),h=!0);var n,i=function(t){T>-1&&e(t)},r=u("CLS",0),a=0,o=[],v=function(e){if(!e.hadRecentInput){var t=o[0],i=o[o.length-1];a&&e.startTime-i.startTime<1e3&&e.startTime-t.startTime<5e3?(a+=e.value,o.push(e)):(a=e.value,o=[e]),a>r.value&&(r.value=a,r.entries=o,n())}},p=c("layout-shift",v);p&&(n=m(i,r,t),f((function(){p.takeRecords().map(v),n(!0)})),s((function(){a=0,T=-1,r=u("CLS",0),n=m(i,r,t)})))},E={passive:!0,capture:!0},w=new Date,L=function(e,t){i||(i=t,r=e,a=new Date,F(removeEventListener),S())},S=function(){if(r>=0&&r<a-w){var e={entryType:"first-input",name:i.type,target:i.target,cancelable:i.cancelable,startTime:i.timeStamp,processingStart:i.timeStamp+r};o.forEach((function(t){t(e)})),o=[]}},b=function(e){if(e.cancelable){var t=(e.timeStamp>1e12?new Date:performance.now())-e.timeStamp;"pointerdown"==e.type?function(e,t){var n=function(){L(e,t),r()},i=function(){r()},r=function(){removeEventListener("pointerup",n,E),removeEventListener("pointercancel",i,E)};addEventListener("pointerup",n,E),addEventListener("pointercancel",i,E)}(t,e):L(t,e)}},F=function(e){["mousedown","keydown","touchstart","pointerdown"].forEach((function(t){return e(t,b,E)}))},C=function(e,t){var n,a=l(),v=u("FID"),p=function(e){e.startTime<a.firstHiddenTime&&(v.value=e.processingStart-e.startTime,v.entries.push(e),n(!0))},d=c("first-input",p);n=m(e,v,t),d&&f((function(){d.takeRecords().map(p),d.disconnect()}),!0),d&&s((function(){var a;v=u("FID"),n=m(e,v,t),o=[],r=-1,i=null,F(addEventListener),a=p,o.push(a),S()}))},k={},P=function(e,t){var 
n,i=l(),r=u("LCP"),a=function(e){var t=e.startTime;t<i.firstHiddenTime&&(r.value=t,r.entries.push(e),n())},o=c("largest-contentful-paint",a);if(o){n=m(e,r,t);var v=function(){k[r.id]||(o.takeRecords().map(a),o.disconnect(),k[r.id]=!0,n(!0))};["keydown","click"].forEach((function(e){addEventListener(e,v,{once:!0,capture:!0})})),f(v,!0),s((function(i){r=u("LCP"),n=m(e,r,t),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,k[r.id]=!0,n(!0)}))}))}))}},D=function(e){var t,n=u("TTFB");t=function(){try{var t=performance.getEntriesByType("navigation")[0]||function(){var e=performance.timing,t={entryType:"navigation",startTime:0};for(var n in e)"navigationStart"!==n&&"toJSON"!==n&&(t[n]=Math.max(e[n]-e.navigationStart,0));return t}();if(n.value=n.delta=t.responseStart,n.value<0||n.value>performance.now())return;n.entries=[t],e(n)}catch(e){}},"complete"===document.readyState?setTimeout(t,0):addEventListener("load",(function(){return setTimeout(t,0)}))}}}]);

2  app/vlselect/vmui/static/js/main.46d11611.js  Normal file
File diff suppressed because one or more lines are too long

40  app/vlselect/vmui/static/js/main.46d11611.js.LICENSE.txt  Normal file
@@ -0,0 +1,40 @@
/*!
	Copyright (c) 2018 Jed Watson.
	Licensed under the MIT License (MIT), see
	http://jedwatson.github.io/classnames
*/

/*! regenerator-runtime -- Copyright (c) 2014-present, Facebook, Inc. -- license (MIT): https://github.com/facebook/regenerator/blob/main/LICENSE */

/**
 * @remix-run/router v1.5.0
 *
 * Copyright (c) Remix Software Inc.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE.md file in the root directory of this source tree.
 *
 * @license MIT
 */

/**
 * React Router DOM v6.10.0
 *
 * Copyright (c) Remix Software Inc.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE.md file in the root directory of this source tree.
 *
 * @license MIT
 */

/**
 * React Router v6.10.0
 *
 * Copyright (c) Remix Software Inc.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE.md file in the root directory of this source tree.
 *
 * @license MIT
 */

BIN  app/vlselect/vmui/static/media/Lato-Bold.32360ba4b57802daa4d6.ttf  Normal file
Binary file not shown.

BIN  app/vlselect/vmui/static/media/Lato-Regular.d714fec1633b69a9c2e9.ttf  Normal file
Binary file not shown.
170
app/vlstorage/main.go
Normal file
170
app/vlstorage/main.go
Normal file
|
@ -0,0 +1,170 @@
|
|||
package vlstorage

import (
	"flag"
	"fmt"
	"sync"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/metrics"
)

var (
	retentionPeriod = flagutil.NewDuration("retentionPeriod", "7d", "Log entries with timestamps older than now-retentionPeriod are automatically deleted; "+
		"log entries with timestamps outside the retention are also rejected during data ingestion; the minimum supported retention is 1d (one day); "+
		"see https://docs.victoriametrics.com/VictoriaLogs/#retention")
	futureRetention = flagutil.NewDuration("futureRetention", "2d", "Log entries with timestamps bigger than now+futureRetention are rejected during data ingestion; "+
		"see https://docs.victoriametrics.com/VictoriaLogs/#retention")
	storageDataPath = flag.String("storageDataPath", "victoria-logs-data", "Path to directory with the VictoriaLogs data; "+
		"see https://docs.victoriametrics.com/VictoriaLogs/#storage")
	inmemoryDataFlushInterval = flag.Duration("inmemoryDataFlushInterval", 5*time.Second, "The interval for guaranteed saving of in-memory data to disk. "+
		"The saved data survives unclean shutdown such as OOM crash, hardware reset, SIGKILL, etc. "+
		"Bigger intervals may help increasing lifetime of flash storage with limited write cycles (e.g. Raspberry PI). "+
		"Smaller intervals increase disk IO load. Minimum supported value is 1s")
	logNewStreams = flag.Bool("logNewStreams", false, "Whether to log creation of new streams; this can be useful for debugging of high cardinality issues with log streams; "+
		"see https://docs.victoriametrics.com/VictoriaLogs/keyConcepts.html#stream-fields ; see also -logIngestedRows")
	logIngestedRows = flag.Bool("logIngestedRows", false, "Whether to log all the ingested log entries; this can be useful for debugging of data ingestion; "+
		"see https://docs.victoriametrics.com/VictoriaLogs/data-ingestion/ ; see also -logNewStreams")
)

// Init initializes vlstorage.
//
// Stop must be called when vlstorage is no longer needed
func Init() {
	if strg != nil {
		logger.Panicf("BUG: Init() has been already called")
	}

	if retentionPeriod.Msecs < 24*3600*1000 {
		logger.Fatalf("-retentionPeriod cannot be smaller than a day; got %s", retentionPeriod)
	}
	cfg := &logstorage.StorageConfig{
		Retention:       time.Millisecond * time.Duration(retentionPeriod.Msecs),
		FlushInterval:   *inmemoryDataFlushInterval,
		FutureRetention: time.Millisecond * time.Duration(futureRetention.Msecs),
		LogNewStreams:   *logNewStreams,
		LogIngestedRows: *logIngestedRows,
	}
	logger.Infof("opening storage at -storageDataPath=%s", *storageDataPath)
	startTime := time.Now()
	strg = logstorage.MustOpenStorage(*storageDataPath, cfg)

	var ss logstorage.StorageStats
	strg.UpdateStats(&ss)
	logger.Infof("successfully opened storage in %.3f seconds; partsCount: %d; blocksCount: %d; rowsCount: %d; sizeBytes: %d",
		time.Since(startTime).Seconds(), ss.FileParts, ss.FileBlocks, ss.FileRowsCount, ss.CompressedFileSize)
	storageMetrics = initStorageMetrics(strg)

	metrics.RegisterSet(storageMetrics)
}

// Stop stops vlstorage.
func Stop() {
	metrics.UnregisterSet(storageMetrics)
	storageMetrics = nil

	strg.MustClose()
	strg = nil
}

var strg *logstorage.Storage
var storageMetrics *metrics.Set

// MustAddRows adds lr to vlstorage
func MustAddRows(lr *logstorage.LogRows) {
	strg.MustAddRows(lr)
}

// RunQuery runs the given q and calls processBlock for the returned data blocks
func RunQuery(tenantIDs []logstorage.TenantID, q *logstorage.Query, stopCh <-chan struct{}, processBlock func(columns []logstorage.BlockColumn)) {
	strg.RunQuery(tenantIDs, q, stopCh, processBlock)
}

func initStorageMetrics(strg *logstorage.Storage) *metrics.Set {
	ssCache := &logstorage.StorageStats{}
	var ssCacheLock sync.Mutex
	var lastUpdateTime time.Time

	m := func() *logstorage.StorageStats {
		ssCacheLock.Lock()
		defer ssCacheLock.Unlock()
		if time.Since(lastUpdateTime) < time.Second {
			return ssCache
		}
		var ss logstorage.StorageStats
		strg.UpdateStats(&ss)
		ssCache = &ss
		lastUpdateTime = time.Now()
		return ssCache
	}

	ms := metrics.NewSet()

	ms.NewGauge(fmt.Sprintf(`vl_free_disk_space_bytes{path=%q}`, *storageDataPath), func() float64 {
		return float64(fs.MustGetFreeSpace(*storageDataPath))
	})

	ms.NewGauge(`vl_active_merges{type="inmemory"}`, func() float64 {
		return float64(m().InmemoryActiveMerges)
	})
	ms.NewGauge(`vl_merges_total{type="inmemory"}`, func() float64 {
		return float64(m().InmemoryMergesTotal)
	})
	ms.NewGauge(`vl_active_merges{type="file"}`, func() float64 {
		return float64(m().FileActiveMerges)
	})
	ms.NewGauge(`vl_merges_total{type="file"}`, func() float64 {
		return float64(m().FileMergesTotal)
	})

	ms.NewGauge(`vl_rows{type="inmemory"}`, func() float64 {
		return float64(m().InmemoryRowsCount)
	})
	ms.NewGauge(`vl_rows{type="file"}`, func() float64 {
		return float64(m().FileRowsCount)
	})
	ms.NewGauge(`vl_parts{type="inmemory"}`, func() float64 {
		return float64(m().InmemoryParts)
	})
	ms.NewGauge(`vl_parts{type="file"}`, func() float64 {
		return float64(m().FileParts)
	})
	ms.NewGauge(`vl_blocks{type="inmemory"}`, func() float64 {
		return float64(m().InmemoryBlocks)
	})
	ms.NewGauge(`vl_blocks{type="file"}`, func() float64 {
		return float64(m().FileBlocks)
	})
	ms.NewGauge(`vl_partitions`, func() float64 {
		return float64(m().PartitionsCount)
	})
	ms.NewGauge(`vl_streams_created_total`, func() float64 {
		return float64(m().StreamsCreatedTotal)
	})

	ms.NewGauge(`vl_compressed_data_size_bytes{type="inmemory"}`, func() float64 {
		return float64(m().CompressedInmemorySize)
	})
	ms.NewGauge(`vl_compressed_data_size_bytes{type="file"}`, func() float64 {
		return float64(m().CompressedFileSize)
	})
	ms.NewGauge(`vl_uncompressed_data_size_bytes{type="inmemory"}`, func() float64 {
		return float64(m().UncompressedInmemorySize)
	})
	ms.NewGauge(`vl_uncompressed_data_size_bytes{type="file"}`, func() float64 {
		return float64(m().UncompressedFileSize)
	})

	ms.NewGauge(`vl_rows_dropped_total{reason="too_big_timestamp"}`, func() float64 {
		return float64(m().RowsDroppedTooBigTimestamp)
	})
	ms.NewGauge(`vl_rows_dropped_total{reason="too_small_timestamp"}`, func() float64 {
		return float64(m().RowsDroppedTooSmallTimestamp)
	})

	return ms
}
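A note on the `initStorageMetrics` pattern above: every registered gauge calls `m()`, so a single scrape of the dozens of `vl_*` metrics would otherwise trigger dozens of `UpdateStats` calls. The closure instead caches one stats snapshot for a second behind a mutex. A standalone sketch of the same idea (the names here are illustrative, not from the repo):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type stats struct{ rows int }

func main() {
	var (
		mu         sync.Mutex
		cache      *stats
		lastUpdate time.Time
	)
	collect := func() *stats {
		// stands in for the expensive strg.UpdateStats call
		return &stats{rows: 42}
	}
	cached := func() *stats {
		mu.Lock()
		defer mu.Unlock()
		if cache != nil && time.Since(lastUpdate) < time.Second {
			return cache // all gauges within one scrape share this snapshot
		}
		cache = collect()
		lastUpdate = time.Now()
		return cache
	}
	fmt.Println(cached().rows, cached().rows) // second call is served from the cache
}
```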
@ -752,14 +752,18 @@ See [these docs](https://docs.victoriametrics.com/#deduplication) for details.

## High availability

It is possible to run multiple identically configured `vmagent` instances or `vmagent` [clusters](#scraping-big-number-of-targets),
so they [scrape](#how-to-collect-metrics-in-prometheus-format) the same set of targets and push the collected data to the same set of VictoriaMetrics remote storage systems.
It is possible to run multiple **identically configured** `vmagent` instances or `vmagent`
[clusters](#scraping-big-number-of-targets), so they [scrape](#how-to-collect-metrics-in-prometheus-format)
the same set of targets and push the collected data to the same set of VictoriaMetrics remote storage systems.
Two **identically configured** vmagent instances or clusters are usually called an HA pair.

In this case the deduplication must be configured at VictoriaMetrics in order to de-duplicate samples received from multiple identically configured `vmagent` instances or clusters.
When running HA pairs, [deduplication](https://docs.victoriametrics.com/#deduplication) must be configured
on the VictoriaMetrics side in order to de-duplicate received samples.
See [these docs](https://docs.victoriametrics.com/#deduplication) for details.

It is also recommended passing different values to the `-promscrape.cluster.name` command-line flag for each `vmagent` instance or each `vmagent` cluster in an HA setup.
This is needed for proper data de-duplication. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2679) for details.
It is also recommended passing different values to the `-promscrape.cluster.name` command-line flag for each `vmagent`
instance or each `vmagent` cluster in an HA setup. This is needed for proper data de-duplication.
See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2679) for details.

## Scraping targets via a proxy

@ -1174,6 +1178,10 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .

  -cacheExpireDuration duration
     Items are removed from in-memory caches after they aren't accessed for this duration. Lower values may reduce memory usage at the cost of higher CPU usage. See also -prevCacheRemovalPercent (default 30m0s)
  -clients.docker
     Decides whether a docker container be brought up automatically
  -clients.semaphore
     Tells if the job is running on Semaphore
  -configAuthKey string
     Authorization key for accessing /config page. It must be passed via authKey query arg
  -csvTrimTimestamp duration

@ -1224,7 +1232,7 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .

  -httpListenAddr string
     TCP address to listen for http connections. Set this flag to empty value in order to disable listening on any port. This mode may be useful for running multiple vmagent instances on the same server. Note that /targets and /metrics pages aren't available if -httpListenAddr=''. See also -httpListenAddr.useProxyProtocol (default ":8429")
  -httpListenAddr.useProxyProtocol
     Whether to use proxy protocol for connections accepted at -httpListenAddr . See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt
     Whether to use proxy protocol for connections accepted at -httpListenAddr . See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing
  -import.maxLineLen size
     The maximum length in bytes of a single line accepted by /api/v1/import; the line length can be limited with 'max_rows_per_line' query arg passed to /api/v1/export
     Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 104857600)

@ -1268,6 +1276,9 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .

  -kafka.consumer.topic.brokers array
     List of brokers to connect for given topic, e.g. -kafka.consumer.topic.broker=host-1:9092;host-2:9092 . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
     Supports an array of values separated by comma or specified via multiple flags.
  -kafka.consumer.topic.concurrency array
     Configures consumer concurrency for topic specified via -kafka.consumer.topic flag.This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html
     Supports array of values separated by comma or specified via multiple flags.
  -kafka.consumer.topic.defaultFormat string
     Expected data format in the topic if -kafka.consumer.topic.format is skipped. This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html (default "promremotewrite")
  -kafka.consumer.topic.format array

@ -1476,6 +1487,8 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .

  -remoteWrite.headers array
     Optional HTTP headers to send with each request to the corresponding -remoteWrite.url. For example, -remoteWrite.headers='My-Auth:foobar' would send 'My-Auth: foobar' HTTP header with every request to the corresponding -remoteWrite.url. Multiple headers must be delimited by '^^': -remoteWrite.headers='header1:value1^^header2:value2'
     Supports an array of values separated by comma or specified via multiple flags.
  -remoteWrite.keepDanglingQueues
     Keep persistent queues contents at -remoteWrite.tmpDataPath in case there are no matching -remoteWrite.url. Useful when -remoteWrite.url is changed temporarily and persistent queue files will be needed later on.
  -remoteWrite.label array
     Optional label in the form 'name=value' to add to all the metrics before sending them to -remoteWrite.url. Pass multiple -remoteWrite.label flags in order to add multiple labels to metrics before sending them to remote storage
     Supports an array of values separated by comma or specified via multiple flags.

@ -1520,8 +1533,6 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .

     Supports array of values separated by comma or specified via multiple flags.
  -remoteWrite.relabelConfig string
     Optional path to file with relabeling configs, which are applied to all the metrics before sending them to -remoteWrite.url. See also -remoteWrite.urlRelabelConfig. The path can point either to local file or to http url. See https://docs.victoriametrics.com/vmagent.html#relabeling
  -remoteWrite.keepDanglingQueues
     Keep persistent queues contents at -remoteWrite.tmpDataPath in case there are no matching -remoteWrite.url. Useful when -remoteWrite.url is changed temporarily and persistent queue files will be needed later on.
  -remoteWrite.roundDigits array
     Round metric values to this number of decimal digits after the point before writing them to remote storage. Examples: -remoteWrite.roundDigits=2 would round 1.236 to 1.24, while -remoteWrite.roundDigits=-1 would round 126.78 to 130. By default, digits rounding is disabled. Set it to 100 for disabling it for a particular remote storage. This option may be used for improving data compression for the stored metrics
     Supports array of values separated by comma or specified via multiple flags.

@ -257,7 +257,7 @@ func getAuthConfig(argIdx int) (*promauth.Config, error) {

	}
	authCfg, err := opts.NewConfig()
	if err != nil {
		return nil, fmt.Errorf("cannot populate OAuth2 config for remoteWrite idx: %d, err: %w", argIdx, err)
		return nil, fmt.Errorf("cannot populate auth config for remoteWrite idx: %d, err: %w", argIdx, err)
	}
	return authCfg, nil
}

@ -587,6 +587,11 @@ func newRemoteWriteCtx(argIdx int, at *auth.Token, remoteWriteURL *url.URL, maxI

}

func (rwctx *remoteWriteCtx) MustStop() {
	// sas must be stopped before rwctx is closed
	// because sas can write pending series to rwctx.pss if there are any
	sas := rwctx.sas.Swap(nil)
	sas.MustStop()

	for _, ps := range rwctx.pss {
		ps.MustStop()
	}

@ -596,9 +601,6 @@ func (rwctx *remoteWriteCtx) MustStop() {

	rwctx.c.MustStop()
	rwctx.c = nil

	sas := rwctx.sas.Swap(nil)
	sas.MustStop()

	rwctx.fq.MustClose()
	rwctx.fq = nil

@ -73,6 +73,7 @@ test-vmalert:

	go test -v -race -cover ./app/vmalert/notifier
	go test -v -race -cover ./app/vmalert/config
	go test -v -race -cover ./app/vmalert/remotewrite
	go test -v -race -cover ./app/vmalert/utils

run-vmalert: vmalert
	./bin/vmalert -rule=app/vmalert/config/testdata/rules/rules2-good.rules \

@ -208,11 +208,13 @@ expr: <string>

# Please note, that if rule's query params contain sensitive
# information - it will be printed to logs.
# Is applicable to alerting rules only.
# Available starting from https://docs.victoriametrics.com/CHANGELOG.html#v1820
[ debug: <bool> | default = false ]

# Defines the number of rule's updates entries stored in memory
# and available for view on rule's Details page.
# Overrides `rule.updateEntriesLimit` value for this specific rule.
# Available starting from https://docs.victoriametrics.com/CHANGELOG.html#v1860
[ update_entries_limit: <integer> | default 0 ]

# Labels to add or overwrite for each alert.

@ -526,6 +528,9 @@ Check how to replace it with [cluster VictoriaMetrics](#cluster-victoriametrics)

#### Downsampling and aggregation via vmalert

_Please note, [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html) might be more efficient
for cases when downsampling or aggregation need to be applied **before data gets into the TSDB.**_

`vmalert` can't modify existing data. But it can run arbitrary PromQL/MetricsQL queries
via [recording rules](#recording-rules) and backfill results to the configured `-remoteWrite.url`.
This ability allows aggregating data. For example, the following rule will calculate the average value for

@ -789,7 +794,8 @@ If `-remoteWrite.url` command-line flag is configured, vmalert will persist aler

changed in time.

vmalert stores last `-rule.updateEntriesLimit` (or `update_entries_limit` [per-rule config](https://docs.victoriametrics.com/vmalert.html#alerting-rules))
state updates for each rule. To check updates, click on `Details` link next to rule's name on `/vmalert/groups` page
state updates for each rule starting from [v1.86](https://docs.victoriametrics.com/CHANGELOG.html#v1860).
To check updates, click on `Details` link next to rule's name on `/vmalert/groups` page
and check the `Last updates` section:

<img alt="vmalert state" src="vmalert_state.png">

@ -801,8 +807,8 @@ moment when rule was evaluated.

### Debug mode

vmalert allows configuring more detailed logging for specific alerting rule. Just set `debug: true` in rule's configuration
and vmalert will start printing additional log messages:
vmalert allows configuring more detailed logging for a specific alerting rule starting from [v1.82](https://docs.victoriametrics.com/CHANGELOG.html#v1820).
Just set `debug: true` in the rule's configuration and vmalert will start printing additional log messages:
```terminal
2022-09-15T13:35:41.155Z  DEBUG rule "TestGroup":"Conns" (2601299393013563564) at 2022-09-15T15:35:41+02:00: query returned 0 samples (elapsed: 5.896041ms)
2022-09-15T13:35:56.149Z  DEBUG datasource request: executing POST request with params "denyPartialResponse=true&query=sum%28vm_tcplistener_conns%7Binstance%3D%22localhost%3A8429%22%7D%29+by%28instance%29+%3E+0&step=15s&time=1663248945"

@ -815,7 +821,8 @@ and vmalert will start printing additional log messages:

### Never-firing alerts

vmalert can detect if alert's expression doesn't match any time series in runtime. This problem usually happens
vmalert can detect if an alert's expression doesn't match any time series at runtime
starting from [v1.91](https://docs.victoriametrics.com/CHANGELOG.html#v1910). This problem usually happens
when the alerting expression selects time series which aren't present in the datasource (i.e. wrong `job` label)
or there is a typo in the series selector (i.e. `env=rpod`). Such alerting rules will be marked with a special icon in
vmalert's UI and exposed via the `vmalert_alerting_rules_last_evaluation_series_fetched` metric. The metric value will

@ -829,6 +836,32 @@ max(vmalert_alerting_rules_last_evaluation_series_fetched) by(group, alertname)

See more details [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4039).
This feature is available only if vmalert is using VictoriaMetrics v1.90 or higher as a datasource.

### Series with the same labelset

vmalert can produce the following error message during rules evaluation:
```
result contains metrics with the same labelset after applying rule labels
```

The error means there is a collision between [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series)
after applying extra labels to the result.

For example, a rule with `expr: foo > 0` returns two distinct time series in response:
```
foo{bar="baz"} 1
foo{bar="qux"} 2
```

If the user configures the `-external.label=bar=baz` cmd-line flag to enforce
adding the `bar="baz"` label-value pair, then the time series won't be distinct anymore:
```
foo{bar="baz"} 1
foo{bar="baz"} 2 # 'bar' label was overridden by `-external.label=bar=baz`
```

The same issue can be caused by a collision of configured `labels` on the [Group](#groups) or [Rule](#rules) levels.
To fix it, one should avoid collisions by carefully picking label overrides in the configuration.

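To make the collision mechanics concrete, here is a standalone sketch (illustrative only, not vmalert's actual implementation) of how enforcing an external label makes two previously distinct label sets identical:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// key builds a canonical string for a labelset, so equal labelsets collide.
func key(labels map[string]string) string {
	pairs := make([]string, 0, len(labels))
	for k, v := range labels {
		pairs = append(pairs, k+"="+v)
	}
	sort.Strings(pairs)
	return strings.Join(pairs, ",")
}

func main() {
	series := []map[string]string{
		{"__name__": "foo", "bar": "baz"},
		{"__name__": "foo", "bar": "qux"},
	}
	seen := map[string]bool{}
	for _, s := range series {
		s["bar"] = "baz" // what -external.label=bar=baz effectively does
		k := key(s)
		if seen[k] {
			fmt.Println("duplicate labelset:", k) // vmalert reports errDuplicate here
		}
		seen[k] = true
	}
}
```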
## Profiling

@ -887,6 +920,8 @@ The shortlist of configuration flags is the following:

     Optional path to bearer token file to use for -datasource.url.
  -datasource.disableKeepAlive
     Whether to disable long-lived connections to the datasource. If true, disables HTTP keep-alives and will only use the connection to the server for a single HTTP request.
  -datasource.disableStepParam
     Whether to disable adding 'step' param to the issued instant queries. This might be useful when using vmalert with datasources that do not support 'step' param for instant queries, like Google Managed Prometheus. It is not recommended to enable this flag if you use vmalert with VictoriaMetrics.
  -datasource.headers string
     Optional HTTP extraHeaders to send with each request to the corresponding -datasource.url. For example, -datasource.headers='My-Auth:foobar' would send 'My-Auth: foobar' HTTP header with every request to the corresponding -datasource.url. Multiple headers must be delimited by '^^': -datasource.headers='header1:value1^^header2:value2'
  -datasource.lookback duration

@ -947,7 +982,7 @@ The shortlist of configuration flags is the following:

     Optional label in the form 'Name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets.
     Supports an array of values separated by comma or specified via multiple flags.
  -external.url string
     External URL is used as alert's source for sent alerts to the notifier
     External URL is used as alert's source for sent alerts to the notifier. By default, hostname is used as address.
  -flagsAuthKey string
     Auth key for /flags endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
  -fs.disableMmap

@ -971,11 +1006,13 @@ The shortlist of configuration flags is the following:

  -httpListenAddr string
     Address to listen for http connections. See also -httpListenAddr.useProxyProtocol (default ":8880")
  -httpListenAddr.useProxyProtocol
     Whether to use proxy protocol for connections accepted at -httpListenAddr . See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt
  -insert.maxQueueDuration duration
     The maximum duration to wait in the queue when -maxConcurrentInserts concurrent insert requests are executed (default 1m0s)
     Whether to use proxy protocol for connections accepted at -httpListenAddr . See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing
  -internStringCacheExpireDuration duration
     The expiry duration for caches for interned strings. See https://en.wikipedia.org/wiki/String_interning . See also -internStringMaxLen and -internStringDisableCache (default 6m0s)
  -internStringDisableCache
     Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
  -internStringMaxLen int
     The maximum length for strings to intern. Lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning (default 500)
     The maximum length for strings to intern. Lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
  -loggerDisableTimestamps
     Whether to disable writing timestamps in logs
  -loggerErrorsPerSecondLimit int

@ -992,8 +1029,6 @@ The shortlist of configuration flags is the following:

     Timezone to use for timestamps in logs. Timezone must be a valid IANA Time Zone. For example: America/New_York, Europe/Berlin, Etc/GMT+3 or Local (default "UTC")
  -loggerWarnsPerSecondLimit int
     Per-second limit on the number of WARN messages. If more than the given number of warns are emitted per second, then the remaining warns are suppressed. Zero values disable the rate limit
  -maxConcurrentInserts int
     The maximum number of concurrent insert requests. Default value should work for most cases, since it minimizes the memory usage. The default value can be increased when clients send data over slow networks. See also -insert.maxQueueDuration (default 8)
  -memory.allowedBytes size
     Allowed size of system memory VictoriaMetrics caches may occupy. This option overrides -memory.allowedPercent if set to a non-zero value. Too low a value may increase the cache miss rate usually resulting in higher CPU and disk IO usage. Too high a value may evict too much data from OS page cache resulting in higher disk IO usage
     Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 0)

@ -1147,6 +1182,10 @@ The shortlist of configuration flags is the following:

     Optional OAuth2 scopes to use for -notifier.url. Scopes must be delimited by ';'.
  -remoteWrite.oauth2.tokenUrl string
     Optional OAuth2 tokenURL to use for -notifier.url.
  -remoteWrite.retryMaxTime duration
     The max time spent on retry attempts for the failed remote-write request. Change this value if it is expected for remoteWrite.url to be unreachable for more than -remoteWrite.retryMaxTime. See also -remoteWrite.retryMinInterval (default 30s)
  -remoteWrite.retryMinInterval duration
     The minimum delay between retry attempts. Every next retry attempt will double the delay to prevent hammering of remote database. See also -remoteWrite.retryMaxInterval (default 1s)
  -remoteWrite.sendTimeout duration
     Timeout for sending data to the configured -remoteWrite.url. (default 30s)
  -remoteWrite.showURL

@ -1176,12 +1215,12 @@ The shortlist of configuration flags is the following:

  -replay.timeTo string
     The time filter in RFC3339 format to select timeseries with timestamp equal or lower than provided value. E.g. '2020-01-01T20:07:00Z'
  -rule array
     Path to the files with alerting and/or recording rules.
     Path to the files or http url with alerting and/or recording rules.
     Supports hierarchical patterns and regexpes.
     Examples:
      -rule="/path/to/file". Path to a single file with alerting rules.
      -rule="http://<some-server-addr>/path/to/rules". HTTP URL to a page with alerting rules.
      -rule="dir/*.yaml" -rule="/*.yaml" -rule="gcs://vmalert-rules/tenant_%{TENANT_ID}/prod".
      -rule="dir/**/*.yaml". Includes all the .yaml files in "dir" subfolders recursively.
     Rule files may contain %{ENV_VAR} placeholders, which are substituted by the corresponding env vars.

@ -1200,12 +1239,13 @@ The shortlist of configuration flags is the following:

     Minimum amount of time to wait before resending an alert to notifier
  -rule.templates array
     Path or glob pattern to location with go template definitions
      for rules annotations templating. Flag can be specified multiple times.
     Examples:
      -rule.templates="/path/to/file". Path to a single file with go templates
      -rule.templates="dir/*.tpl" -rule.templates="/*.tpl". Relative path to all .tpl files in "dir" folder,
      absolute path to all .tpl files in root.
      -rule.templates="dir/**/*.tpl". Includes all the .tpl files in "dir" subfolders recursively.

     Supports an array of values separated by comma or specified via multiple flags.
  -rule.updateEntriesLimit int
     Defines the max number of rule's state updates stored in-memory. Rule's updates are available on rule's Details page and are used for debugging purposes. The number of stored updates can be overridden per rule via update_entries_limit param. (default 20)

@ -1213,6 +1253,10 @@ The shortlist of configuration flags is the following:

     Whether to validate rules expressions via MetricsQL engine (default true)
  -rule.validateTemplates
     Whether to validate annotation and label templates (default true)
  -s2a_enable_appengine_dialer
     If true, opportunistically use AppEngine-specific dialer to call S2A.
  -s2a_timeout duration
     Timeout enforced on the connection to the S2A service for handshake. (default 3s)
  -s3.configFilePath string
     Path to file with S3 configs. Configs are loaded from default location if not set.
     See https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html . This flag is available only in VictoriaMetrics enterprise. See https://docs.victoriametrics.com/enterprise.html

@ -29,6 +29,7 @@ func toDatasourceType(s string) datasourceType {

}

// VMStorage represents vmstorage entity with ability to read and write metrics
// WARN: when adding a new field, remember to update Clone() method.
type VMStorage struct {
	c       *http.Client
	authCfg *promauth.Config

@ -54,29 +55,54 @@ type keyValue struct {

// Clone makes clone of VMStorage, shares http client.
func (s *VMStorage) Clone() *VMStorage {
	return &VMStorage{
	ns := &VMStorage{
		c:                s.c,
		authCfg:          s.authCfg,
		datasourceURL:    s.datasourceURL,
		appendTypePrefix: s.appendTypePrefix,
		lookBack:         s.lookBack,
		queryStep:        s.queryStep,
		appendTypePrefix: s.appendTypePrefix,
		dataSourceType:   s.dataSourceType,

		dataSourceType:     s.dataSourceType,
		evaluationInterval: s.evaluationInterval,

		// init map so it can be populated below
		extraParams: url.Values{},

		debug: s.debug,
	}
	if len(s.extraHeaders) > 0 {
		ns.extraHeaders = make([]keyValue, len(s.extraHeaders))
		copy(ns.extraHeaders, s.extraHeaders)
	}
	for k, v := range s.extraParams {
		ns.extraParams[k] = v
	}

	return ns
}

// ApplyParams - changes given querier params.
func (s *VMStorage) ApplyParams(params QuerierParams) *VMStorage {
	s.dataSourceType = toDatasourceType(params.DataSourceType)
	s.evaluationInterval = params.EvaluationInterval
	s.extraParams = params.QueryParams
	s.debug = params.Debug
	if params.QueryParams != nil {
		if s.extraParams == nil {
			s.extraParams = url.Values{}
		}
		for k, vl := range params.QueryParams {
			for _, v := range vl { // custom query params are prior to default ones
				s.extraParams.Set(k, v)
			}
		}
	}
	if params.Headers != nil {
		for key, value := range params.Headers {
			kv := keyValue{key: key, value: value}
			s.extraHeaders = append(s.extraHeaders, kv)
		}
	}
	s.debug = params.Debug
	return s
}

@ -95,6 +121,7 @@ func NewVMStorage(baseURL string, authCfg *promauth.Config, lookBack time.Durati

		lookBack:       lookBack,
		queryStep:      queryStep,
		dataSourceType: datasourcePrometheus,
		extraParams:    url.Values{},
	}
}

|
|||
var (
|
||||
disablePathAppend = flag.Bool("remoteRead.disablePathAppend", false, "Whether to disable automatic appending of '/api/v1/query' path "+
|
||||
"to the configured -datasource.url and -remoteRead.url")
|
||||
disableStepParam = flag.Bool("datasource.disableStepParam", false, "Whether to disable adding 'step' param to the issued instant queries. "+
|
||||
"This might be useful when using vmalert with datasources that do not support 'step' param for instant queries, like Google Managed Prometheus. "+
|
||||
"It is not recommended to enable this flag if you use vmalert with VictoriaMetrics.")
|
||||
)
|
||||
|
||||
type promResponse struct {
|
||||
|
@ -166,12 +169,12 @@ func (s *VMStorage) setPrometheusInstantReqParams(r *http.Request, query string,
|
|||
timestamp = timestamp.Truncate(s.evaluationInterval)
|
||||
}
|
||||
q.Set("time", fmt.Sprintf("%d", timestamp.Unix()))
|
||||
if s.evaluationInterval > 0 { // set step as evaluationInterval by default
|
||||
if !*disableStepParam && s.evaluationInterval > 0 { // set step as evaluationInterval by default
|
||||
// always convert to seconds to keep compatibility with older
|
||||
// Prometheus versions. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1943
|
||||
q.Set("step", fmt.Sprintf("%ds", int(s.evaluationInterval.Seconds())))
|
||||
}
|
||||
if s.queryStep > 0 { // override step with user-specified value
|
||||
if !*disableStepParam && s.queryStep > 0 { // override step with user-specified value
|
||||
// always convert to seconds to keep compatibility with older
|
||||
// Prometheus versions. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1943
|
||||
q.Set("step", fmt.Sprintf("%ds", int(s.queryStep.Seconds())))
|
||||
|
|
|
@ -378,6 +378,9 @@ func TestRequestParams(t *testing.T) {
|
|||
}
|
||||
query := "up"
|
||||
timestamp := time.Date(2001, 2, 3, 4, 5, 6, 0, time.UTC)
|
||||
storage := VMStorage{
|
||||
extraParams: url.Values{"round_digits": {"10"}},
|
||||
}
|
||||
testCases := []struct {
|
||||
name string
|
||||
queryRange bool
|
||||
|
@ -574,6 +577,17 @@ func TestRequestParams(t *testing.T) {
|
|||
checkEqualString(t, exp, r.URL.RawQuery)
|
||||
},
|
||||
},
|
||||
{
|
||||
"custom params overrides the original params",
|
||||
false,
|
||||
storage.Clone().ApplyParams(QuerierParams{
|
||||
QueryParams: url.Values{"round_digits": {"2"}},
|
||||
}),
|
||||
func(t *testing.T, r *http.Request) {
|
||||
exp := fmt.Sprintf("query=%s&round_digits=2&time=%d", query, timestamp.Unix())
|
||||
checkEqualString(t, exp, r.URL.RawQuery)
|
||||
},
|
||||
},
|
||||
{
|
||||
"graphite extra params",
|
||||
false,
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
|
@ -74,7 +75,7 @@ absolute path to all .tpl files in root.
|
|||
ruleUpdateEntriesLimit = flag.Int("rule.updateEntriesLimit", 20, "Defines the max number of rule's state updates stored in-memory. "+
|
||||
"Rule's updates are available on rule's Details page and are used for debugging purposes. The number of stored updates can be overridden per rule via update_entries_limit param.")
|
||||
|
||||
externalURL = flag.String("external.url", "", "External URL is used as alert's source for sent alerts to the notifier")
|
||||
externalURL = flag.String("external.url", "", "External URL is used as alert's source for sent alerts to the notifier. By default, hostname is used as address.")
|
||||
externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows to override the Source link for alerts sent to AlertManager `+
|
||||
`for cases where you want to build a custom link to Grafana, Prometheus or any other service. `+
|
||||
`Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . `+
|
||||
|
@ -326,8 +327,7 @@ func configReload(ctx context.Context, m *manager, groupsCfg []config.Group, sig
|
|||
}
|
||||
|
||||
// init reload metrics with positive values to improve alerting conditions
|
||||
configSuccess.Set(1)
|
||||
configTimestamp.Set(fasttime.UnixTimestamp())
|
||||
setConfigSuccess(fasttime.UnixTimestamp())
|
||||
parseFn := config.Parse
|
||||
for {
|
||||
select {
|
||||
|
@ -347,22 +347,19 @@ func configReload(ctx context.Context, m *manager, groupsCfg []config.Group, sig
|
|||
parseFn = config.ParseSilent
|
||||
}
|
||||
if err := notifier.Reload(); err != nil {
|
||||
configReloadErrors.Inc()
|
||||
configSuccess.Set(0)
|
||||
setConfigError(err)
|
||||
logger.Errorf("failed to reload notifier config: %s", err)
|
||||
continue
|
||||
}
|
||||
err := templates.Load(*ruleTemplatesPath, false)
|
||||
if err != nil {
|
||||
configReloadErrors.Inc()
|
||||
configSuccess.Set(0)
|
||||
setConfigError(err)
|
||||
logger.Errorf("failed to load new templates: %s", err)
|
||||
continue
|
||||
}
|
||||
newGroupsCfg, err := parseFn(*rulePath, validateTplFn, *validateExpressions)
|
||||
if err != nil {
|
||||
configReloadErrors.Inc()
|
||||
configSuccess.Set(0)
|
||||
setConfigError(err)
|
||||
logger.Errorf("cannot parse configuration file: %s", err)
|
||||
continue
|
||||
}
|
||||
|
@ -371,19 +368,18 @@ func configReload(ctx context.Context, m *manager, groupsCfg []config.Group, sig
|
|||
// set success to 1 since previous reload
|
||||
// could have been unsuccessful
|
||||
configSuccess.Set(1)
|
||||
setConfigError(nil)
|
||||
// config didn't change - skip it
|
||||
continue
|
||||
}
|
||||
if err := m.update(ctx, newGroupsCfg, false); err != nil {
|
||||
configReloadErrors.Inc()
|
||||
configSuccess.Set(0)
|
||||
setConfigError(err)
|
||||
logger.Errorf("error while reloading rules: %s", err)
|
||||
continue
|
||||
}
|
||||
templates.Reload()
|
||||
groupsCfg = newGroupsCfg
|
||||
configSuccess.Set(1)
|
||||
configTimestamp.Set(fasttime.UnixTimestamp())
|
||||
setConfigSuccess(fasttime.UnixTimestamp())
|
||||
logger.Infof("Rules reloaded successfully from %q", *rulePath)
|
||||
}
|
||||
}
|
||||
|
@ -399,3 +395,40 @@ func configsEqual(a, b []config.Group) bool {
|
|||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// setConfigSuccess sets config reload status to 1.
|
||||
func setConfigSuccess(at uint64) {
|
||||
configSuccess.Set(1)
|
||||
configTimestamp.Set(fasttime.UnixTimestamp())
|
||||
// reset the error if any
|
||||
setConfigErr(nil)
|
||||
}
|
||||
|
||||
// setConfigError sets config reload status to 0.
|
||||
func setConfigError(err error) {
|
||||
configReloadErrors.Inc()
|
||||
configSuccess.Set(0)
|
||||
setConfigErr(err)
|
||||
}
|
||||
|
||||
var (
|
||||
configErrMu sync.RWMutex
|
||||
// configErr represent the error message from the last
|
||||
// config reload.
|
||||
configErr error
|
||||
)
|
||||
|
||||
func setConfigErr(err error) {
|
||||
configErrMu.Lock()
|
||||
configErr = err
|
||||
configErrMu.Unlock()
|
||||
}
|
||||
|
||||
func configError() error {
|
||||
configErrMu.RLock()
|
||||
defer configErrMu.RUnlock()
|
||||
if configErr != nil {
|
||||
return configErr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -188,6 +188,7 @@ func (g *Group) toAPI() APIGroup {
|
|||
|
||||
Labels: g.Labels,
|
||||
}
|
||||
ag.Rules = make([]APIRule, 0)
|
||||
for _, r := range g.Rules {
|
||||
ag.Rules = append(ag.Rules, r.ToAPI())
|
||||
}
|
||||
|
|
|
@ -23,6 +23,8 @@ import (
|
|||
var (
|
||||
disablePathAppend = flag.Bool("remoteWrite.disablePathAppend", false, "Whether to disable automatic appending of '/api/v1/write' path to the configured -remoteWrite.url.")
|
||||
sendTimeout = flag.Duration("remoteWrite.sendTimeout", 30*time.Second, "Timeout for sending data to the configured -remoteWrite.url.")
|
||||
retryMinInterval = flag.Duration("remoteWrite.retryMinInterval", time.Second, "The minimum delay between retry attempts. Every next retry attempt will double the delay to prevent hammering of remote database. See also -remoteWrite.retryMaxInterval")
|
||||
retryMaxTime = flag.Duration("remoteWrite.retryMaxTime", time.Second*30, "The max time spent on retry attempts for the failed remote-write request. Change this value if it is expected for remoteWrite.url to be unreachable for more than -remoteWrite.retryMaxTime. See also -remoteWrite.retryMinInterval")
|
||||
)
|
||||
|
||||
// Client is an asynchronous HTTP client for writing
|
||||
|
@ -147,6 +149,7 @@ func (c *Client) run(ctx context.Context) {
|
|||
wr.Timeseries = append(wr.Timeseries, ts)
|
||||
}
|
||||
lastCtx, cancel := context.WithTimeout(context.Background(), defaultWriteTimeout)
|
||||
logger.Infof("shutting down remote write client and flushing remained %d series", len(wr.Timeseries))
|
||||
c.flush(lastCtx, wr)
|
||||
cancel()
|
||||
}
|
||||
|
@ -180,9 +183,14 @@ func (c *Client) run(ctx context.Context) {
|
|||
var (
|
||||
sentRows = metrics.NewCounter(`vmalert_remotewrite_sent_rows_total`)
|
||||
sentBytes = metrics.NewCounter(`vmalert_remotewrite_sent_bytes_total`)
|
||||
sendDuration = metrics.NewFloatCounter(`vmalert_remotewrite_send_duration_seconds_total`)
|
||||
droppedRows = metrics.NewCounter(`vmalert_remotewrite_dropped_rows_total`)
|
||||
droppedBytes = metrics.NewCounter(`vmalert_remotewrite_dropped_bytes_total`)
|
||||
bufferFlushDuration = metrics.NewHistogram(`vmalert_remotewrite_flush_duration_seconds`)
|
||||
|
||||
_ = metrics.NewGauge(`vmalert_remotewrite_concurrency`, func() float64 {
|
||||
return float64(*concurrency)
|
||||
})
|
||||
)
|
||||
|
||||
// flush is a blocking function that marshals WriteRequest and sends
|
||||
|
@ -203,12 +211,14 @@ func (c *Client) flush(ctx context.Context, wr *prompbmarshal.WriteRequest) {
|
|||
|
||||
b := snappy.Encode(nil, data)
|
||||
|
||||
const (
|
||||
retryCount = 5
|
||||
retryBackoff = time.Second
|
||||
)
|
||||
|
||||
for attempts := 0; attempts < retryCount; attempts++ {
|
||||
retryInterval, maxRetryInterval := *retryMinInterval, *retryMaxTime
|
||||
if retryInterval > maxRetryInterval {
|
||||
retryInterval = maxRetryInterval
|
||||
}
|
||||
timeStart := time.Now()
|
||||
defer sendDuration.Add(time.Since(timeStart).Seconds())
|
||||
L:
|
||||
for attempts := 0; ; attempts++ {
|
||||
err := c.send(ctx, b)
|
||||
if err == nil {
|
||||
sentRows.Add(len(wr.Timeseries))
|
||||
|
@ -216,10 +226,10 @@ func (c *Client) flush(ctx context.Context, wr *prompbmarshal.WriteRequest) {
|
|||
return
|
||||
}
|
||||
|
||||
_, isRetriable := err.(*retriableError)
|
||||
logger.Warnf("attempt %d to send request failed: %s (retriable: %v)", attempts+1, err, isRetriable)
|
||||
_, isNotRetriable := err.(*nonRetriableError)
|
||||
logger.Warnf("attempt %d to send request failed: %s (retriable: %v)", attempts+1, err, !isNotRetriable)
|
||||
|
||||
if !isRetriable {
|
||||
if isNotRetriable {
|
||||
// exit fast if error isn't retriable
|
||||
break
|
||||
}
|
||||
|
@ -227,12 +237,24 @@ func (c *Client) flush(ctx context.Context, wr *prompbmarshal.WriteRequest) {
|
|||
// check if request has been cancelled before backoff
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
break
|
||||
logger.Errorf("interrupting retry attempt %d: context cancelled", attempts+1)
|
||||
break L
|
||||
default:
|
||||
}
|
||||
|
||||
// sleeping to avoid remote db hammering
|
||||
time.Sleep(retryBackoff)
|
||||
timeLeftForRetries := maxRetryInterval - time.Since(timeStart)
|
||||
if timeLeftForRetries < 0 {
|
||||
// the max retry time has passed, so we give up
|
||||
break
|
||||
}
|
||||
|
||||
if retryInterval > timeLeftForRetries {
|
||||
retryInterval = timeLeftForRetries
|
||||
}
|
||||
// sleeping to prevent remote db hammering
|
||||
time.Sleep(retryInterval)
|
||||
retryInterval *= 2
|
||||
|
||||
}
|
||||
|
||||
droppedRows.Add(len(wr.Timeseries))
|
||||
|
@ -276,22 +298,23 @@ func (c *Client) send(ctx context.Context, data []byte) error {

	case 2:
		// respond with a HTTP 2xx status code when the write is successful.
		return nil
	case 5:
		// respond with HTTP status code 5xx when the write fails and SHOULD be retried.
		return &retriableError{fmt.Errorf("unexpected response code %d for %s. Response body %q",
			resp.StatusCode, req.URL.Redacted(), body)}
	case 4:
		if resp.StatusCode != http.StatusTooManyRequests {
			// MUST NOT retry write requests on HTTP 4xx responses other than 429
			return &nonRetriableError{fmt.Errorf("unexpected response code %d for %s. Response body %q",
				resp.StatusCode, req.URL.Redacted(), body)}
		}
		fallthrough
	default:
		// respond with HTTP status code 4xx when the request is invalid, will never be able to succeed
		// and should not be retried.
		return fmt.Errorf("unexpected response code %d for %s. Response body %q",
			resp.StatusCode, req.URL.Redacted(), body)
	}
}

type retriableError struct {
type nonRetriableError struct {
	err error
}

func (e *retriableError) Error() string {
func (e *nonRetriableError) Error() string {
	return e.err.Error()
}

@ -7,6 +7,7 @@ import (

	"math/rand"
	"net/http"
	"net/http/httptest"
	"sync"
	"sync/atomic"
	"testing"
	"time"

@ -18,15 +19,30 @@ import (

)

func TestClient_Push(t *testing.T) {
	oldMinInterval := *retryMinInterval
	*retryMinInterval = time.Millisecond * 10
	defer func() {
		*retryMinInterval = oldMinInterval
	}()

	testSrv := newRWServer()
	cfg := Config{
	client, err := NewClient(context.Background(), Config{
		Addr:         testSrv.URL,
		MaxBatchSize: 100,
	}
	client, err := NewClient(context.Background(), cfg)
	})
	if err != nil {
		t.Fatalf("failed to create client: %s", err)
	}

	faultySrv := newFaultyRWServer()
	faultyClient, err := NewClient(context.Background(), Config{
		Addr:         faultySrv.URL,
		MaxBatchSize: 50,
	})
	if err != nil {
		t.Fatalf("failed to create faulty client: %s", err)
	}

	r := rand.New(rand.NewSource(1))
	const rowsN = 1e4
	var sent int

@ -38,9 +54,16 @@ func TestClient_Push(t *testing.T) {

			}},
		}
		err := client.Push(s)
		if err != nil {
			t.Fatalf("unexpected err: %s", err)
		}
		if err == nil {
			sent++
		}
		err = faultyClient.Push(s)
		if err != nil {
			t.Fatalf("unexpected err: %s", err)
		}
	}
	if sent == 0 {
		t.Fatalf("0 series sent")

@ -48,10 +71,17 @@ func TestClient_Push(t *testing.T) {

	if err := client.Close(); err != nil {
		t.Fatalf("failed to close client: %s", err)
	}
	if err := faultyClient.Close(); err != nil {
		t.Fatalf("failed to close faulty client: %s", err)
	}
	got := testSrv.accepted()
	if got != sent {
		t.Fatalf("expected to have %d series; got %d", sent, got)
	}
	got = faultySrv.accepted()
	if got != sent {
		t.Fatalf("expected to have %d series for faulty client; got %d", sent, got)
	}
}

func newRWServer() *rwServer {

@ -117,3 +147,42 @@ func (rw *rwServer) handler(w http.ResponseWriter, r *http.Request) {

	atomic.AddUint64(&rw.acceptedRows, uint64(len(wr.Timeseries)))
	w.WriteHeader(http.StatusNoContent)
}

// faultyRWServer sometimes respond with 5XX status code
// or just closes the connection. Is used for testing retries.
type faultyRWServer struct {
	*rwServer

	reqsMu sync.Mutex
	reqs   int
}

func newFaultyRWServer() *faultyRWServer {
	rw := &faultyRWServer{
		rwServer: &rwServer{},
	}
	rw.Server = httptest.NewServer(http.HandlerFunc(rw.handler))
	return rw
}

func (frw *faultyRWServer) handler(w http.ResponseWriter, r *http.Request) {
	frw.reqsMu.Lock()
	reqs := frw.reqs
	frw.reqs++
	if frw.reqs > 5 {
		frw.reqs = 0
	}
	frw.reqsMu.Unlock()

	switch reqs {
	case 0, 1, 2, 3:
		frw.rwServer.handler(w, r)
	case 4:
		hj, _ := w.(http.Hijacker)
		conn, _, _ := hj.Hijack()
		conn.Close()
	case 5:
		w.WriteHeader(http.StatusInternalServerError)
		w.Write([]byte("server overloaded"))
	}
}

@ -31,7 +31,7 @@ type Rule interface {

	Close()
}

var errDuplicate = errors.New("result contains metrics with the same labelset after applying rule labels")
var errDuplicate = errors.New("result contains metrics with the same labelset after applying rule labels. See https://docs.victoriametrics.com/vmalert.html#series-with-the-same-labelset for details")

type ruleState struct {
	sync.RWMutex

@ -7,9 +7,11 @@ function collapseAll() {

}

function toggleByID(id) {
    let el = $("#" + id);
    if (el.length > 0) {
        el.click();
    if (id) {
        let el = $("#" + id);
        if (el.length > 0) {
            el.click();
        }
    }
}

@ -36,4 +38,4 @@ $(document).ready(function () {

$(document).ready(function () {
    $('[data-bs-toggle="tooltip"]').tooltip();
});
});

@ -1,5 +1,4 @@

{% import (
    "strings"
    "net/http"
    "path"
    "net/url"

@ -7,8 +6,8 @@

    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
) %}

{% func Header(r *http.Request, navItems []NavItem, title string) %}
    {%code prefix := utils.Prefix(r.URL.Path) %}
{% func Header(r *http.Request, navItems []NavItem, title string, userErr error) %}
    {%code prefix := utils.Prefix(r.URL.Path) %}
    <!DOCTYPE html>
    <html lang="en">
    <head>

@ -71,8 +70,9 @@

    </style>
    </head>
    <body>
    {%= printNavItems(r, title, navItems) %}
    {%= printNavItems(r, title, navItems, userErr) %}
    <main class="px-2">
    {%= errorBody(userErr) %}
{% endfunc %}

@ -83,12 +83,9 @@ type NavItem struct {

}
%}

{% func printNavItems(r *http.Request, current string, items []NavItem) %}
{% func printNavItems(r *http.Request, current string, items []NavItem, userErr error) %}
{%code
    prefix := "/vmalert/"
    if strings.HasPrefix(r.URL.Path, prefix) {
        prefix = ""
    }
    prefix := utils.Prefix(r.URL.Path)
%}
<nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark">
  <div class="container-fluid">

@ -107,5 +104,30 @@ type NavItem struct {

    {% endfor %}
      </ul>
    </div>
    {%= errorIcon(userErr) %}
</nav>
{% endfunc %}

{% func errorIcon(err error) %}
{% if err != nil %}
<div class="d-flex" data-bs-toggle="tooltip" data-bs-placement="left" title="Configuration file failed to reload! Click to see more details.">
  <a type="button" data-bs-toggle="collapse" href="#reload-groups-error">
    <span class="text-danger">
      <svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-exclamation-triangle-fill" viewBox="0 0 16 16">
        <path d="M8.982 1.566a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566zM8 5c.535 0 .954.462.9.995l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995A.905.905 0 0 1 8 5zm.002 6a1 1 0 1 1 0 2 1 1 0 0 1 0-2z"/>
      </svg>
    </span>
  </a>
</div>
{% endif %}
{% endfunc %}

{% func errorBody(err error) %}
{% if err != nil %}
<div class="collapse mt-2 mb-2" id="reload-groups-error">
  <div class="card card-body">
    {%s err.Error() %}
  </div>
</div>
{% endif %}
{% endfunc %}

@ -9,52 +9,51 @@ import (

	"net/http"
	"net/url"
	"path"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
)

//line app/vmalert/tpl/header.qtpl:10
//line app/vmalert/tpl/header.qtpl:9
import (
	qtio422016 "io"

	qt422016 "github.com/valyala/quicktemplate"
)

//line app/vmalert/tpl/header.qtpl:10
//line app/vmalert/tpl/header.qtpl:9
var (
	_ = qtio422016.Copy
	_ = qt422016.AcquireByteBuffer
)

//line app/vmalert/tpl/header.qtpl:10
func StreamHeader(qw422016 *qt422016.Writer, r *http.Request, navItems []NavItem, title string) {
//line app/vmalert/tpl/header.qtpl:10
//line app/vmalert/tpl/header.qtpl:9
func StreamHeader(qw422016 *qt422016.Writer, r *http.Request, navItems []NavItem, title string, userErr error) {
//line app/vmalert/tpl/header.qtpl:9
	qw422016.N().S(`
`)
//line app/vmalert/tpl/header.qtpl:11
`)
//line app/vmalert/tpl/header.qtpl:10
	prefix := utils.Prefix(r.URL.Path)

//line app/vmalert/tpl/header.qtpl:11
//line app/vmalert/tpl/header.qtpl:10
	qw422016.N().S(`
<!DOCTYPE html>
<html lang="en">
<head>
    <title>vmalert`)
//line app/vmalert/tpl/header.qtpl:15
//line app/vmalert/tpl/header.qtpl:14
	if title != "" {
//line app/vmalert/tpl/header.qtpl:15
//line app/vmalert/tpl/header.qtpl:14
		qw422016.N().S(` - `)
//line app/vmalert/tpl/header.qtpl:15
//line app/vmalert/tpl/header.qtpl:14
		qw422016.E().S(title)
//line app/vmalert/tpl/header.qtpl:15
//line app/vmalert/tpl/header.qtpl:14
	}
//line app/vmalert/tpl/header.qtpl:15
//line app/vmalert/tpl/header.qtpl:14
	qw422016.N().S(`</title>
    <link href="`)
//line app/vmalert/tpl/header.qtpl:16
//line app/vmalert/tpl/header.qtpl:15
	qw422016.E().S(prefix)
//line app/vmalert/tpl/header.qtpl:16
//line app/vmalert/tpl/header.qtpl:15
	qw422016.N().S(`static/css/bootstrap.min.css" rel="stylesheet" />
    <style>
        body{

@ -114,32 +113,37 @@ func StreamHeader(qw422016 *qt422016.Writer, r *http.Request, navItems []NavItem

</head>
<body>
`)
//line app/vmalert/tpl/header.qtpl:74
	streamprintNavItems(qw422016, r, title, navItems)
//line app/vmalert/tpl/header.qtpl:74
//line app/vmalert/tpl/header.qtpl:73
	streamprintNavItems(qw422016, r, title, navItems, userErr)
//line app/vmalert/tpl/header.qtpl:73
	qw422016.N().S(`
<main class="px-2">
`)
//line app/vmalert/tpl/header.qtpl:75
	streamerrorBody(qw422016, userErr)
//line app/vmalert/tpl/header.qtpl:75
	qw422016.N().S(`
`)
//line app/vmalert/tpl/header.qtpl:76
}

//line app/vmalert/tpl/header.qtpl:76
func WriteHeader(qq422016 qtio422016.Writer, r *http.Request, navItems []NavItem, title string) {
func WriteHeader(qq422016 qtio422016.Writer, r *http.Request, navItems []NavItem, title string, userErr error) {
//line app/vmalert/tpl/header.qtpl:76
	qw422016 := qt422016.AcquireWriter(qq422016)
//line app/vmalert/tpl/header.qtpl:76
	StreamHeader(qw422016, r, navItems, title)
	StreamHeader(qw422016, r, navItems, title, userErr)
//line app/vmalert/tpl/header.qtpl:76
	qt422016.ReleaseWriter(qw422016)
//line app/vmalert/tpl/header.qtpl:76
}

//line app/vmalert/tpl/header.qtpl:76
func Header(r *http.Request, navItems []NavItem, title string) string {
func Header(r *http.Request, navItems []NavItem, title string, userErr error) string {
//line app/vmalert/tpl/header.qtpl:76
	qb422016 := qt422016.AcquireByteBuffer()
//line app/vmalert/tpl/header.qtpl:76
	WriteHeader(qb422016, r, navItems, title)
	WriteHeader(qb422016, r, navItems, title, userErr)
//line app/vmalert/tpl/header.qtpl:76
	qs422016 := string(qb422016.B)
//line app/vmalert/tpl/header.qtpl:76

@ -156,97 +160,205 @@ type NavItem struct {

}

//line app/vmalert/tpl/header.qtpl:86
func streamprintNavItems(qw422016 *qt422016.Writer, r *http.Request, current string, items []NavItem) {
func streamprintNavItems(qw422016 *qt422016.Writer, r *http.Request, current string, items []NavItem, userErr error) {
//line app/vmalert/tpl/header.qtpl:86
	qw422016.N().S(`
`)
//line app/vmalert/tpl/header.qtpl:88
	prefix := "/vmalert/"
	if strings.HasPrefix(r.URL.Path, prefix) {
		prefix = ""
	}
	prefix := utils.Prefix(r.URL.Path)

//line app/vmalert/tpl/header.qtpl:92
//line app/vmalert/tpl/header.qtpl:89
	qw422016.N().S(`
<nav class="navbar navbar-expand-md navbar-dark fixed-top bg-dark">
  <div class="container-fluid">
    <div class="collapse navbar-collapse" id="navbarCollapse">
      <ul class="navbar-nav me-auto mb-2 mb-md-0">
`)
//line app/vmalert/tpl/header.qtpl:97
//line app/vmalert/tpl/header.qtpl:94
	for _, item := range items {
//line app/vmalert/tpl/header.qtpl:97
//line app/vmalert/tpl/header.qtpl:94
		qw422016.N().S(`
        <li class="nav-item">
`)
//line app/vmalert/tpl/header.qtpl:100
//line app/vmalert/tpl/header.qtpl:97
		u, _ := url.Parse(item.Url)

//line app/vmalert/tpl/header.qtpl:101
//line app/vmalert/tpl/header.qtpl:98
		qw422016.N().S(`
            <a class="nav-link`)
//line app/vmalert/tpl/header.qtpl:102
//line app/vmalert/tpl/header.qtpl:99
		if current == item.Name {
//line app/vmalert/tpl/header.qtpl:102
//line app/vmalert/tpl/header.qtpl:99
			qw422016.N().S(` active`)
//line app/vmalert/tpl/header.qtpl:102
//line app/vmalert/tpl/header.qtpl:99
		}
//line app/vmalert/tpl/header.qtpl:102
//line app/vmalert/tpl/header.qtpl:99
		qw422016.N().S(`"
               href="`)
//line app/vmalert/tpl/header.qtpl:103
//line app/vmalert/tpl/header.qtpl:100
		if u.IsAbs() {
//line app/vmalert/tpl/header.qtpl:103
//line app/vmalert/tpl/header.qtpl:100
			qw422016.E().S(item.Url)
//line app/vmalert/tpl/header.qtpl:103
//line app/vmalert/tpl/header.qtpl:100
		} else {
//line app/vmalert/tpl/header.qtpl:103
//line app/vmalert/tpl/header.qtpl:100
			qw422016.E().S(path.Join(prefix, item.Url))
//line app/vmalert/tpl/header.qtpl:103
//line app/vmalert/tpl/header.qtpl:100
		}
//line app/vmalert/tpl/header.qtpl:103
//line app/vmalert/tpl/header.qtpl:100
		qw422016.N().S(`">
                `)
//line app/vmalert/tpl/header.qtpl:104
//line app/vmalert/tpl/header.qtpl:101
		qw422016.E().S(item.Name)
//line app/vmalert/tpl/header.qtpl:104
//line app/vmalert/tpl/header.qtpl:101
		qw422016.N().S(`
            </a>
        </li>
`)
//line app/vmalert/tpl/header.qtpl:107
//line app/vmalert/tpl/header.qtpl:104
	}
//line app/vmalert/tpl/header.qtpl:107
//line app/vmalert/tpl/header.qtpl:104
	qw422016.N().S(`
      </ul>
    </div>
`)
//line app/vmalert/tpl/header.qtpl:107
	streamerrorIcon(qw422016, userErr)
//line app/vmalert/tpl/header.qtpl:107
	qw422016.N().S(`
||||
</nav>
|
||||
`)
|
||||
//line app/vmalert/tpl/header.qtpl:111
|
||||
//line app/vmalert/tpl/header.qtpl:109
|
||||
}
|
||||
|
||||
//line app/vmalert/tpl/header.qtpl:111
|
||||
func writeprintNavItems(qq422016 qtio422016.Writer, r *http.Request, current string, items []NavItem) {
|
||||
//line app/vmalert/tpl/header.qtpl:111
|
||||
//line app/vmalert/tpl/header.qtpl:109
|
||||
func writeprintNavItems(qq422016 qtio422016.Writer, r *http.Request, current string, items []NavItem, userErr error) {
|
||||
//line app/vmalert/tpl/header.qtpl:109
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmalert/tpl/header.qtpl:111
|
||||
streamprintNavItems(qw422016, r, current, items)
|
||||
//line app/vmalert/tpl/header.qtpl:111
|
||||
//line app/vmalert/tpl/header.qtpl:109
|
||||
streamprintNavItems(qw422016, r, current, items, userErr)
|
||||
//line app/vmalert/tpl/header.qtpl:109
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmalert/tpl/header.qtpl:111
|
||||
//line app/vmalert/tpl/header.qtpl:109
|
||||
}
|
||||
|
||||
//line app/vmalert/tpl/header.qtpl:109
|
||||
func printNavItems(r *http.Request, current string, items []NavItem, userErr error) string {
|
||||
//line app/vmalert/tpl/header.qtpl:109
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmalert/tpl/header.qtpl:109
|
||||
writeprintNavItems(qb422016, r, current, items, userErr)
|
||||
//line app/vmalert/tpl/header.qtpl:109
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmalert/tpl/header.qtpl:109
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmalert/tpl/header.qtpl:109
|
||||
return qs422016
|
||||
//line app/vmalert/tpl/header.qtpl:109
|
||||
}
|
||||
|
||||
//line app/vmalert/tpl/header.qtpl:111
|
||||
func printNavItems(r *http.Request, current string, items []NavItem) string {
|
||||
//line app/vmalert/tpl/header.qtpl:111
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmalert/tpl/header.qtpl:111
|
||||
writeprintNavItems(qb422016, r, current, items)
|
||||
//line app/vmalert/tpl/header.qtpl:111
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmalert/tpl/header.qtpl:111
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmalert/tpl/header.qtpl:111
|
||||
return qs422016
|
||||
func streamerrorIcon(qw422016 *qt422016.Writer, err error) {
|
||||
//line app/vmalert/tpl/header.qtpl:111
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/tpl/header.qtpl:112
|
||||
if err != nil {
|
||||
//line app/vmalert/tpl/header.qtpl:112
|
||||
qw422016.N().S(`
|
||||
<div class="d-flex" data-bs-toggle="tooltip" data-bs-placement="left" title="Configuration file failed to reload! Click to see more details.">
|
||||
<a type="button" data-bs-toggle="collapse" href="#reload-groups-error">
|
||||
<span class="text-danger">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="currentColor" class="bi bi-exclamation-triangle-fill" viewBox="0 0 16 16">
|
||||
<path d="M8.982 1.566a1.13 1.13 0 0 0-1.96 0L.165 13.233c-.457.778.091 1.767.98 1.767h13.713c.889 0 1.438-.99.98-1.767L8.982 1.566zM8 5c.535 0 .954.462.9.995l-.35 3.507a.552.552 0 0 1-1.1 0L7.1 5.995A.905.905 0 0 1 8 5zm.002 6a1 1 0 1 1 0 2 1 1 0 0 1 0-2z"/>
|
||||
</svg>
|
||||
</span>
|
||||
</a>
|
||||
</div>
|
||||
`)
|
||||
//line app/vmalert/tpl/header.qtpl:122
|
||||
}
|
||||
//line app/vmalert/tpl/header.qtpl:122
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/tpl/header.qtpl:123
|
||||
}
|
||||
|
||||
//line app/vmalert/tpl/header.qtpl:123
|
||||
func writeerrorIcon(qq422016 qtio422016.Writer, err error) {
|
||||
//line app/vmalert/tpl/header.qtpl:123
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmalert/tpl/header.qtpl:123
|
||||
streamerrorIcon(qw422016, err)
|
||||
//line app/vmalert/tpl/header.qtpl:123
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmalert/tpl/header.qtpl:123
|
||||
}
|
||||
|
||||
//line app/vmalert/tpl/header.qtpl:123
|
||||
func errorIcon(err error) string {
|
||||
//line app/vmalert/tpl/header.qtpl:123
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmalert/tpl/header.qtpl:123
|
||||
writeerrorIcon(qb422016, err)
|
||||
//line app/vmalert/tpl/header.qtpl:123
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmalert/tpl/header.qtpl:123
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmalert/tpl/header.qtpl:123
|
||||
return qs422016
|
||||
//line app/vmalert/tpl/header.qtpl:123
|
||||
}
|
||||
|
||||
//line app/vmalert/tpl/header.qtpl:125
|
||||
func streamerrorBody(qw422016 *qt422016.Writer, err error) {
|
||||
//line app/vmalert/tpl/header.qtpl:125
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/tpl/header.qtpl:126
|
||||
if err != nil {
|
||||
//line app/vmalert/tpl/header.qtpl:126
|
||||
qw422016.N().S(`
|
||||
<div class="collapse mt-2 mb-2" id="reload-groups-error">
|
||||
<div class="card card-body">
|
||||
`)
|
||||
//line app/vmalert/tpl/header.qtpl:129
|
||||
qw422016.E().S(err.Error())
|
||||
//line app/vmalert/tpl/header.qtpl:129
|
||||
qw422016.N().S(`
|
||||
</div>
|
||||
</div>
|
||||
`)
|
||||
//line app/vmalert/tpl/header.qtpl:132
|
||||
}
|
||||
//line app/vmalert/tpl/header.qtpl:132
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/tpl/header.qtpl:133
|
||||
}
|
||||
|
||||
//line app/vmalert/tpl/header.qtpl:133
|
||||
func writeerrorBody(qq422016 qtio422016.Writer, err error) {
|
||||
//line app/vmalert/tpl/header.qtpl:133
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmalert/tpl/header.qtpl:133
|
||||
streamerrorBody(qw422016, err)
|
||||
//line app/vmalert/tpl/header.qtpl:133
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmalert/tpl/header.qtpl:133
|
||||
}
|
||||
|
||||
//line app/vmalert/tpl/header.qtpl:133
|
||||
func errorBody(err error) string {
|
||||
//line app/vmalert/tpl/header.qtpl:133
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmalert/tpl/header.qtpl:133
|
||||
writeerrorBody(qb422016, err)
|
||||
//line app/vmalert/tpl/header.qtpl:133
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmalert/tpl/header.qtpl:133
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmalert/tpl/header.qtpl:133
|
||||
return qs422016
|
||||
//line app/vmalert/tpl/header.qtpl:133
|
||||
}
|
||||
|
|
|
@ -1,13 +1,24 @@
|
|||
package utils
|
||||
|
||||
import "strings"
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
)
|
||||
|
||||
const prefix = "/vmalert/"
|
||||
|
||||
// Prefix returns the "/vmalert/" prefix if it is missing in the path.
|
||||
func Prefix(path string) string {
|
||||
pp := httpserver.GetPathPrefix()
|
||||
path = strings.TrimLeft(path, pp)
|
||||
if strings.HasPrefix(path, prefix) {
|
||||
return ""
|
||||
return pp
|
||||
}
|
||||
return prefix
|
||||
res, err := url.JoinPath(pp, prefix)
|
||||
if err != nil {
|
||||
return path
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
|
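For orientation, here is a minimal sketch of the intended behavior of the reworked `Prefix` helper, assuming an empty `-http.pathPrefix` (so `httpserver.GetPathPrefix()` returns an empty string); the `prefix` function below is a hypothetical stand-in for illustration, not the repo's code:

```go
package main

import (
	"fmt"
	"strings"
)

// simplified stand-in for utils.Prefix with an empty -http.pathPrefix:
// paths already under /vmalert/ need no extra prefix, while all other
// paths get "/vmalert/" prepended by the caller.
func prefix(path string) string {
	if strings.HasPrefix(path, "/vmalert/") {
		return ""
	}
	return "/vmalert/"
}

func main() {
	fmt.Println(prefix("/vmalert/groups")) // "" - link targets are left as-is
	fmt.Println(prefix("/groups"))         // "/vmalert/" - prefix gets added
}
```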
|
@ -12,7 +12,7 @@
|
|||
|
||||
|
||||
{% func Welcome(r *http.Request) %}
|
||||
{%= tpl.Header(r, navItems, "vmalert") %}
|
||||
{%= tpl.Header(r, navItems, "vmalert", configError()) %}
|
||||
<p>
|
||||
API:<br>
|
||||
{% for _, p := range apiLinks %}
|
||||
|
@ -40,7 +40,7 @@ btn-primary
|
|||
|
||||
{% func ListGroups(r *http.Request, originGroups []APIGroup) %}
|
||||
{%code prefix := utils.Prefix(r.URL.Path) %}
|
||||
{%= tpl.Header(r, navItems, "Groups") %}
|
||||
{%= tpl.Header(r, navItems, "Groups", configError()) %}
|
||||
{%code
|
||||
filter := r.URL.Query().Get("filter")
|
||||
rOk := make(map[string]int)
|
||||
|
@ -164,7 +164,7 @@ btn-primary
|
|||
|
||||
{% func ListAlerts(r *http.Request, groupAlerts []GroupAlerts) %}
|
||||
{%code prefix := utils.Prefix(r.URL.Path) %}
|
||||
{%= tpl.Header(r, navItems, "Alerts") %}
|
||||
{%= tpl.Header(r, navItems, "Alerts", configError()) %}
|
||||
{% if len(groupAlerts) > 0 %}
|
||||
<a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
|
||||
<a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
|
||||
|
@ -250,7 +250,7 @@ btn-primary
|
|||
{% endfunc %}
|
||||
|
||||
{% func ListTargets(r *http.Request, targets map[notifier.TargetType][]notifier.Target) %}
|
||||
{%= tpl.Header(r, navItems, "Notifiers") %}
|
||||
{%= tpl.Header(r, navItems, "Notifiers", configError()) %}
|
||||
{% if len(targets) > 0 %}
|
||||
<a class="btn btn-primary" role="button" onclick="collapseAll()">Collapse All</a>
|
||||
<a class="btn btn-primary" role="button" onclick="expandAll()">Expand All</a>
|
||||
|
@ -307,7 +307,7 @@ btn-primary
|
|||
|
||||
{% func Alert(r *http.Request, alert *APIAlert) %}
|
||||
{%code prefix := utils.Prefix(r.URL.Path) %}
|
||||
{%= tpl.Header(r, navItems, "") %}
|
||||
{%= tpl.Header(r, navItems, "", configError()) %}
|
||||
{%code
|
||||
var labelKeys []string
|
||||
for k := range alert.Labels {
|
||||
|
@ -394,7 +394,7 @@ btn-primary
|
|||
|
||||
{% func RuleDetails(r *http.Request, rule APIRule) %}
|
||||
{%code prefix := utils.Prefix(r.URL.Path) %}
|
||||
{%= tpl.Header(r, navItems, "") %}
|
||||
{%= tpl.Header(r, navItems, "", configError()) %}
|
||||
{%code
|
||||
var labelKeys []string
|
||||
for k := range rule.Labels {
|
||||
|
@ -565,7 +565,7 @@ btn-primary
|
|||
{% if isNoMatch(r) %}
|
||||
<svg xmlns="http://www.w3.org/2000/svg"
|
||||
data-bs-toggle="tooltip"
|
||||
title="No match! This rule last evaluation hasn't selected any time series from the datasource.
|
||||
title="No match! This rule's last evaluation hasn't selected any time series from the datasource.
|
||||
It might be that either this data is missing in the datasource or there is a typo in rule's expression.
|
||||
See more in Details."
|
||||
width="18" height="18" fill="currentColor" class="bi bi-exclamation-triangle-fill flex-shrink-0 me-2" viewBox="0 0 16 16" role="img" aria-label="Warning:">
|
||||
|
@ -578,4 +578,4 @@ btn-primary
|
|||
func isNoMatch (r APIRule) bool {
|
||||
return r.LastSamples == 0 && r.LastSeriesFetched != nil && *r.LastSeriesFetched == 0
|
||||
}
|
||||
%}
|
||||
%}
|
||||
|
|
|
@ -34,7 +34,7 @@ func StreamWelcome(qw422016 *qt422016.Writer, r *http.Request) {
|
|||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:15
|
||||
tpl.StreamHeader(qw422016, r, navItems, "vmalert")
|
||||
tpl.StreamHeader(qw422016, r, navItems, "vmalert", configError())
|
||||
//line app/vmalert/web.qtpl:15
|
||||
qw422016.N().S(`
|
||||
<p>
|
||||
|
@ -207,7 +207,7 @@ func StreamListGroups(qw422016 *qt422016.Writer, r *http.Request, originGroups [
|
|||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:43
|
||||
tpl.StreamHeader(qw422016, r, navItems, "Groups")
|
||||
tpl.StreamHeader(qw422016, r, navItems, "Groups", configError())
|
||||
//line app/vmalert/web.qtpl:43
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
|
@ -619,7 +619,7 @@ func StreamListAlerts(qw422016 *qt422016.Writer, r *http.Request, groupAlerts []
|
|||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:167
|
||||
tpl.StreamHeader(qw422016, r, navItems, "Alerts")
|
||||
tpl.StreamHeader(qw422016, r, navItems, "Alerts", configError())
|
||||
//line app/vmalert/web.qtpl:167
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
|
@ -885,7 +885,7 @@ func StreamListTargets(qw422016 *qt422016.Writer, r *http.Request, targets map[n
|
|||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:253
|
||||
tpl.StreamHeader(qw422016, r, navItems, "Notifiers")
|
||||
tpl.StreamHeader(qw422016, r, navItems, "Notifiers", configError())
|
||||
//line app/vmalert/web.qtpl:253
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
|
@ -1065,7 +1065,7 @@ func StreamAlert(qw422016 *qt422016.Writer, r *http.Request, alert *APIAlert) {
|
|||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:310
|
||||
tpl.StreamHeader(qw422016, r, navItems, "")
|
||||
tpl.StreamHeader(qw422016, r, navItems, "", configError())
|
||||
//line app/vmalert/web.qtpl:310
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
|
@ -1274,7 +1274,7 @@ func StreamRuleDetails(qw422016 *qt422016.Writer, r *http.Request, rule APIRule)
|
|||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmalert/web.qtpl:397
|
||||
tpl.StreamHeader(qw422016, r, navItems, "")
|
||||
tpl.StreamHeader(qw422016, r, navItems, "", configError())
|
||||
//line app/vmalert/web.qtpl:397
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
|
@ -1759,7 +1759,7 @@ func streamseriesFetchedWarn(qw422016 *qt422016.Writer, r APIRule) {
|
|||
qw422016.N().S(`
|
||||
<svg xmlns="http://www.w3.org/2000/svg"
|
||||
data-bs-toggle="tooltip"
|
||||
title="No match! This rule last evaluation hasn't selected any time series from the datasource.
|
||||
title="No match! This rule's last evaluation hasn't selected any time series from the datasource.
|
||||
It might be that either this data is missing in the datasource or there is a typo in rule's expression.
|
||||
See more in Details."
|
||||
width="18" height="18" fill="currentColor" class="bi bi-exclamation-triangle-fill flex-shrink-0 me-2" viewBox="0 0 16 16" role="img" aria-label="Warning:">
|
||||
|
|
|
@ -165,8 +165,8 @@ func TestHandler(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestEmptyResponse(t *testing.T) {
|
||||
rh := &requestHandler{m: &manager{groups: make(map[uint64]*Group)}}
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { rh.handler(w, r) }))
|
||||
rhWithNoGroups := &requestHandler{m: &manager{groups: make(map[uint64]*Group)}}
|
||||
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { rhWithNoGroups.handler(w, r) }))
|
||||
defer ts.Close()
|
||||
|
||||
getResp := func(url string, to interface{}, code int) {
|
||||
|
@ -190,7 +190,7 @@ func TestEmptyResponse(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
t.Run("/api/v1/alerts", func(t *testing.T) {
|
||||
t.Run("no groups /api/v1/alerts", func(t *testing.T) {
|
||||
lr := listAlertsResponse{}
|
||||
getResp(ts.URL+"/api/v1/alerts", &lr, 200)
|
||||
if lr.Data.Alerts == nil {
|
||||
|
@ -204,7 +204,7 @@ func TestEmptyResponse(t *testing.T) {
|
|||
}
|
||||
})
|
||||
|
||||
t.Run("/api/v1/rules", func(t *testing.T) {
|
||||
t.Run("no groups /api/v1/rules", func(t *testing.T) {
|
||||
lr := listGroupsResponse{}
|
||||
getResp(ts.URL+"/api/v1/rules", &lr, 200)
|
||||
if lr.Data.Groups == nil {
|
||||
|
@ -217,4 +217,26 @@ func TestEmptyResponse(t *testing.T) {
|
|||
t.Errorf("expected /api/v1/rules response to have non-nil data")
|
||||
}
|
||||
})
|
||||
|
||||
rhWithEmptyGroup := &requestHandler{m: &manager{groups: map[uint64]*Group{0: {Name: "test"}}}}
|
||||
ts.Config.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { rhWithEmptyGroup.handler(w, r) })
|
||||
|
||||
t.Run("empty group /api/v1/rules", func(t *testing.T) {
|
||||
lr := listGroupsResponse{}
|
||||
getResp(ts.URL+"/api/v1/rules", &lr, 200)
|
||||
if lr.Data.Groups == nil {
|
||||
t.Fatalf("expected /api/v1/rules response to have non-nil data")
|
||||
}
|
||||
|
||||
lr = listGroupsResponse{}
|
||||
getResp(ts.URL+"/vmalert/api/v1/rules", &lr, 200)
|
||||
if lr.Data.Groups == nil {
|
||||
t.Fatalf("expected /api/v1/rules response to have non-nil data")
|
||||
}
|
||||
|
||||
group := lr.Data.Groups[0]
|
||||
if group.Rules == nil {
|
||||
t.Fatalf("expected /api/v1/rules response to have non-nil rules for group")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
|
|
@ -85,7 +85,7 @@ ip_filters:
|
|||
deny_list: [10.0.0.42]
|
||||
```
|
||||
|
||||
The following config allows requests for the user 'foobar' only from the ip `127.0.0.1`:
|
||||
The following config allows requests for the user 'foobar' only from the IP `127.0.0.1`:
|
||||
|
||||
```yml
|
||||
users:
|
||||
|
@ -96,6 +96,8 @@ users:
|
|||
allow_list: [127.0.0.1]
|
||||
```
|
||||
|
||||
See a config example of using IP filters [here](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmauth/example_config_ent.yml).
|
||||
|
||||
## Auth config
|
||||
|
||||
`-auth.config` is represented in the following simple `yml` format:
|
||||
|
@ -249,7 +251,12 @@ It is recommended protecting the following endpoints with authKeys:
|
|||
`vmauth` exports various metrics in Prometheus exposition format at the `http://vmauth-host:8427/metrics` page. It is recommended to set up regular scraping of this page
|
||||
either via [vmagent](https://docs.victoriametrics.com/vmagent.html) or via Prometheus, so the exported metrics can be analyzed later.
|
||||
|
||||
`vmauth` exports `vmauth_user_requests_total` metric with `username` label. The `username` label value equals to `username` field value set in the `-auth.config` file. It is possible to override or hide the value in the label by specifying `name` field. For example, the following config will result in `vmauth_user_requests_total{username="foobar"}` instead of `vmauth_user_requests_total{username="secret_user"}`:
|
||||
`vmauth` exports `vmauth_user_requests_total` [counter](https://docs.victoriametrics.com/keyConcepts.html#counter) metric
|
||||
and `vmauth_user_request_duration_seconds_*` [summary](https://docs.victoriametrics.com/keyConcepts.html#summary) metric
|
||||
with the `username` label. The `username` label value equals the `username` field value set in the `-auth.config` file.
|
||||
It is possible to override or hide the value in the label by specifying the `name` field.
|
||||
For example, the following config will result in `vmauth_user_requests_total{username="foobar"}`
|
||||
instead of `vmauth_user_requests_total{username="secret_user"}`:
|
||||
|
||||
```yml
|
||||
users:
|
||||
|
@ -258,7 +265,10 @@ users:
|
|||
# other config options here
|
||||
```
|
||||
|
||||
For unauthorized users `vmauth` exports `vmauth_unauthorized_user_requests_total` metric without label (if `unauthorized_user` section of config is used).
|
||||
For unauthorized users `vmauth` exports `vmauth_unauthorized_user_requests_total`
|
||||
[counter](https://docs.victoriametrics.com/keyConcepts.html#counter) metric and
|
||||
`vmauth_unauthorized_user_request_duration_seconds_*` [summary](https://docs.victoriametrics.com/keyConcepts.html#summary)
|
||||
metric without labels (if the `unauthorized_user` section of the config is used).
|
||||
|
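As a rough sketch of how these series come into being, the commit wires them up via the `github.com/VictoriaMetrics/metrics` package; the `name` value below is a placeholder for the `name`/`username` field from `-auth.config`:

```go
package main

import (
	"fmt"
	"time"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	name := "foobar" // value of `name` (or `username`) from -auth.config

	// one counter and one summary per configured user, labeled by username
	requests := metrics.GetOrCreateCounter(fmt.Sprintf(`vmauth_user_requests_total{username=%q}`, name))
	requestsDuration := metrics.GetOrCreateSummary(fmt.Sprintf(`vmauth_user_request_duration_seconds{username=%q}`, name))

	// per-request accounting, as done in processUserRequest:
	startTime := time.Now()
	requests.Inc()
	defer requestsDuration.UpdateDuration(startTime)
}
```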
||||
## How to build from sources
|
||||
|
||||
|
@ -332,7 +342,7 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
|
|||
-auth.config string
|
||||
Path to auth config. It can point either to local file or to http url. See https://docs.victoriametrics.com/vmauth.html for details on the format of this auth config
|
||||
-configCheckInterval duration
|
||||
Interval for config file re-read. Zero value disables config re-reading. By default, refreshing is disabled, send SIGHUP for config refresh.
|
||||
interval for config file re-read. Zero value disables config re-reading. By default, refreshing is disabled, send SIGHUP for config refresh.
|
||||
-enableTCP6
|
||||
Whether to enable IPv6 for listening and dialing. By default, only IPv4 TCP and UDP is used
|
||||
-envflag.enable
|
||||
|
@ -364,9 +374,13 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
|
|||
-httpListenAddr string
|
||||
TCP address to listen for http connections. See also -httpListenAddr.useProxyProtocol (default ":8427")
|
||||
-httpListenAddr.useProxyProtocol
|
||||
Whether to use proxy protocol for connections accepted at -httpListenAddr . See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt
|
||||
Whether to use proxy protocol for connections accepted at -httpListenAddr . See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing
|
||||
-internStringCacheExpireDuration duration
|
||||
The expiry duration for caches for interned strings. See https://en.wikipedia.org/wiki/String_interning . See also -internStringMaxLen and -internStringDisableCache (default 6m0s)
|
||||
-internStringDisableCache
|
||||
Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
|
||||
-internStringMaxLen int
|
||||
The maximum length for strings to intern. Lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning (default 500)
|
||||
The maximum length for strings to intern. Lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
|
||||
-logInvalidAuthTokens
|
||||
Whether to log requests with invalid auth tokens. Such requests are always counted at vmauth_http_request_errors_total{reason="invalid_auth_token"} metric, which is exposed at /metrics page
|
||||
-loggerDisableTimestamps
|
||||
|
@ -406,7 +420,7 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
|
|||
-pushmetrics.interval duration
|
||||
Interval for pushing metrics to -pushmetrics.url (default 10s)
|
||||
-pushmetrics.url array
|
||||
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default metrics exposed at /metrics page aren't pushed to any remote storage
|
||||
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default, metrics exposed at /metrics page aren't pushed to any remote storage
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
-reloadAuthKey string
|
||||
Auth key for /-/reload http endpoint. It must be passed as authKey=...
|
||||
|
|
|
@ -50,7 +50,8 @@ type UserInfo struct {
|
|||
concurrencyLimitCh chan struct{}
|
||||
concurrencyLimitReached *metrics.Counter
|
||||
|
||||
requests *metrics.Counter
|
||||
requests *metrics.Counter
|
||||
requestsDuration *metrics.Summary
|
||||
}
|
||||
|
||||
func (ui *UserInfo) beginConcurrencyLimit() error {
|
||||
|
@ -378,6 +379,7 @@ func parseAuthConfig(data []byte) (*AuthConfig, error) {
|
|||
ui := ac.UnauthorizedUser
|
||||
if ui != nil {
|
||||
ui.requests = metrics.GetOrCreateCounter(`vmauth_unauthorized_user_requests_total`)
|
||||
ui.requestsDuration = metrics.GetOrCreateSummary(`vmauth_unauthorized_user_request_duration_seconds`)
|
||||
ui.concurrencyLimitCh = make(chan struct{}, ui.getMaxConcurrentRequests())
|
||||
ui.concurrencyLimitReached = metrics.GetOrCreateCounter(`vmauth_unauthorized_user_concurrent_requests_limit_reached_total`)
|
||||
_ = metrics.GetOrCreateGauge(`vmauth_unauthorized_user_concurrent_requests_capacity`, func() float64 {
|
||||
|
@ -441,9 +443,11 @@ func parseAuthConfigUsers(ac *AuthConfig) (map[string]*UserInfo, error) {
|
|||
return nil, fmt.Errorf("password shouldn't be set for bearer_token %q", ui.BearerToken)
|
||||
}
|
||||
ui.requests = metrics.GetOrCreateCounter(fmt.Sprintf(`vmauth_user_requests_total{username=%q}`, name))
|
||||
ui.requestsDuration = metrics.GetOrCreateSummary(fmt.Sprintf(`vmauth_user_request_duration_seconds{username=%q}`, name))
|
||||
}
|
||||
if ui.Username != "" {
|
||||
ui.requests = metrics.GetOrCreateCounter(fmt.Sprintf(`vmauth_user_requests_total{username=%q}`, name))
|
||||
ui.requestsDuration = metrics.GetOrCreateSummary(fmt.Sprintf(`vmauth_user_request_duration_seconds{username=%q}`, name))
|
||||
}
|
||||
mcr := ui.getMaxConcurrentRequests()
|
||||
ui.concurrencyLimitCh = make(chan struct{}, mcr)
|
||||
|
|
|
@ -89,8 +89,6 @@ users:
|
|||
url_prefix: "http://vminsert:8480/insert/42/prometheus"
|
||||
headers:
|
||||
- "X-Scope-OrgID: abc"
|
||||
ip_filters:
|
||||
deny_list: [127.0.0.1]
|
||||
default_url:
|
||||
- "http://default1:8888/unsupported_url_handler"
|
||||
- "http://default2:8888/unsupported_url_handler"
|
||||
|
@ -104,10 +102,3 @@ unauthorized_user:
|
|||
url_prefix:
|
||||
- http://vmselect1:8481/select/0/prometheus
|
||||
- http://vmselect2:8481/select/0/prometheus
|
||||
ip_filters:
|
||||
allow_list: [8.8.8.8]
|
||||
|
||||
ip_filters:
|
||||
allow_list: ["1.2.3.0/24", "127.0.0.1"]
|
||||
deny_list:
|
||||
- 10.1.0.1
|
||||
|
|
57
app/vmauth/example_config_ent.yml
Normal file
|
@ -0,0 +1,57 @@
|
|||
# An arbitrary number of usernames may be put here.
|
||||
# It is possible to set multiple identical usernames with different passwords.
|
||||
# Such usernames can be differentiated by the `name` option.
|
||||
|
||||
users:
|
||||
# A single user for querying and inserting data:
|
||||
#
|
||||
# - Requests to http://vmauth:8427/api/v1/query, http://vmauth:8427/api/v1/query_range
|
||||
# and http://vmauth:8427/api/v1/label/<label_name>/values are proxied to the following urls in a round-robin manner:
|
||||
# - http://vmselect1:8481/select/42/prometheus
|
||||
# - http://vmselect2:8481/select/42/prometheus
|
||||
# For example, http://vmauth:8427/api/v1/query is proxied to http://vmselect1:8481/select/42/prometheus/api/v1/query
|
||||
# or to http://vmselect2:8481/select/42/prometheus/api/v1/query .
|
||||
#
|
||||
# - Requests to http://vmauth:8427/api/v1/write are proxied to http://vminsert:8480/insert/42/prometheus/api/v1/write .
|
||||
# The "X-Scope-OrgID: abc" http header is added to these requests.
|
||||
#
|
||||
# Requests which do not match `src_paths` from the `url_map` are proxied to the urls from `default_url`
|
||||
# in a round-robin manner. The original request path is passed in `request_path` query arg.
|
||||
# For example, requests to http://vmauth:8427/non/existing/path are proxied:
|
||||
# - to http://default1:8888/unsupported_url_handler?request_path=/non/existing/path
|
||||
# - or http://default2:8888/unsupported_url_handler?request_path=/non/existing/path
|
||||
- username: "foobar"
|
||||
url_map:
|
||||
- src_paths:
|
||||
- "/api/v1/query"
|
||||
- "/api/v1/query_range"
|
||||
- "/api/v1/label/[^/]+/values"
|
||||
url_prefix:
|
||||
- "http://vmselect1:8481/select/42/prometheus"
|
||||
- "http://vmselect2:8481/select/42/prometheus"
|
||||
- src_paths: ["/api/v1/write"]
|
||||
url_prefix: "http://vminsert:8480/insert/42/prometheus"
|
||||
headers:
|
||||
- "X-Scope-OrgID: abc"
|
||||
ip_filters:
|
||||
deny_list: [127.0.0.1]
|
||||
default_url:
|
||||
- "http://default1:8888/unsupported_url_handler"
|
||||
- "http://default2:8888/unsupported_url_handler"
|
||||
|
||||
# Requests without Authorization header are routed according to `unauthorized_user` section.
|
||||
unauthorized_user:
|
||||
url_map:
|
||||
- src_paths:
|
||||
- /api/v1/query
|
||||
- /api/v1/query_range
|
||||
url_prefix:
|
||||
- http://vmselect1:8481/select/0/prometheus
|
||||
- http://vmselect2:8481/select/0/prometheus
|
||||
ip_filters:
|
||||
allow_list: [8.8.8.8]
|
||||
|
||||
ip_filters:
|
||||
allow_list: ["1.2.3.0/24", "127.0.0.1"]
|
||||
deny_list:
|
||||
- 10.1.0.1
|
|
@ -123,6 +123,9 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool {
|
|||
}
|
||||
|
||||
func processUserRequest(w http.ResponseWriter, r *http.Request, ui *UserInfo) {
|
||||
startTime := time.Now()
|
||||
defer ui.requestsDuration.UpdateDuration(startTime)
|
||||
|
||||
ui.requests.Inc()
|
||||
|
||||
// Limit the concurrency of requests to backends
|
||||
|
|
|
@ -219,6 +219,12 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
|
|||
Username for HTTP Basic Auth. The authentication is disabled if empty. See also -httpAuth.password
|
||||
-httpListenAddr string
|
||||
TCP address for exporting metrics at /metrics page (default ":8420")
|
||||
-internStringCacheExpireDuration duration
|
||||
The expiry duration for caches for interned strings. See https://en.wikipedia.org/wiki/String_interning . See also -internStringMaxLen and -internStringDisableCache (default 6m0s)
|
||||
-internStringDisableCache
|
||||
Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
|
||||
-internStringMaxLen int
|
||||
The maximum length for strings to intern. Lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
|
||||
-loggerDisableTimestamps
|
||||
Whether to disable writing timestamps in logs
|
||||
-loggerErrorsPerSecondLimit int
|
||||
|
@ -257,8 +263,15 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
|
|||
-pushmetrics.url array
|
||||
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default, metrics exposed at /metrics page aren't pushed to any remote storage
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
-s2a_enable_appengine_dialer
|
||||
If true, opportunistically use AppEngine-specific dialer to call S2A.
|
||||
-s2a_timeout duration
|
||||
Timeout enforced on the connection to the S2A service for handshake. (default 3s)
|
||||
-s3ForcePathStyle
|
||||
Prefixing endpoint with bucket name when set false, true by default. (default true)
|
||||
-s3StorageClass string
|
||||
The Storage Class applied to objects uploaded to AWS S3. Supported values are: GLACIER, DEEP_ARCHIVE, GLACIER_IR, INTELLIGENT_TIERING, ONEZONE_IA, OUTPOSTS, REDUCED_REDUNDANCY, STANDARD, STANDARD_IA.
|
||||
See https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html/
|
||||
-snapshot.createURL string
|
||||
VictoriaMetrics create snapshot url. When this is given, a snapshot will automatically be created during backup. Example: http://victoriametrics:8428/snapshot/create . There is no need to set -snapshotName if -snapshot.createURL is set
|
||||
-snapshot.deleteURL string
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
## vmbackupmanager
|
||||
# vmbackupmanager
|
||||
|
||||
***vmbackupmanager is a part of [enterprise package](https://docs.victoriametrics.com/enterprise.html). It is available for download and evaluation at [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases)***
|
||||
|
||||
|
@ -104,11 +104,11 @@ The result on the GCS bucket
|
|||
|
||||
* The root folder
|
||||
|
||||
![root](vmbackupmanager_root_folder.png)
|
||||
<img alt="root folder" src="vmbackupmanager_root_folder.png">
|
||||
|
||||
* The latest folder
|
||||
|
||||
![latest](vmbackupmanager_latest_folder.png)
|
||||
<img alt="latest folder" src="vmbackupmanager_latest_folder.png">
|
||||
|
||||
## Backup Retention Policy
|
||||
|
||||
|
@ -123,7 +123,7 @@ Backup retention policy is controlled by:
|
|||
|
||||
Let’s assume we have a backup manager collecting daily backups for the past 10 days.
|
||||
|
||||
![daily](vmbackupmanager_rp_daily_1.png)
|
||||
<img alt="retention policy daily before retention cycle" src="vmbackupmanager_rp_daily_1.png">
|
||||
|
||||
We enable the backup retention policy for the backup manager by using the following configuration:
|
||||
|
||||
|
@ -148,7 +148,7 @@ info app/vmbackupmanager/retention.go:106 daily backups to delete [daily/2
|
|||
|
||||
The result on the GCS bucket shows only 3 daily backups:
|
||||
|
||||
![daily](vmbackupmanager_rp_daily_2.png)
|
||||
<img alt="retention policy daily after retention cycle" src="vmbackupmanager_rp_daily_2.png">
|
||||
|
||||
### Protection backups against deletion by retention policy
|
||||
|
||||
|
@ -452,6 +452,12 @@ command-line flags:
|
|||
Username for HTTP Basic Auth. The authentication is disabled if empty. See also -httpAuth.password
|
||||
-httpListenAddr string
|
||||
Address to listen for http connections (default ":8300")
|
||||
-internStringCacheExpireDuration duration
|
||||
The expiry duration for caches for interned strings. See https://en.wikipedia.org/wiki/String_interning . See also -internStringMaxLen and -internStringDisableCache (default 6m0s)
|
||||
-internStringDisableCache
|
||||
Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
|
||||
-internStringMaxLen int
|
||||
The maximum length for strings to intern. Lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
|
||||
-keepLastDaily int
|
||||
Keep last N daily backups. If 0 is specified, the next retention cycle removes all backups for the given time period. (default -1)
|
||||
-keepLastHourly int
|
||||
|
@ -496,13 +502,20 @@ command-line flags:
|
|||
Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default, metrics exposed at /metrics page aren't pushed to any remote storage
|
||||
Supports an array of values separated by comma or specified via multiple flags.
|
||||
-runOnStart
|
||||
Upload backups immediately after start of the service. Otherwise, the backup starts on new hour
|
||||
Upload backups immediately after start of the service. Otherwise the backup starts on new hour
|
||||
-s2a_enable_appengine_dialer
|
||||
If true, opportunistically use AppEngine-specific dialer to call S2A.
|
||||
-s2a_timeout duration
|
||||
Timeout enforced on the connection to the S2A service for handshake. (default 3s)
|
||||
-s3ForcePathStyle
|
||||
Prefixing endpoint with bucket name when set false, true by default. (default true)
|
||||
-s3StorageClass string
|
||||
The Storage Class applied to objects uploaded to AWS S3. Supported values are: GLACIER, DEEP_ARCHIVE, GLACIER_IR, INTELLIGENT_TIERING, ONEZONE_IA, OUTPOSTS, REDUCED_REDUNDANCY, STANDARD, STANDARD_IA.
|
||||
See https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html/
|
||||
-snapshot.createURL string
|
||||
VictoriaMetrics create snapshot url. When this is given, a snapshot will automatically be created during backup. Example: http://victoriametrics:8428/snapshot/create
|
||||
-snapshot.deleteURL string
|
||||
VictoriaMetrics delete snapshot url. Optional. Will be generated from snapshot.createURL if not provided. All created snapshots will be automatically deleted. Example: http://victoriametrics:8428/snapshot/delete
|
||||
VictoriaMetrics delete snapshot url. Optional. Will be generated from snapshot.createURL if not provided. All created snapshots will be automatically deleted. Example: http://victoriametrics:8428/snapshot/delete
|
||||
-storageDataPath string
|
||||
Path to VictoriaMetrics data. Must match -storageDataPath from VictoriaMetrics or vmstorage (default "victoria-metrics-data")
|
||||
-tls
|
||||
|
|
|
@ -11,9 +11,9 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
backoffRetries = 5
|
||||
backoffFactor = 1.7
|
||||
backoffMinDuration = time.Second
|
||||
backoffRetries = 10
|
||||
backoffFactor = 1.8
|
||||
backoffMinDuration = time.Second * 2
|
||||
)
|
||||
|
||||
// retryableFunc describes a callback which will be retried on errors
|
||||
|
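These constants feed the delay formula used by `Retry` below (`minDuration * factor^i` before attempt `i+2`), so each successive wait is 1.8x the previous one. A small sketch of the resulting worst-case schedule, assuming every attempt fails:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	const (
		retries     = 10
		factor      = 1.8
		minDuration = 2 * time.Second
	)
	var total time.Duration
	for i := 0; i < retries; i++ {
		// same computation as in Backoff.Retry
		d := time.Duration(float64(minDuration) * math.Pow(factor, float64(i)))
		total += d
		fmt.Printf("after attempt %d: sleep %v\n", i+1, d)
	}
	fmt.Println("worst-case total sleep:", total) // roughly 15 minutes
}
```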
@ -42,7 +42,6 @@ func New() *Backoff {
|
|||
func (b *Backoff) Retry(ctx context.Context, cb retryableFunc) (uint64, error) {
|
||||
var attempt uint64
|
||||
for i := 0; i < b.retries; i++ {
|
||||
// @TODO we should use context to cancel retries
|
||||
err := cb()
|
||||
if err == nil {
|
||||
return attempt, nil
|
||||
|
@ -55,7 +54,19 @@ func (b *Backoff) Retry(ctx context.Context, cb retryableFunc) (uint64, error) {
|
|||
backoff := float64(b.minDuration) * math.Pow(b.factor, float64(i))
|
||||
dur := time.Duration(backoff)
|
||||
logger.Errorf("got error: %s on attempt: %d; will retry in %v", err, attempt, dur)
|
||||
time.Sleep(time.Duration(backoff))
|
||||
|
||||
t := time.NewTimer(dur)
|
||||
select {
|
||||
case <-t.C:
|
||||
// duration elapsed, loop
|
||||
case <-ctx.Done():
|
||||
// context cancelled, kill the timer if it hasn't fired, and return
|
||||
// the last error we got
|
||||
if !t.Stop() {
|
||||
<-t.C
|
||||
}
|
||||
return attempt, err
|
||||
}
|
||||
}
|
||||
return attempt, fmt.Errorf("execution failed after %d retry attempts", b.retries)
|
||||
}
|
||||
|
|
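A minimal usage sketch of the new cancellation path, using the exported `backoff.New` and `Retry` shown above; with a cancelled or expired context, `Retry` now returns the last callback error instead of sleeping through the remaining attempts:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
)

func main() {
	// Give up after 30s even if retry attempts remain: the new select on
	// ctx.Done() stops the in-flight timer and returns early.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	b := backoff.New()
	attempts, err := b.Retry(ctx, func() error {
		return fmt.Errorf("transient error") // always fail, for demonstration
	})
	fmt.Printf("gave up after %d attempts: %v\n", attempts, err)
}
```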
|
@ -16,7 +16,7 @@ func TestRetry_Do(t *testing.T) {
|
|||
backoffMinDuration time.Duration
|
||||
retryableFunc retryableFunc
|
||||
ctx context.Context
|
||||
withCancel bool
|
||||
cancelTimeout time.Duration
|
||||
want uint64
|
||||
wantErr bool
|
||||
}{
|
||||
|
@ -37,7 +37,7 @@ func TestRetry_Do(t *testing.T) {
|
|||
},
|
||||
ctx: context.Background(),
|
||||
want: 0,
|
||||
wantErr: false,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "only one retry test",
|
||||
|
@ -79,10 +79,33 @@ func TestRetry_Do(t *testing.T) {
|
|||
want: 5,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "cancel context",
|
||||
backoffRetries: 5,
|
||||
backoffFactor: 1.7,
|
||||
backoffMinDuration: time.Millisecond * 10,
|
||||
retryableFunc: func() error {
|
||||
t := time.NewTicker(time.Millisecond * 5)
|
||||
defer t.Stop()
|
||||
for range t.C {
|
||||
return fmt.Errorf("got some error")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
ctx: context.Background(),
|
||||
cancelTimeout: time.Millisecond * 40,
|
||||
want: 3,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
r := New()
|
||||
r := &Backoff{retries: tt.backoffRetries, factor: tt.backoffFactor, minDuration: tt.backoffMinDuration}
|
||||
if tt.cancelTimeout != 0 {
|
||||
newCtx, cancelFn := context.WithTimeout(tt.ctx, tt.cancelTimeout)
|
||||
tt.ctx = newCtx
|
||||
defer cancelFn()
|
||||
}
|
||||
got, err := r.Retry(tt.ctx, tt.retryableFunc)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("Retry() error = %v, wantErr %v", err, tt.wantErr)
|
||||
|
|
|
@ -3,7 +3,13 @@
|
|||
// altogether.
|
||||
package barpool
|
||||
|
||||
import "github.com/cheggaaa/pb/v3"
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/terminal"
|
||||
"github.com/cheggaaa/pb/v3"
|
||||
)
|
||||
|
||||
var pool = pb.NewPool()
|
||||
|
||||
|
@ -20,7 +26,22 @@ func Stop() { _ = pool.Stop() }
|
|||
// AddWithTemplate adds a bar with the given template
|
||||
// to the global pool
|
||||
func AddWithTemplate(format string, total int) *pb.ProgressBar {
|
||||
bar := pb.ProgressBarTemplate(format).New(total)
|
||||
tpl := getTemplate(format)
|
||||
bar := pb.ProgressBarTemplate(tpl).New(total)
|
||||
Add(bar)
|
||||
return bar
|
||||
}
|
||||
|
||||
// NewSingleProgress returns a progress bar with the given template
|
||||
func NewSingleProgress(format string, total int) *pb.ProgressBar {
|
||||
tpl := getTemplate(format)
|
||||
return pb.ProgressBarTemplate(tpl).New(total)
|
||||
}
|
||||
|
||||
func getTemplate(format string) string {
|
||||
isTerminal := terminal.IsTerminal(int(os.Stdout.Fd()))
|
||||
if !isTerminal {
|
||||
format = fmt.Sprintf("%s\n", format)
|
||||
}
|
||||
return format
|
||||
}
|
||||
|
|
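A sketch of how the pool is typically driven (mirroring the remote-read processor later in this diff); `barTpl` is copied from vmctl's utils, and when stdout is not a terminal `getTemplate` now appends a newline so progress is logged line by line rather than redrawn in place:

```go
package main

import (
	"fmt"
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
)

const barTpl = `{{ blue "%s:" }} {{ counters . }} {{ bar . "[" "█" (cycle . "█") "▒" "]" }} {{ percent . }}`

func main() {
	bar := barpool.AddWithTemplate(fmt.Sprintf(barTpl, "Processing ranges"), 100)
	if err := barpool.Start(); err != nil {
		log.Fatal(err)
	}
	defer barpool.Stop()
	for i := 0; i < 100; i++ {
		bar.Increment() // one tick per processed range
	}
}
```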
|
@ -18,9 +18,11 @@ type influxProcessor struct {
|
|||
separator string
|
||||
skipDbLabel bool
|
||||
promMode bool
|
||||
isSilent bool
|
||||
isVerbose bool
|
||||
}
|
||||
|
||||
func newInfluxProcessor(ic *influx.Client, im *vm.Importer, cc int, separator string, skipDbLabel bool, promMode bool) *influxProcessor {
|
||||
func newInfluxProcessor(ic *influx.Client, im *vm.Importer, cc int, separator string, skipDbLabel, promMode, silent, verbose bool) *influxProcessor {
|
||||
if cc < 1 {
|
||||
cc = 1
|
||||
}
|
||||
|
@ -31,10 +33,12 @@ func newInfluxProcessor(ic *influx.Client, im *vm.Importer, cc int, separator st
|
|||
separator: separator,
|
||||
skipDbLabel: skipDbLabel,
|
||||
promMode: promMode,
|
||||
isSilent: silent,
|
||||
isVerbose: verbose,
|
||||
}
|
||||
}
|
||||
|
||||
func (ip *influxProcessor) run(silent, verbose bool) error {
|
||||
func (ip *influxProcessor) run() error {
|
||||
series, err := ip.ic.Explore()
|
||||
if err != nil {
|
||||
return fmt.Errorf("explore query failed: %s", err)
|
||||
|
@ -44,7 +48,7 @@ func (ip *influxProcessor) run(silent, verbose bool) error {
|
|||
}
|
||||
|
||||
question := fmt.Sprintf("Found %d timeseries to import. Continue?", len(series))
|
||||
if !silent && !prompt(question) {
|
||||
if !ip.isSilent && !prompt(question) {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -79,7 +83,7 @@ func (ip *influxProcessor) run(silent, verbose bool) error {
|
|||
case infErr := <-errCh:
|
||||
return fmt.Errorf("influx error: %s", infErr)
|
||||
case vmErr := <-ip.im.Errors():
|
||||
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
|
||||
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, ip.isVerbose))
|
||||
case seriesCh <- s:
|
||||
}
|
||||
}
|
||||
|
@ -91,7 +95,7 @@ func (ip *influxProcessor) run(silent, verbose bool) error {
|
|||
// drain import errors channel
|
||||
for vmErr := range ip.im.Errors() {
|
||||
if vmErr.Err != nil {
|
||||
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
|
||||
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, ip.isVerbose))
|
||||
}
|
||||
}
|
||||
for err := range errCh {
|
||||
|
|
|
@ -153,6 +153,10 @@ func (c *Client) Explore() ([]*Series, error) {
|
|||
return nil, fmt.Errorf("failed to get field keys: %s", err)
|
||||
}
|
||||
|
||||
if len(mFields) < 1 {
|
||||
return nil, fmt.Errorf("found no numeric fields for import in database %q", c.database)
|
||||
}
|
||||
|
||||
series, err := c.getSeries()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get series: %s", err)
|
||||
|
|
|
@ -16,7 +16,6 @@ import (
|
|||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/terminal"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
|
||||
|
@ -72,8 +71,8 @@ func main() {
|
|||
return fmt.Errorf("failed to create VM importer: %s", err)
|
||||
}
|
||||
|
||||
otsdbProcessor := newOtsdbProcessor(otsdbClient, importer, c.Int(otsdbConcurrency))
|
||||
return otsdbProcessor.run(isNonInteractive(c), c.Bool(globalVerbose))
|
||||
otsdbProcessor := newOtsdbProcessor(otsdbClient, importer, c.Int(otsdbConcurrency), c.Bool(globalSilent), c.Bool(globalVerbose))
|
||||
return otsdbProcessor.run()
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -113,8 +112,10 @@ func main() {
|
|||
c.Int(influxConcurrency),
|
||||
c.String(influxMeasurementFieldSeparator),
|
||||
c.Bool(influxSkipDatabaseLabel),
|
||||
c.Bool(influxPrometheusMode))
|
||||
return processor.run(isNonInteractive(c), c.Bool(globalVerbose))
|
||||
c.Bool(influxPrometheusMode),
|
||||
c.Bool(globalSilent),
|
||||
c.Bool(globalVerbose))
|
||||
return processor.run()
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -152,9 +153,11 @@ func main() {
|
|||
timeEnd: c.Timestamp(remoteReadFilterTimeEnd),
|
||||
chunk: c.String(remoteReadStepInterval),
|
||||
},
|
||||
cc: c.Int(remoteReadConcurrency),
|
||||
cc: c.Int(remoteReadConcurrency),
|
||||
isSilent: c.Bool(globalSilent),
|
||||
isVerbose: c.Bool(globalVerbose),
|
||||
}
|
||||
return rmp.run(ctx, isNonInteractive(c), c.Bool(globalVerbose))
|
||||
return rmp.run(ctx)
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -188,7 +191,7 @@ func main() {
|
|||
im: importer,
|
||||
cc: c.Int(promConcurrency),
|
||||
}
|
||||
return pp.run(isNonInteractive(c), c.Bool(globalVerbose))
|
||||
return pp.run(c.Bool(globalSilent), c.Bool(globalVerbose))
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -250,8 +253,9 @@ func main() {
|
|||
backoff: backoff.New(),
|
||||
cc: c.Int(vmConcurrency),
|
||||
disableRetries: c.Bool(vmNativeDisableRetries),
|
||||
isSilent: c.Bool(globalSilent),
|
||||
}
|
||||
return p.run(ctx, isNonInteractive(c))
|
||||
return p.run(ctx)
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -324,8 +328,3 @@ func initConfigVM(c *cli.Context) vm.Config {
|
|||
DisableProgressBar: c.Bool(vmDisableProgressBar),
|
||||
}
|
||||
}
|
||||
|
||||
func isNonInteractive(c *cli.Context) bool {
|
||||
isTerminal := terminal.IsTerminal(int(os.Stdout.Fd()))
|
||||
return c.Bool(globalSilent) || !isTerminal
|
||||
}
|
||||
|
|
|
@ -12,9 +12,11 @@ import (
|
|||
)
|
||||
|
||||
type otsdbProcessor struct {
|
||||
oc *opentsdb.Client
|
||||
im *vm.Importer
|
||||
otsdbcc int
|
||||
oc *opentsdb.Client
|
||||
im *vm.Importer
|
||||
otsdbcc int
|
||||
isSilent bool
|
||||
isVerbose bool
|
||||
}
|
||||
|
||||
type queryObj struct {
|
||||
|
@ -24,18 +26,20 @@ type queryObj struct {
|
|||
StartTime int64
|
||||
}
|
||||
|
||||
func newOtsdbProcessor(oc *opentsdb.Client, im *vm.Importer, otsdbcc int) *otsdbProcessor {
|
||||
func newOtsdbProcessor(oc *opentsdb.Client, im *vm.Importer, otsdbcc int, silent, verbose bool) *otsdbProcessor {
|
||||
if otsdbcc < 1 {
|
||||
otsdbcc = 1
|
||||
}
|
||||
return &otsdbProcessor{
|
||||
oc: oc,
|
||||
im: im,
|
||||
otsdbcc: otsdbcc,
|
||||
oc: oc,
|
||||
im: im,
|
||||
otsdbcc: otsdbcc,
|
||||
isSilent: silent,
|
||||
isVerbose: verbose,
|
||||
}
|
||||
}
|
||||
|
||||
func (op *otsdbProcessor) run(silent, verbose bool) error {
|
||||
func (op *otsdbProcessor) run() error {
|
||||
log.Println("Loading all metrics from OpenTSDB for filters: ", op.oc.Filters)
|
||||
var metrics []string
|
||||
for _, filter := range op.oc.Filters {
|
||||
|
@ -51,7 +55,7 @@ func (op *otsdbProcessor) run(silent, verbose bool) error {
|
|||
}
|
||||
|
||||
question := fmt.Sprintf("Found %d metrics to import. Continue?", len(metrics))
|
||||
if !silent && !prompt(question) {
|
||||
if !op.isSilent && !prompt(question) {
|
||||
return nil
|
||||
}
|
||||
op.im.ResetStats()
|
||||
|
@ -114,7 +118,7 @@ func (op *otsdbProcessor) run(silent, verbose bool) error {
|
|||
case otsdbErr := <-errCh:
|
||||
return fmt.Errorf("opentsdb error: %s", otsdbErr)
|
||||
case vmErr := <-op.im.Errors():
|
||||
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
|
||||
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, op.isVerbose))
|
||||
case seriesCh <- queryObj{
|
||||
Tr: tr, StartTime: startTime,
|
||||
Series: series, Rt: opentsdb.RetentionMeta{
|
||||
|
@ -138,7 +142,7 @@ func (op *otsdbProcessor) run(silent, verbose bool) error {
|
|||
op.im.Close()
|
||||
for vmErr := range op.im.Errors() {
|
||||
if vmErr.Err != nil {
|
||||
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
|
||||
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, op.isVerbose))
|
||||
}
|
||||
}
|
||||
log.Println("Import finished!")
|
||||
|
|
|
@ -154,10 +154,12 @@ func TestRemoteRead(t *testing.T) {
|
|||
timeEnd: &end,
|
||||
chunk: tt.chunk,
|
||||
},
|
||||
cc: 1,
|
||||
cc: 1,
|
||||
isSilent: true,
|
||||
isVerbose: false,
|
||||
}
|
||||
|
||||
err = rmp.run(ctx, true, false)
|
||||
err = rmp.run(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to run remote read processor: %s", err)
|
||||
}
|
||||
|
@ -307,10 +309,12 @@ func TestSteamRemoteRead(t *testing.T) {
|
|||
timeEnd: &end,
|
||||
chunk: tt.chunk,
|
||||
},
|
||||
cc: 1,
|
||||
cc: 1,
|
||||
isSilent: true,
|
||||
isVerbose: false,
|
||||
}
|
||||
|
||||
err = rmp.run(ctx, true, false)
|
||||
err = rmp.run(ctx)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to run remote read processor: %s", err)
|
||||
}
|
||||
|
|
|
@ -20,7 +20,9 @@ type remoteReadProcessor struct {
|
|||
dst *vm.Importer
|
||||
src *remoteread.Client
|
||||
|
||||
cc int
|
||||
cc int
|
||||
isSilent bool
|
||||
isVerbose bool
|
||||
}
|
||||
|
||||
type remoteReadFilter struct {
|
||||
|
@ -29,7 +31,7 @@ type remoteReadFilter struct {
|
|||
chunk string
|
||||
}
|
||||
|
||||
func (rrp *remoteReadProcessor) run(ctx context.Context, silent, verbose bool) error {
|
||||
func (rrp *remoteReadProcessor) run(ctx context.Context) error {
|
||||
rrp.dst.ResetStats()
|
||||
if rrp.filter.timeEnd == nil {
|
||||
t := time.Now().In(rrp.filter.timeStart.Location())
|
||||
|
@ -46,19 +48,19 @@ func (rrp *remoteReadProcessor) run(ctx context.Context, silent, verbose bool) e
|
|||
|
||||
question := fmt.Sprintf("Selected time range %q - %q will be split into %d ranges according to %q step. Continue?",
|
||||
rrp.filter.timeStart.String(), rrp.filter.timeEnd.String(), len(ranges), rrp.filter.chunk)
|
||||
if !silent && !prompt(question) {
|
||||
if !rrp.isSilent && !prompt(question) {
|
||||
return nil
|
||||
}
|
||||
|
||||
var bar *pb.ProgressBar
|
||||
if !silent {
|
||||
if !rrp.isSilent {
|
||||
bar = barpool.AddWithTemplate(fmt.Sprintf(barTpl, "Processing ranges"), len(ranges))
|
||||
if err := barpool.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
defer func() {
|
||||
if !silent {
|
||||
if !rrp.isSilent {
|
||||
barpool.Stop()
|
||||
}
|
||||
log.Println("Import finished!")
|
||||
|
@ -90,7 +92,7 @@ func (rrp *remoteReadProcessor) run(ctx context.Context, silent, verbose bool) e
|
|||
case infErr := <-errCh:
|
||||
return fmt.Errorf("remote read error: %s", infErr)
|
||||
case vmErr := <-rrp.dst.Errors():
|
||||
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
|
||||
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, rrp.isVerbose))
|
||||
case rangeC <- &remoteread.Filter{
|
||||
StartTimestampMs: r[0].UnixMilli(),
|
||||
EndTimestampMs: r[1].UnixMilli(),
|
||||
|
@ -105,7 +107,7 @@ func (rrp *remoteReadProcessor) run(ctx context.Context, silent, verbose bool) e
|
|||
// drain import errors channel
|
||||
for vmErr := range rrp.dst.Errors() {
|
||||
if vmErr.Err != nil {
|
||||
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
|
||||
return fmt.Errorf("import process failed: %s", wrapErr(vmErr, rrp.isVerbose))
|
||||
}
|
||||
}
|
||||
for err := range errCh {
|
||||
|
|
|
@ -2,6 +2,7 @@ package remote_read_integration
|
|||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
|
@ -10,6 +11,7 @@ import (
|
|||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
|
@ -169,14 +171,21 @@ func (rws *RemoteWriteServer) valuesHandler() http.Handler {
|
|||
Data: metricNames,
|
||||
}
|
||||
|
||||
err := json.NewEncoder(w).Encode(resp)
|
||||
buf := bytes.NewBuffer(nil)
|
||||
err := json.NewEncoder(buf).Encode(resp)
|
||||
if err != nil {
|
||||
log.Printf("error send series: %s", err)
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
_, err = w.Write(buf.Bytes())
|
||||
if err != nil {
|
||||
log.Printf("error send series: %s", err)
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
return
|
||||
})
|
||||
}
|
||||
|
@ -191,7 +200,6 @@ func (rws *RemoteWriteServer) exportNativeHandler() http.Handler {
|
|||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return
|
||||
})
|
||||
}
|
||||
|
@ -202,6 +210,7 @@ func (rws *RemoteWriteServer) importNativeHandler(t *testing.T) http.Handler {
|
|||
defer common.StopUnmarshalWorkers()
|
||||
|
||||
var gotTimeSeries []vm.TimeSeries
|
||||
var mx sync.RWMutex
|
||||
|
||||
err := stream.Parse(r.Body, false, func(block *stream.Block) error {
|
||||
mn := &block.MetricName
|
||||
|
@ -218,7 +227,9 @@ func (rws *RemoteWriteServer) importNativeHandler(t *testing.T) http.Handler {
|
|||
})
|
||||
}
|
||||
|
||||
mx.Lock()
|
||||
gotTimeSeries = append(gotTimeSeries, timeseries)
|
||||
mx.Unlock()
|
||||
|
||||
return nil
|
||||
})
|
||||
|
@ -244,7 +255,8 @@ func (rws *RemoteWriteServer) importNativeHandler(t *testing.T) http.Handler {
|
|||
|
||||
if !reflect.DeepEqual(gotTimeSeries, rws.expectedSeries) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
t.Fatalf("datasets not equal, expected: %#v;\n got: %#v", rws.expectedSeries, gotTimeSeries)
|
||||
t.Errorf("datasets not equal, expected: %#v;\n got: %#v", rws.expectedSeries, gotTimeSeries)
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
|
|
|
@ -6,12 +6,17 @@ import (
|
|||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/terminal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
|
||||
)
|
||||
|
||||
const barTpl = `{{ blue "%s:" }} {{ counters . }} {{ bar . "[" "█" (cycle . "█") "▒" "]" }} {{ percent . }}`
|
||||
|
||||
func prompt(question string) bool {
|
||||
isTerminal := terminal.IsTerminal(int(os.Stdout.Fd()))
|
||||
if !isTerminal {
|
||||
return true
|
||||
}
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
fmt.Print(question, " [Y/n] ")
|
||||
answer, err := reader.ReadString('\n')
|
||||
|
|
|
@ -6,7 +6,6 @@ import (
|
|||
)
|
||||
|
||||
func TestGetTime(t *testing.T) {
|
||||
l, _ := time.LoadLocation("UTC")
|
||||
tests := []struct {
|
||||
name string
|
||||
s string
|
||||
|
@ -23,7 +22,7 @@ func TestGetTime(t *testing.T) {
			name: "only year",
			s:    "2019",
			want: func() time.Time {
				t := time.Date(2019, 1, 1, 0, 0, 0, 0, l)
				t := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
				return t
			},
		},

@ -31,7 +30,7 @@ func TestGetTime(t *testing.T) {
			name: "year and month",
			s:    "2019-01",
			want: func() time.Time {
				t := time.Date(2019, 1, 1, 0, 0, 0, 0, l)
				t := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
				return t
			},
		},

@ -39,7 +38,7 @@ func TestGetTime(t *testing.T) {
			name: "year and not first month",
			s:    "2019-02",
			want: func() time.Time {
				t := time.Date(2019, 2, 1, 0, 0, 0, 0, l)
				t := time.Date(2019, 2, 1, 0, 0, 0, 0, time.UTC)
				return t
			},
		},

@ -47,7 +46,7 @@ func TestGetTime(t *testing.T) {
			name: "year, month and day",
			s:    "2019-02-01",
			want: func() time.Time {
				t := time.Date(2019, 2, 1, 0, 0, 0, 0, l)
				t := time.Date(2019, 2, 1, 0, 0, 0, 0, time.UTC)
				return t
			},
		},

@ -55,7 +54,7 @@ func TestGetTime(t *testing.T) {
			name: "year, month and not first day",
			s:    "2019-02-10",
			want: func() time.Time {
				t := time.Date(2019, 2, 10, 0, 0, 0, 0, l)
				t := time.Date(2019, 2, 10, 0, 0, 0, 0, time.UTC)
				return t
			},
		},

@ -63,7 +62,7 @@ func TestGetTime(t *testing.T) {
			name: "year, month, day and time",
			s:    "2019-02-02T00",
			want: func() time.Time {
				t := time.Date(2019, 2, 2, 0, 0, 0, 0, l)
				t := time.Date(2019, 2, 2, 0, 0, 0, 0, time.UTC)
				return t
			},
		},

@ -71,7 +70,7 @@ func TestGetTime(t *testing.T) {
			name: "year, month, day and one hour time",
			s:    "2019-02-02T01",
			want: func() time.Time {
				t := time.Date(2019, 2, 2, 1, 0, 0, 0, l)
				t := time.Date(2019, 2, 2, 1, 0, 0, 0, time.UTC)
				return t
			},
		},

@ -79,7 +78,7 @@ func TestGetTime(t *testing.T) {
			name: "time with zero minutes",
			s:    "2019-02-02T01:00",
			want: func() time.Time {
				t := time.Date(2019, 2, 2, 1, 0, 0, 0, l)
				t := time.Date(2019, 2, 2, 1, 0, 0, 0, time.UTC)
				return t
			},
		},

@ -87,7 +86,7 @@ func TestGetTime(t *testing.T) {
			name: "time with one minute",
			s:    "2019-02-02T01:01",
			want: func() time.Time {
				t := time.Date(2019, 2, 2, 1, 1, 0, 0, l)
				t := time.Date(2019, 2, 2, 1, 1, 0, 0, time.UTC)
				return t
			},
		},

@ -95,7 +94,7 @@ func TestGetTime(t *testing.T) {
			name: "time with zero seconds",
			s:    "2019-02-02T01:01:00",
			want: func() time.Time {
				t := time.Date(2019, 2, 2, 1, 1, 0, 0, l)
				t := time.Date(2019, 2, 2, 1, 1, 0, 0, time.UTC)
				return t
			},
		},

@ -103,7 +102,7 @@ func TestGetTime(t *testing.T) {
			name: "timezone with one second",
			s:    "2019-02-02T01:01:01",
			want: func() time.Time {
				t := time.Date(2019, 2, 2, 1, 1, 1, 0, l)
				t := time.Date(2019, 2, 2, 1, 1, 1, 0, time.UTC)
				return t
			},
		},

@ -111,7 +110,7 @@ func TestGetTime(t *testing.T) {
			name: "time with two second and timezone",
			s:    "2019-07-07T20:01:02Z",
			want: func() time.Time {
				t := time.Date(2019, 7, 7, 20, 1, 02, 0, l)
				t := time.Date(2019, 7, 7, 20, 1, 02, 0, time.UTC)
				return t
			},
		},

@ -119,7 +118,7 @@ func TestGetTime(t *testing.T) {
			name: "time with seconds and timezone",
			s:    "2019-07-07T20:47:40+03:00",
			want: func() time.Time {
				l, _ = time.LoadLocation("Europe/Kiev")
				l, _ := time.LoadLocation("Europe/Kiev")
				t := time.Date(2019, 7, 7, 20, 47, 40, 0, l)
				return t
			},

@ -134,7 +133,7 @@ func TestGetTime(t *testing.T) {
			name: "float timestamp representation",
			s:    "1562529662.324",
			want: func() time.Time {
				t := time.Date(2019, 7, 7, 23, 01, 02, 324, l)
				t := time.Date(2019, 7, 7, 20, 01, 02, 324e6, time.UTC)
				return t
			},
		},

@ -142,17 +141,15 @@ func TestGetTime(t *testing.T) {
			name: "negative timestamp",
			s:    "-9223372036.855",
			want: func() time.Time {
				l, _ = time.LoadLocation("Europe/Kiev")
				return time.Date(1970, 01, 01, 03, 00, 00, 00, l)
				return time.Date(1970, 01, 01, 00, 00, 00, 00, time.UTC)
			},
			wantErr: false,
		},
		{
			name: "big timestamp",
			s:    "9223372036.855",
			s:    "1223372036855",
			want: func() time.Time {
				l, _ = time.LoadLocation("Europe/Kiev")
				t := time.Date(2262, 04, 12, 02, 47, 16, 855, l)
				t := time.Date(2008, 10, 7, 9, 33, 56, 855e6, time.UTC)
				return t
			},
			wantErr: false,
@ -10,6 +10,7 @@ import (
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"

@ -32,6 +33,7 @@ type vmNativeProcessor struct {
	interCluster   bool
	cc             int
	disableRetries bool
	isSilent       bool
}

const (

@ -41,7 +43,7 @@ const (
	nativeSingleProcessTpl = `Total: {{counters . }} {{ cycle . "↖" "↗" "↘" "↙" }} Speed: {{speed . }} {{string . "suffix"}}`
)

func (p *vmNativeProcessor) run(ctx context.Context, silent bool) error {
func (p *vmNativeProcessor) run(ctx context.Context) error {
	if p.cc == 0 {
		p.cc = 1
	}
@ -78,13 +80,13 @@ func (p *vmNativeProcessor) run(ctx context.Context, silent bool) error {
			return fmt.Errorf("failed to get tenants: %w", err)
		}
		question := fmt.Sprintf("The following tenants were discovered: %s.\n Continue?", tenants)
		if !silent && !prompt(question) {
		if !p.isSilent && !prompt(question) {
			return nil
		}
	}

	for _, tenantID := range tenants {
		err := p.runBackfilling(ctx, tenantID, ranges, silent)
		err := p.runBackfilling(ctx, tenantID, ranges, p.isSilent)
		if err != nil {
			return fmt.Errorf("migration failed: %s", err)
		}
@ -111,7 +113,6 @@ func (p *vmNativeProcessor) do(ctx context.Context, f native.Filter, srcURL, dst
}

func (p *vmNativeProcessor) runSingle(ctx context.Context, f native.Filter, srcURL, dstURL string, bar *pb.ProgressBar) error {
	reader, err := p.src.ExportPipe(ctx, srcURL, f)
	if err != nil {
		return fmt.Errorf("failed to init export pipe: %w", err)
@ -218,9 +219,9 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,

	var bar *pb.ProgressBar
	if !silent {
		bar = pb.ProgressBarTemplate(fmt.Sprintf(nativeWithBackoffTpl, barPrefix)).New(len(metrics) * len(ranges))
		bar = barpool.NewSingleProgress(fmt.Sprintf(nativeWithBackoffTpl, barPrefix), len(metrics)*len(ranges))
		if p.disableRetries {
			bar = pb.ProgressBarTemplate(nativeSingleProcessTpl).New(0)
			bar = barpool.NewSingleProgress(nativeSingleProcessTpl, 0)
		}
		bar.Start()
		defer bar.Finish()
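barpool.NewSingleProgress replaces the direct pb.ProgressBarTemplate(...).New(...) calls at both sites. A sketch of what such a helper might wrap, inferred from the call sites rather than copied from the repo, assuming the cheggaaa/pb/v3 API that vmctl already uses:

package barpool

import pb "github.com/cheggaaa/pb/v3"

// NewSingleProgress builds a standalone progress bar from a template string,
// so call sites don't repeat the template-then-New dance.
func NewSingleProgress(tpl string, total int) *pb.ProgressBar {
	return pb.ProgressBarTemplate(tpl).New(total)
}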
@ -227,9 +227,10 @@ func Test_vmNativeProcessor_run(t *testing.T) {
				rateLimit:    tt.fields.rateLimit,
				interCluster: tt.fields.interCluster,
				cc:           tt.fields.cc,
				isSilent:     tt.args.silent,
			}

			if err := p.run(tt.args.ctx, tt.args.silent); (err != nil) != tt.wantErr {
			if err := p.run(tt.args.ctx); (err != nil) != tt.wantErr {
				t.Errorf("run() error = %v, wantErr %v", err, tt.wantErr)
			}
			deleted, err := deleteSeries(tt.fields.matchName, tt.fields.matchValue)
@ -363,8 +363,12 @@ The shortlist of configuration flags include the following:
	TCP address to listen for http connections. See also -httpListenAddr.useProxyProtocol (default ":8431")
  -httpListenAddr.useProxyProtocol
	Whether to use proxy protocol for connections accepted at -httpListenAddr . See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt
  -internStringCacheExpireDuration duration
	The expiry duration for caches for interned strings. See https://en.wikipedia.org/wiki/String_interning . See also -internStringMaxLen and -internStringDisableCache (default 6m0s)
  -internStringDisableCache
	Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
  -internStringMaxLen int
	The maximum length for strings to intern. Lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning (default 300)
	The maximum length for strings to intern. Lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
  -loggerDisableTimestamps
	Whether to disable writing timestamps in logs
  -loggerErrorsPerSecondLimit int
@ -123,6 +123,12 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
	Username for HTTP Basic Auth. The authentication is disabled if empty. See also -httpAuth.password
  -httpListenAddr string
	TCP address for exporting metrics at /metrics page (default ":8421")
  -internStringCacheExpireDuration duration
	The expiry duration for caches for interned strings. See https://en.wikipedia.org/wiki/String_interning . See also -internStringMaxLen and -internStringDisableCache (default 6m0s)
  -internStringDisableCache
	Whether to disable caches for interned strings. This may reduce memory usage at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringCacheExpireDuration and -internStringMaxLen
  -internStringMaxLen int
	The maximum length for strings to intern. Lower limit may save memory at the cost of higher CPU usage. See https://en.wikipedia.org/wiki/String_interning . See also -internStringDisableCache and -internStringCacheExpireDuration (default 500)
  -loggerDisableTimestamps
	Whether to disable writing timestamps in logs
  -loggerErrorsPerSecondLimit int
@ -159,8 +165,15 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
  -pushmetrics.url array
	Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . By default, metrics exposed at /metrics page aren't pushed to any remote storage
	Supports an array of values separated by comma or specified via multiple flags.
  -s2a_enable_appengine_dialer
	If true, opportunistically use AppEngine-specific dialer to call S2A.
  -s2a_timeout duration
	Timeout enforced on the connection to the S2A service for handshake. (default 3s)
  -s3ForcePathStyle
	Prefixing endpoint with bucket name when set false, true by default. (default true)
  -s3StorageClass string
	The Storage Class applied to objects uploaded to AWS S3. Supported values are: GLACIER, DEEP_ARCHIVE, GLACIER_IR, INTELLIGENT_TIERING, ONEZONE_IA, OUTPOSTS, REDUCED_REDUNDANCY, STANDARD, STANDARD_IA.
	See https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-class-intro.html/
  -skipBackupCompleteCheck
	Whether to skip checking for 'backup complete' file in -src. This may be useful for restoring from old backups, which were created without 'backup complete' file
  -src string
@ -3977,6 +3977,7 @@ func compareSeries(ss, ssExpected []*series, expr graphiteql.Expr) error {
		if !reflect.DeepEqual(s.Timestamps, sExpected.Timestamps) {
			return fmt.Errorf("unexpected timestamps for series %q\ngot\n%d\nwant\n%d", s.Name, s.Timestamps, sExpected.Timestamps)
		}

		if !equalFloats(s.Values, sExpected.Values) {
			return fmt.Errorf("unexpected values for series %q\ngot\n%g\nwant\n%g", s.Name, s.Values, sExpected.Values)
		}
@ -4009,7 +4010,7 @@ func equalFloats(a, b []float64) bool {
		} else if math.IsNaN(v2) {
			return false
		}
		eps := math.Abs(v1) / 1e15
		eps := math.Abs(v1) / 1e9
		if math.Abs(v1-v2) > eps {
			return false
		}
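Relaxing eps from |v1|/1e15 to |v1|/1e9 keeps the comparison relative to the magnitude of v1 while tolerating rounding drift from differently-ordered float operations. A standalone sketch of the same relative-tolerance check (the function name is hypothetical):

package floats

import "math"

// equalRel reports whether v1 and v2 match within a relative tolerance:
// the allowed absolute error scales with the magnitude of v1, so large
// values are permitted proportionally larger rounding noise.
func equalRel(v1, v2, relTol float64) bool {
	if math.IsNaN(v1) || math.IsNaN(v2) {
		return math.IsNaN(v1) && math.IsNaN(v2)
	}
	eps := math.Abs(v1) * relTol
	return math.Abs(v1-v2) <= eps
}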
@ -8,14 +8,14 @@ import (
	"net/http"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
)

// FunctionsHandler implements /functions handler.
//
// See https://graphite.readthedocs.io/en/latest/functions.html#function-api
func FunctionsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
	grouped := searchutils.GetBool(r, "grouped")
	grouped := httputils.GetBool(r, "grouped")
	group := r.FormValue("group")
	result := make(map[string]interface{})
	for funcName, fi := range funcs {
@ -10,9 +10,10 @@ import (
	"sync"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/bufferedwriter"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bufferedwriter"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/metrics"
@ -46,7 +47,7 @@ func MetricsFindHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ
	if len(delimiter) > 1 {
		return fmt.Errorf("`delimiter` query arg must contain only a single char")
	}
	if searchutils.GetBool(r, "automatic_variants") {
	if httputils.GetBool(r, "automatic_variants") {
		// See https://github.com/graphite-project/graphite-web/blob/bb9feb0e6815faa73f538af6ed35adea0fb273fd/webapp/graphite/metrics/views.py#L152
		query = addAutomaticVariants(query, delimiter)
	}
@ -57,19 +58,19 @@ func MetricsFindHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ
			query += "*"
		}
	}
	leavesOnly := searchutils.GetBool(r, "leavesOnly")
	wildcards := searchutils.GetBool(r, "wildcards")
	leavesOnly := httputils.GetBool(r, "leavesOnly")
	wildcards := httputils.GetBool(r, "wildcards")
	label := r.FormValue("label")
	if label == "__name__" {
		label = ""
	}
	jsonp := r.FormValue("jsonp")
	from, err := searchutils.GetTime(r, "from", 0)
	from, err := httputils.GetTime(r, "from", 0)
	if err != nil {
		return err
	}
	ct := startTime.UnixNano() / 1e6
	until, err := searchutils.GetTime(r, "until", ct)
	until, err := httputils.GetTime(r, "until", ct)
	if err != nil {
		return err
	}
@ -124,8 +125,8 @@ func MetricsExpandHandler(startTime time.Time, w http.ResponseWriter, r *http.Re
	if len(queries) == 0 {
		return fmt.Errorf("missing `query` arg")
	}
	groupByExpr := searchutils.GetBool(r, "groupByExpr")
	leavesOnly := searchutils.GetBool(r, "leavesOnly")
	groupByExpr := httputils.GetBool(r, "groupByExpr")
	leavesOnly := httputils.GetBool(r, "leavesOnly")
	label := r.FormValue("label")
	if label == "__name__" {
		label = ""
@ -138,12 +139,12 @@ func MetricsExpandHandler(startTime time.Time, w http.ResponseWriter, r *http.Re
		return fmt.Errorf("`delimiter` query arg must contain only a single char")
	}
	jsonp := r.FormValue("jsonp")
	from, err := searchutils.GetTime(r, "from", 0)
	from, err := httputils.GetTime(r, "from", 0)
	if err != nil {
		return err
	}
	ct := startTime.UnixNano() / 1e6
	until, err := searchutils.GetTime(r, "until", ct)
	until, err := httputils.GetTime(r, "until", ct)
	if err != nil {
		return err
	}
@ -8,8 +8,8 @@ import (
	"strings"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/bufferedwriter"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bufferedwriter"
	"github.com/VictoriaMetrics/metrics"
)
@ -1,6 +1,7 @@
package graphite

import (
	"flag"
	"fmt"
	"net/http"
	"regexp"

@ -8,16 +9,24 @@ import (
	"strings"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/bufferedwriter"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bufferedwriter"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
	graphiteparser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/metrics"
)

var (
	maxGraphiteTagKeysPerSearch = flag.Int("search.maxGraphiteTagKeys", 100e3, "The maximum number of tag keys returned from Graphite API, which returns tags. "+
		"See https://docs.victoriametrics.com/#graphite-tags-api-usage")
	maxGraphiteTagValuesPerSearch = flag.Int("search.maxGraphiteTagValues", 100e3, "The maximum number of tag values returned from Graphite API, which returns tag values. "+
		"See https://docs.victoriametrics.com/#graphite-tags-api-usage")
)

// TagsDelSeriesHandler implements /tags/delSeries handler.
//
// See https://graphite.readthedocs.io/en/stable/tags.html#removing-series-from-the-tagdb
@ -158,7 +167,7 @@ var (
// See https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support
func TagsAutoCompleteValuesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
	deadline := searchutils.GetDeadlineForQuery(r, startTime)
	limit, err := searchutils.GetInt(r, "limit")
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		return err
	}

@ -182,13 +191,13 @@ func TagsAutoCompleteValuesHandler(startTime time.Time, w http.ResponseWriter, r
		// Escape special chars in tagPrefix as Graphite does.
		// See https://github.com/graphite-project/graphite-web/blob/3ad279df5cb90b211953e39161df416e54a84948/webapp/graphite/tags/base.py#L228
		filter := regexp.QuoteMeta(valuePrefix)
		tagValues, err = netstorage.GraphiteTagValues(nil, tag, filter, limit, deadline)
		tagValues, err = netstorage.GraphiteTagValues(nil, tag, filter, *maxGraphiteTagValuesPerSearch, deadline)
		if err != nil {
			return err
		}
	} else {
		// Slow path: use netstorage.SearchMetricNames for applying `expr` filters.
		sq, err := getSearchQueryForExprs(startTime, etfs, exprs, limit*10)
		sq, err := getSearchQueryForExprs(startTime, etfs, exprs, *maxGraphiteTagValuesPerSearch)
		if err != nil {
			return err
		}
@ -248,7 +257,7 @@ var tagsAutoCompleteValuesDuration = metrics.NewSummary(`vm_request_duration_sec
// See https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support
func TagsAutoCompleteTagsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
	deadline := searchutils.GetDeadlineForQuery(r, startTime)
	limit, err := searchutils.GetInt(r, "limit")
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		return err
	}

@ -269,13 +278,13 @@ func TagsAutoCompleteTagsHandler(startTime time.Time, w http.ResponseWriter, r *
		// Escape special chars in tagPrefix as Graphite does.
		// See https://github.com/graphite-project/graphite-web/blob/3ad279df5cb90b211953e39161df416e54a84948/webapp/graphite/tags/base.py#L181
		filter := regexp.QuoteMeta(tagPrefix)
		labels, err = netstorage.GraphiteTags(nil, filter, limit, deadline)
		labels, err = netstorage.GraphiteTags(nil, filter, *maxGraphiteTagKeysPerSearch, deadline)
		if err != nil {
			return err
		}
	} else {
		// Slow path: use netstorage.SearchMetricNames for applying `expr` filters.
		sq, err := getSearchQueryForExprs(startTime, etfs, exprs, limit*10)
		sq, err := getSearchQueryForExprs(startTime, etfs, exprs, *maxGraphiteTagKeysPerSearch)
		if err != nil {
			return err
		}
@ -331,7 +340,7 @@ var tagsAutoCompleteTagsDuration = metrics.NewSummary(`vm_request_duration_secon
// See https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags
func TagsFindSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
	deadline := searchutils.GetDeadlineForQuery(r, startTime)
	limit, err := searchutils.GetInt(r, "limit")
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		return err
	}

@ -343,7 +352,7 @@ func TagsFindSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.R
	if err != nil {
		return fmt.Errorf("cannot setup tag filters: %w", err)
	}
	sq, err := getSearchQueryForExprs(startTime, etfs, exprs, limit*10)
	sq, err := getSearchQueryForExprs(startTime, etfs, exprs, *maxGraphiteSeries)
	if err != nil {
		return err
	}
@ -406,16 +415,19 @@ var tagsFindSeriesDuration = metrics.NewSummary(`vm_request_duration_seconds{pat
// See https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags
func TagValuesHandler(startTime time.Time, tagName string, w http.ResponseWriter, r *http.Request) error {
	deadline := searchutils.GetDeadlineForQuery(r, startTime)
	limit, err := searchutils.GetInt(r, "limit")
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		return err
	}
	filter := r.FormValue("filter")
	tagValues, err := netstorage.GraphiteTagValues(nil, tagName, filter, limit, deadline)
	tagValues, err := netstorage.GraphiteTagValues(nil, tagName, filter, *maxGraphiteTagValuesPerSearch, deadline)
	if err != nil {
		return err
	}

	if limit > 0 && limit < len(tagValues) {
		tagValues = tagValues[:limit]
	}
	w.Header().Set("Content-Type", "application/json")
	bw := bufferedwriter.Get(w)
	defer bufferedwriter.Put(bw)
@ -434,16 +446,19 @@ var tagValuesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/t
// See https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags
func TagsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error {
	deadline := searchutils.GetDeadlineForQuery(r, startTime)
	limit, err := searchutils.GetInt(r, "limit")
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		return err
	}
	filter := r.FormValue("filter")
	labels, err := netstorage.GraphiteTags(nil, filter, limit, deadline)
	labels, err := netstorage.GraphiteTags(nil, filter, *maxGraphiteTagKeysPerSearch, deadline)
	if err != nil {
		return err
	}

	if limit > 0 && limit < len(labels) {
		labels = labels[:limit]
	}
	w.Header().Set("Content-Type", "application/json")
	bw := bufferedwriter.Get(w)
	defer bufferedwriter.Put(bw)
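These tag handlers now query the backend with the server-side cap from the flag and only afterwards trim to the client's limit, so a large user-supplied limit can no longer widen the search. The pattern in isolation (the helper name is hypothetical):

package main

// capResults applies the client-supplied limit after the backend call:
// the backend is always queried with the server-side flag cap, and the
// (possibly smaller) user limit only trims the response.
func capResults(all []string, limit int) []string {
	if limit > 0 && limit < len(all) {
		return all[:limit]
	}
	return all
}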
@ -20,6 +20,7 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
@ -95,7 +96,7 @@ var vmuiFileServer = http.FileServer(http.FS(vmuiFiles))
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
	startTime := time.Now()
	defer requestDuration.UpdateDuration(startTime)
	tracerEnabled := searchutils.GetBool(r, "trace")
	tracerEnabled := httputils.GetBool(r, "trace")
	qt := querytracer.New(tracerEnabled, r.URL.Path)

	// Limit the number of concurrent queries.
@ -120,7 +121,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
		remoteAddr := httpserver.GetQuotedRemoteAddr(r)
		requestURI := httpserver.GetRequestURI(r)
		logger.Infof("client has cancelled the request after %.3f seconds: remoteAddr=%s, requestURI: %q",
			d.Seconds(), remoteAddr, requestURI)
			time.Since(startTime).Seconds(), remoteAddr, requestURI)
		return true
	case <-t.C:
		timerpool.Put(t)
@ -174,7 +175,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
	switch {
	case path == "/vmui" || path == "/graph":
		// VMUI access via incomplete url without `/` in the end. Redirect to complete url.
		// Use relative redirect, since, since the hostname and path prefix may be incorrect if VictoriaMetrics
		// Use relative redirect, since the hostname and path prefix may be incorrect if VictoriaMetrics
		// is hidden behind vmauth or similar proxy.
		_ = r.ParseForm()
		path = strings.TrimPrefix(path, "/")
@ -545,7 +546,7 @@ func isGraphiteTagsPath(path string) bool {
}

func sendPrometheusError(w http.ResponseWriter, r *http.Request, err error) {
	logger.Warnf("error in %q: %s", httpserver.GetRequestURI(r), err)
	logger.WarnfSkipframes(1, "error in %q: %s", httpserver.GetRequestURI(r), err)

	w.Header().Set("Content-Type", "application/json")
	statusCode := http.StatusUnprocessableEntity
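WarnfSkipframes makes the log line point at the caller of sendPrometheusError rather than at the shared helper itself. A sketch of how such a skip-frames logger can be built on runtime.Caller; the function here is a hypothetical stand-in, not the repo's logger:

package main

import (
	"fmt"
	"runtime"
)

// warnfSkipframes attributes the message to a frame further up the stack.
// skipframes=0 reports the direct caller, 1 the caller's caller, and so on,
// which is why runtime.Caller is invoked with skipframes+1.
func warnfSkipframes(skipframes int, format string, args ...interface{}) {
	_, file, line, _ := runtime.Caller(skipframes + 1)
	fmt.Printf("%s:%d: WARN: "+format+"\n", append([]interface{}{file, line}, args...)...)
}

func helper() {
	// Reported location is helper's caller, not helper itself.
	warnfSkipframes(1, "error in %q: %s", "/api/v1/query", "deadline exceeded")
}

func main() {
	helper()
}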
@ -5,7 +5,6 @@ import (
	"errors"
	"flag"
	"fmt"
	"runtime"
	"sort"
	"sync"
	"sync/atomic"

@ -399,8 +398,11 @@ func unpackWorker(workChs []chan *unpackWork, workerID uint) {
		idx := (i + workerID) % uint(len(workChs))
		ch := workChs[idx]
		for len(ch) > 0 {
			// Give a chance other goroutines to perform their work
			runtime.Gosched()
			// Do not call runtime.Gosched() here in order to give a chance
			// the real owner of the work to complete it, since it consumes additional CPU
			// and slows down the code on systems with big number of CPU cores.
			// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3966#issuecomment-1483208419

			// It is expected that every channel in the workChs is already closed,
			// so the next line should return immediately.
			upw, ok := <-ch
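The rewritten drain loop relies on a Go channel guarantee: receiving from a closed channel never blocks; it yields any buffered values first, then the zero value with ok=false. A runnable illustration:

package main

import "fmt"

func main() {
	ch := make(chan int, 2)
	ch <- 1
	ch <- 2
	close(ch)
	for {
		// Never blocks once the channel is closed: buffered values come
		// out first, then ok turns false.
		v, ok := <-ch
		if !ok {
			fmt.Println("channel drained")
			return
		}
		fmt.Println("got", v)
	}
}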
@ -12,16 +12,17 @@ import (
	"sync/atomic"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/bufferedwriter"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/querystats"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bufferedwriter"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/metrics"
@ -132,7 +133,7 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
		return fmt.Errorf("missing `format` arg; see https://docs.victoriametrics.com/#how-to-export-csv-data")
	}
	fieldNames := strings.Split(format, ",")
	reduceMemUsage := searchutils.GetBool(r, "reduce_mem_usage")
	reduceMemUsage := httputils.GetBool(r, "reduce_mem_usage")

	sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, *maxExportSeries)
	w.Header().Set("Content-Type", "text/csv; charset=utf-8")
@ -269,7 +270,7 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
	}
	format := r.FormValue("format")
	maxRowsPerLine := int(fastfloat.ParseInt64BestEffort(r.FormValue("max_rows_per_line")))
	reduceMemUsage := searchutils.GetBool(r, "reduce_mem_usage")
	reduceMemUsage := httputils.GetBool(r, "reduce_mem_usage")
	if err := exportHandler(nil, w, cp, format, maxRowsPerLine, reduceMemUsage); err != nil {
		return fmt.Errorf("error when exporting data on the time range (start=%d, end=%d): %w", cp.start, cp.end, err)
	}
@ -473,7 +474,7 @@ func LabelValuesHandler(qt *querytracer.Tracer, startTime time.Time, labelName s
	if err != nil {
		return err
	}
	limit, err := searchutils.GetInt(r, "limit")
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		return err
	}

@ -570,7 +571,7 @@ func LabelsHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseW
	if err != nil {
		return err
	}
	limit, err := searchutils.GetInt(r, "limit")
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		return err
	}
@ -628,16 +629,12 @@ func SeriesHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseW
	if err != nil {
		return err
	}
	limit, err := searchutils.GetInt(r, "limit")
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		return err
	}

	minLimit := *maxSeriesLimit
	if limit > 0 && limit < *maxSeriesLimit {
		minLimit = limit
	}
	sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, minLimit)
	sq := storage.NewSearchQuery(cp.start, cp.end, cp.filterss, *maxSeriesLimit)
	metricNames, err := netstorage.SearchMetricNames(qt, sq, cp.deadline)
	if err != nil {
		return fmt.Errorf("cannot fetch time series for %q: %w", sq, err)
@ -668,12 +665,12 @@ func QueryHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWr

	ct := startTime.UnixNano() / 1e6
	deadline := searchutils.GetDeadlineForQuery(r, startTime)
	mayCache := !searchutils.GetBool(r, "nocache")
	mayCache := !httputils.GetBool(r, "nocache")
	query := r.FormValue("query")
	if len(query) == 0 {
		return fmt.Errorf("missing `query` arg")
	}
	start, err := searchutils.GetTime(r, "time", ct)
	start, err := httputils.GetTime(r, "time", ct)
	if err != nil {
		return err
	}
@ -681,7 +678,7 @@ func QueryHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWr
	if err != nil {
		return err
	}
	step, err := searchutils.GetDuration(r, "step", lookbackDelta)
	step, err := httputils.GetDuration(r, "step", lookbackDelta)
	if err != nil {
		return err
	}
@ -745,7 +742,7 @@ func QueryHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWr
	if err != nil {
		return err
	}
	if !searchutils.GetBool(r, "nocache") && ct-start < queryOffset && start-ct < queryOffset {
	if !httputils.GetBool(r, "nocache") && ct-start < queryOffset && start-ct < queryOffset {
		// Adjust start time only if `nocache` arg isn't set.
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/241
		startPrev := start
@ -817,15 +814,15 @@ func QueryRangeHandler(qt *querytracer.Tracer, startTime time.Time, w http.Respo
	if len(query) == 0 {
		return fmt.Errorf("missing `query` arg")
	}
	start, err := searchutils.GetTime(r, "start", ct-defaultStep)
	start, err := httputils.GetTime(r, "start", ct-defaultStep)
	if err != nil {
		return err
	}
	end, err := searchutils.GetTime(r, "end", ct)
	end, err := httputils.GetTime(r, "end", ct)
	if err != nil {
		return err
	}
	step, err := searchutils.GetDuration(r, "step", defaultStep)
	step, err := httputils.GetDuration(r, "step", defaultStep)
	if err != nil {
		return err
	}
@ -842,7 +839,7 @@ func QueryRangeHandler(qt *querytracer.Tracer, startTime time.Time, w http.Respo
func queryRangeHandler(qt *querytracer.Tracer, startTime time.Time, w http.ResponseWriter, query string,
	start, end, step int64, r *http.Request, ct int64, etfs [][]storage.TagFilter) error {
	deadline := searchutils.GetDeadlineForQuery(r, startTime)
	mayCache := !searchutils.GetBool(r, "nocache")
	mayCache := !httputils.GetBool(r, "nocache")
	lookbackDelta, err := getMaxLookback(r)
	if err != nil {
		return err
@ -992,13 +989,13 @@ func getMaxLookback(r *http.Request) (int64, error) {
	if d == 0 {
		d = maxStalenessInterval.Milliseconds()
	}
	maxLookback, err := searchutils.GetDuration(r, "max_lookback", d)
	maxLookback, err := httputils.GetDuration(r, "max_lookback", d)
	if err != nil {
		return 0, err
	}
	d = maxLookback
	if *setLookbackToStep {
		step, err := searchutils.GetDuration(r, "step", d)
		step, err := httputils.GetDuration(r, "step", d)
		if err != nil {
			return 0, err
		}
@ -1038,7 +1035,7 @@ func getLatencyOffsetMilliseconds(r *http.Request) (int64, error) {
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2061#issuecomment-1299109836
		d = 0
	}
	return searchutils.GetDuration(r, "latency_offset", d)
	return httputils.GetDuration(r, "latency_offset", d)
}

// QueryStatsHandler returns query stats at `/api/v1/status/top_queries`
@ -1054,7 +1051,7 @@ func QueryStatsHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
		}
		topN = n
	}
	maxLifetimeMsecs, err := searchutils.GetDuration(r, "maxLifetime", 10*60*1000)
	maxLifetimeMsecs, err := httputils.GetDuration(r, "maxLifetime", 10*60*1000)
	if err != nil {
		return fmt.Errorf("cannot parse `maxLifetime` arg: %w", err)
	}
@ -1124,12 +1121,12 @@ func getCommonParamsWithDefaultDuration(r *http.Request, startTime time.Time, re
// - extra_filters[]
func getCommonParams(r *http.Request, startTime time.Time, requireNonEmptyMatch bool) (*commonParams, error) {
	deadline := searchutils.GetDeadlineForQuery(r, startTime)
	start, err := searchutils.GetTime(r, "start", 0)
	start, err := httputils.GetTime(r, "start", 0)
	if err != nil {
		return nil, err
	}
	ct := startTime.UnixNano() / 1e6
	end, err := searchutils.GetTime(r, "end", ct)
	end, err := httputils.GetTime(r, "end", ct)
	if err != nil {
		return nil, err
	}
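After this migration the query-arg helpers live in lib/httputils and every vmselect handler parses common args through them. A sketch of the resulting call pattern; the signatures are taken from the call sites above, while the wrapper function itself is hypothetical:

package example

import (
	"fmt"
	"net/http"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
)

// parseCommonArgs shows the shared helpers in one place: times and durations
// come back as millisecond int64 values, booleans default to false.
func parseCommonArgs(r *http.Request, startTime time.Time) error {
	ct := startTime.UnixNano() / 1e6
	start, err := httputils.GetTime(r, "start", 0) // default is rounded to seconds
	if err != nil {
		return err
	}
	end, err := httputils.GetTime(r, "end", ct)
	if err != nil {
		return err
	}
	step, err := httputils.GetDuration(r, "step", 300e3) // milliseconds
	if err != nil {
		return err
	}
	nocache := httputils.GetBool(r, "nocache")
	fmt.Println(start, end, step, nocache)
	return nil
}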
@ -3,14 +3,12 @@ package searchutils

import (
	"flag"
	"fmt"
	"math"
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/metricsql"
)
@ -21,96 +19,9 @@ var (
	maxStatusRequestDuration = flag.Duration("search.maxStatusRequestDuration", time.Minute*5, "The maximum duration for /api/v1/status/* requests")
)

func roundToSeconds(ms int64) int64 {
	return ms - ms%1000
}

// GetInt returns integer value from the given argKey.
func GetInt(r *http.Request, argKey string) (int, error) {
	argValue := r.FormValue(argKey)
	if len(argValue) == 0 {
		return 0, nil
	}
	n, err := strconv.Atoi(argValue)
	if err != nil {
		return 0, fmt.Errorf("cannot parse integer %q=%q: %w", argKey, argValue, err)
	}
	return n, nil
}

// GetTime returns time from the given argKey query arg.
//
// If argKey is missing in r, then defaultMs rounded to seconds is returned.
// The rounding is needed in order to align query results in Grafana
// executed at different times. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/720
func GetTime(r *http.Request, argKey string, defaultMs int64) (int64, error) {
	argValue := r.FormValue(argKey)
	if len(argValue) == 0 {
		return roundToSeconds(defaultMs), nil
	}
	// Handle Prometheus'-provided minTime and maxTime.
	// See https://github.com/prometheus/client_golang/issues/614
	switch argValue {
	case prometheusMinTimeFormatted:
		return minTimeMsecs, nil
	case prometheusMaxTimeFormatted:
		return maxTimeMsecs, nil
	}
	// Parse argValue
	secs, err := promutils.ParseTime(argValue)
	if err != nil {
		return 0, fmt.Errorf("cannot parse %s=%s: %w", argKey, argValue, err)
	}
	msecs := int64(secs * 1e3)
	if msecs < minTimeMsecs {
		msecs = 0
	}
	if msecs > maxTimeMsecs {
		msecs = maxTimeMsecs
	}
	return msecs, nil
}

var (
	// These constants were obtained from https://github.com/prometheus/prometheus/blob/91d7175eaac18b00e370965f3a8186cc40bf9f55/web/api/v1/api.go#L442
	// See https://github.com/prometheus/client_golang/issues/614 for details.
	prometheusMinTimeFormatted = time.Unix(math.MinInt64/1000+62135596801, 0).UTC().Format(time.RFC3339Nano)
	prometheusMaxTimeFormatted = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC().Format(time.RFC3339Nano)
)

const (
	// These values prevent from overflow when storing msec-precision time in int64.
	minTimeMsecs = 0 // use 0 instead of `int64(-1<<63) / 1e6` because the storage engine doesn't actually support negative time
	maxTimeMsecs = int64(1<<63-1) / 1e6
)

// GetDuration returns duration from the given argKey query arg.
func GetDuration(r *http.Request, argKey string, defaultValue int64) (int64, error) {
	argValue := r.FormValue(argKey)
	if len(argValue) == 0 {
		return defaultValue, nil
	}
	secs, err := strconv.ParseFloat(argValue, 64)
	if err != nil {
		// Try parsing string format
		d, err := promutils.ParseDuration(argValue)
		if err != nil {
			return 0, fmt.Errorf("cannot parse %q=%q: %w", argKey, argValue, err)
		}
		secs = d.Seconds()
	}
	msecs := int64(secs * 1e3)
	if msecs <= 0 || msecs > maxDurationMsecs {
		return 0, fmt.Errorf("%q=%dms is out of allowed range [%d ... %d]", argKey, msecs, 0, int64(maxDurationMsecs))
	}
	return msecs, nil
}

const maxDurationMsecs = 100 * 365 * 24 * 3600 * 1000

// GetMaxQueryDuration returns the maximum duration for query from r.
func GetMaxQueryDuration(r *http.Request) time.Duration {
	dms, err := GetDuration(r, "timeout", 0)
	dms, err := httputils.GetDuration(r, "timeout", 0)
	if err != nil {
		dms = 0
	}
@ -140,7 +51,7 @@ func GetDeadlineForExport(r *http.Request, startTime time.Time) Deadline {
}

func getDeadlineWithMaxDuration(r *http.Request, startTime time.Time, dMax int64, flagHint string) Deadline {
	d, err := GetDuration(r, "timeout", 0)
	d, err := httputils.GetDuration(r, "timeout", 0)
	if err != nil {
		d = 0
	}
@ -151,17 +62,6 @@ func getDeadlineWithMaxDuration(r *http.Request, startTime time.Time, dMax int64
	return NewDeadline(startTime, timeout, flagHint)
}

// GetBool returns boolean value from the given argKey query arg.
func GetBool(r *http.Request, argKey string) bool {
	argValue := r.FormValue(argKey)
	switch strings.ToLower(argValue) {
	case "", "0", "f", "false", "no":
		return false
	default:
		return true
	}
}

// Deadline contains deadline with the corresponding timeout for pretty error messages.
type Deadline struct {
	deadline uint64
@ -11,149 +11,6 @@ import (
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

func TestGetDurationSuccess(t *testing.T) {
	f := func(s string, dExpected int64) {
		t.Helper()
		urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
		r, err := http.NewRequest(http.MethodGet, urlStr, nil)
		if err != nil {
			t.Fatalf("unexpected error in NewRequest: %s", err)
		}

		// Verify defaultValue
		d, err := GetDuration(r, "foo", 123456)
		if err != nil {
			t.Fatalf("unexpected error when obtaining default time from GetDuration(%q): %s", s, err)
		}
		if d != 123456 {
			t.Fatalf("unexpected default value for GetDuration(%q); got %d; want %d", s, d, 123456)
		}

		// Verify dExpected
		d, err = GetDuration(r, "s", 123)
		if err != nil {
			t.Fatalf("unexpected error in GetDuration(%q): %s", s, err)
		}
		if d != dExpected {
			t.Fatalf("unexpected timestamp for GetDuration(%q); got %d; want %d", s, d, dExpected)
		}
	}

	f("1.234", 1234)
	f("1.23ms", 1)
	f("1.23s", 1230)
	f("2s56ms", 2056)
	f("2s-5ms", 1995)
	f("5m3.5s", 303500)
	f("2h", 7200000)
	f("1d", 24*3600*1000)
	f("7d5h4m3s534ms", 623043534)
}

func TestGetDurationError(t *testing.T) {
	f := func(s string) {
		t.Helper()
		urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
		r, err := http.NewRequest(http.MethodGet, urlStr, nil)
		if err != nil {
			t.Fatalf("unexpected error in NewRequest: %s", err)
		}

		if _, err := GetDuration(r, "s", 123); err == nil {
			t.Fatalf("expecting non-nil error in GetDuration(%q)", s)
		}
	}

	// Negative durations aren't supported
	f("-1.234")

	// Invalid duration
	f("foo")

	// Invalid suffix
	f("1md")
}

func TestGetTimeSuccess(t *testing.T) {
	f := func(s string, timestampExpected int64) {
		t.Helper()
		urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
		r, err := http.NewRequest(http.MethodGet, urlStr, nil)
		if err != nil {
			t.Fatalf("unexpected error in NewRequest: %s", err)
		}

		// Verify defaultValue
		ts, err := GetTime(r, "foo", 123456)
		if err != nil {
			t.Fatalf("unexpected error when obtaining default time from GetTime(%q): %s", s, err)
		}
		if ts != 123000 {
			t.Fatalf("unexpected default value for GetTime(%q); got %d; want %d", s, ts, 123000)
		}

		// Verify timestampExpected
		ts, err = GetTime(r, "s", 123)
		if err != nil {
			t.Fatalf("unexpected error in GetTime(%q): %s", s, err)
		}
		if ts != timestampExpected {
			t.Fatalf("unexpected timestamp for GetTime(%q); got %d; want %d", s, ts, timestampExpected)
		}
	}

	f("2019", 1546300800000)
	f("2019-01", 1546300800000)
	f("2019-02", 1548979200000)
	f("2019-02-01", 1548979200000)
	f("2019-02-02", 1549065600000)
	f("2019-02-02T00", 1549065600000)
	f("2019-02-02T01", 1549069200000)
	f("2019-02-02T01:00", 1549069200000)
	f("2019-02-02T01:01", 1549069260000)
	f("2019-02-02T01:01:00", 1549069260000)
	f("2019-02-02T01:01:01", 1549069261000)
	f("2019-07-07T20:01:02Z", 1562529662000)
	f("2019-07-07T20:47:40+03:00", 1562521660000)
	f("-292273086-05-16T16:47:06Z", minTimeMsecs)
	f("292277025-08-18T07:12:54.999999999Z", maxTimeMsecs)
	f("1562529662.324", 1562529662324)
	f("-9223372036.854", minTimeMsecs)
	f("-9223372036.855", minTimeMsecs)
	f("9223372036.855", maxTimeMsecs)
}

func TestGetTimeError(t *testing.T) {
	f := func(s string) {
		t.Helper()
		urlStr := fmt.Sprintf("http://foo.bar/baz?s=%s", url.QueryEscape(s))
		r, err := http.NewRequest(http.MethodGet, urlStr, nil)
		if err != nil {
			t.Fatalf("unexpected error in NewRequest: %s", err)
		}

		if _, err := GetTime(r, "s", 123); err == nil {
			t.Fatalf("expecting non-nil error in GetTime(%q)", s)
		}
	}

	f("foo")
	f("foo1")
	f("1245-5")
	f("2022-x7")
	f("2022-02-x7")
	f("2022-02-02Tx7")
	f("2022-02-02T00:x7")
	f("2022-02-02T00:00:x7")
	f("2022-02-02T00:00:00a")
	f("2019-07-07T20:01:02Zisdf")
	f("2019-07-07T20:47:40+03:00123")
	f("-292273086-05-16T16:47:07Z")
	f("292277025-08-18T07:12:54.999999998Z")
	f("123md")
	f("-12.3md")
}

func TestGetExtraTagFilters(t *testing.T) {
	httpReqWithForm := func(qs string) *http.Request {
		q, err := url.ParseQuery(qs)
@ -24,7 +24,7 @@ import (
)

var (
	retentionPeriod   = flagutil.NewDuration("retentionPeriod", "1", "Data with timestamps outside the retentionPeriod is automatically deleted. See also -retentionFilter")
	retentionPeriod   = flagutil.NewDuration("retentionPeriod", "1", "Data with timestamps outside the retentionPeriod is automatically deleted. The minimum retentionPeriod is 24h or 1d. See also -retentionFilter")
	snapshotAuthKey   = flag.String("snapshotAuthKey", "", "authKey, which must be passed in query string to /snapshot* pages")
	forceMergeAuthKey = flag.String("forceMergeAuthKey", "", "authKey, which must be passed in query string to /internal/force_merge pages")
	forceFlushAuthKey = flag.String("forceFlushAuthKey", "", "authKey, which must be passed in query string to /internal/force_flush pages")
@ -1,4 +1,4 @@
FROM golang:1.20.4 as build-web-stage
FROM golang:1.20.5 as build-web-stage
COPY build /build

WORKDIR /build

@ -6,7 +6,7 @@ COPY web/ /build/
RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o web-amd64 github.com/VictoriMetrics/vmui/ && \
	GOOS=windows GOARCH=amd64 CGO_ENABLED=0 go build -o web-windows github.com/VictoriMetrics/vmui/

FROM alpine:3.18.0
FROM alpine:3.18.2
USER root

COPY --from=build-web-stage /build/web-amd64 /app/web
@ -5,11 +5,19 @@ vmui-package-base-image:

vmui-build: vmui-package-base-image
	docker run --rm \
		--user $(shell id -u):$(shell id -g) \
		--mount type=bind,src="$(shell pwd)/app/vmui",dst=/build \
		-w /build/packages/vmui \
		--entrypoint=/bin/bash \
		vmui-builder-image -c "npm install && npm run build"
		--user $(shell id -u):$(shell id -g) \
		--mount type=bind,src="$(shell pwd)/app/vmui",dst=/build \
		-w /build/packages/vmui \
		--entrypoint=/bin/bash \
		vmui-builder-image -c "npm install && npm run build"

vmui-logs-build: vmui-package-base-image
	docker run --rm \
		--user $(shell id -u):$(shell id -g) \
		--mount type=bind,src="$(shell pwd)/app/vmui",dst=/build \
		-w /build/packages/vmui \
		--entrypoint=/bin/bash \
		vmui-builder-image -c "npm install && npm run build:logs"

vmui-release: vmui-build
	docker build -t ${DOCKER_NAMESPACE}/vmui:latest -f app/vmui/Dockerfile-web ./app/vmui/packages/vmui

@ -23,3 +31,6 @@ vmui-publish-release: vmui-release

vmui-update: vmui-build
	rm -rf app/vmselect/vmui/* && mv app/vmui/packages/vmui/build/* app/vmselect/vmui

vmui-logs-update: vmui-logs-build
	rm -rf app/vlselect/vmui/* && mv app/vmui/packages/vmui/build/* app/vlselect/vmui && rm -rf app/vlselect/vmui/dashboards
2225
app/vmui/packages/vmui/package-lock.json
generated
File diff suppressed because it is too large
@ -26,7 +26,7 @@
	"preact": "^10.7.1",
	"qs": "^6.10.3",
	"react-input-mask": "^2.0.4",
	"react-router-dom": "^6.3.0",
	"react-router-dom": "^6.10.0",
	"sass": "^1.56.0",
	"typescript": "~4.6.2",
	"uplot": "^1.6.19",
@ -34,7 +34,9 @@
	},
	"scripts": {
		"start": "react-app-rewired start",
		"start:logs": "cross-env REACT_APP_LOGS=true npm run start",
		"build": "GENERATE_SOURCEMAP=false react-app-rewired build",
		"build:logs": "cross-env REACT_APP_LOGS=true npm run build",
		"test": "react-app-rewired test",
		"lint": "eslint src --ext tsx,ts",
		"lint:fix": "eslint src --ext tsx,ts --fix"
@ -61,6 +63,7 @@
	"@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.7",
	"@typescript-eslint/eslint-plugin": "^5.15.0",
	"@typescript-eslint/parser": "^5.15.0",
	"cross-env": "^7.0.3",
	"customize-cra": "^1.0.0",
	"eslint-plugin-react": "^7.29.4",
	"react-app-rewired": "^2.2.1"
@ -13,8 +13,10 @@ import ExploreMetrics from "./pages/ExploreMetrics";
import PreviewIcons from "./components/Main/Icons/PreviewIcons";
import WithTemplate from "./pages/WithTemplate";
import Relabel from "./pages/Relabel";
import ExploreLogs from "./pages/ExploreLogs/ExploreLogs";

const App: FC = () => {
	const { REACT_APP_LOGS } = process.env;

	const [loadedTheme, setLoadedTheme] = useState(false);
@ -29,41 +31,49 @@ const App: FC = () => {
          path={"/"}
          element={<Layout/>}
        >
          {!REACT_APP_LOGS && (
            <>
              <Route
                path={router.home}
                element={<CustomPanel/>}
              />
              <Route
                path={router.metrics}
                element={<ExploreMetrics/>}
              />
              <Route
                path={router.cardinality}
                element={<CardinalityPanel/>}
              />
              <Route
                path={router.topQueries}
                element={<TopQueries/>}
              />
              <Route
                path={router.trace}
                element={<TracePage/>}
              />
              <Route
                path={router.dashboards}
                element={<DashboardsLayout/>}
              />
              <Route
                path={router.withTemplate}
                element={<WithTemplate/>}
              />
              <Route
                path={router.relabel}
                element={<Relabel/>}
              />
              <Route
                path={router.icons}
                element={<PreviewIcons/>}
              />
            </>
          )}
          <Route
            path={router.home}
            element={<CustomPanel/>}
          />
          <Route
            path={router.metrics}
            element={<ExploreMetrics/>}
          />
          <Route
            path={router.cardinality}
            element={<CardinalityPanel/>}
          />
          <Route
            path={router.topQueries}
            element={<TopQueries/>}
          />
          <Route
            path={router.trace}
            element={<TracePage/>}
          />
          <Route
            path={router.dashboards}
            element={<DashboardsLayout/>}
          />
          <Route
            path={router.withTemplate}
            element={<WithTemplate/>}
          />
          <Route
            path={router.relabel}
            element={<Relabel/>}
          />
          <Route
            path={router.icons}
            element={<PreviewIcons/>}
            path={REACT_APP_LOGS ? "/" : router.logs}
            element={<ExploreLogs/>}
          />
        </Route>
      </Routes>
Some files were not shown because too many files have changed in this diff.