all: open-sourcing single-node version
This commit is contained in: parent 81bbbf2cae, commit 1836c415e6
575 changed files with 276569 additions and 37 deletions
6  .dockerignore  Normal file
@@ -0,0 +1,6 @@
.git
vendor
gocache-for-docker
victoria-metrics-data
vmstorage-data
vmselect-cache
11  .gitignore  vendored  Normal file
@@ -0,0 +1,11 @@
/tags
/pkg
*.pprof
/bin
.idea
*.test
*.swp
/gocache-for-docker
/victoria-metrics-data
/vmstorage-data
/vmselect-cache
190  LICENSE  Normal file
@@ -0,0 +1,190 @@
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2019 VictoriaMetrics, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
64  Makefile  Normal file
@@ -0,0 +1,64 @@
PKG_PREFIX := github.com/VictoriaMetrics/VictoriaMetrics
|
||||
|
||||
BUILDINFO_TAG ?= $(shell echo $$(git describe --long --all | tr '/' '-')$$( \
|
||||
git diff-index --quiet HEAD -- || echo '-dirty-'$$(git diff-index -u HEAD | sha1sum | grep -oP '^.{8}')))
|
||||
|
||||
PKG_TAG ?= $(shell git tag -l --points-at HEAD)
|
||||
ifeq ($(PKG_TAG),)
|
||||
PKG_TAG := $(BUILDINFO_TAG)
|
||||
endif
|
||||
|
||||
GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(shell date -u +'%Y%m%d-%H%M%S')-$(BUILDINFO_TAG)'
|
||||
|
||||
all: \
|
||||
victoria-metrics-prod
|
||||
|
||||
include app/*/Makefile
|
||||
include deployment/*/Makefile
|
||||
|
||||
clean:
|
||||
rm -rf bin/*
|
||||
|
||||
release: victoria-metrics-prod
|
||||
cd bin && tar czf victoria-metrics-$(PKG_TAG).tar.gz victoria-metrics-prod
|
||||
|
||||
fmt:
|
||||
go fmt $(PKG_PREFIX)/lib/...
|
||||
go fmt $(PKG_PREFIX)/app/...
|
||||
|
||||
vet:
|
||||
go vet $(PKG_PREFIX)/lib/...
|
||||
go vet $(PKG_PREFIX)/app/...
|
||||
|
||||
lint: install-golint
|
||||
golint lib/...
|
||||
golint app/...
|
||||
|
||||
install-golint:
|
||||
which golint || GO111MODULE=off go get -u github.com/golang/lint/golint
|
||||
|
||||
errcheck: install-errcheck
|
||||
errcheck -exclude=errcheck_excludes.txt ./lib/...
|
||||
errcheck -exclude=errcheck_excludes.txt ./app/vminsert/...
|
||||
errcheck -exclude=errcheck_excludes.txt ./app/vmselect/...
|
||||
errcheck -exclude=errcheck_excludes.txt ./app/vmstorage/...
|
||||
|
||||
install-errcheck:
|
||||
which errcheck || GO111MODULE=off go get -u github.com/kisielk/errcheck
|
||||
|
||||
test:
|
||||
go test $(PKG_PREFIX)/lib/...
|
||||
|
||||
benchmark:
|
||||
go test -bench=. $(PKG_PREFIX)/lib/...
|
||||
|
||||
vendor-update:
|
||||
go get -u
|
||||
go mod tidy
|
||||
go mod vendor
|
||||
|
||||
quicktemplate-gen: install-qtc
|
||||
qtc
|
||||
|
||||
install-qtc:
|
||||
which qtc || GO111MODULE=off go get -u github.com/valyala/quicktemplate/qtc
|
433  README.md
@@ -1,42 +1,404 @@
<img align="center" alt="Victoria Metrics" src="logo.png">
|
||||
|
||||
## VictoriaMetrics - the best long-term remote storage for Prometheus
|
||||
## Single-node VictoriaMetrics
|
||||
|
||||
[![Latest Release](https://img.shields.io/github/release/VictoriaMetrics/VictoriaMetrics.svg?style=flat-square)](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
|
||||
|
||||
### VictoriaMetrics features
|
||||
VictoriaMetrics is a long-term remote storage for Prometheus.
|
||||
It is available in [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases),
|
||||
[docker images](https://hub.docker.com/r/valyala/victoria-metrics/) and
|
||||
in [source code](https://github.com/VictoriaMetrics/VictoriaMetrics).
|
||||
|
||||
- Native [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/) support. Additionally, VictoriaMetrics extends PromQL with useful features. See [Extended PromQL](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/ExtendedPromQL) for more details.
|
||||
- Simple configuration. Just copy-n-paste remote storage URL to Prometheus config and that's it! See [Quick Start](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/Quick-Start) for more info.
|
||||
- Reduced operational overhead. Offload operational burden - configuration, capacity planning, scalability, backups, retention, durability - from Prometheus local storage to VictoriaMetrics.
|
||||
- Insertion rate scales to [millions of metric values per second](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893).
|
||||
- Storage scales to [millions of metrics](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b) with trillions of metric values.
|
||||
- Wide range of retention periods - from 1 month to 5 years. Users may create different projects (aka `storage namespaces`) with different retention periods.
|
||||
- [Fast query engine](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4). It excels on heavy queries over thousands of metrics with millions of metric values.
|
||||
- The same remote storage URL may be used by multiple Prometheus instances collecting distinct metric sets, so all these metrics may be used in a single query (aka `global querying view`). This works ideally for multiple Prometheus instances located in different subnetworks / datacenters.
|
||||
- Accepts data in [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_reference/), so [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/) and other influx-compatible agents may send data to VictoriaMetrics.
|
||||
- [Single-server VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) accepts data in [Graphite plaintext protocol](https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol) if `-graphiteListenAddr` is set and in [OpenTSDB format](http://opentsdb.net/docs/build/html/api_telnet/put.html) if `-opentsdbListenAddr` is set.
|
||||
- VictoriaMetrics supports backfilling, i.e. data points from the past may be inserted into the DB.
|
||||
- Time series data may be exported via `/api/v1/export?match=<metric_selector>`. Optional `start` and `end` timestamps may be added for exporting time series in the given time range.
|
||||
- Time series may be deleted via `/api/v1/admin/tsdb/delete_series?match[]=<metric_selector>`.
|
||||
- [Instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) are supported.
|
||||
Cluster version is available [here](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
|
||||
|
||||
|
||||
### Useful links
|
||||
## Prominent features
|
||||
|
||||
* [Free single-node VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/Single-server-VictoriaMetrics)
|
||||
* [Site](https://victoriametrics.com/)
|
||||
* [`WITH` templates playground](https://play.victoriametrics.com/promql/expand-with-exprs)
|
||||
* [Grafana playground](http://play-grafana.victoriametrics.com:3000/d/4ome8yJmz/node-exporter-on-victoriametrics-demo)
|
||||
* [Docs](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki)
|
||||
* [FAQ](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/FAQ)
|
||||
* [Issues](https://github.com/VictoriaMetrics/VictoriaMetrics/issues)
|
||||
* [Google group](https://groups.google.com/forum/#!forum/victoriametrics)
|
||||
* [Creating the best remote storage for Prometheus](https://medium.com/devopslinks/victoriametrics-creating-the-best-remote-storage-for-prometheus-5d92d66787ac) - an article with technical details about VictoriaMetrics.
|
||||
* [Official Grafana Dashboard](https://grafana.com/dashboards/10229)
|
||||
* Supports [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/), so it can be used as Prometheus drop-in replacement in Grafana.
|
||||
Additionally, VictoriaMetrics extends PromQL with opt-in [useful features](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/ExtendedPromQL).
|
||||
* High performance and good scalability for both [inserts](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b)
|
||||
and [selects](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4).
|
||||
[Outperforms InfluxDB and TimescaleDB by up to 20x](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae).
|
||||
* [Uses 10x less RAM than InfluxDB](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893) when working with millions of unique time series (aka high cardinality).
|
||||
* High data compression, so [up to 70x more data points](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4)
|
||||
may be crammed into a limited storage compared to TimescaleDB.
|
||||
* Optimized for storage with high-latency IO and low iops (HDD and network storage in AWS, Google Cloud, Microsoft Azure, etc). See [graphs from these benchmarks](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b).
|
||||
* A single-node VictoriaMetrics may substitute moderately sized clusters built with competing solutions such as Thanos, Uber M3, Cortex, InfluxDB or TimescaleDB.
|
||||
See [vertical scalability benchmarks](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae).
|
||||
* Easy operation:
|
||||
* VictoriaMetrics consists of a single executable without external dependencies.
|
||||
* All the configuration is done via explicit command-line flags with reasonable defaults.
|
||||
* All the data is stored in a single directory pointed to by the `-storageDataPath` flag.
|
||||
* Easy backups from [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
|
||||
* Storage is protected from corruption on unclean shutdown (i.e. hardware reset or `kill -9`) thanks to [the storage architecture](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
|
||||
* Supports metrics' ingestion and backfilling via the following protocols:
|
||||
* [Prometheus remote write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write)
|
||||
* [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/)
|
||||
* [Graphite plaintext protocol](https://graphite.readthedocs.io/en/latest/feeding-carbon.html) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon)
|
||||
if `-graphiteListenAddr` is set.
|
||||
* [OpenTSDB put message](http://opentsdb.net/docs/build/html/api_telnet/put.html) if `-opentsdbListenAddr` is set.
|
||||
* Works well with large amounts of time series data from IoT sensors, connected car sensors and industrial sensors.
|
||||
* Has open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
|
||||
|
||||
|
||||
### Victoria Metrics Logo
|
||||
## Operation
|
||||
|
||||
|
||||
### Table of contents
|
||||
|
||||
* [How to build from sources](#how-to-build-from-sources)
|
||||
* [How to start VictoriaMetrics](#how-to-start-victoriametrics)
|
||||
* [Prometheus setup](#prometheus-setup)
|
||||
* [Grafana setup](#grafana-setup)
|
||||
* [How to send data from InfluxDB-compatible agents such as Telegraf](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf)
|
||||
* [How to send data from Graphite-compatible agents such as StatsD](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd)
|
||||
* [How to send data from OpenTSDB-compatible agents](#how-to-send-data-from-opentsdb-compatible-agents)
|
||||
* [How to apply new config / upgrade VictoriaMetrics](#how-to-apply-new-config--upgrade-victoriametrics)
|
||||
* [How to work with snapshots](#how-to-work-with-snapshots)
|
||||
* [How to delete time series](#how-to-delete-time-series)
|
||||
* [How to export time series](#how-to-export-time-series)
|
||||
* [Federation](#federation)
|
||||
* [Capacity planning](#capacity-planning)
|
||||
* [High Availability](#high-availability)
|
||||
* [Multiple retentions](#multiple-retentions)
|
||||
* [Scalability and cluster version](#scalability-and-cluster-version)
|
||||
* [Security](#security)
|
||||
* [Tuning](#tuning)
|
||||
* [Monitoring](#monitoring)
|
||||
* [Troubleshooting](#troubleshooting)
|
||||
* [Community and contributions](#community-and-contributions)
|
||||
* [Reporting bugs](#reporting-bugs)
|
||||
|
||||
|
||||
### How to build from sources
|
||||
|
||||
We recommend using either [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) or
|
||||
[docker images](https://hub.docker.com/r/valyala/victoria-metrics/) instead of building VictoriaMetrics
|
||||
from sources. Building from sources is reasonable when developing additional features specific
|
||||
to your needs.
|
||||
|
||||
|
||||
#### Development build
|
||||
|
||||
1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.12.
|
||||
2. Run `go build ./app/victoria-metrics` from the root folder of the repository.
|
||||
It will build `victoria-metrics` binary in the root folder of the repository.
|
||||
|
||||
#### Production build
|
||||
|
||||
1. [Install docker](https://docs.docker.com/install/).
|
||||
2. Run `make victoria-metrics-prod` from the root folder of the repository.
|
||||
It will build `victoria-metrics-prod` binary and put it into the `bin` folder.
|
||||
|
||||
#### Building docker images
|
||||
|
||||
Run `make package-victoria-metrics`. It will build `valyala/victoria-metrics:<PKG_TAG>` docker image locally.
|
||||
`<PKG_TAG>` is an auto-generated image tag, which depends on the source code in the repository.
|
||||
The `<PKG_TAG>` may be manually set via `PKG_TAG=foobar make package`.
|
||||
|
||||
|
||||
|
||||
### How to start VictoriaMetrics
|
||||
|
||||
Just start VictoriaMetrics executable or docker image with the desired command-line flags.
|
||||
|
||||
The following command line flags are used the most:
|
||||
|
||||
* `-storageDataPath` - path to data directory. VictoriaMetrics stores all the data in this directory.
|
||||
* `-retentionPeriod` - retention period in months for the data. Older data is automatically deleted.
|
||||
* `-httpListenAddr` - TCP address to listen to for http requests. By default it listens on port `8428` on all the network interfaces.
|
||||
* `-graphiteListenAddr` - TCP and UDP address to listen to for Graphite data. By default it is disabled.
|
||||
* `-opentsdbListenAddr` - TCP and UDP address to listen to for OpenTSDB data. By default it is disabled.
|
||||
|
||||
Pass `-help` to see all the available flags with descriptions and default values.
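For example, a minimal sketch of a first run with a custom data directory and 6-month retention might look like this (the path is illustrative; all other flags keep their defaults):

```
/path/to/victoria-metrics-prod -storageDataPath=/var/lib/victoria-metrics-data -retentionPeriod=6
```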
|
||||
|
||||
|
||||
### Prometheus setup
|
||||
|
||||
Add the following lines to Prometheus config file (it is usually located at `/etc/prometheus/prometheus.yml`):
|
||||
|
||||
```yml
|
||||
remote_write:
|
||||
- url: http://<victoriametrics-addr>:8428/api/v1/write
|
||||
queue_config:
|
||||
max_samples_per_send: 10000
|
||||
```
|
||||
|
||||
Substitute `<victoriametrics-addr>` with the hostname or IP address of VictoriaMetrics.
|
||||
Then apply the new config via the following command:
|
||||
|
||||
```
|
||||
kill -HUP `pidof prometheus`
|
||||
```
|
||||
|
||||
Prometheus writes incoming data to local storage and to remote storage in parallel.
|
||||
This means the data remains available in local storage for `--storage.tsdb.retention.time` duration
|
||||
if remote storage stops working.
|
||||
|
||||
If you plan sending data to VictoriaMetrics from multiple Prometheus instances, then add the following lines into `global` section
|
||||
of [Prometheus config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#configuration-file):
|
||||
|
||||
```yml
|
||||
global:
|
||||
external_labels:
|
||||
datacenter: dc-123
|
||||
```
|
||||
|
||||
This instructs Prometheus to add `datacenter=dc-123` label to each time series sent to remote storage.
|
||||
The label name may be arbitrary - `datacenter` is just an example. The label value must be unique
|
||||
across Prometheus instances, so time series may be filtered and grouped by this label.
|
||||
|
||||
|
||||
### Grafana setup
|
||||
|
||||
Create [Prometheus datasource](http://docs.grafana.org/features/datasources/prometheus/) in Grafana with the following Url:
|
||||
|
||||
```
|
||||
http://<victoriametrics-addr>:8428
|
||||
```
|
||||
|
||||
Substitute `<victoriametrics-addr>` with the hostname or IP address of VictoriaMetrics.
|
||||
|
||||
Then build graphs with the created datasource using [Prometheus query language](https://prometheus.io/docs/prometheus/latest/querying/basics/).
|
||||
VictoriaMetrics supports native PromQL and [extends it with useful features](https://github.com/VictoriaMetrics/VictoriaMetrics/wiki/ExtendedPromQL).
|
||||
|
||||
|
||||
### How to send data from InfluxDB-compatible agents such as [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)?
|
||||
|
||||
Just use `http://<victoriametrics-addr>:8428` URL instead of the InfluxDB URL in agents' configs.
|
||||
For instance, put the following lines into `Telegraf` config, so it sends data to VictoriaMetrics instead of InfluxDB:
|
||||
|
||||
```
|
||||
[[outputs.influxdb]]
|
||||
urls = ["http://<victoriametrics-addr>:8428"]
|
||||
```
|
||||
|
||||
Do not forget to substitute `<victoriametrics-addr>` with the real address where VictoriaMetrics runs.
|
||||
|
||||
VictoriaMetrics maps Influx data using the following rules:
|
||||
* [`db` query arg](https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint) is mapped into `db` label value
|
||||
* Field names are mapped to time series names prefixed by `{measurement}.` value
|
||||
* Field values are mapped to time series values
|
||||
* Tags are mapped to Prometheus labels as-is
|
||||
|
||||
|
||||
### How to send data from Graphite-compatible agents such as [StatsD](https://github.com/etsy/statsd)?
|
||||
|
||||
1) Enable Graphite receiver in VictoriaMetrics by setting `-graphiteListenAddr` command line flag. For instance,
|
||||
the following command will enable Graphite receiver in VictoriaMetrics on TCP and UDP port `2003`:
|
||||
|
||||
```
|
||||
/path/to/victoria-metrics-prod ... -graphiteListenAddr=:2003
|
||||
```
|
||||
|
||||
2) Use the configured address in Graphite-compatible agents. For instance, set `graphiteHost`
|
||||
to the VictoriaMetrics host in `StatsD` configs.
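For a quick manual test, a single data point in the Graphite plaintext format may be sent like this (illustrative metric and tag, assuming `nc` is available):

```
echo "foo.bar.baz;tag1=value1 123 `date +%s`" | nc -N <victoriametrics-addr> 2003
```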
|
||||
|
||||
|
||||
### How to send data from OpenTSDB-compatible agents?
|
||||
|
||||
1) Enable OpenTSDB receiver in VictoriaMetrics by setting `-opentsdbListenAddr` command line flag. For instance,
|
||||
the following command will enable OpenTSDB receiver in VictoriaMetrics on TCP and UDP port `4242`:
|
||||
|
||||
```
|
||||
/path/to/victoria-metrics-prod ... -opentsdbListenAddr=:4242
|
||||
```
|
||||
|
||||
2) Send data to the given address from OpenTSDB-compatible agents.
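For instance, a single data point in the OpenTSDB telnet `put` format may be sent for a quick manual test (illustrative metric and tag, assuming `nc` is available):

```
echo "put foo.bar.baz `date +%s` 123 tag1=value1" | nc -N <victoriametrics-addr> 4242
```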
|
||||
|
||||
|
||||
### How to apply new config / upgrade VictoriaMetrics?
|
||||
|
||||
VictoriaMetrics must be restarted in order to upgrade or apply new config:
|
||||
|
||||
1) Send `SIGINT` signal to VictoriaMetrics process in order to gracefully stop it.
|
||||
2) Wait until the process stops. This can take a few seconds.
|
||||
3) Start the upgraded VictoriaMetrics with new config.
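For example, on a typical Linux host the graceful stop from step 1 may be performed like this (assuming the production binary name used elsewhere in this document):

```
kill -INT `pidof victoria-metrics-prod`
```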
|
||||
|
||||
|
||||
### How to work with snapshots?
|
||||
|
||||
Navigate to `http://<victoriametrics-addr>:8428/snapshot/create` in order to create an instant snapshot.
|
||||
The page will return the following JSON response:
|
||||
|
||||
```
|
||||
{"status":"ok","snapshot":"<snapshot-name>"}
|
||||
```
|
||||
|
||||
Snapshots are created under `<-storageDataPath>/snapshots` directory, where `<-storageDataPath>`
|
||||
is the command-line flag value. Snapshots can be archived to backup storage via `rsync -L`, `scp -r`
|
||||
or any similar tool that follows symlinks during copying.
|
||||
|
||||
The `http://<victoriametrics-addr>:8428/snapshot/list` page contains the list of available snapshots.
|
||||
|
||||
Navigate to `http://<victoriametrics-addr>:8428/snapshot/delete?snapshot=<snapshot-name>` in order
|
||||
to delete `<snapshot-name>` snapshot.
|
||||
|
||||
Navigate to `http://<victoriametrics-addr>:8428/snapshot/delete_all` in order to delete all the snapshots.
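A possible end-to-end flow is to create a snapshot, archive it and delete it once the backup is complete (illustrative; `<snapshot-name>` is taken from the JSON response above):

```
curl http://<victoriametrics-addr>:8428/snapshot/create
rsync -aL <-storageDataPath>/snapshots/<snapshot-name> /path/to/backup/storage/
curl 'http://<victoriametrics-addr>:8428/snapshot/delete?snapshot=<snapshot-name>'
```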
|
||||
|
||||
|
||||
### How to delete time series?
|
||||
|
||||
Send a request to `http://<victoriametrics-addr>:8428/api/v1/admin/tsdb/delete_series?match[]=<timeseries_selector_for_delete>`,
|
||||
where `<timeseries_selector_for_delete>` may contain any [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors)
|
||||
for metrics to delete. After that all the time series matching the given selector are deleted. Storage space for
|
||||
the deleted time series isn't freed instantly - it is freed during subsequent merges of data files.
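For example, deleting all the series of a hypothetical metric `node_load1` for a single instance might look like this (`curl -G --data-urlencode` takes care of URL-encoding the selector):

```
curl -G 'http://<victoriametrics-addr>:8428/api/v1/admin/tsdb/delete_series' \
  --data-urlencode 'match[]=node_load1{instance="node-1:9100"}'
```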
|
||||
|
||||
|
||||
### How to export time series?
|
||||
|
||||
Send a request to `http://<victoriametrics-addr>:8428/api/v1/export?match[]=<timeseries_selector_for_export>`,
|
||||
where `<timeseries_selector_for_export>` may contain any [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors)
|
||||
for metrics to export. The response would contain all the data for the selected time series in [JSON streaming format](https://en.wikipedia.org/wiki/JSON_streaming#Line-delimited_JSON).
|
||||
Each JSON line would contain data for a single time series. An example output:
|
||||
|
||||
```
|
||||
{"metric":{"__name__":"up","job":"node_exporter","instance":"localhost:9100"},"values":[0,0,0],"timestamps":[1549891472010,1549891487724,1549891503438]}
|
||||
{"metric":{"__name__":"up","job":"prometheus","instance":"localhost:9090"},"values":[1,1,1],"timestamps":[1549891461511,1549891476511,1549891491511]}
|
||||
```
|
||||
|
||||
Optional `start` and `end` args may be added to the request in order to limit the time frame for the exported data. These args may contain either
|
||||
unix timestamp in seconds or [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) values.
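For instance, exporting all `up` series for a single day into a file might look like this (the time range is illustrative):

```
curl -G 'http://<victoriametrics-addr>:8428/api/v1/export' \
  --data-urlencode 'match[]=up' \
  --data-urlencode 'start=2019-05-01T00:00:00Z' \
  --data-urlencode 'end=2019-05-02T00:00:00Z' > exported_series.jsonl
```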
|
||||
|
||||
|
||||
### Federation
|
||||
|
||||
VictoriaMetrics exports [Prometheus-compatible federation data](https://prometheus.io/docs/prometheus/latest/federation/)
|
||||
at `http://<victoriametrics-addr>:8428/federate?match[]=<timeseries_selector_for_federation>`.
|
||||
|
||||
Optional `start` and `end` args may be added to the request in order to scrape the last point for each selected time series on the `[start ... end]` interval.
|
||||
`start` and `end` may contain either unix timestamp in seconds or [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) values. By default the last point
|
||||
on the interval `[now - max_lookback ... now]` is scraped for each time series. Default value for `max_lookback` is `5m` (5 minutes), but can be overridden.
|
||||
For instance, `/federate?match[]=up&max_lookback=1h` would return last points on the `[now - 1h ... now]` interval. This may be useful for time series federation
|
||||
with scrape intervals exceeding `5m`.
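For example, federating the latest points of all `up` series over the previous hour might look like this (illustrative selector):

```
curl -G 'http://<victoriametrics-addr>:8428/federate' \
  --data-urlencode 'match[]=up' \
  --data-urlencode 'max_lookback=1h'
```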
|
||||
|
||||
|
||||
### Capacity planning
|
||||
|
||||
Rough estimation of the required resources:
|
||||
|
||||
* RAM size: less than 1KB per active time series. So, ~1GB of RAM is required for 1M active time series.
|
||||
Time series is considered active if new data points have been added to it recently or if it has been recently queried.
|
||||
VictoriaMetrics stores various caches in RAM. Memory size for these caches may be limited with `-memory.allowedPercent` flag.
|
||||
* CPU cores: a CPU core per 300K inserted data points per second. So, ~4 CPU cores are required for processing
|
||||
the insert stream of 1M data points per second.
|
||||
If you see lower numbers per CPU core, then it is likely the active time series info doesn't fit in the caches,
|
||||
so you need more RAM for lowering CPU usage.
|
||||
* Storage size: less than a byte per data point on average. So, ~260GB is required for storing a month-long insert stream
|
||||
of 100K data points per second.
|
||||
The actual storage size heavily depends on data randomness (entropy). Higher randomness means higher storage size requirements.
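As a rough worked check of the storage estimate above: 100K data points per second for a 30-day month is `100,000 * 86,400 * 30 ≈ 260 billion` data points, so at under one byte per point this comes to roughly 260GB, matching the figure quoted.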
|
||||
|
||||
|
||||
### High availability
|
||||
|
||||
1) Install multiple VictoriaMetrics instances in distinct datacenters.
|
||||
2) Add addresses of these instances to `remote_write` section in Prometheus config:
|
||||
|
||||
```yml
|
||||
remote_write:
|
||||
- url: http://<victoriametrics-addr-1>:8428/api/v1/write
|
||||
queue_config:
|
||||
max_samples_per_send: 10000
|
||||
# ...
|
||||
- url: http://<victoriametrics-addr-N>:8428/api/v1/write
|
||||
queue_config:
|
||||
max_samples_per_send: 10000
|
||||
```
|
||||
|
||||
3) Apply the updated config:
|
||||
|
||||
```
|
||||
kill -HUP `pidof prometheus`
|
||||
```
|
||||
|
||||
4) Now Prometheus should write data into all the configured `remote_write` urls in parallel.
|
||||
5) Set up [Promxy](https://github.com/jacksontj/promxy) in front of all the VictoriaMetrics replicas.
|
||||
6) Set up Prometheus datasource in Grafana that points to Promxy.
|
||||
|
||||
|
||||
### Multiple retentions
|
||||
|
||||
Just start multiple VictoriaMetrics instances with distinct values for the following flags:
|
||||
|
||||
* `-retentionPeriod`
|
||||
* `-storageDataPath`, so the data for each retention period is saved in a separate directory
|
||||
* `-httpListenAddr`, so clients may reach VictoriaMetrics instance with proper retention
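For instance, two instances with 3-month and 24-month retention could be started side by side with the flags above (paths and ports are illustrative):

```
/path/to/victoria-metrics-prod -retentionPeriod=3 -storageDataPath=/var/lib/vm-3months -httpListenAddr=:8428
/path/to/victoria-metrics-prod -retentionPeriod=24 -storageDataPath=/var/lib/vm-24months -httpListenAddr=:8429
```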
|
||||
|
||||
|
||||
### Scalability and cluster version
|
||||
|
||||
Though single-node VictoriaMetrics cannot scale to multiple nodes, it is optimized for resource usage - storage size / bandwidth / IOPS, RAM, CPU.
|
||||
This means that a single-node VictoriaMetrics may scale vertically and substitute a moderately sized cluster built with competing solutions
|
||||
such as Thanos, Uber M3, InfluxDB or TimescaleDB.
|
||||
|
||||
So try single-node VictoriaMetrics at first and then [switch to cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster) if you still need
|
||||
horizontally scalable long-term remote storage for really large Prometheus deployments.
|
||||
[Contact us](mailto:info@victoriametrics.com) for paid support.
|
||||
|
||||
|
||||
### Security
|
||||
|
||||
Do not forget to protect sensitive endpoints in VictoriaMetrics when exposing it to untrusted networks such as the internet.
|
||||
Consider setting the following command-line flags:
|
||||
|
||||
* `-tls`, `-tlsCertFile` and `-tlsKeyFile` for switching from HTTP to HTTPS.
|
||||
* `-httpAuth.username` and `-httpAuth.password` for protecting all the HTTP endpoints
|
||||
with [HTTP Basic Authentication](https://en.wikipedia.org/wiki/Basic_access_authentication).
|
||||
* `-deleteAuthKey` for protecting `/api/v1/admin/tsdb/delete_series` endpoint. See [how to delete time series](#how-to-delete-time-series).
|
||||
* `-snapshotAuthKey` for protecting `/snapshot*` endpoints. See [how to work with snapshots](#how-to-work-with-snapshots).
|
||||
|
||||
Explicitly set internal network interface for TCP and UDP ports for data ingestion with Graphite and OpenTSDB formats.
|
||||
For example, substitute `-graphiteListenAddr=:2003` with `-graphiteListenAddr=<internal_iface_ip>:2003`.
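A possible hardened invocation combining the flags above might look like this (certificate paths and credentials are illustrative):

```
/path/to/victoria-metrics-prod -tls -tlsCertFile=/path/to/cert.pem -tlsKeyFile=/path/to/key.pem \
  -httpAuth.username=vmuser -httpAuth.password=<secret> \
  -graphiteListenAddr=<internal_iface_ip>:2003
```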
|
||||
|
||||
|
||||
### Tuning
|
||||
|
||||
* There is no need to tune VictoriaMetrics, since it uses reasonable defaults for command-line flags,
|
||||
which are automatically adjusted for the available CPU and RAM resources.
|
||||
* There is no need for operating system tuning, since VictoriaMetrics is optimized for default OS settings.
|
||||
The only option is increasing the limit on [the number of open files in the OS](https://medium.com/@muhammadtriwibowo/set-permanently-ulimit-n-open-files-in-ubuntu-4d61064429a),
|
||||
so Prometheus instances could establish more connections to VictoriaMetrics.
|
||||
|
||||
|
||||
### Monitoring
|
||||
|
||||
VictoriaMetrics exports internal metrics in Prometheus format on the `/metrics` page.
|
||||
Add this page to Prometheus' scrape config in order to collect VictoriaMetrics metrics.
|
||||
There is [an official Grafana dashboard for single-node VictoriaMetrics](https://grafana.com/dashboards/10229).
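A minimal scrape config sketch for this could look like the following (the job name and target are illustrative):

```yml
scrape_configs:
  - job_name: 'victoriametrics'
    static_configs:
      - targets: ['<victoriametrics-addr>:8428']
```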
|
||||
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
* If VictoriaMetrics works slowly and eats more than a CPU core per 100K ingested data points per second,
|
||||
then it is likely you have too many active time series for the current amount of RAM.
|
||||
It is recommended to increase the amount of RAM on the node with VictoriaMetrics in order to improve
|
||||
ingestion performance.
|
||||
Another option is to increase `-memory.allowedPercent` command-line flag value. Be careful with this
|
||||
option, since too big a value for `-memory.allowedPercent` may result in high I/O usage.
|
||||
|
||||
|
||||
## Community and contributions
|
||||
|
||||
Feel free to ask any questions regarding VictoriaMetrics [here](https://groups.google.com/forum/#!forum/victorametrics-users).
|
||||
|
||||
We are open to third-party pull requests provided they follow [KISS design principle](https://en.wikipedia.org/wiki/KISS_principle):
|
||||
|
||||
- Prefer simple code and architecture.
|
||||
- Avoid complex abstractions.
|
||||
- Avoid magic code and fancy algorithms.
|
||||
- Avoid [big external dependencies](https://medium.com/@valyala/stripping-dependency-bloat-in-victoriametrics-docker-image-983fb5912b0d).
|
||||
- Minimize the number of moving parts in the distributed system.
|
||||
- Avoid automated decisions, which may hurt cluster availability, consistency or performance.
|
||||
|
||||
Adhering to the `KISS` principle simplifies the resulting code and architecture, so it can be reviewed, understood and verified by many people.
|
||||
|
||||
|
||||
## Reporting bugs
|
||||
|
||||
Report bugs and propose new features [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues).
|
||||
|
||||
|
||||
## Victoria Metrics Logo
|
||||
|
||||
[Zip](VM_logo.zip) contains three folders with different image orientation (main color and inverted version).
|
||||
|
||||
|
@@ -47,24 +409,21 @@ Files included in each folder:
|
|||
* 2 EPS Adobe Illustrator EPS10 files
|
||||
|
||||
|
||||
#### Logo Usage Guidelines
|
||||
### Logo Usage Guidelines
|
||||
|
||||
##### Font used:
|
||||
#### Font used:
|
||||
|
||||
* Lato Black
|
||||
* Lato Black
|
||||
* Lato Regular
|
||||
|
||||
##### Color Palette:
|
||||
#### Color Palette:
|
||||
|
||||
* HEX [#110f0f](https://www.color-hex.com/color/110f0f)
|
||||
* HEX [#110f0f](https://www.color-hex.com/color/110f0f)
|
||||
* HEX [#ffffff](https://www.color-hex.com/color/ffffff)
|
||||
|
||||
#### We kindly ask:
|
||||
### We kindly ask:
|
||||
|
||||
- Please don't use any font other than the suggested ones.
|
||||
- There should be sufficient clear space around the logo.
|
||||
- Do not change spacing, alignment, or relative locations of the design elements.
|
||||
- Do not change the proportions of any of the design elements or the design itself. You may resize as needed but must retain all proportions.
|
||||
|
||||
|
||||
|
||||
|
|
21  app/victoria-metrics/Makefile  Normal file
@@ -0,0 +1,21 @@
# All these commands must run from repository root.
|
||||
|
||||
victoria-metrics-prod:
|
||||
APP_NAME=victoria-metrics $(MAKE) app-via-docker
|
||||
|
||||
package-victoria-metrics:
|
||||
APP_NAME=victoria-metrics \
|
||||
$(MAKE) package-via-docker
|
||||
|
||||
publish-victoria-metrics:
|
||||
APP_NAME=victoria-metrics $(MAKE) publish-via-docker
|
||||
|
||||
run-victoria-metrics:
|
||||
mkdir -p victoria-metrics-data
|
||||
DOCKER_OPTS='-v $(shell pwd)/victoria-metrics-data:/victoria-metrics-data -p 8428:8428 -p 2003:2003 -p 2003:2003/udp' \
|
||||
APP_NAME=victoria-metrics \
|
||||
ARGS='-graphiteListenAddr=:2003 -opentsdbListenAddr=:4242 -retentionPeriod=12 -search.maxUniqueTimeseries=1000000 -search.maxQueryDuration=10m' \
|
||||
$(MAKE) run-via-docker
|
||||
|
||||
victoria-metrics-arm:
|
||||
CC=arm-linux-gnueabi-gcc CGO_ENABLED=1 GOARCH=arm GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/victoria-metrics-arm ./app/victoria-metrics
|
5  app/victoria-metrics/deployment/Dockerfile  Normal file
@@ -0,0 +1,5 @@
FROM scratch
|
||||
COPY --from=local/certs:1.0.2 /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
|
||||
COPY bin/victoria-metrics-prod .
|
||||
EXPOSE 8428
|
||||
ENTRYPOINT ["/victoria-metrics-prod"]
|
60  app/victoria-metrics/main.go  Normal file
@@ -0,0 +1,60 @@
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
|
||||
)
|
||||
|
||||
var httpListenAddr = flag.String("httpListenAddr", ":8428", "TCP address to listen for http connections")
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
buildinfo.Init()
|
||||
logger.Init()
|
||||
logger.Infof("starting VictoraMetrics at %q...", *httpListenAddr)
|
||||
startTime := time.Now()
|
||||
vmstorage.Init()
|
||||
vmselect.Init()
|
||||
vminsert.Init()
|
||||
|
||||
go httpserver.Serve(*httpListenAddr, requestHandler)
|
||||
logger.Infof("started VictoriaMetrics in %s", time.Since(startTime))
|
||||
|
||||
sig := procutil.WaitForSigterm()
|
||||
logger.Infof("received signal %s", sig)
|
||||
|
||||
logger.Infof("gracefully shutting down webservice at %q", *httpListenAddr)
|
||||
startTime = time.Now()
|
||||
if err := httpserver.Stop(*httpListenAddr); err != nil {
|
||||
logger.Fatalf("cannot stop the webservice: %s", err)
|
||||
}
|
||||
vminsert.Stop()
|
||||
logger.Infof("successfully shut down the webservice in %s", time.Since(startTime))
|
||||
|
||||
vmstorage.Stop()
|
||||
vmselect.Stop()
|
||||
|
||||
logger.Infof("the VictoriaMetrics has been stopped in %s", time.Since(startTime))
|
||||
}
|
||||
|
||||
func requestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
if vminsert.RequestHandler(w, r) {
|
||||
return true
|
||||
}
|
||||
if vmselect.RequestHandler(w, r) {
|
||||
return true
|
||||
}
|
||||
if vmstorage.RequestHandler(w, r) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
1  app/vminsert/README.md  Normal file
@@ -0,0 +1 @@
`vminsert` routes the ingested data to `vmstorage`.
|
106  app/vminsert/common/insert_ctx.go  Normal file
@@ -0,0 +1,106 @@
package common
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
|
||||
// InsertCtx contains common bits for data points insertion.
|
||||
type InsertCtx struct {
|
||||
Labels []prompb.Label
|
||||
|
||||
mrs []storage.MetricRow
|
||||
metricNamesBuf []byte
|
||||
}
|
||||
|
||||
// Reset resets ctx for future fill with rowsLen rows.
|
||||
func (ctx *InsertCtx) Reset(rowsLen int) {
|
||||
	// Zero the label references, so the referenced data may be garbage collected.
	for i := range ctx.Labels {
		label := &ctx.Labels[i]
		label.Name = nil
		label.Value = nil
	}
|
||||
ctx.Labels = ctx.Labels[:0]
|
||||
|
||||
for i := range ctx.mrs {
|
||||
mr := &ctx.mrs[i]
|
||||
mr.MetricNameRaw = nil
|
||||
}
|
||||
ctx.mrs = ctx.mrs[:0]
|
||||
|
||||
if n := rowsLen - cap(ctx.mrs); n > 0 {
|
||||
ctx.mrs = append(ctx.mrs[:cap(ctx.mrs)], make([]storage.MetricRow, n)...)
|
||||
}
|
||||
ctx.mrs = ctx.mrs[:rowsLen]
|
||||
ctx.metricNamesBuf = ctx.metricNamesBuf[:0]
|
||||
}
|
||||
|
||||
func (ctx *InsertCtx) marshalMetricNameRaw(prefix []byte, labels []prompb.Label) []byte {
|
||||
start := len(ctx.metricNamesBuf)
|
||||
ctx.metricNamesBuf = append(ctx.metricNamesBuf, prefix...)
|
||||
ctx.metricNamesBuf = storage.MarshalMetricNameRaw(ctx.metricNamesBuf, labels)
|
||||
metricNameRaw := ctx.metricNamesBuf[start:]
|
||||
return metricNameRaw[:len(metricNameRaw):len(metricNameRaw)]
|
||||
}
|
||||
|
||||
// WriteDataPoint writes (timestamp, value) with the given prefix and labels into ctx buffer.
|
||||
func (ctx *InsertCtx) WriteDataPoint(prefix []byte, labels []prompb.Label, timestamp int64, value float64) {
|
||||
metricNameRaw := ctx.marshalMetricNameRaw(prefix, labels)
|
||||
ctx.addRow(metricNameRaw, timestamp, value)
|
||||
}
|
||||
|
||||
// WriteDataPointExt writes (timestamp, value) with the given metricNameRaw and labels into ctx buffer.
|
||||
//
|
||||
// It returns metricNameRaw for the given labels if len(metricNameRaw) == 0.
|
||||
func (ctx *InsertCtx) WriteDataPointExt(metricNameRaw []byte, labels []prompb.Label, timestamp int64, value float64) []byte {
|
||||
if len(metricNameRaw) == 0 {
|
||||
metricNameRaw = ctx.marshalMetricNameRaw(nil, labels)
|
||||
}
|
||||
ctx.addRow(metricNameRaw, timestamp, value)
|
||||
return metricNameRaw
|
||||
}
|
||||
|
||||
func (ctx *InsertCtx) addRow(metricNameRaw []byte, timestamp int64, value float64) {
|
||||
mrs := ctx.mrs
|
||||
if cap(mrs) > len(mrs) {
|
||||
mrs = mrs[:len(mrs)+1]
|
||||
} else {
|
||||
mrs = append(mrs, storage.MetricRow{})
|
||||
}
|
||||
mr := &mrs[len(mrs)-1]
|
||||
ctx.mrs = mrs
|
||||
mr.MetricNameRaw = metricNameRaw
|
||||
mr.Timestamp = timestamp
|
||||
mr.Value = value
|
||||
}
|
||||
|
||||
// AddLabel adds (name, value) label to ctx.Labels.
|
||||
//
|
||||
// name and value must exist until ctx.Labels is used.
|
||||
func (ctx *InsertCtx) AddLabel(name, value string) {
|
||||
labels := ctx.Labels
|
||||
if cap(labels) > len(labels) {
|
||||
labels = labels[:len(labels)+1]
|
||||
} else {
|
||||
labels = append(labels, prompb.Label{})
|
||||
}
|
||||
label := &labels[len(labels)-1]
|
||||
|
||||
// Do not copy name and value contents for performance reasons.
|
||||
// This reduces GC overhead on the number of objects and allocations.
|
||||
label.Name = bytesutil.ToUnsafeBytes(name)
|
||||
label.Value = bytesutil.ToUnsafeBytes(value)
|
||||
|
||||
ctx.Labels = labels
|
||||
}
|
||||
|
||||
// FlushBufs flushes buffered rows to the underlying storage.
|
||||
func (ctx *InsertCtx) FlushBufs() error {
|
||||
if err := vmstorage.AddRows(ctx.mrs); err != nil {
|
||||
return fmt.Errorf("cannot store metrics: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
34  app/vminsert/concurrencylimiter/concurrencylimiter.go  Normal file
@@ -0,0 +1,34 @@
package concurrencylimiter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// ch is the channel for limiting concurrent inserts.
|
||||
// Put an item into it before performing an insert and remove
|
||||
// the item after the insert is complete.
|
||||
ch = make(chan struct{}, runtime.GOMAXPROCS(-1)*2)
|
||||
|
||||
// waitDuration is the amount of time to wait until at least a single
|
||||
// concurrent insert out of cap(ch) inserts is complete.
|
||||
waitDuration = time.Second * 30
|
||||
)
|
||||
|
||||
// Do calls f with the limited concurrency.
|
||||
func Do(f func() error) error {
|
||||
// Limit the number of concurrent inserts in order to prevent excess
|
||||
// memory usage and CPU thrashing.
|
||||
t := time.NewTimer(waitDuration)
|
||||
select {
|
||||
case ch <- struct{}{}:
|
||||
t.Stop()
|
||||
err := f()
|
||||
<-ch
|
||||
return err
|
||||
case <-t.C:
|
||||
return fmt.Errorf("the server is overloaded with %d concurrent inserts; either increase the number of CPUs or reduce the load", cap(ch))
|
||||
}
|
||||
}
|
176  app/vminsert/graphite/parser.go  Normal file
@@ -0,0 +1,176 @@
package graphite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/valyala/fastjson/fastfloat"
|
||||
)
|
||||
|
||||
// Rows contains parsed graphite rows.
|
||||
type Rows struct {
|
||||
Rows []Row
|
||||
|
||||
tagsPool []Tag
|
||||
}
|
||||
|
||||
// Reset resets rs.
|
||||
func (rs *Rows) Reset() {
|
||||
// Reset items, so they can be GC'ed
|
||||
|
||||
for i := range rs.Rows {
|
||||
rs.Rows[i].reset()
|
||||
}
|
||||
rs.Rows = rs.Rows[:0]
|
||||
|
||||
for i := range rs.tagsPool {
|
||||
rs.tagsPool[i].reset()
|
||||
}
|
||||
rs.tagsPool = rs.tagsPool[:0]
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals graphite plaintext protocol rows from s.
|
||||
//
|
||||
// See https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol
|
||||
//
|
||||
// s must be unchanged until rs is in use.
|
||||
func (rs *Rows) Unmarshal(s string) error {
|
||||
var err error
|
||||
rs.Rows, rs.tagsPool, err = unmarshalRows(rs.Rows[:0], s, rs.tagsPool[:0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Row is a single graphite row.
|
||||
type Row struct {
|
||||
Metric string
|
||||
Tags []Tag
|
||||
Value float64
|
||||
Timestamp int64
|
||||
}
|
||||
|
||||
func (r *Row) reset() {
|
||||
r.Metric = ""
|
||||
r.Tags = nil
|
||||
r.Value = 0
|
||||
r.Timestamp = 0
|
||||
}
|
||||
|
||||
func (r *Row) unmarshal(s string, tagsPool []Tag) ([]Tag, error) {
|
||||
r.reset()
|
||||
n := strings.IndexByte(s, ' ')
|
||||
if n < 0 {
|
||||
return tagsPool, fmt.Errorf("cannot find whitespace between metric and value in %q", s)
|
||||
}
|
||||
metricAndTags := s[:n]
|
||||
tail := s[n+1:]
|
||||
|
||||
n = strings.IndexByte(metricAndTags, ';')
|
||||
if n < 0 {
|
||||
// No tags
|
||||
r.Metric = metricAndTags
|
||||
} else {
|
||||
// Tags found
|
||||
r.Metric = metricAndTags[:n]
|
||||
tagsStart := len(tagsPool)
|
||||
var err error
|
||||
tagsPool, err = unmarshalTags(tagsPool, metricAndTags[n+1:])
|
||||
if err != nil {
|
||||
return tagsPool, fmt.Errorf("cannot umarshal tags: %s", err)
|
||||
}
|
||||
tags := tagsPool[tagsStart:]
|
||||
r.Tags = tags[:len(tags):len(tags)]
|
||||
}
|
||||
|
||||
n = strings.IndexByte(tail, ' ')
|
||||
if n < 0 {
|
||||
return tagsPool, fmt.Errorf("cannot find whitespace between value and timestamp in %q", s)
|
||||
}
|
||||
r.Value = fastfloat.ParseBestEffort(tail[:n])
|
||||
r.Timestamp = fastfloat.ParseInt64BestEffort(tail[n+1:])
|
||||
return tagsPool, nil
|
||||
}
|
||||
|
||||
func unmarshalRows(dst []Row, s string, tagsPool []Tag) ([]Row, []Tag, error) {
|
||||
for len(s) > 0 {
|
||||
n := strings.IndexByte(s, '\n')
|
||||
if n == 0 {
|
||||
// Skip empty line
|
||||
s = s[1:]
|
||||
continue
|
||||
}
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Row{})
|
||||
}
|
||||
r := &dst[len(dst)-1]
|
||||
if n < 0 {
|
||||
// The last line.
|
||||
var err error
|
||||
tagsPool, err = r.unmarshal(s, tagsPool)
|
||||
if err != nil {
|
||||
return dst, tagsPool, err
|
||||
}
|
||||
return dst, tagsPool, nil
|
||||
}
|
||||
var err error
|
||||
tagsPool, err = r.unmarshal(s[:n], tagsPool)
|
||||
if err != nil {
|
||||
return dst, tagsPool, err
|
||||
}
|
||||
s = s[n+1:]
|
||||
}
|
||||
return dst, tagsPool, nil
|
||||
}
|
||||
|
||||
func unmarshalTags(dst []Tag, s string) ([]Tag, error) {
|
||||
for {
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Tag{})
|
||||
}
|
||||
tag := &dst[len(dst)-1]
|
||||
|
||||
n := strings.IndexByte(s, ';')
|
||||
if n < 0 {
|
||||
// The last tag found
|
||||
if err := tag.unmarshal(s); err != nil {
|
||||
return dst[:len(dst)-1], err
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
if err := tag.unmarshal(s[:n]); err != nil {
|
||||
return dst[:len(dst)-1], err
|
||||
}
|
||||
s = s[n+1:]
|
||||
}
|
||||
}
|
||||
|
||||
// Tag is a graphite tag.
|
||||
type Tag struct {
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
|
||||
func (t *Tag) reset() {
|
||||
t.Key = ""
|
||||
t.Value = ""
|
||||
}
|
||||
|
||||
func (t *Tag) unmarshal(s string) error {
|
||||
t.reset()
|
||||
n := strings.IndexByte(s, '=')
|
||||
if n < 0 {
|
||||
return fmt.Errorf("missing tag value for %q", s)
|
||||
}
|
||||
t.Key = s[:n]
|
||||
if len(t.Key) == 0 {
|
||||
return fmt.Errorf("tag key cannot be empty for %q", s)
|
||||
}
|
||||
t.Value = s[n+1:]
|
||||
return nil
|
||||
}
|
133  app/vminsert/graphite/parser_test.go  Normal file
@@ -0,0 +1,133 @@
package graphite
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRowsUnmarshalFailure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
if err := rows.Unmarshal(s); err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
}
|
||||
|
||||
// Try again
|
||||
if err := rows.Unmarshal(s); err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
}
|
||||
}
|
||||
|
||||
// Missing value
|
||||
f("aaa")
|
||||
|
||||
// Missing timestamp
|
||||
f("aaa 1123")
|
||||
|
||||
// Invalid multiline
|
||||
f("aaa\nbbb 123 34")
|
||||
|
||||
// missing tag
|
||||
f("aa; 12 34")
|
||||
|
||||
// missing tag value
|
||||
f("aa;bb 23 34")
|
||||
f("aa;=dsd 234 45")
|
||||
}
|
||||
|
||||
func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
f := func(s string, rowsExpected *Rows) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
t.Fatalf("cannot unmarshal %q: %s", s, err)
|
||||
}
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
|
||||
// Try unmarshaling again
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
t.Fatalf("cannot unmarshal %q: %s", s, err)
|
||||
}
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
|
||||
rows.Reset()
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("non-empty rows after reset: %+v", rows.Rows)
|
||||
}
|
||||
}
|
||||
|
||||
// Empty line
|
||||
f("", &Rows{})
|
||||
f("\n\n", &Rows{})
|
||||
|
||||
// Single line
|
||||
f("foobar -123.456 789", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foobar",
|
||||
Value: -123.456,
|
||||
Timestamp: 789,
|
||||
}},
|
||||
})
|
||||
f("foo.bar 123.456 789\n", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foo.bar",
|
||||
Value: 123.456,
|
||||
Timestamp: 789,
|
||||
}},
|
||||
})
|
||||
|
||||
// Tags
|
||||
f("foo;bar=baz 1 2", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foo",
|
||||
Tags: []Tag{{
|
||||
Key: "bar",
|
||||
Value: "baz",
|
||||
}},
|
||||
Value: 1,
|
||||
Timestamp: 2,
|
||||
}},
|
||||
})
|
||||
f("foo;bar=baz;aa=;x=y 1 2", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foo",
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "bar",
|
||||
Value: "baz",
|
||||
},
|
||||
{
|
||||
Key: "aa",
|
||||
Value: "",
|
||||
},
|
||||
{
|
||||
Key: "x",
|
||||
Value: "y",
|
||||
},
|
||||
},
|
||||
Value: 1,
|
||||
Timestamp: 2,
|
||||
}},
|
||||
})
|
||||
|
||||
// Multi lines
|
||||
f("foo 0.3 2\nbar.baz 0.34 43\n", &Rows{
|
||||
Rows: []Row{
|
||||
{
|
||||
Metric: "foo",
|
||||
Value: 0.3,
|
||||
Timestamp: 2,
|
||||
},
|
||||
{
|
||||
Metric: "bar.baz",
|
||||
Value: 0.34,
|
||||
Timestamp: 43,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
24
app/vminsert/graphite/parser_timing_test.go
Normal file
|
@@ -0,0 +1,24 @@
|
|||
package graphite
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkRowsUnmarshal(b *testing.B) {
|
||||
s := `cpu.usage_user 1.23 1234556768
|
||||
cpu.usage_system 23.344 1234556768
|
||||
cpu.usage_iowait 3.3443 1234556769
|
||||
cpu.usage_irq 0.34432 1234556768
|
||||
`
|
||||
b.SetBytes(int64(len(s)))
|
||||
b.ReportAllocs()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var rows Rows
|
||||
for pb.Next() {
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
panic(fmt.Errorf("cannot unmarshal %q: %s", s, err))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
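The benchmark above can be run from the repository root with: go test -bench=RowsUnmarshal ./app/vminsert/graphite ; thanks to SetBytes and ReportAllocs it reports throughput in bytes/op as well as allocations per iteration.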
166
app/vminsert/graphite/request_handler.go
Normal file
|
@@ -0,0 +1,166 @@
|
|||
package graphite
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="graphite"}`)
|
||||
|
||||
// insertHandler processes remote write for graphite plaintext protocol.
|
||||
//
|
||||
// See https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol
|
||||
func insertHandler(r io.Reader) error {
|
||||
return concurrencylimiter.Do(func() error {
|
||||
return insertHandlerInternal(r)
|
||||
})
|
||||
}
|
||||
|
||||
func insertHandlerInternal(r io.Reader) error {
|
||||
ctx := getPushCtx()
|
||||
defer putPushCtx(ctx)
|
||||
for ctx.Read(r) {
|
||||
if err := ctx.InsertRows(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return ctx.Error()
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) InsertRows() error {
|
||||
rows := ctx.Rows.Rows
|
||||
ic := &ctx.Common
|
||||
ic.Reset(len(rows))
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
ic.Labels = ic.Labels[:0]
|
||||
ic.AddLabel("", r.Metric)
|
||||
for j := range r.Tags {
|
||||
tag := &r.Tags[j]
|
||||
ic.AddLabel(tag.Key, tag.Value)
|
||||
}
|
||||
ic.WriteDataPoint(nil, ic.Labels, r.Timestamp, r.Value)
|
||||
}
|
||||
rowsInserted.Add(len(rows))
|
||||
return ic.FlushBufs()
|
||||
}
|
||||
|
||||
const maxReadPacketSize = 4 * 1024 * 1024
|
||||
|
||||
const flushTimeout = 3 * time.Second
|
||||
|
||||
func (ctx *pushCtx) Read(r io.Reader) bool {
|
||||
graphiteReadCalls.Inc()
|
||||
if ctx.err != nil {
|
||||
return false
|
||||
}
|
||||
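// For TCP connections, bound each read with flushTimeout so the data received so far is parsed and flushed periodically instead of waiting for the connection to close.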
if c, ok := r.(net.Conn); ok {
|
||||
if err := c.SetReadDeadline(time.Now().Add(flushTimeout)); err != nil {
|
||||
graphiteReadErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot set read deadline: %s", err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
lr := io.LimitReader(r, maxReadPacketSize)
|
||||
ctx.reqBuf.Reset()
|
||||
ctx.reqBuf.B = append(ctx.reqBuf.B[:0], ctx.tailBuf...)
|
||||
n, err := io.CopyBuffer(&ctx.reqBuf, lr, ctx.copyBuf[:])
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok && ne.Timeout() {
|
||||
// Flush the read data on timeout and try reading again.
|
||||
} else {
|
||||
graphiteReadErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot read graphite plaintext protocol data: %s", err)
|
||||
return false
|
||||
}
|
||||
} else if n < maxReadPacketSize {
|
||||
// Mark the end of stream.
|
||||
ctx.err = io.EOF
|
||||
}
|
||||
|
||||
// Parse all the rows until the last newline in ctx.reqBuf.B
|
||||
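// Bytes after the last newline form an incomplete line; keep them in tailBuf so they are prepended to the data read on the next call.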
nn := bytes.LastIndexByte(ctx.reqBuf.B, '\n')
|
||||
ctx.tailBuf = ctx.tailBuf[:0]
|
||||
if nn >= 0 {
|
||||
ctx.tailBuf = append(ctx.tailBuf[:0], ctx.reqBuf.B[nn+1:]...)
|
||||
ctx.reqBuf.B = ctx.reqBuf.B[:nn]
|
||||
}
|
||||
if err = ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf.B)); err != nil {
|
||||
graphiteUnmarshalErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot unmarshal graphite plaintext protocol data with size %d: %s", len(ctx.reqBuf.B), err)
|
||||
return false
|
||||
}
|
||||
|
||||
// Convert timestamps from seconds to milliseconds
|
||||
for i := range ctx.Rows.Rows {
|
||||
ctx.Rows.Rows[i].Timestamp *= 1e3
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type pushCtx struct {
|
||||
Rows Rows
|
||||
Common common.InsertCtx
|
||||
|
||||
reqBuf bytesutil.ByteBuffer
|
||||
tailBuf []byte
|
||||
copyBuf [16 * 1024]byte
|
||||
|
||||
err error
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) Error() error {
|
||||
if ctx.err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return ctx.err
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) reset() {
|
||||
ctx.Rows.Reset()
|
||||
ctx.Common.Reset(0)
|
||||
ctx.reqBuf.Reset()
|
||||
ctx.tailBuf = ctx.tailBuf[:0]
|
||||
|
||||
ctx.err = nil
|
||||
}
|
||||
|
||||
var (
|
||||
graphiteReadCalls = metrics.NewCounter(`vm_read_calls_total{name="graphite"}`)
|
||||
graphiteReadErrors = metrics.NewCounter(`vm_read_errors_total{name="graphite"}`)
|
||||
graphiteUnmarshalErrors = metrics.NewCounter(`vm_unmarshal_errors_total{name="graphite"}`)
|
||||
)
|
||||
|
||||
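// Pooling strategy: a fixed-size channel (bounded by GOMAXPROCS) serves as a fast cache of hot pushCtx objects, while sync.Pool absorbs the overflow and lets idle contexts be garbage collected.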
func getPushCtx() *pushCtx {
|
||||
select {
|
||||
case ctx := <-pushCtxPoolCh:
|
||||
return ctx
|
||||
default:
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
}
|
||||
}
|
||||
|
||||
func putPushCtx(ctx *pushCtx) {
|
||||
ctx.reset()
|
||||
select {
|
||||
case pushCtxPoolCh <- ctx:
|
||||
default:
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
var pushCtxPool sync.Pool
|
||||
var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1))
|
137
app/vminsert/graphite/server.go
Normal file
|
@@ -0,0 +1,137 @@
|
|||
package graphite
|
||||
|
||||
import (
|
||||
"net"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
writeRequestsTCP = metrics.NewCounter(`vm_graphite_requests_total{name="write", net="tcp"}`)
|
||||
writeErrorsTCP = metrics.NewCounter(`vm_graphite_request_errors_total{name="write", net="tcp"}`)
|
||||
|
||||
writeRequestsUDP = metrics.NewCounter(`vm_graphite_requests_total{name="write", net="udp"}`)
|
||||
writeErrorsUDP = metrics.NewCounter(`vm_graphite_request_errors_total{name="write", net="udp"}`)
|
||||
)
|
||||
|
||||
// Serve starts graphite server on the given addr.
|
||||
func Serve(addr string) {
|
||||
logger.Infof("starting TCP Graphite server at %q", addr)
|
||||
lnTCP, err := net.Listen("tcp4", addr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot start TCP Graphite server at %q: %s", addr, err)
|
||||
}
|
||||
listenerTCP = lnTCP
|
||||
|
||||
logger.Infof("starting UDP Graphite server at %q", addr)
|
||||
lnUDP, err := net.ListenPacket("udp4", addr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot start UDP Graphite server at %q: %s", addr, err)
|
||||
}
|
||||
listenerUDP = lnUDP
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
serveTCP(listenerTCP)
|
||||
logger.Infof("stopped TCP Graphite server at %q", addr)
|
||||
}()
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
serveUDP(listenerUDP)
|
||||
logger.Infof("stopped UDP Graphite server at %q", addr)
|
||||
}()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func serveTCP(ln net.Listener) {
|
||||
for {
|
||||
c, err := ln.Accept()
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok {
|
||||
if ne.Temporary() {
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
if strings.Contains(err.Error(), "use of closed network connection") {
|
||||
break
|
||||
}
|
||||
logger.Fatalf("unrecoverable error when accepting TCP Graphite connections: %s", err)
|
||||
}
|
||||
logger.Fatalf("unexpected error when accepting TCP Graphite connections: %s", err)
|
||||
}
|
||||
go func() {
|
||||
writeRequestsTCP.Inc()
|
||||
if err := insertHandler(c); err != nil {
|
||||
writeErrorsTCP.Inc()
|
||||
logger.Errorf("error in TCP Graphite conn %q<->%q: %s", c.LocalAddr(), c.RemoteAddr(), err)
|
||||
}
|
||||
_ = c.Close()
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func serveUDP(ln net.PacketConn) {
|
||||
gomaxprocs := runtime.GOMAXPROCS(-1)
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < gomaxprocs; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
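// Each UDP reader goroutine owns a single 64KB buffer; a Graphite UDP packet is expected to fit into one ReadFrom call.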
var bb bytesutil.ByteBuffer
|
||||
bb.B = bytesutil.Resize(bb.B, 64*1024)
|
||||
for {
|
||||
bb.Reset()
|
||||
bb.B = bb.B[:cap(bb.B)]
|
||||
n, addr, err := ln.ReadFrom(bb.B)
|
||||
if err != nil {
|
||||
writeErrorsUDP.Inc()
|
||||
if ne, ok := err.(net.Error); ok {
|
||||
if ne.Temporary() {
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
if strings.Contains(err.Error(), "use of closed network connection") {
|
||||
break
|
||||
}
|
||||
}
|
||||
logger.Errorf("cannot read Graphite UDP data: %s", err)
|
||||
continue
|
||||
}
|
||||
bb.B = bb.B[:n]
|
||||
writeRequestsUDP.Inc()
|
||||
if err := insertHandler(bb.NewReader()); err != nil {
|
||||
writeErrorsUDP.Inc()
|
||||
logger.Errorf("error in UDP Graphite conn %q<->%q: %s", ln.LocalAddr(), addr, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
var (
|
||||
listenerTCP net.Listener
|
||||
listenerUDP net.PacketConn
|
||||
)
|
||||
|
||||
// Stop stops the server.
|
||||
func Stop() {
|
||||
logger.Infof("stopping TCP Graphite server at %q...", listenerTCP.Addr())
|
||||
if err := listenerTCP.Close(); err != nil {
|
||||
logger.Errorf("cannot close TCP Graphite server: %s", err)
|
||||
}
|
||||
logger.Infof("stopping UDP Graphite server at %q...", listenerUDP.LocalAddr())
|
||||
if err := listenerUDP.Close(); err != nil {
|
||||
logger.Errorf("cannot close UDP Graphite server: %s", err)
|
||||
}
|
||||
}
|
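As a quick illustration of how this server is exercised, here is a minimal client sketch (not part of the diff) that sends one plaintext metric over TCP. It assumes the server was started locally with -graphiteListenAddr=:2003, the address suggested by the flag description in app/vminsert/main.go.

package main

import (
	"fmt"
	"log"
	"net"
	"time"
)

func main() {
	// Assumes VictoriaMetrics runs locally with -graphiteListenAddr=:2003.
	conn, err := net.Dial("tcp", "127.0.0.1:2003")
	if err != nil {
		log.Fatalf("cannot connect to graphite listener: %s", err)
	}
	defer conn.Close()

	// Graphite plaintext format: <metric>[;tag=value...] <value> <unix_timestamp_seconds>
	line := fmt.Sprintf("cpu.usage_user;host=web1 1.23 %d\n", time.Now().Unix())
	if _, err := conn.Write([]byte(line)); err != nil {
		log.Fatalf("cannot send metric: %s", err)
	}
}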
384
app/vminsert/influx/parser.go
Normal file
|
@@ -0,0 +1,384 @@
|
|||
package influx
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/valyala/fastjson/fastfloat"
|
||||
)
|
||||
|
||||
// Rows contains parsed influx rows.
|
||||
type Rows struct {
|
||||
Rows []Row
|
||||
|
||||
tagsPool []Tag
|
||||
fieldsPool []Field
|
||||
}
|
||||
|
||||
// Reset resets rs.
|
||||
func (rs *Rows) Reset() {
|
||||
// Reset rows, tags and fields in order to remove references to old data,
|
||||
// so the GC can collect it.
|
||||
|
||||
for i := range rs.Rows {
|
||||
rs.Rows[i].reset()
|
||||
}
|
||||
rs.Rows = rs.Rows[:0]
|
||||
|
||||
for i := range rs.tagsPool {
|
||||
rs.tagsPool[i].reset()
|
||||
}
|
||||
rs.tagsPool = rs.tagsPool[:0]
|
||||
|
||||
for i := range rs.fieldsPool {
|
||||
rs.fieldsPool[i].reset()
|
||||
}
|
||||
rs.fieldsPool = rs.fieldsPool[:0]
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals influx line protocol rows from s.
|
||||
//
|
||||
// See https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/
|
||||
//
|
||||
// s must remain unchanged while rs is in use.
|
||||
func (rs *Rows) Unmarshal(s string) error {
|
||||
var err error
|
||||
rs.Rows, rs.tagsPool, rs.fieldsPool, err = unmarshalRows(rs.Rows[:0], s, rs.tagsPool[:0], rs.fieldsPool[:0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Row is a single influx row.
|
||||
type Row struct {
|
||||
Measurement string
|
||||
Tags []Tag
|
||||
Fields []Field
|
||||
Timestamp int64
|
||||
}
|
||||
|
||||
func (r *Row) reset() {
|
||||
r.Measurement = ""
|
||||
r.Tags = nil
|
||||
r.Fields = nil
|
||||
r.Timestamp = 0
|
||||
}
|
||||
|
||||
func (r *Row) unmarshal(s string, tagsPool []Tag, fieldsPool []Field) ([]Tag, []Field, error) {
|
||||
r.reset()
|
||||
noEscapeChars := strings.IndexByte(s, '\\') < 0
|
||||
n := nextUnescapedChar(s, ' ', noEscapeChars)
|
||||
if n < 0 {
|
||||
return tagsPool, fieldsPool, fmt.Errorf("cannot find Whitespace I in %q", s)
|
||||
}
|
||||
measurementTags := s[:n]
|
||||
s = s[n+1:]
|
||||
|
||||
// Parse measurement and tags
|
||||
var err error
|
||||
n = nextUnescapedChar(measurementTags, ',', noEscapeChars)
|
||||
if n >= 0 {
|
||||
tagsStart := len(tagsPool)
|
||||
tagsPool, err = unmarshalTags(tagsPool, measurementTags[n+1:], noEscapeChars)
|
||||
if err != nil {
|
||||
return tagsPool, fieldsPool, err
|
||||
}
|
||||
tags := tagsPool[tagsStart:]
|
||||
r.Tags = tags[:len(tags):len(tags)]
|
||||
measurementTags = measurementTags[:n]
|
||||
}
|
||||
r.Measurement = unescapeTagValue(measurementTags, noEscapeChars)
|
||||
if len(r.Measurement) == 0 {
|
||||
return tagsPool, fieldsPool, fmt.Errorf("measurement cannot be empty. measurementTags=%q", s)
|
||||
}
|
||||
|
||||
// Parse fields
|
||||
fieldsStart := len(fieldsPool)
|
||||
hasQuotedFields := nextUnescapedChar(s, '"', noEscapeChars) >= 0
|
||||
n = nextUnquotedChar(s, ' ', noEscapeChars, hasQuotedFields)
|
||||
if n < 0 {
|
||||
// No timestamp.
|
||||
fieldsPool, err = unmarshalInfluxFields(fieldsPool, s, noEscapeChars, hasQuotedFields)
|
||||
if err != nil {
|
||||
return tagsPool, fieldsPool, err
|
||||
}
|
||||
fields := fieldsPool[fieldsStart:]
|
||||
r.Fields = fields[:len(fields):len(fields)]
|
||||
return tagsPool, fieldsPool, nil
|
||||
}
|
||||
fieldsPool, err = unmarshalInfluxFields(fieldsPool, s[:n], noEscapeChars, hasQuotedFields)
|
||||
if err != nil {
|
||||
return tagsPool, fieldsPool, err
|
||||
}
|
||||
r.Fields = fieldsPool[fieldsStart:]
|
||||
s = s[n+1:]
|
||||
|
||||
// Parse timestamp
|
||||
timestamp := fastfloat.ParseInt64BestEffort(s)
|
||||
if timestamp == 0 && s != "0" {
|
||||
return tagsPool, fieldsPool, fmt.Errorf("cannot parse timestamp %q", s)
|
||||
}
|
||||
r.Timestamp = timestamp
|
||||
return tagsPool, fieldsPool, nil
|
||||
}
|
||||
|
||||
// Tag represents influx tag.
|
||||
type Tag struct {
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
|
||||
func (tag *Tag) reset() {
|
||||
tag.Key = ""
|
||||
tag.Value = ""
|
||||
}
|
||||
|
||||
func (tag *Tag) unmarshal(s string, noEscapeChars bool) error {
|
||||
tag.reset()
|
||||
n := nextUnescapedChar(s, '=', noEscapeChars)
|
||||
if n < 0 {
|
||||
return fmt.Errorf("missing tag value for %q", s)
|
||||
}
|
||||
tag.Key = unescapeTagValue(s[:n], noEscapeChars)
|
||||
if len(tag.Key) == 0 {
|
||||
return fmt.Errorf("tag key cannot be empty")
|
||||
}
|
||||
tag.Value = unescapeTagValue(s[n+1:], noEscapeChars)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Field represents influx field.
|
||||
type Field struct {
|
||||
Key string
|
||||
Value float64
|
||||
}
|
||||
|
||||
func (f *Field) reset() {
|
||||
f.Key = ""
|
||||
f.Value = 0
|
||||
}
|
||||
|
||||
func (f *Field) unmarshal(s string, noEscapeChars, hasQuotedFields bool) error {
|
||||
f.reset()
|
||||
n := nextUnescapedChar(s, '=', noEscapeChars)
|
||||
if n < 0 {
|
||||
return fmt.Errorf("missing field value for %q", s)
|
||||
}
|
||||
f.Key = unescapeTagValue(s[:n], noEscapeChars)
|
||||
if len(f.Key) == 0 {
|
||||
return fmt.Errorf("field key cannot be empty")
|
||||
}
|
||||
v, err := parseFieldValue(s[n+1:], hasQuotedFields)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot parse field value for %q: %s", f.Key, err)
|
||||
}
|
||||
f.Value = v
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmarshalRows(dst []Row, s string, tagsPool []Tag, fieldsPool []Field) ([]Row, []Tag, []Field, error) {
|
||||
for len(s) > 0 {
|
||||
n := strings.IndexByte(s, '\n')
|
||||
if n == 0 {
|
||||
// Skip empty line
|
||||
s = s[1:]
|
||||
continue
|
||||
}
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Row{})
|
||||
}
|
||||
r := &dst[len(dst)-1]
|
||||
if n < 0 {
|
||||
// The last line.
|
||||
var err error
|
||||
tagsPool, fieldsPool, err = r.unmarshal(s, tagsPool, fieldsPool)
|
||||
if err != nil {
|
||||
return dst, tagsPool, fieldsPool, err
|
||||
}
|
||||
return dst, tagsPool, fieldsPool, nil
|
||||
}
|
||||
var err error
|
||||
tagsPool, fieldsPool, err = r.unmarshal(s[:n], tagsPool, fieldsPool)
|
||||
if err != nil {
|
||||
return dst, tagsPool, fieldsPool, err
|
||||
}
|
||||
s = s[n+1:]
|
||||
}
|
||||
return dst, tagsPool, fieldsPool, nil
|
||||
}
|
||||
|
||||
func unmarshalTags(dst []Tag, s string, noEscapeChars bool) ([]Tag, error) {
|
||||
for {
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Tag{})
|
||||
}
|
||||
tag := &dst[len(dst)-1]
|
||||
n := nextUnescapedChar(s, ',', noEscapeChars)
|
||||
if n < 0 {
|
||||
if err := tag.unmarshal(s, noEscapeChars); err != nil {
|
||||
return dst, err
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
if err := tag.unmarshal(s[:n], noEscapeChars); err != nil {
|
||||
return dst, err
|
||||
}
|
||||
s = s[n+1:]
|
||||
}
|
||||
}
|
||||
|
||||
func unmarshalInfluxFields(dst []Field, s string, noEscapeChars, hasQuotedFields bool) ([]Field, error) {
|
||||
for {
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Field{})
|
||||
}
|
||||
f := &dst[len(dst)-1]
|
||||
n := nextUnquotedChar(s, ',', noEscapeChars, hasQuotedFields)
|
||||
if n < 0 {
|
||||
if err := f.unmarshal(s, noEscapeChars, hasQuotedFields); err != nil {
|
||||
return dst, err
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
if err := f.unmarshal(s[:n], noEscapeChars, hasQuotedFields); err != nil {
|
||||
return dst, err
|
||||
}
|
||||
s = s[n+1:]
|
||||
}
|
||||
}
|
||||
|
||||
func unescapeTagValue(s string, noEscapeChars bool) string {
|
||||
if noEscapeChars {
|
||||
// Fast path - no escape chars.
|
||||
return s
|
||||
}
|
||||
n := strings.IndexByte(s, '\\')
|
||||
if n < 0 {
|
||||
return s
|
||||
}
|
||||
|
||||
// Slow path. Remove escape chars.
|
||||
dst := make([]byte, 0, len(s))
|
||||
for {
|
||||
dst = append(dst, s[:n]...)
|
||||
s = s[n+1:]
|
||||
if len(s) == 0 {
|
||||
return string(append(dst, '\\'))
|
||||
}
|
||||
ch := s[0]
|
||||
if ch != ' ' && ch != ',' && ch != '=' && ch != '\\' {
|
||||
dst = append(dst, '\\')
|
||||
}
|
||||
dst = append(dst, ch)
|
||||
s = s[1:]
|
||||
n = strings.IndexByte(s, '\\')
|
||||
if n < 0 {
|
||||
return string(append(dst, s...))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func parseFieldValue(s string, hasQuotedFields bool) (float64, error) {
|
||||
if len(s) == 0 {
|
||||
return 0, fmt.Errorf("field value cannot be empty")
|
||||
}
|
||||
if hasQuotedFields && s[0] == '"' {
|
||||
if len(s) < 2 || s[len(s)-1] != '"' {
|
||||
return 0, fmt.Errorf("missing closing quote for quoted field value %s", s)
|
||||
}
|
||||
// Quoted string is translated to empty value.
|
||||
return 0, nil
|
||||
}
|
||||
ch := s[len(s)-1]
|
||||
if ch == 'i' {
|
||||
// Integer value
|
||||
ss := s[:len(s)-1]
|
||||
n := fastfloat.ParseInt64BestEffort(ss)
|
||||
return float64(n), nil
|
||||
}
|
||||
if ch == 'u' {
|
||||
// Unsigned integer value
|
||||
ss := s[:len(s)-1]
|
||||
n := fastfloat.ParseUint64BestEffort(ss)
|
||||
return float64(n), nil
|
||||
}
|
||||
if s == "t" || s == "T" || s == "true" || s == "True" || s == "TRUE" {
|
||||
return 1, nil
|
||||
}
|
||||
if s == "f" || s == "F" || s == "false" || s == "False" || s == "FALSE" {
|
||||
return 0, nil
|
||||
}
|
||||
return fastfloat.ParseBestEffort(s), nil
|
||||
}
|
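// Examples of the mapping above: "-45i" -> -45, "23u" -> 23, "true"/"T" -> 1, "false"/"f" -> 0, a quoted string -> 0, "1.5" -> 1.5.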
||||
|
||||
func nextUnescapedChar(s string, ch byte, noEscapeChars bool) int {
|
||||
if noEscapeChars {
|
||||
// Fast path: just search for ch in s, since s has no escape chars.
|
||||
return strings.IndexByte(s, ch)
|
||||
}
|
||||
|
||||
sOrig := s
|
||||
again:
|
||||
n := strings.IndexByte(s, ch)
|
||||
if n < 0 {
|
||||
return -1
|
||||
}
|
||||
if n == 0 {
|
||||
return len(sOrig) - len(s) + n
|
||||
}
|
||||
if s[n-1] != '\\' {
|
||||
return len(sOrig) - len(s) + n
|
||||
}
|
||||
nOrig := n
|
||||
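// Count the consecutive backslashes immediately before the match: an even count means they escape each other and the matched char itself is not escaped.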
slashes := 0
|
||||
for n > 0 && s[n-1] == '\\' {
|
||||
slashes++
|
||||
n--
|
||||
}
|
||||
if slashes&1 == 0 {
|
||||
return len(sOrig) - len(s) + nOrig
|
||||
}
|
||||
s = s[nOrig+1:]
|
||||
goto again
|
||||
}
|
||||
|
||||
func nextUnquotedChar(s string, ch byte, noEscapeChars, hasQuotedFields bool) int {
|
||||
if !hasQuotedFields {
|
||||
return nextUnescapedChar(s, ch, noEscapeChars)
|
||||
}
|
||||
sOrig := s
|
||||
for {
|
||||
n := nextUnescapedChar(s, ch, noEscapeChars)
|
||||
if n < 0 {
|
||||
return -1
|
||||
}
|
||||
if !isInQuote(s[:n], noEscapeChars) {
|
||||
return n + len(sOrig) - len(s)
|
||||
}
|
||||
s = s[n+1:]
|
||||
n = nextUnescapedChar(s, '"', noEscapeChars)
|
||||
if n < 0 {
|
||||
return -1
|
||||
}
|
||||
s = s[n+1:]
|
||||
}
|
||||
}
|
||||
|
||||
func isInQuote(s string, noEscapeChars bool) bool {
|
||||
isQuote := false
|
||||
for {
|
||||
n := nextUnescapedChar(s, '"', noEscapeChars)
|
||||
if n < 0 {
|
||||
return isQuote
|
||||
}
|
||||
isQuote = !isQuote
|
||||
s = s[n+1:]
|
||||
}
|
||||
}
|
341
app/vminsert/influx/parser_test.go
Normal file
|
@@ -0,0 +1,341 @@
|
|||
package influx
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNextUnquotedChar(t *testing.T) {
|
||||
f := func(s string, ch byte, noUnescape bool, nExpected int) {
|
||||
t.Helper()
|
||||
n := nextUnquotedChar(s, ch, noUnescape, true)
|
||||
if n != nExpected {
|
||||
t.Fatalf("unexpected n for nextUnqotedChar(%q, '%c', %v); got %d; want %d", s, ch, noUnescape, n, nExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f(``, ' ', false, -1)
|
||||
f(``, ' ', true, -1)
|
||||
f(`""`, ' ', false, -1)
|
||||
f(`""`, ' ', true, -1)
|
||||
f(`"foo bar\" " baz`, ' ', false, 12)
|
||||
f(`"foo bar\" " baz`, ' ', true, 10)
|
||||
}
|
||||
|
||||
func TestNextUnescapedChar(t *testing.T) {
|
||||
f := func(s string, ch byte, noUnescape bool, nExpected int) {
|
||||
t.Helper()
|
||||
n := nextUnescapedChar(s, ch, noUnescape)
|
||||
if n != nExpected {
|
||||
t.Fatalf("unexpected n for nextUnescapedChar(%q, '%c', %v); got %d; want %d", s, ch, noUnescape, n, nExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("", ' ', true, -1)
|
||||
f("", ' ', false, -1)
|
||||
f(" ", ' ', true, 0)
|
||||
f(" ", ' ', false, 0)
|
||||
f("x y", ' ', true, 1)
|
||||
f("x y", ' ', false, 1)
|
||||
f(`x\ y`, ' ', true, 2)
|
||||
f(`x\ y`, ' ', false, 3)
|
||||
f(`\\,`, ',', true, 2)
|
||||
f(`\\,`, ',', false, 2)
|
||||
f(`\\\=`, '=', true, 3)
|
||||
f(`\\\=`, '=', false, -1)
|
||||
f(`\\\=aa`, '=', true, 3)
|
||||
f(`\\\=aa`, '=', false, -1)
|
||||
f(`\\\=a=a`, '=', true, 3)
|
||||
f(`\\\=a=a`, '=', false, 5)
|
||||
f(`a\`, ' ', true, -1)
|
||||
f(`a\`, ' ', false, -1)
|
||||
}
|
||||
|
||||
func TestUnescapeTagValue(t *testing.T) {
|
||||
f := func(s, sExpected string) {
|
||||
t.Helper()
|
||||
ss := unescapeTagValue(s, false)
|
||||
if ss != sExpected {
|
||||
t.Fatalf("unexpected value for %q; got %q; want %q", s, ss, sExpected)
|
||||
}
|
||||
}
|
||||
|
||||
f("", "")
|
||||
f("x", "x")
|
||||
f("foobar", "foobar")
|
||||
f("привет", "привет")
|
||||
f(`\a\b\cd`, `\a\b\cd`)
|
||||
f(`\`, `\`)
|
||||
f(`foo\`, `foo\`)
|
||||
f(`\,foo\\\=\ bar`, `,foo\= bar`)
|
||||
}
|
||||
|
||||
func TestRowsUnmarshalFailure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
if err := rows.Unmarshal(s); err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
}
|
||||
|
||||
// Try again
|
||||
if err := rows.Unmarshal(s); err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
}
|
||||
}
|
||||
|
||||
// Missing measurement
|
||||
f(",foo=bar baz=123")
|
||||
|
||||
// No fields
|
||||
f("foo")
|
||||
f("foo,bar=baz 1234")
|
||||
|
||||
// Missing tag value
|
||||
f("foo,bar")
|
||||
f("foo,bar baz")
|
||||
f("foo,bar= baz")
|
||||
f("foo,bar=123, 123")
|
||||
|
||||
// Missing tag name
|
||||
f("foo,=bar baz=234")
|
||||
|
||||
// Missing field value
|
||||
f("foo bar")
|
||||
f("foo bar=")
|
||||
f("foo bar=,baz=23 123")
|
||||
f("foo bar=1, 123")
|
||||
f(`foo bar=" 123`)
|
||||
f(`foo bar="123`)
|
||||
f(`foo bar=",123`)
|
||||
f(`foo bar=a"", 123`)
|
||||
|
||||
// Missing field name
|
||||
f("foo =123")
|
||||
f("foo =123\nbar")
|
||||
|
||||
// Invalid timestamp
|
||||
f("foo bar=123 baz")
|
||||
}
|
||||
|
||||
func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
f := func(s string, rowsExpected *Rows) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
t.Fatalf("cannot unmarshal %q: %s", s, err)
|
||||
}
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
|
||||
// Try unmarshaling again
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
t.Fatalf("cannot unmarshal %q: %s", s, err)
|
||||
}
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
|
||||
rows.Reset()
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("non-empty rows after reset: %+v", rows.Rows)
|
||||
}
|
||||
}
|
||||
|
||||
// Empty line
|
||||
f("", &Rows{})
|
||||
f("\n\n", &Rows{})
|
||||
|
||||
// Minimal line without tags and timestamp
|
||||
f("foo bar=123", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Fields: []Field{{
|
||||
Key: "bar",
|
||||
Value: 123,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
f("foo bar=123\n", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Fields: []Field{{
|
||||
Key: "bar",
|
||||
Value: 123,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
|
||||
// Line without tags and with a timestamp.
|
||||
f("foo bar=123.45 -345", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Fields: []Field{{
|
||||
Key: "bar",
|
||||
Value: 123.45,
|
||||
}},
|
||||
Timestamp: -345,
|
||||
}},
|
||||
})
|
||||
|
||||
// Line with a single tag
|
||||
f("foo,tag1=xyz bar=123", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Tags: []Tag{{
|
||||
Key: "tag1",
|
||||
Value: "xyz",
|
||||
}},
|
||||
Fields: []Field{{
|
||||
Key: "bar",
|
||||
Value: 123,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
|
||||
// Line with multiple tags
|
||||
f("foo,tag1=xyz,tag2=43as bar=123", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "tag1",
|
||||
Value: "xyz",
|
||||
},
|
||||
{
|
||||
Key: "tag2",
|
||||
Value: "43as",
|
||||
},
|
||||
},
|
||||
Fields: []Field{{
|
||||
Key: "bar",
|
||||
Value: 123,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
|
||||
// Line with empty tag values
|
||||
f("foo,tag1=xyz,tagN=,tag2=43as bar=123", &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "tag1",
|
||||
Value: "xyz",
|
||||
},
|
||||
{
|
||||
Key: "tagN",
|
||||
Value: "",
|
||||
},
|
||||
{
|
||||
Key: "tag2",
|
||||
Value: "43as",
|
||||
},
|
||||
},
|
||||
Fields: []Field{{
|
||||
Key: "bar",
|
||||
Value: 123,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
|
||||
// Line with multiple tags, multiple fields and timestamp
|
||||
f(`system,host=ip-172-16-10-144 uptime_format="3 days, 21:01" 1557761040000000000`, &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "system",
|
||||
Tags: []Tag{{
|
||||
Key: "host",
|
||||
Value: "ip-172-16-10-144",
|
||||
}},
|
||||
Fields: []Field{{
|
||||
Key: "uptime_format",
|
||||
Value: 0,
|
||||
}},
|
||||
Timestamp: 1557761040000000000,
|
||||
}},
|
||||
})
|
||||
f(`foo,tag1=xyz,tag2=43as bar=-123e4,x=True,y=-45i,z=f,aa="f,= \"a",bb=23u 48934`, &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: "foo",
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "tag1",
|
||||
Value: "xyz",
|
||||
},
|
||||
{
|
||||
Key: "tag2",
|
||||
Value: "43as",
|
||||
},
|
||||
},
|
||||
Fields: []Field{
|
||||
{
|
||||
Key: "bar",
|
||||
Value: -123e4,
|
||||
},
|
||||
{
|
||||
Key: "x",
|
||||
Value: 1,
|
||||
},
|
||||
{
|
||||
Key: "y",
|
||||
Value: -45,
|
||||
},
|
||||
{
|
||||
Key: "z",
|
||||
Value: 0,
|
||||
},
|
||||
{
|
||||
Key: "aa",
|
||||
Value: 0,
|
||||
},
|
||||
{
|
||||
Key: "bb",
|
||||
Value: 23,
|
||||
},
|
||||
},
|
||||
Timestamp: 48934,
|
||||
}},
|
||||
})
|
||||
|
||||
// Escape chars
|
||||
f(`fo\,bar\=baz,x\==\\a\,\=\q\ \\\a\=\,=4.34`, &Rows{
|
||||
Rows: []Row{{
|
||||
Measurement: `fo,bar=baz`,
|
||||
Tags: []Tag{{
|
||||
Key: `x=`,
|
||||
Value: `\a,=\q `,
|
||||
}},
|
||||
Fields: []Field{{
|
||||
Key: `\\a=,`,
|
||||
Value: 4.34,
|
||||
}},
|
||||
}},
|
||||
})
|
||||
|
||||
// Multiple lines
|
||||
f("foo,tag=xyz field=1.23 48934\n"+
|
||||
"bar x=-1i\n\n", &Rows{
|
||||
Rows: []Row{
|
||||
{
|
||||
Measurement: "foo",
|
||||
Tags: []Tag{{
|
||||
Key: "tag",
|
||||
Value: "xyz",
|
||||
}},
|
||||
Fields: []Field{{
|
||||
Key: "field",
|
||||
Value: 1.23,
|
||||
}},
|
||||
Timestamp: 48934,
|
||||
},
|
||||
{
|
||||
Measurement: "bar",
|
||||
Fields: []Field{{
|
||||
Key: "x",
|
||||
Value: -1,
|
||||
}},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
20
app/vminsert/influx/parser_timing_test.go
Normal file
|
@@ -0,0 +1,20 @@
|
|||
package influx
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkRowsUnmarshal(b *testing.B) {
|
||||
s := `cpu usage_user=1.23,usage_system=4.34,usage_iowait=0.1112 1234556768`
|
||||
b.SetBytes(int64(len(s)))
|
||||
b.ReportAllocs()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var rows Rows
|
||||
for pb.Next() {
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
panic(fmt.Errorf("cannot unmarshal %q: %s", s, err))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
244
app/vminsert/influx/request_handler.go
Normal file
|
@@ -0,0 +1,244 @@
|
|||
package influx
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="influx"}`)
|
||||
|
||||
// InsertHandler processes remote write for influx line protocol.
|
||||
//
|
||||
// See https://github.com/influxdata/influxdb/blob/4cbdc197b8117fee648d62e2e5be75c6575352f0/tsdb/README.md
|
||||
func InsertHandler(req *http.Request) error {
|
||||
return concurrencylimiter.Do(func() error {
|
||||
return insertHandlerInternal(req)
|
||||
})
|
||||
}
|
||||
|
||||
func insertHandlerInternal(req *http.Request) error {
|
||||
influxReadCalls.Inc()
|
||||
|
||||
r := req.Body
|
||||
if req.Header.Get("Content-Encoding") == "gzip" {
|
||||
zr, err := getGzipReader(r)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot read gzipped influx line protocol data: %s", err)
|
||||
}
|
||||
defer putGzipReader(zr)
|
||||
r = zr
|
||||
}
|
||||
|
||||
q := req.URL.Query()
|
||||
tsMultiplier := int64(1e6)
|
||||
switch q.Get("precision") {
|
||||
case "ns":
|
||||
tsMultiplier = 1e6
|
||||
case "u":
|
||||
tsMultiplier = 1e3
|
||||
case "ms":
|
||||
tsMultiplier = 1
|
||||
case "s":
|
||||
tsMultiplier = -1e3
|
||||
case "m":
|
||||
tsMultiplier = -1e3 * 60
|
||||
case "h":
|
||||
tsMultiplier = -1e3 * 3600
|
||||
}
|
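// Sign convention: a positive multiplier means incoming timestamps are finer than milliseconds and are divided below; a negative multiplier means they are coarser and are multiplied by its absolute value.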
||||
|
||||
// Read db tag from https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint
|
||||
db := q.Get("db")
|
||||
|
||||
ctx := getPushCtx()
|
||||
defer putPushCtx(ctx)
|
||||
for ctx.Read(r, tsMultiplier) {
|
||||
if err := ctx.InsertRows(db); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return ctx.Error()
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) InsertRows(db string) error {
|
||||
rows := ctx.Rows.Rows
|
||||
rowsLen := 0
|
||||
for i := range rows {
|
||||
rowsLen += len(rows[i].Tags)
|
||||
}
|
||||
ic := &ctx.Common
|
||||
ic.Reset(rowsLen)
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
ic.Labels = ic.Labels[:0]
|
||||
ic.AddLabel("db", db)
|
||||
for j := range r.Tags {
|
||||
tag := &r.Tags[j]
|
||||
ic.AddLabel(tag.Key, tag.Value)
|
||||
}
|
||||
ctx.metricNameBuf = storage.MarshalMetricNameRaw(ctx.metricNameBuf[:0], ic.Labels)
|
||||
ctx.metricGroupBuf = append(ctx.metricGroupBuf[:0], r.Measurement...)
|
||||
ctx.metricGroupBuf = append(ctx.metricGroupBuf, '.')
|
||||
metricGroupPrefixLen := len(ctx.metricGroupBuf)
|
||||
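// Every influx field is written as its own time series named "<measurement>.<field key>", sharing the db label and the tags marshaled above.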
for j := range r.Fields {
|
||||
f := &r.Fields[j]
|
||||
ctx.metricGroupBuf = append(ctx.metricGroupBuf[:metricGroupPrefixLen], f.Key...)
|
||||
metricGroup := bytesutil.ToUnsafeString(ctx.metricGroupBuf)
|
||||
ic.Labels = ic.Labels[:0]
|
||||
ic.AddLabel("", metricGroup)
|
||||
ic.WriteDataPoint(ctx.metricNameBuf, ic.Labels[:1], r.Timestamp, f.Value)
|
||||
}
|
||||
rowsInserted.Add(len(r.Fields))
|
||||
}
|
||||
return ic.FlushBufs()
|
||||
}
|
||||
|
||||
func getGzipReader(r io.Reader) (*gzip.Reader, error) {
|
||||
v := gzipReaderPool.Get()
|
||||
if v == nil {
|
||||
return gzip.NewReader(r)
|
||||
}
|
||||
zr := v.(*gzip.Reader)
|
||||
if err := zr.Reset(r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return zr, nil
|
||||
}
|
||||
|
||||
func putGzipReader(zr *gzip.Reader) {
|
||||
_ = zr.Close()
|
||||
gzipReaderPool.Put(zr)
|
||||
}
|
||||
|
||||
var gzipReaderPool sync.Pool
|
||||
|
||||
const maxReadPacketSize = 4 * 1024 * 1024
|
||||
|
||||
func (ctx *pushCtx) Read(r io.Reader, tsMultiplier int64) bool {
|
||||
if ctx.err != nil {
|
||||
return false
|
||||
}
|
||||
lr := io.LimitReader(r, maxReadPacketSize)
|
||||
ctx.reqBuf.Reset()
|
||||
ctx.reqBuf.B = append(ctx.reqBuf.B[:0], ctx.tailBuf...)
|
||||
n, err := io.CopyBuffer(&ctx.reqBuf, lr, ctx.copyBuf[:])
|
||||
if err != nil {
|
||||
influxReadErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot read influx line protocol data: %s", err)
|
||||
return false
|
||||
}
|
||||
if n < maxReadPacketSize {
|
||||
// Mark the end of stream.
|
||||
ctx.err = io.EOF
|
||||
}
|
||||
|
||||
// Parse all the rows until the last newline in ctx.reqBuf.B
|
||||
nn := bytes.LastIndexByte(ctx.reqBuf.B, '\n')
|
||||
ctx.tailBuf = ctx.tailBuf[:0]
|
||||
if nn >= 0 {
|
||||
ctx.tailBuf = append(ctx.tailBuf[:0], ctx.reqBuf.B[nn+1:]...)
|
||||
ctx.reqBuf.B = ctx.reqBuf.B[:nn]
|
||||
}
|
||||
if err = ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf.B)); err != nil {
|
||||
influxUnmarshalErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot unmarshal influx line protocol data with size %d: %s", len(ctx.reqBuf.B), err)
|
||||
return false
|
||||
}
|
||||
|
||||
// Adjust timestamps according to tsMultiplier
|
||||
currentTs := time.Now().UnixNano() / 1e6
|
||||
if tsMultiplier >= 1 {
|
||||
for i := range ctx.Rows.Rows {
|
||||
row := &ctx.Rows.Rows[i]
|
||||
if row.Timestamp == 0 {
|
||||
row.Timestamp = currentTs
|
||||
} else {
|
||||
row.Timestamp /= tsMultiplier
|
||||
}
|
||||
}
|
||||
} else if tsMultiplier < 0 {
|
||||
tsMultiplier = -tsMultiplier
|
||||
for i := range ctx.Rows.Rows {
|
||||
row := &ctx.Rows.Rows[i]
|
||||
if row.Timestamp == 0 {
|
||||
row.Timestamp = currentTs
|
||||
} else {
|
||||
row.Timestamp *= tsMultiplier
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
var (
|
||||
influxReadCalls = metrics.NewCounter(`vm_read_calls_total{name="influx"}`)
|
||||
influxReadErrors = metrics.NewCounter(`vm_read_errors_total{name="influx"}`)
|
||||
influxUnmarshalErrors = metrics.NewCounter(`vm_unmarshal_errors_total{name="influx"}`)
|
||||
)
|
||||
|
||||
type pushCtx struct {
|
||||
Rows Rows
|
||||
Common common.InsertCtx
|
||||
|
||||
reqBuf bytesutil.ByteBuffer
|
||||
tailBuf []byte
|
||||
copyBuf [16 * 1024]byte
|
||||
metricNameBuf []byte
|
||||
metricGroupBuf []byte
|
||||
|
||||
err error
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) Error() error {
|
||||
if ctx.err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return ctx.err
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) reset() {
|
||||
ctx.Rows.Reset()
|
||||
ctx.Common.Reset(0)
|
||||
|
||||
ctx.reqBuf.Reset()
|
||||
ctx.tailBuf = ctx.tailBuf[:0]
|
||||
ctx.metricNameBuf = ctx.metricNameBuf[:0]
|
||||
ctx.metricGroupBuf = ctx.metricGroupBuf[:0]
|
||||
|
||||
ctx.err = nil
|
||||
}
|
||||
|
||||
func getPushCtx() *pushCtx {
|
||||
select {
|
||||
case ctx := <-pushCtxPoolCh:
|
||||
return ctx
|
||||
default:
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
}
|
||||
}
|
||||
|
||||
func putPushCtx(ctx *pushCtx) {
|
||||
ctx.reset()
|
||||
select {
|
||||
case pushCtxPoolCh <- ctx:
|
||||
default:
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
var pushCtxPool sync.Pool
|
||||
var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1))
|
84
app/vminsert/main.go
Normal file
|
@@ -0,0 +1,84 @@
|
|||
package vminsert
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/graphite"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/influx"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentsdb"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/prometheus"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
graphiteListenAddr = flag.String("graphiteListenAddr", "", "TCP and UDP address to listen for Graphite plaintext data. Usually :2003 must be set. Doesn't work if empty")
|
||||
opentsdbListenAddr = flag.String("opentsdbListenAddr", "", "TCP and UDP address to listen for OpenTSDB put messages. Usually :4242 must be set. Doesn't work if empty")
|
||||
maxInsertRequestSize = flag.Int("maxInsertRequestSize", 32*1024*1024, "The maximum size of a single insert request in bytes")
|
||||
)
|
||||
|
||||
// Init initializes vminsert.
|
||||
func Init() {
|
||||
if len(*graphiteListenAddr) > 0 {
|
||||
go graphite.Serve(*graphiteListenAddr)
|
||||
}
|
||||
if len(*opentsdbListenAddr) > 0 {
|
||||
go opentsdb.Serve(*opentsdbListenAddr)
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops vminsert.
|
||||
func Stop() {
|
||||
if len(*graphiteListenAddr) > 0 {
|
||||
graphite.Stop()
|
||||
}
|
||||
if len(*opentsdbListenAddr) > 0 {
|
||||
opentsdb.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
// RequestHandler is a handler for Prometheus remote storage write API
|
||||
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
path := strings.Replace(r.URL.Path, "//", "/", -1)
|
||||
switch path {
|
||||
case "/api/v1/write":
|
||||
prometheusWriteRequests.Inc()
|
||||
if err := prometheus.InsertHandler(r, int64(*maxInsertRequestSize)); err != nil {
|
||||
prometheusWriteErrors.Inc()
|
||||
httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err)
|
||||
return true
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return true
|
||||
case "/write", "/api/v2/write":
|
||||
influxWriteRequests.Inc()
|
||||
if err := influx.InsertHandler(r); err != nil {
|
||||
influxWriteErrors.Inc()
|
||||
httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err)
|
||||
return true
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return true
|
||||
case "/query":
|
||||
// Emulate fake response for influx query
|
||||
influxQueryRequests.Inc()
|
||||
fmt.Fprintf(w, `{"results":[{"series":[{"values":[]}]}]}`)
|
||||
return true
|
||||
default:
|
||||
// This is not our link
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
prometheusWriteRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/write", protocol="prometheus"}`)
|
||||
prometheusWriteErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/write", protocol="prometheus"}`)
|
||||
|
||||
influxWriteRequests = metrics.NewCounter(`vm_http_requests_total{path="/write", protocol="influx"}`)
|
||||
influxWriteErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/write", protocol="influx"}`)
|
||||
|
||||
influxQueryRequests = metrics.NewCounter(`vm_http_requests_total{path="/query", protocol="influx"}`)
|
||||
)
|
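For reference, a minimal sketch (not part of the diff) of pushing influx line protocol data through the /write route handled above. The listen address is an assumption, since the HTTP server configuration is not shown in this file; the 204 status check matches the StatusNoContent written by the handler.

package main

import (
	"log"
	"net/http"
	"strings"
)

func main() {
	// The address is an assumption; replace it with the HTTP listen address the server actually uses.
	const writeURL = "http://127.0.0.1:8428/write"

	body := strings.NewReader("cpu,host=web1 usage_user=1.23,usage_system=4.5\n")
	resp, err := http.Post(writeURL, "text/plain", body)
	if err != nil {
		log.Fatalf("cannot send influx line: %s", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		log.Fatalf("unexpected status code: %d", resp.StatusCode)
	}
}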
175
app/vminsert/opentsdb/parser.go
Normal file
|
@@ -0,0 +1,175 @@
|
|||
package opentsdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/valyala/fastjson/fastfloat"
|
||||
)
|
||||
|
||||
// Rows contains parsed OpenTSDB rows.
|
||||
type Rows struct {
|
||||
Rows []Row
|
||||
|
||||
tagsPool []Tag
|
||||
}
|
||||
|
||||
// Reset resets rs.
|
||||
func (rs *Rows) Reset() {
|
||||
// Release references to objects, so they can be GC'ed.
|
||||
|
||||
for i := range rs.Rows {
|
||||
rs.Rows[i].reset()
|
||||
}
|
||||
rs.Rows = rs.Rows[:0]
|
||||
|
||||
for i := range rs.tagsPool {
|
||||
rs.tagsPool[i].reset()
|
||||
}
|
||||
rs.tagsPool = rs.tagsPool[:0]
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals OpenTSDB put rows from s.
|
||||
//
|
||||
// See http://opentsdb.net/docs/build/html/api_telnet/put.html
|
||||
//
|
||||
// s must remain unchanged while rs is in use.
|
||||
func (rs *Rows) Unmarshal(s string) error {
|
||||
var err error
|
||||
rs.Rows, rs.tagsPool, err = unmarshalRows(rs.Rows[:0], s, rs.tagsPool[:0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Row is a single OpenTSDB row.
|
||||
type Row struct {
|
||||
Metric string
|
||||
Tags []Tag
|
||||
Value float64
|
||||
Timestamp int64
|
||||
}
|
||||
|
||||
func (r *Row) reset() {
|
||||
r.Metric = ""
|
||||
r.Tags = nil
|
||||
r.Value = 0
|
||||
r.Timestamp = 0
|
||||
}
|
||||
|
||||
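// unmarshal parses a single OpenTSDB telnet-style line of the form:
//   put <metric> <timestamp> <value> <tagk1=tagv1> [<tagkN=tagvN> ...]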
func (r *Row) unmarshal(s string, tagsPool []Tag) ([]Tag, error) {
|
||||
r.reset()
|
||||
if !strings.HasPrefix(s, "put ") {
|
||||
return tagsPool, fmt.Errorf("missing `put ` prefix in %q", s)
|
||||
}
|
||||
s = s[len("put "):]
|
||||
n := strings.IndexByte(s, ' ')
|
||||
if n < 0 {
|
||||
return tagsPool, fmt.Errorf("cannot find whitespace between metric and timestamp in %q", s)
|
||||
}
|
||||
r.Metric = s[:n]
|
||||
tail := s[n+1:]
|
||||
n = strings.IndexByte(tail, ' ')
|
||||
if n < 0 {
|
||||
return tagsPool, fmt.Errorf("cannot find whitespace between timestamp and value in %q", s)
|
||||
}
|
||||
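// Parse the timestamp as a float and truncate it, so fractional timestamps (e.g. as sent by Akumuli) are accepted.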
r.Timestamp = int64(fastfloat.ParseBestEffort(tail[:n]))
|
||||
tail = tail[n+1:]
|
||||
n = strings.IndexByte(tail, ' ')
|
||||
if n < 0 {
|
||||
return tagsPool, fmt.Errorf("cannot find whitespace between value and the first tag in %q", s)
|
||||
}
|
||||
r.Value = fastfloat.ParseBestEffort(tail[:n])
|
||||
var err error
|
||||
tagsStart := len(tagsPool)
|
||||
tagsPool, err = unmarshalTags(tagsPool, tail[n+1:])
|
||||
if err != nil {
|
||||
return tagsPool, fmt.Errorf("cannot unmarshal tags in %q: %s", s, err)
|
||||
}
|
||||
tags := tagsPool[tagsStart:]
|
||||
r.Tags = tags[:len(tags):len(tags)]
|
||||
return tagsPool, nil
|
||||
}
|
||||
|
||||
func unmarshalRows(dst []Row, s string, tagsPool []Tag) ([]Row, []Tag, error) {
|
||||
for len(s) > 0 {
|
||||
n := strings.IndexByte(s, '\n')
|
||||
if n == 0 {
|
||||
// Skip empty line
|
||||
s = s[1:]
|
||||
continue
|
||||
}
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Row{})
|
||||
}
|
||||
r := &dst[len(dst)-1]
|
||||
if n < 0 {
|
||||
// The last line.
|
||||
var err error
|
||||
tagsPool, err = r.unmarshal(s, tagsPool)
|
||||
if err != nil {
|
||||
return dst, tagsPool, err
|
||||
}
|
||||
return dst, tagsPool, nil
|
||||
}
|
||||
var err error
|
||||
tagsPool, err = r.unmarshal(s[:n], tagsPool)
|
||||
if err != nil {
|
||||
return dst, tagsPool, err
|
||||
}
|
||||
s = s[n+1:]
|
||||
}
|
||||
return dst, tagsPool, nil
|
||||
}
|
||||
|
||||
func unmarshalTags(dst []Tag, s string) ([]Tag, error) {
|
||||
for {
|
||||
if cap(dst) > len(dst) {
|
||||
dst = dst[:len(dst)+1]
|
||||
} else {
|
||||
dst = append(dst, Tag{})
|
||||
}
|
||||
tag := &dst[len(dst)-1]
|
||||
|
||||
n := strings.IndexByte(s, ' ')
|
||||
if n < 0 {
|
||||
// The last tag found
|
||||
if err := tag.unmarshal(s); err != nil {
|
||||
return dst[:len(dst)-1], err
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
if err := tag.unmarshal(s[:n]); err != nil {
|
||||
return dst[:len(dst)-1], err
|
||||
}
|
||||
s = s[n+1:]
|
||||
}
|
||||
}
|
||||
|
||||
// Tag is an OpenTSDB tag.
|
||||
type Tag struct {
|
||||
Key string
|
||||
Value string
|
||||
}
|
||||
|
||||
func (t *Tag) reset() {
|
||||
t.Key = ""
|
||||
t.Value = ""
|
||||
}
|
||||
|
||||
func (t *Tag) unmarshal(s string) error {
|
||||
t.reset()
|
||||
n := strings.IndexByte(s, '=')
|
||||
if n < 0 {
|
||||
return fmt.Errorf("missing tag value for %q", s)
|
||||
}
|
||||
t.Key = s[:n]
|
||||
if len(t.Key) == 0 {
|
||||
return fmt.Errorf("tag key cannot be empty for %q", s)
|
||||
}
|
||||
t.Value = s[n+1:]
|
||||
return nil
|
||||
}
|
203
app/vminsert/opentsdb/parser_test.go
Normal file
|
@@ -0,0 +1,203 @@
|
|||
package opentsdb
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestRowsUnmarshalFailure(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
if err := rows.Unmarshal(s); err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
}
|
||||
|
||||
// Try again
|
||||
if err := rows.Unmarshal(s); err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
}
|
||||
}
|
||||
|
||||
// Missing put prefix
|
||||
f("xx")
|
||||
|
||||
// Missing timestamp
|
||||
f("put aaa")
|
||||
|
||||
// Missing value
|
||||
f("put aaa 1123")
|
||||
|
||||
// Invalid timestamp
|
||||
f("put aaa timestamp")
|
||||
|
||||
// Missing first tag
|
||||
f("put aaa 123 43")
|
||||
|
||||
// Invalid value
|
||||
f("put aaa 123 invalid-value")
|
||||
|
||||
// Invalid multiline
|
||||
f("put aaa\nbbb 123 34")
|
||||
|
||||
// Invalid tag
|
||||
f("put aaa 123 4.5 foo")
|
||||
f("put aaa 123 4.5 =")
|
||||
f("put aaa 123 4.5 =foo")
|
||||
f("put aaa 123 4.5 =foo a=b")
|
||||
}
|
||||
|
||||
func TestRowsUnmarshalSuccess(t *testing.T) {
|
||||
f := func(s string, rowsExpected *Rows) {
|
||||
t.Helper()
|
||||
var rows Rows
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
t.Fatalf("cannot unmarshal %q: %s", s, err)
|
||||
}
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
|
||||
// Try unmarshaling again
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
t.Fatalf("cannot unmarshal %q: %s", s, err)
|
||||
}
|
||||
if !reflect.DeepEqual(rows.Rows, rowsExpected.Rows) {
|
||||
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows.Rows, rowsExpected.Rows)
|
||||
}
|
||||
|
||||
rows.Reset()
|
||||
if len(rows.Rows) != 0 {
|
||||
t.Fatalf("non-empty rows after reset: %+v", rows.Rows)
|
||||
}
|
||||
}
|
||||
|
||||
// Empty line
|
||||
f("", &Rows{})
|
||||
f("\n\n", &Rows{})
|
||||
|
||||
// Single line
|
||||
f("put foobar 789 -123.456 a=b", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foobar",
|
||||
Value: -123.456,
|
||||
Timestamp: 789,
|
||||
Tags: []Tag{{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
}},
|
||||
}},
|
||||
})
|
||||
// Empty tag value
|
||||
f("put foobar 789 -123.456 a= b=c", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foobar",
|
||||
Value: -123.456,
|
||||
Timestamp: 789,
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "a",
|
||||
Value: "",
|
||||
},
|
||||
{
|
||||
Key: "b",
|
||||
Value: "c",
|
||||
},
|
||||
},
|
||||
}},
|
||||
})
|
||||
// Fractional timestamp that is supported by Akumuli.
|
||||
f("put foobar 789.4 -123.456 a=b", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foobar",
|
||||
Value: -123.456,
|
||||
Timestamp: 789,
|
||||
Tags: []Tag{{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
}},
|
||||
}},
|
||||
})
|
||||
f("put foo.bar 789 123.456 a=b\n", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foo.bar",
|
||||
Value: 123.456,
|
||||
Timestamp: 789,
|
||||
Tags: []Tag{{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
}},
|
||||
}},
|
||||
})
|
||||
|
||||
// Tags
|
||||
f("put foo 2 1 bar=baz", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foo",
|
||||
Tags: []Tag{{
|
||||
Key: "bar",
|
||||
Value: "baz",
|
||||
}},
|
||||
Value: 1,
|
||||
Timestamp: 2,
|
||||
}},
|
||||
})
|
||||
f("put foo 2 1 bar=baz x=y", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foo",
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "bar",
|
||||
Value: "baz",
|
||||
},
|
||||
{
|
||||
Key: "x",
|
||||
Value: "y",
|
||||
},
|
||||
},
|
||||
Value: 1,
|
||||
Timestamp: 2,
|
||||
}},
|
||||
})
|
||||
f("put foo 2 1 bar=baz=aaa x=y", &Rows{
|
||||
Rows: []Row{{
|
||||
Metric: "foo",
|
||||
Tags: []Tag{
|
||||
{
|
||||
Key: "bar",
|
||||
Value: "baz=aaa",
|
||||
},
|
||||
{
|
||||
Key: "x",
|
||||
Value: "y",
|
||||
},
|
||||
},
|
||||
Value: 1,
|
||||
Timestamp: 2,
|
||||
}},
|
||||
})
|
||||
|
||||
// Multi lines
|
||||
f("put foo 2 0.3 a=b\nput bar.baz 43 0.34 a=b\n", &Rows{
|
||||
Rows: []Row{
|
||||
{
|
||||
Metric: "foo",
|
||||
Value: 0.3,
|
||||
Timestamp: 2,
|
||||
Tags: []Tag{{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
}},
|
||||
},
|
||||
{
|
||||
Metric: "bar.baz",
|
||||
Value: 0.34,
|
||||
Timestamp: 43,
|
||||
Tags: []Tag{{
|
||||
Key: "a",
|
||||
Value: "b",
|
||||
}},
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
24
app/vminsert/opentsdb/parser_timing_test.go
Normal file
|
@@ -0,0 +1,24 @@
|
|||
package opentsdb
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkRowsUnmarshal(b *testing.B) {
|
||||
s := `cpu.usage_user 1234556768 1.23 a=b
|
||||
cpu.usage_system 1234556768 23.344 a=b
|
||||
cpu.usage_iowait 1234556769 3.3443 a=b
|
||||
cpu.usage_irq 1234556768 0.34432 a=b
|
||||
`
|
||||
b.SetBytes(int64(len(s)))
|
||||
b.ReportAllocs()
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var rows Rows
|
||||
for pb.Next() {
|
||||
if err := rows.Unmarshal(s); err != nil {
|
||||
panic(fmt.Errorf("cannot unmarshal %q: %s", s, err))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
166
app/vminsert/opentsdb/request_handler.go
Normal file
|
@@ -0,0 +1,166 @@
|
|||
package opentsdb
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="opentsdb"}`)
|
||||
|
||||
// insertHandler processes remote write for OpenTSDB put protocol.
|
||||
//
|
||||
// See http://opentsdb.net/docs/build/html/api_telnet/put.html
|
||||
func insertHandler(r io.Reader) error {
|
||||
return concurrencylimiter.Do(func() error {
|
||||
return insertHandlerInternal(r)
|
||||
})
|
||||
}
|
||||
|
||||
func insertHandlerInternal(r io.Reader) error {
|
||||
ctx := getPushCtx()
|
||||
defer putPushCtx(ctx)
|
||||
for ctx.Read(r) {
|
||||
if err := ctx.InsertRows(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return ctx.Error()
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) InsertRows() error {
|
||||
rows := ctx.Rows.Rows
|
||||
ic := &ctx.Common
|
||||
ic.Reset(len(rows))
|
||||
for i := range rows {
|
||||
r := &rows[i]
|
||||
ic.Labels = ic.Labels[:0]
|
||||
ic.AddLabel("", r.Metric)
|
||||
for j := range r.Tags {
|
||||
tag := &r.Tags[j]
|
||||
ic.AddLabel(tag.Key, tag.Value)
|
||||
}
|
||||
ic.WriteDataPoint(nil, ic.Labels, r.Timestamp, r.Value)
|
||||
}
|
||||
rowsInserted.Add(len(rows))
|
||||
return ic.FlushBufs()
|
||||
}
|
||||
|
||||
const maxReadPacketSize = 4 * 1024 * 1024
|
||||
|
||||
const flushTimeout = 3 * time.Second
|
||||
|
||||
func (ctx *pushCtx) Read(r io.Reader) bool {
|
||||
opentsdbReadCalls.Inc()
|
||||
if ctx.err != nil {
|
||||
return false
|
||||
}
|
||||
if c, ok := r.(net.Conn); ok {
|
||||
if err := c.SetReadDeadline(time.Now().Add(flushTimeout)); err != nil {
|
||||
opentsdbReadErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot set read deadline: %s", err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
lr := io.LimitReader(r, maxReadPacketSize)
|
||||
ctx.reqBuf.Reset()
|
||||
ctx.reqBuf.B = append(ctx.reqBuf.B[:0], ctx.tailBuf...)
|
||||
n, err := io.CopyBuffer(&ctx.reqBuf, lr, ctx.copyBuf[:])
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok && ne.Timeout() {
|
||||
// Flush the read data on timeout and try reading again.
|
||||
} else {
|
||||
opentsdbReadErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot read OpenTSDB put protocol data: %s", err)
|
||||
return false
|
||||
}
|
||||
} else if n < maxReadPacketSize {
|
||||
// Mark the end of stream.
|
||||
ctx.err = io.EOF
|
||||
}
|
||||
|
||||
// Parse all the rows until the last newline in ctx.reqBuf.B
|
||||
nn := bytes.LastIndexByte(ctx.reqBuf.B, '\n')
|
||||
ctx.tailBuf = ctx.tailBuf[:0]
|
||||
if nn >= 0 {
|
||||
ctx.tailBuf = append(ctx.tailBuf[:0], ctx.reqBuf.B[nn+1:]...)
|
||||
ctx.reqBuf.B = ctx.reqBuf.B[:nn]
|
||||
}
|
||||
if err = ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf.B)); err != nil {
|
||||
opentsdbUnmarshalErrors.Inc()
|
||||
ctx.err = fmt.Errorf("cannot unmarshal OpenTSDB put protocol data with size %d: %s", len(ctx.reqBuf.B), err)
|
||||
return false
|
||||
}
|
||||
|
||||
// Convert timestamps from seconds to milliseconds
|
||||
for i := range ctx.Rows.Rows {
|
||||
ctx.Rows.Rows[i].Timestamp *= 1e3
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type pushCtx struct {
|
||||
Rows Rows
|
||||
Common common.InsertCtx
|
||||
|
||||
reqBuf bytesutil.ByteBuffer
|
||||
tailBuf []byte
|
||||
copyBuf [16 * 1024]byte
|
||||
|
||||
err error
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) Error() error {
|
||||
if ctx.err == io.EOF {
|
||||
return nil
|
||||
}
|
||||
return ctx.err
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) reset() {
|
||||
ctx.Rows.Reset()
|
||||
ctx.Common.Reset(0)
|
||||
ctx.reqBuf.Reset()
|
||||
ctx.tailBuf = ctx.tailBuf[:0]
|
||||
|
||||
ctx.err = nil
|
||||
}
|
||||
|
||||
var (
|
||||
opentsdbReadCalls = metrics.NewCounter(`vm_read_calls_total{name="opentsdb"}`)
|
||||
opentsdbReadErrors = metrics.NewCounter(`vm_read_errors_total{name="opentsdb"}`)
|
||||
opentsdbUnmarshalErrors = metrics.NewCounter(`vm_unmarshal_errors_total{name="opentsdb"}`)
|
||||
)
|
||||
|
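// getPushCtx and putPushCtx form a two-level pool: pushCtxPoolCh keeps up to
// GOMAXPROCS hot contexts, while pushCtxPool (sync.Pool) absorbs the overflow
// so unused contexts can eventually be reclaimed by the GC.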
||||
func getPushCtx() *pushCtx {
|
||||
select {
|
||||
case ctx := <-pushCtxPoolCh:
|
||||
return ctx
|
||||
default:
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
}
|
||||
}
|
||||
|
||||
func putPushCtx(ctx *pushCtx) {
|
||||
ctx.reset()
|
||||
select {
|
||||
case pushCtxPoolCh <- ctx:
|
||||
default:
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
var pushCtxPool sync.Pool
|
||||
var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1))
|
137
app/vminsert/opentsdb/server.go
Normal file
|
@ -0,0 +1,137 @@
|
|||
package opentsdb
|
||||
|
||||
import (
|
||||
"net"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
writeRequestsTCP = metrics.NewCounter(`vm_opentsdb_requests_total{name="write", net="tcp"}`)
|
||||
writeErrorsTCP = metrics.NewCounter(`vm_opentsdb_request_errors_total{name="write", net="tcp"}`)
|
||||
|
||||
writeRequestsUDP = metrics.NewCounter(`vm_opentsdb_requests_total{name="write", net="udp"}`)
|
||||
writeErrorsUDP = metrics.NewCounter(`vm_opentsdb_request_errors_total{name="write", net="udp"}`)
|
||||
)
|
||||
|
||||
// Serve starts OpenTSDB collector on the given addr.
|
||||
func Serve(addr string) {
|
||||
logger.Infof("starting TCP OpenTSDB collector at %q", addr)
|
||||
lnTCP, err := net.Listen("tcp4", addr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot start TCP OpenTSDB collector at %q: %s", addr, err)
|
||||
}
|
||||
listenerTCP = lnTCP
|
||||
|
||||
logger.Infof("starting UDP OpenTSDB collector at %q", addr)
|
||||
lnUDP, err := net.ListenPacket("udp4", addr)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot start UDP OpenTSDB collector at %q: %s", addr, err)
|
||||
}
|
||||
listenerUDP = lnUDP
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
serveTCP(listenerTCP)
|
||||
logger.Infof("stopped TCP OpenTSDB collector at %q", addr)
|
||||
}()
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
serveUDP(listenerUDP)
|
||||
logger.Infof("stopped UDP OpenTSDB collector at %q", addr)
|
||||
}()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func serveTCP(ln net.Listener) {
|
||||
for {
|
||||
c, err := ln.Accept()
|
||||
if err != nil {
|
||||
if ne, ok := err.(net.Error); ok {
|
||||
if ne.Temporary() {
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
if strings.Contains(err.Error(), "use of closed network connection") {
|
||||
break
|
||||
}
|
||||
logger.Fatalf("unrecoverable error when accepting TCP OpenTSDB connections: %s", err)
|
||||
}
|
||||
logger.Fatalf("unexpected error when accepting TCP OpenTSDB connections: %s", err)
|
||||
}
|
||||
go func() {
|
||||
writeRequestsTCP.Inc()
|
||||
if err := insertHandler(c); err != nil {
|
||||
writeErrorsTCP.Inc()
|
||||
logger.Errorf("error in TCP OpenTSDB conn %q<->%q: %s", c.LocalAddr(), c.RemoteAddr(), err)
|
||||
}
|
||||
_ = c.Close()
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
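// serveUDP reads put packets with one goroutine per CPU (GOMAXPROCS), each
// reusing a 64KB buffer, and passes every received packet to insertHandler.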
||||
func serveUDP(ln net.PacketConn) {
|
||||
gomaxprocs := runtime.GOMAXPROCS(-1)
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < gomaxprocs; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
var bb bytesutil.ByteBuffer
|
||||
bb.B = bytesutil.Resize(bb.B, 64*1024)
|
||||
for {
|
||||
bb.Reset()
|
||||
bb.B = bb.B[:cap(bb.B)]
|
||||
n, addr, err := ln.ReadFrom(bb.B)
|
||||
if err != nil {
|
||||
writeErrorsUDP.Inc()
|
||||
if ne, ok := err.(net.Error); ok {
|
||||
if ne.Temporary() {
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
if strings.Contains(err.Error(), "use of closed network connection") {
|
||||
break
|
||||
}
|
||||
}
|
||||
logger.Errorf("cannot read OpenTSDB UDP data: %s", err)
|
||||
continue
|
||||
}
|
||||
bb.B = bb.B[:n]
|
||||
writeRequestsUDP.Inc()
|
||||
if err := insertHandler(bb.NewReader()); err != nil {
|
||||
writeErrorsUDP.Inc()
|
||||
logger.Errorf("error in UDP OpenTSDB conn %q<->%q: %s", ln.LocalAddr(), addr, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
var (
|
||||
listenerTCP net.Listener
|
||||
listenerUDP net.PacketConn
|
||||
)
|
||||
|
||||
// Stop stops the server.
|
||||
func Stop() {
|
||||
logger.Infof("stopping TCP OpenTSDB server at %q...", listenerTCP.Addr())
|
||||
if err := listenerTCP.Close(); err != nil {
|
||||
logger.Errorf("cannot close TCP OpenTSDB server: %s", err)
|
||||
}
|
||||
logger.Infof("stopping UDP OpenTSDB server at %q...", listenerUDP.LocalAddr())
|
||||
if err := listenerUDP.Close(); err != nil {
|
||||
logger.Errorf("cannot close UDP OpenTSDB server: %s", err)
|
||||
}
|
||||
}
|
106
app/vminsert/prometheus/request_handler.go
Normal file
|
@ -0,0 +1,106 @@
|
|||
package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="prometheus"}`)
|
||||
|
||||
// InsertHandler processes remote write for prometheus.
|
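// The request body is expected to be a snappy-compressed prompb.WriteRequest
// protobuf, as sent by Prometheus remote_write; see ctx.Read below.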
||||
func InsertHandler(r *http.Request, maxSize int64) error {
|
||||
return concurrencylimiter.Do(func() error {
|
||||
return insertHandlerInternal(r, maxSize)
|
||||
})
|
||||
}
|
||||
|
||||
func insertHandlerInternal(r *http.Request, maxSize int64) error {
|
||||
ctx := getPushCtx()
|
||||
defer putPushCtx(ctx)
|
||||
if err := ctx.Read(r, maxSize); err != nil {
|
||||
return err
|
||||
}
|
||||
timeseries := ctx.req.Timeseries
|
||||
rowsLen := 0
|
||||
for i := range timeseries {
|
||||
rowsLen += len(timeseries[i].Samples)
|
||||
}
|
||||
ic := &ctx.Common
|
||||
ic.Reset(rowsLen)
|
||||
for i := range timeseries {
|
||||
ts := &timeseries[i]
|
||||
var metricNameRaw []byte
|
||||
for i := range ts.Samples {
|
||||
r := &ts.Samples[i]
|
||||
metricNameRaw = ic.WriteDataPointExt(metricNameRaw, ts.Labels, r.Timestamp, r.Value)
|
||||
}
|
||||
rowsInserted.Add(len(ts.Samples))
|
||||
}
|
||||
return ic.FlushBufs()
|
||||
}
|
||||
|
||||
type pushCtx struct {
|
||||
Common common.InsertCtx
|
||||
|
||||
req prompb.WriteRequest
|
||||
reqBuf []byte
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) reset() {
|
||||
ctx.Common.Reset(0)
|
||||
ctx.req.Reset()
|
||||
ctx.reqBuf = ctx.reqBuf[:0]
|
||||
}
|
||||
|
||||
func (ctx *pushCtx) Read(r *http.Request, maxSize int64) error {
|
||||
prometheusReadCalls.Inc()
|
||||
|
||||
var err error
|
||||
ctx.reqBuf, err = prompb.ReadSnappy(ctx.reqBuf[:0], r.Body, maxSize)
|
||||
if err != nil {
|
||||
prometheusReadErrors.Inc()
|
||||
return fmt.Errorf("cannot read prompb.WriteRequest: %s", err)
|
||||
}
|
||||
if err = ctx.req.Unmarshal(ctx.reqBuf); err != nil {
|
||||
prometheusUnmarshalErrors.Inc()
|
||||
return fmt.Errorf("cannot unmarshal prompb.WriteRequest with size %d bytes: %s", len(ctx.reqBuf), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
prometheusReadCalls = metrics.NewCounter(`vm_read_calls_total{name="prometheus"}`)
|
||||
prometheusReadErrors = metrics.NewCounter(`vm_read_errors_total{name="prometheus"}`)
|
||||
prometheusUnmarshalErrors = metrics.NewCounter(`vm_unmarshal_errors_total{name="prometheus"}`)
|
||||
)
|
||||
|
||||
func getPushCtx() *pushCtx {
|
||||
select {
|
||||
case ctx := <-pushCtxPoolCh:
|
||||
return ctx
|
||||
default:
|
||||
if v := pushCtxPool.Get(); v != nil {
|
||||
return v.(*pushCtx)
|
||||
}
|
||||
return &pushCtx{}
|
||||
}
|
||||
}
|
||||
|
||||
func putPushCtx(ctx *pushCtx) {
|
||||
ctx.reset()
|
||||
select {
|
||||
case pushCtxPoolCh <- ctx:
|
||||
default:
|
||||
pushCtxPool.Put(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
var pushCtxPool sync.Pool
|
||||
var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1))
|
2
app/vmselect/README.md
Normal file
|
@ -0,0 +1,2 @@
|
|||
`vmselect` performs the incoming queries and fetches the required data
|
||||
from `vmstorage`.
|
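For example, an instant query can be sent to the Prometheus-compatible endpoint exposed by `vmselect` (illustrative request; host and port depend on the deployment): `GET /api/v1/query?query=up`.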
189
app/vmselect/main.go
Normal file
|
@ -0,0 +1,189 @@
|
|||
package vmselect
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/prometheus"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
deleteAuthKey = flag.String("deleteAuthKey", "", "authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series")
|
||||
maxConcurrentRequests = flag.Int("search.maxConcurrentRequests", runtime.GOMAXPROCS(-1)*2, "The maximum number of concurrent search requests. It shouldn't exceed 2*vCPUs for better performance. See also -search.maxQueueDuration")
|
||||
maxQueueDuration = flag.Duration("search.maxQueueDuration", 10*time.Second, "The maximum time the request waits for execution when -search.maxConcurrentRequests limit is reached")
|
||||
)
|
||||
|
||||
// Init initializes vmselect
|
||||
func Init() {
|
||||
tmpDirPath := *vmstorage.DataPath + "/tmp"
|
||||
fs.RemoveDirContents(tmpDirPath)
|
||||
netstorage.InitTmpBlocksDir(tmpDirPath)
|
||||
promql.InitRollupResultCache(*vmstorage.DataPath + "/cache/rollupResult")
|
||||
concurrencyCh = make(chan struct{}, *maxConcurrentRequests)
|
||||
}
|
||||
|
||||
var concurrencyCh chan struct{}
|
||||
|
||||
// Stop stops vmselect
|
||||
func Stop() {
|
||||
promql.StopRollupResultCache()
|
||||
}
|
||||
|
||||
// RequestHandler handles remote read API requests for Prometheus
|
||||
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
// Limit the number of concurrent queries.
|
||||
// Wait for up to -search.maxQueueDuration until giving up. This should resolve short bursts in requests.
|
||||
t := time.NewTimer(*maxQueueDuration)
|
||||
select {
|
||||
case concurrencyCh <- struct{}{}:
|
||||
t.Stop()
|
||||
defer func() { <-concurrencyCh }()
|
||||
case <-t.C:
|
||||
httpserver.Errorf(w, "cannot handle more than %d concurrent requests", cap(concurrencyCh))
|
||||
return true
|
||||
}
|
||||
|
||||
path := strings.Replace(r.URL.Path, "//", "/", -1)
|
||||
if strings.HasPrefix(path, "/api/v1/label/") {
|
||||
s := r.URL.Path[len("/api/v1/label/"):]
|
||||
if strings.HasSuffix(s, "/values") {
|
||||
labelValuesRequests.Inc()
|
||||
labelName := s[:len(s)-len("/values")]
|
||||
httpserver.EnableCORS(w, r)
|
||||
if err := prometheus.LabelValuesHandler(labelName, w, r); err != nil {
|
||||
labelValuesErrors.Inc()
|
||||
sendPrometheusError(w, r, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
switch path {
|
||||
case "/api/v1/query":
|
||||
queryRequests.Inc()
|
||||
httpserver.EnableCORS(w, r)
|
||||
if err := prometheus.QueryHandler(w, r); err != nil {
|
||||
queryErrors.Inc()
|
||||
sendPrometheusError(w, r, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/query_range":
|
||||
queryRangeRequests.Inc()
|
||||
httpserver.EnableCORS(w, r)
|
||||
if err := prometheus.QueryRangeHandler(w, r); err != nil {
|
||||
queryRangeErrors.Inc()
|
||||
sendPrometheusError(w, r, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/series":
|
||||
seriesRequests.Inc()
|
||||
httpserver.EnableCORS(w, r)
|
||||
if err := prometheus.SeriesHandler(w, r); err != nil {
|
||||
seriesErrors.Inc()
|
||||
sendPrometheusError(w, r, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/series/count":
|
||||
seriesCountRequests.Inc()
|
||||
httpserver.EnableCORS(w, r)
|
||||
if err := prometheus.SeriesCountHandler(w, r); err != nil {
|
||||
seriesCountErrors.Inc()
|
||||
sendPrometheusError(w, r, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/labels":
|
||||
labelsRequests.Inc()
|
||||
httpserver.EnableCORS(w, r)
|
||||
if err := prometheus.LabelsHandler(w, r); err != nil {
|
||||
labelsErrors.Inc()
|
||||
sendPrometheusError(w, r, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/export":
|
||||
exportRequests.Inc()
|
||||
if err := prometheus.ExportHandler(w, r); err != nil {
|
||||
exportErrors.Inc()
|
||||
httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/federate":
|
||||
federateRequests.Inc()
|
||||
if err := prometheus.FederateHandler(w, r); err != nil {
|
||||
federateErrors.Inc()
|
||||
httpserver.Errorf(w, "error int %q: %s", r.URL.Path, err)
|
||||
return true
|
||||
}
|
||||
return true
|
||||
case "/api/v1/admin/tsdb/delete_series":
|
||||
deleteRequests.Inc()
|
||||
authKey := r.FormValue("authKey")
|
||||
if authKey != *deleteAuthKey {
|
||||
httpserver.Errorf(w, "invalid authKey %q. It must match the value from -deleteAuthKey command line flag", authKey)
|
||||
return true
|
||||
}
|
||||
if err := prometheus.DeleteHandler(r); err != nil {
|
||||
deleteErrors.Inc()
|
||||
httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err)
|
||||
return true
|
||||
}
|
||||
w.WriteHeader(http.StatusNoContent)
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func sendPrometheusError(w http.ResponseWriter, r *http.Request, err error) {
|
||||
logger.Errorf("error in %q: %s", r.URL.Path, err)
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
statusCode := 422
|
||||
w.WriteHeader(statusCode)
|
||||
prometheus.WriteErrorResponse(w, statusCode, err)
|
||||
}
|
||||
|
||||
var (
|
||||
labelValuesRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/label/{}/values"}`)
|
||||
labelValuesErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/label/{}/values"}`)
|
||||
|
||||
queryRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/query"}`)
|
||||
queryErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/query"}`)
|
||||
|
||||
queryRangeRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/query_range"}`)
|
||||
queryRangeErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/query_range"}`)
|
||||
|
||||
seriesRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/series"}`)
|
||||
seriesErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/series"}`)
|
||||
|
||||
seriesCountRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/series/count"}`)
|
||||
seriesCountErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/series/count"}`)
|
||||
|
||||
labelsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/labels"}`)
|
||||
labelsErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/labels"}`)
|
||||
|
||||
deleteRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/admin/tsdb/delete_series"}`)
|
||||
deleteErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/admin/tsdb/delete_series"}`)
|
||||
|
||||
exportRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/export"}`)
|
||||
exportErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/v1/export"}`)
|
||||
|
||||
federateRequests = metrics.NewCounter(`vm_http_requests_total{path="/federate"}`)
|
||||
federateErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/federate"}`)
|
||||
)
|
9
app/vmselect/netstorage/fadvise_darwin.go
Normal file
|
@ -0,0 +1,9 @@
|
|||
package netstorage
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
func mustFadviseRandomRead(f *os.File) {
|
||||
// Do nothing :)
|
||||
}
|
15
app/vmselect/netstorage/fadvise_linux.go
Normal file
|
@ -0,0 +1,15 @@
|
|||
package netstorage
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func mustFadviseRandomRead(f *os.File) {
|
||||
fd := int(f.Fd())
|
||||
if err := unix.Fadvise(int(fd), 0, 0, unix.FADV_RANDOM|unix.FADV_WILLNEED); err != nil {
|
||||
logger.Panicf("FATAL: error returned from unix.Fadvise(RANDOM|WILLNEED): %s", err)
|
||||
}
|
||||
}
|
535
app/vmselect/netstorage/netstorage.go
Normal file
|
@ -0,0 +1,535 @@
|
|||
package netstorage
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"flag"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
maxTagKeysPerSearch = flag.Int("search.maxTagKeys", 10e3, "The maximum number of tag keys returned per search")
|
||||
maxTagValuesPerSearch = flag.Int("search.maxTagValues", 10e3, "The maximum number of tag values returned per search")
|
||||
maxMetricsPerSearch = flag.Int("search.maxUniqueTimeseries", 100e3, "The maximum number of unique time series each search can scan")
|
||||
)
|
||||
|
||||
// Result is a single timeseries result.
|
||||
//
|
||||
// ProcessSearchQuery returns Result slice.
|
||||
type Result struct {
|
||||
// The name of the metric.
|
||||
MetricName storage.MetricName
|
||||
|
||||
// Values are sorted by Timestamps.
|
||||
Values []float64
|
||||
Timestamps []int64
|
||||
|
||||
// Marshaled MetricName. Used only for results sorting
|
||||
// in app/vmselect/promql
|
||||
MetricNameMarshaled []byte
|
||||
}
|
||||
|
||||
func (r *Result) reset() {
|
||||
r.MetricName.Reset()
|
||||
r.Values = r.Values[:0]
|
||||
r.Timestamps = r.Timestamps[:0]
|
||||
r.MetricNameMarshaled = r.MetricNameMarshaled[:0]
|
||||
}
|
||||
|
||||
// Results holds results returned from ProcessSearchQuery.
|
||||
type Results struct {
|
||||
tr storage.TimeRange
|
||||
deadline Deadline
|
||||
|
||||
tbf *tmpBlocksFile
|
||||
|
||||
packedTimeseries []packedTimeseries
|
||||
}
|
||||
|
||||
// Len returns the number of results in rss.
|
||||
func (rss *Results) Len() int {
|
||||
return len(rss.packedTimeseries)
|
||||
}
|
||||
|
||||
// Cancel cancels rss work.
|
||||
func (rss *Results) Cancel() {
|
||||
putTmpBlocksFile(rss.tbf)
|
||||
rss.tbf = nil
|
||||
}
|
||||
|
||||
// RunParallel runs f in parallel for all the results from rss.
|
||||
//
|
||||
// f shouldn't hold references to rs after returning.
|
||||
//
|
||||
// rss becomes unusable after the call to RunParallel.
|
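//
// A minimal usage sketch (hypothetical caller):
//
//	err := rss.RunParallel(func(rs *Result) {
//		// consume rs.Timestamps and rs.Values here; do not retain rs
//	})
//	if err != nil {
//		// handle the first error returned by the workers
//	}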
||||
func (rss *Results) RunParallel(f func(rs *Result)) error {
|
||||
defer func() {
|
||||
putTmpBlocksFile(rss.tbf)
|
||||
rss.tbf = nil
|
||||
}()
|
||||
|
||||
workersCount := 1 + len(rss.packedTimeseries)/32
|
||||
if workersCount > gomaxprocs {
|
||||
workersCount = gomaxprocs
|
||||
}
|
||||
if workersCount == 0 {
|
||||
logger.Panicf("BUG: workersCount cannot be zero")
|
||||
}
|
||||
workCh := make(chan *packedTimeseries, workersCount)
|
||||
doneCh := make(chan error)
|
||||
|
||||
// Start workers.
|
||||
for i := 0; i < workersCount; i++ {
|
||||
go func() {
|
||||
rs := getResult()
|
||||
defer putResult(rs)
|
||||
maxWorkersCount := gomaxprocs / workersCount
|
||||
|
||||
var err error
|
||||
for pts := range workCh {
|
||||
if time.Until(rss.deadline.Deadline) < 0 {
|
||||
err = fmt.Errorf("timeout exceeded during query execution: %s", rss.deadline.Timeout)
|
||||
break
|
||||
}
|
||||
if err = pts.Unpack(rss.tbf, rs, rss.tr, maxWorkersCount); err != nil {
|
||||
break
|
||||
}
|
||||
if len(rs.Timestamps) == 0 {
|
||||
// Skip empty blocks.
|
||||
continue
|
||||
}
|
||||
f(rs)
|
||||
}
|
||||
// Drain the remaining work
|
||||
for range workCh {
|
||||
}
|
||||
doneCh <- err
|
||||
}()
|
||||
}
|
||||
|
||||
// Feed workers with work.
|
||||
for i := range rss.packedTimeseries {
|
||||
workCh <- &rss.packedTimeseries[i]
|
||||
}
|
||||
rss.packedTimeseries = rss.packedTimeseries[:0]
|
||||
close(workCh)
|
||||
|
||||
// Wait until workers finish.
|
||||
var errors []error
|
||||
for i := 0; i < workersCount; i++ {
|
||||
if err := <-doneCh; err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
if len(errors) > 0 {
|
||||
// Return just the first error, since other errors
|
||||
// are likely to duplicate the first error.
|
||||
return errors[0]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var gomaxprocs = runtime.GOMAXPROCS(-1)
|
||||
|
||||
type packedTimeseries struct {
|
||||
metricName string
|
||||
addrs []tmpBlockAddr
|
||||
}
|
||||
|
||||
// Unpack unpacks pts to dst.
|
||||
func (pts *packedTimeseries) Unpack(tbf *tmpBlocksFile, dst *Result, tr storage.TimeRange, maxWorkersCount int) error {
|
||||
dst.reset()
|
||||
|
||||
if err := dst.MetricName.Unmarshal(bytesutil.ToUnsafeBytes(pts.metricName)); err != nil {
|
||||
return fmt.Errorf("cannot unmarshal metricName %q: %s", pts.metricName, err)
|
||||
}
|
||||
|
||||
workersCount := 1 + len(pts.addrs)/32
|
||||
if workersCount > maxWorkersCount {
|
||||
workersCount = maxWorkersCount
|
||||
}
|
||||
if workersCount == 0 {
|
||||
logger.Panicf("BUG: workersCount cannot be zero")
|
||||
}
|
||||
|
||||
sbs := make([]*sortBlock, 0, len(pts.addrs))
|
||||
var sbsLock sync.Mutex
|
||||
|
||||
workCh := make(chan tmpBlockAddr, workersCount)
|
||||
doneCh := make(chan error)
|
||||
|
||||
// Start workers
|
||||
for i := 0; i < workersCount; i++ {
|
||||
go func() {
|
||||
var err error
|
||||
for addr := range workCh {
|
||||
sb := getSortBlock()
|
||||
if err = sb.unpackFrom(tbf, addr, tr); err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
sbsLock.Lock()
|
||||
sbs = append(sbs, sb)
|
||||
sbsLock.Unlock()
|
||||
}
|
||||
|
||||
// Drain the remaining work
|
||||
for range workCh {
|
||||
}
|
||||
doneCh <- err
|
||||
}()
|
||||
}
|
||||
|
||||
// Feed workers with work
|
||||
for _, addr := range pts.addrs {
|
||||
workCh <- addr
|
||||
}
|
||||
pts.addrs = pts.addrs[:0]
|
||||
close(workCh)
|
||||
|
||||
// Wait until workers finish
|
||||
var errors []error
|
||||
for i := 0; i < workersCount; i++ {
|
||||
if err := <-doneCh; err != nil {
|
||||
errors = append(errors, err)
|
||||
}
|
||||
}
|
||||
if len(errors) > 0 {
|
||||
// Return the first error only, since other errors are likely the same.
|
||||
return errors[0]
|
||||
}
|
||||
|
||||
// Merge blocks
|
||||
mergeSortBlocks(dst, sbs)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getSortBlock() *sortBlock {
|
||||
v := sbPool.Get()
|
||||
if v == nil {
|
||||
return &sortBlock{}
|
||||
}
|
||||
return v.(*sortBlock)
|
||||
}
|
||||
|
||||
func putSortBlock(sb *sortBlock) {
|
||||
sb.reset()
|
||||
sbPool.Put(sb)
|
||||
}
|
||||
|
||||
var sbPool sync.Pool
|
||||
|
||||
var metricRowsSkipped = metrics.NewCounter(`vm_metric_rows_skipped_total{name="vmselect"}`)
|
||||
|
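// mergeSortBlocks merges the per-block sorted (Timestamps, Values) pairs from sbh
// into dst via a k-way merge: a min-heap ordered by each block's next pending
// timestamp repeatedly yields the block with the smallest timestamp.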
||||
func mergeSortBlocks(dst *Result, sbh sortBlocksHeap) {
|
||||
// Skip empty sort blocks, since they cannot be passed to heap.Init.
|
||||
src := sbh
|
||||
sbh = sbh[:0]
|
||||
for _, sb := range src {
|
||||
if len(sb.Timestamps) == 0 {
|
||||
putSortBlock(sb)
|
||||
continue
|
||||
}
|
||||
sbh = append(sbh, sb)
|
||||
}
|
||||
if len(sbh) == 0 {
|
||||
return
|
||||
}
|
||||
heap.Init(&sbh)
|
||||
for {
|
||||
top := sbh[0]
|
||||
heap.Pop(&sbh)
|
||||
if len(sbh) == 0 {
|
||||
dst.Timestamps = append(dst.Timestamps, top.Timestamps[top.NextIdx:]...)
|
||||
dst.Values = append(dst.Values, top.Values[top.NextIdx:]...)
|
||||
putSortBlock(top)
|
||||
return
|
||||
}
|
||||
sbNext := sbh[0]
|
||||
tsNext := sbNext.Timestamps[sbNext.NextIdx]
|
||||
idxNext := len(top.Timestamps)
|
||||
if top.Timestamps[idxNext-1] > tsNext {
|
||||
idxNext = top.NextIdx
|
||||
for top.Timestamps[idxNext] <= tsNext {
|
||||
idxNext++
|
||||
}
|
||||
}
|
||||
dst.Timestamps = append(dst.Timestamps, top.Timestamps[top.NextIdx:idxNext]...)
|
||||
dst.Values = append(dst.Values, top.Values[top.NextIdx:idxNext]...)
|
||||
if idxNext < len(top.Timestamps) {
|
||||
top.NextIdx = idxNext
|
||||
heap.Push(&sbh, top)
|
||||
} else {
|
||||
// Return top to the pool.
|
||||
putSortBlock(top)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type sortBlock struct {
|
||||
// b is used as a temporary storage for unpacked rows before they
|
||||
// go to Timestamps and Values.
|
||||
b storage.Block
|
||||
|
||||
Timestamps []int64
|
||||
Values []float64
|
||||
NextIdx int
|
||||
}
|
||||
|
||||
func (sb *sortBlock) reset() {
|
||||
sb.b.Reset()
|
||||
sb.Timestamps = sb.Timestamps[:0]
|
||||
sb.Values = sb.Values[:0]
|
||||
sb.NextIdx = 0
|
||||
}
|
||||
|
||||
func (sb *sortBlock) unpackFrom(tbf *tmpBlocksFile, addr tmpBlockAddr, tr storage.TimeRange) error {
|
||||
tbf.MustReadBlockAt(&sb.b, addr)
|
||||
if err := sb.b.UnmarshalData(); err != nil {
|
||||
return fmt.Errorf("cannot unmarshal block: %s", err)
|
||||
}
|
||||
timestamps := sb.b.Timestamps()
|
||||
|
||||
// Skip timestamps smaller than tr.MinTimestamp.
|
||||
i := 0
|
||||
for i < len(timestamps) && timestamps[i] < tr.MinTimestamp {
|
||||
i++
|
||||
}
|
||||
|
||||
// Skip timestamps bigger than tr.MaxTimestamp.
|
||||
j := len(timestamps)
|
||||
for j > i && timestamps[j-1] > tr.MaxTimestamp {
|
||||
j--
|
||||
}
|
||||
skippedRows := sb.b.RowsCount() - (j - i)
|
||||
metricRowsSkipped.Add(skippedRows)
|
||||
|
||||
// Copy the remaining values.
|
||||
if i == j {
|
||||
return nil
|
||||
}
|
||||
values := sb.b.Values()
|
||||
sb.Timestamps = append(sb.Timestamps, timestamps[i:j]...)
|
||||
sb.Values = decimal.AppendDecimalToFloat(sb.Values, values[i:j], sb.b.Scale())
|
||||
return nil
|
||||
}
|
||||
|
||||
type sortBlocksHeap []*sortBlock
|
||||
|
||||
func (sbh sortBlocksHeap) Len() int {
|
||||
return len(sbh)
|
||||
}
|
||||
|
||||
func (sbh sortBlocksHeap) Less(i, j int) bool {
|
||||
a := sbh[i]
|
||||
b := sbh[j]
|
||||
return a.Timestamps[a.NextIdx] < b.Timestamps[b.NextIdx]
|
||||
}
|
||||
|
||||
func (sbh sortBlocksHeap) Swap(i, j int) {
|
||||
sbh[i], sbh[j] = sbh[j], sbh[i]
|
||||
}
|
||||
|
||||
func (sbh *sortBlocksHeap) Push(x interface{}) {
|
||||
*sbh = append(*sbh, x.(*sortBlock))
|
||||
}
|
||||
|
||||
func (sbh *sortBlocksHeap) Pop() interface{} {
|
||||
a := *sbh
|
||||
v := a[len(a)-1]
|
||||
*sbh = a[:len(a)-1]
|
||||
return v
|
||||
}
|
||||
|
||||
// DeleteSeries deletes time series matching the given tagFilterss.
|
||||
func DeleteSeries(sq *storage.SearchQuery) (int, error) {
|
||||
tfss, err := setupTfss(sq.TagFilterss)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return vmstorage.DeleteMetrics(tfss)
|
||||
}
|
||||
|
||||
// GetLabels returns labels until the given deadline.
|
||||
func GetLabels(deadline Deadline) ([]string, error) {
|
||||
labels, err := vmstorage.SearchTagKeys(*maxTagKeysPerSearch)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error during labels search: %s", err)
|
||||
}
|
||||
|
||||
// Substitute "" with "__name__"
|
||||
for i := range labels {
|
||||
if labels[i] == "" {
|
||||
labels[i] = "__name__"
|
||||
}
|
||||
}
|
||||
|
||||
// Sort labels like Prometheus does
|
||||
sort.Strings(labels)
|
||||
|
||||
return labels, nil
|
||||
}
|
||||
|
||||
// GetLabelValues returns label values for the given labelName
|
||||
// until the given deadline.
|
||||
func GetLabelValues(labelName string, deadline Deadline) ([]string, error) {
|
||||
if labelName == "__name__" {
|
||||
labelName = ""
|
||||
}
|
||||
|
||||
// Search for tag values
|
||||
labelValues, err := vmstorage.SearchTagValues([]byte(labelName), *maxTagValuesPerSearch)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error during label values search for labelName=%q: %s", labelName, err)
|
||||
}
|
||||
|
||||
// Sort labelValues like Prometheus does
|
||||
sort.Strings(labelValues)
|
||||
|
||||
return labelValues, nil
|
||||
}
|
||||
|
||||
// GetSeriesCount returns the number of unique series.
|
||||
func GetSeriesCount(deadline Deadline) (uint64, error) {
|
||||
n, err := vmstorage.GetSeriesCount()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("error during series count request: %s", err)
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func getStorageSearch() *storage.Search {
|
||||
v := ssPool.Get()
|
||||
if v == nil {
|
||||
return &storage.Search{}
|
||||
}
|
||||
return v.(*storage.Search)
|
||||
}
|
||||
|
||||
func putStorageSearch(sr *storage.Search) {
|
||||
n := atomic.LoadUint64(&sr.MissingMetricNamesForMetricID)
|
||||
missingMetricNamesForMetricID.Add(int(n))
|
||||
sr.MustClose()
|
||||
ssPool.Put(sr)
|
||||
}
|
||||
|
||||
var ssPool sync.Pool
|
||||
|
||||
var missingMetricNamesForMetricID = metrics.NewCounter(`vm_missing_metric_names_for_metric_id_total`)
|
||||
|
||||
// ProcessSearchQuery performs sq on storage nodes until the given deadline.
|
||||
func ProcessSearchQuery(sq *storage.SearchQuery, deadline Deadline) (*Results, error) {
|
||||
// Setup search.
|
||||
tfss, err := setupTfss(sq.TagFilterss)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tr := storage.TimeRange{
|
||||
MinTimestamp: sq.MinTimestamp,
|
||||
MaxTimestamp: sq.MaxTimestamp,
|
||||
}
|
||||
|
||||
vmstorage.WG.Add(1)
|
||||
defer vmstorage.WG.Done()
|
||||
|
||||
sr := getStorageSearch()
|
||||
defer putStorageSearch(sr)
|
||||
sr.Init(vmstorage.Storage, tfss, tr, *maxMetricsPerSearch)
|
||||
|
||||
tbf := getTmpBlocksFile()
|
||||
m := make(map[string][]tmpBlockAddr)
|
||||
for sr.NextMetricBlock() {
|
||||
addr, err := tbf.WriteBlock(sr.MetricBlock.Block)
|
||||
if err != nil {
|
||||
putTmpBlocksFile(tbf)
|
||||
return nil, fmt.Errorf("cannot write data to temporary blocks file: %s", err)
|
||||
}
|
||||
if time.Until(deadline.Deadline) < 0 {
|
||||
putTmpBlocksFile(tbf)
|
||||
return nil, fmt.Errorf("timeout exceeded while fetching data from storage: %s", deadline.Timeout)
|
||||
}
|
||||
metricName := sr.MetricBlock.MetricName
|
||||
m[string(metricName)] = append(m[string(metricName)], addr)
|
||||
}
|
||||
if err := sr.Error(); err != nil {
|
||||
putTmpBlocksFile(tbf)
|
||||
return nil, fmt.Errorf("search error: %s", err)
|
||||
}
|
||||
if err := tbf.Finalize(); err != nil {
|
||||
putTmpBlocksFile(tbf)
|
||||
return nil, fmt.Errorf("cannot finalize temporary blocks file: %s", err)
|
||||
}
|
||||
|
||||
var rss Results
|
||||
rss.packedTimeseries = make([]packedTimeseries, len(m))
|
||||
rss.tr = tr
|
||||
rss.deadline = deadline
|
||||
rss.tbf = tbf
|
||||
i := 0
|
||||
for metricName, addrs := range m {
|
||||
pts := &rss.packedTimeseries[i]
|
||||
i++
|
||||
pts.metricName = metricName
|
||||
pts.addrs = addrs
|
||||
}
|
||||
return &rss, nil
|
||||
}
|
||||
|
||||
func getResult() *Result {
|
||||
v := rsPool.Get()
|
||||
if v == nil {
|
||||
return &Result{}
|
||||
}
|
||||
return v.(*Result)
|
||||
}
|
||||
|
||||
func putResult(rs *Result) {
|
||||
if len(rs.Values) > 8192 {
|
||||
// Do not pool big results, since they may occupy too much memory.
|
||||
return
|
||||
}
|
||||
rs.reset()
|
||||
rsPool.Put(rs)
|
||||
}
|
||||
|
||||
var rsPool sync.Pool
|
||||
|
||||
func setupTfss(tagFilterss [][]storage.TagFilter) ([]*storage.TagFilters, error) {
|
||||
tfss := make([]*storage.TagFilters, 0, len(tagFilterss))
|
||||
for _, tagFilters := range tagFilterss {
|
||||
tfs := storage.NewTagFilters()
|
||||
for i := range tagFilters {
|
||||
tf := &tagFilters[i]
|
||||
if err := tfs.Add(tf.Key, tf.Value, tf.IsNegative, tf.IsRegexp); err != nil {
|
||||
return nil, fmt.Errorf("cannot parse tag filter %s: %s", tf, err)
|
||||
}
|
||||
}
|
||||
tfss = append(tfss, tfs)
|
||||
}
|
||||
return tfss, nil
|
||||
}
|
||||
|
||||
// Deadline contains deadline with the corresponding timeout for pretty error messages.
|
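//
// A typical usage sketch: create d := NewDeadline(timeout) when a request starts,
// then check time.Until(d.Deadline) < 0 on hot paths, as RunParallel and
// ProcessSearchQuery do above.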
||||
type Deadline struct {
|
||||
Deadline time.Time
|
||||
Timeout time.Duration
|
||||
}
|
||||
|
||||
// NewDeadline returns deadline for the given timeout.
|
||||
func NewDeadline(timeout time.Duration) Deadline {
|
||||
return Deadline{
|
||||
Deadline: time.Now().Add(timeout),
|
||||
Timeout: timeout,
|
||||
}
|
||||
}
|
188
app/vmselect/netstorage/tmp_blocks_file.go
Normal file
|
@ -0,0 +1,188 @@
|
|||
package netstorage
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
// InitTmpBlocksDir initializes directory to store temporary search results.
|
||||
//
|
||||
// It stores data in the system-defined temporary directory if tmpDirPath is empty.
|
||||
func InitTmpBlocksDir(tmpDirPath string) {
|
||||
if len(tmpDirPath) == 0 {
|
||||
tmpDirPath = os.TempDir()
|
||||
}
|
||||
tmpBlocksDir = tmpDirPath + "/searchResults"
|
||||
if err := os.RemoveAll(tmpBlocksDir); err != nil {
|
||||
logger.Panicf("FATAL: cannot remove %q: %s", tmpBlocksDir, err)
|
||||
}
|
||||
if err := fs.MkdirAllIfNotExist(tmpBlocksDir); err != nil {
|
||||
logger.Panicf("FATAL: cannot create %q: %s", tmpBlocksDir, err)
|
||||
}
|
||||
}
|
||||
|
||||
var tmpBlocksDir string
|
||||
|
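// Search results are buffered in memory until they exceed maxInmemoryTmpBlocksFile
// bytes; after that WriteBlock spills the accumulated data to a temporary file
// under tmpBlocksDir.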
||||
const maxInmemoryTmpBlocksFile = 512 * 1024
|
||||
|
||||
type tmpBlocksFile struct {
|
||||
buf []byte
|
||||
|
||||
f *os.File
|
||||
bw *bufio.Writer
|
||||
|
||||
offset uint64
|
||||
}
|
||||
|
||||
func getTmpBlocksFile() *tmpBlocksFile {
|
||||
v := tmpBlocksFilePool.Get()
|
||||
if v == nil {
|
||||
return &tmpBlocksFile{}
|
||||
}
|
||||
return v.(*tmpBlocksFile)
|
||||
}
|
||||
|
||||
func putTmpBlocksFile(tbf *tmpBlocksFile) {
|
||||
tbf.MustClose()
|
||||
tbf.buf = tbf.buf[:0]
|
||||
tbf.f = nil
|
||||
tbf.bw = nil
|
||||
tbf.offset = 0
|
||||
tmpBlocksFilePool.Put(tbf)
|
||||
}
|
||||
|
||||
var tmpBlocksFilePool sync.Pool
|
||||
|
||||
type tmpBlockAddr struct {
|
||||
offset uint64
|
||||
size int
|
||||
}
|
||||
|
||||
func (addr tmpBlockAddr) String() string {
|
||||
return fmt.Sprintf("offset %d, size %d", addr.offset, addr.size)
|
||||
}
|
||||
|
||||
func getBufioWriter(f *os.File) *bufio.Writer {
|
||||
v := bufioWriterPool.Get()
|
||||
if v == nil {
|
||||
return bufio.NewWriterSize(f, maxInmemoryTmpBlocksFile*2)
|
||||
}
|
||||
bw := v.(*bufio.Writer)
|
||||
bw.Reset(f)
|
||||
return bw
|
||||
}
|
||||
|
||||
func putBufioWriter(bw *bufio.Writer) {
|
||||
bufioWriterPool.Put(bw)
|
||||
}
|
||||
|
||||
var bufioWriterPool sync.Pool
|
||||
|
||||
var tmpBlocksFilesCreated = metrics.NewCounter(`vm_tmp_blocks_files_created_total`)
|
||||
|
||||
// WriteBlock writes b to tbf.
|
||||
//
|
||||
// It returns an error since the operation may fail on space shortage
|
||||
// and this must be handled.
|
||||
func (tbf *tmpBlocksFile) WriteBlock(b *storage.Block) (tmpBlockAddr, error) {
|
||||
var addr tmpBlockAddr
|
||||
addr.offset = tbf.offset
|
||||
|
||||
tbfBufLen := len(tbf.buf)
|
||||
tbf.buf = storage.MarshalBlock(tbf.buf, b)
|
||||
addr.size = len(tbf.buf) - tbfBufLen
|
||||
tbf.offset += uint64(addr.size)
|
||||
if tbf.offset <= maxInmemoryTmpBlocksFile {
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
if tbf.f == nil {
|
||||
f, err := ioutil.TempFile(tmpBlocksDir, "")
|
||||
if err != nil {
|
||||
return addr, err
|
||||
}
|
||||
tbf.f = f
|
||||
tbf.bw = getBufioWriter(f)
|
||||
tmpBlocksFilesCreated.Inc()
|
||||
}
|
||||
_, err := tbf.bw.Write(tbf.buf)
|
||||
tbf.buf = tbf.buf[:0]
|
||||
if err != nil {
|
||||
return addr, fmt.Errorf("cannot write block to %q: %s", tbf.f.Name(), err)
|
||||
}
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
func (tbf *tmpBlocksFile) Finalize() error {
|
||||
if tbf.f == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := tbf.bw.Flush()
|
||||
putBufioWriter(tbf.bw)
|
||||
tbf.bw = nil
|
||||
if _, err := tbf.f.Seek(0, 0); err != nil {
|
||||
logger.Panicf("FATAL: cannot seek to the start of file: %s", err)
|
||||
}
|
||||
mustFadviseRandomRead(tbf.f)
|
||||
return err
|
||||
}
|
||||
|
||||
func (tbf *tmpBlocksFile) MustReadBlockAt(dst *storage.Block, addr tmpBlockAddr) {
|
||||
var buf []byte
|
||||
if tbf.f == nil {
|
||||
buf = tbf.buf[addr.offset : addr.offset+uint64(addr.size)]
|
||||
} else {
|
||||
bb := tmpBufPool.Get()
|
||||
defer tmpBufPool.Put(bb)
|
||||
bb.B = bytesutil.Resize(bb.B, addr.size)
|
||||
n, err := tbf.f.ReadAt(bb.B, int64(addr.offset))
|
||||
if err != nil {
|
||||
logger.Panicf("FATAL: cannot read from %q at %s: %s", tbf.f.Name(), addr, err)
|
||||
}
|
||||
if n != len(bb.B) {
|
||||
logger.Panicf("FATAL: too short number of bytes read at %s; got %d; want %d", addr, n, len(bb.B))
|
||||
}
|
||||
buf = bb.B
|
||||
}
|
||||
tail, err := storage.UnmarshalBlock(dst, buf)
|
||||
if err != nil {
|
||||
logger.Panicf("FATAL: cannot unmarshal data at %s: %s", addr, err)
|
||||
}
|
||||
if len(tail) > 0 {
|
||||
logger.Panicf("FATAL: unexpected non-empty tail left after unmarshaling data at %s; len(tail)=%d", addr, len(tail))
|
||||
}
|
||||
}
|
||||
|
||||
var tmpBufPool bytesutil.ByteBufferPool
|
||||
|
||||
func (tbf *tmpBlocksFile) MustClose() {
|
||||
if tbf.f == nil {
|
||||
return
|
||||
}
|
||||
if tbf.bw != nil {
|
||||
putBufioWriter(tbf.bw)
|
||||
tbf.bw = nil
|
||||
}
|
||||
fname := tbf.f.Name()
|
||||
|
||||
// Remove the file first, then close it.
|
||||
// This way the OS shouldn't try to flush file contents to storage
|
||||
// on close.
|
||||
if err := os.Remove(fname); err != nil {
|
||||
logger.Panicf("FATAL: cannot remove %q: %s", fname, err)
|
||||
}
|
||||
if err := tbf.f.Close(); err != nil {
|
||||
logger.Panicf("FATAL: cannot close %q: %s", fname, err)
|
||||
}
|
||||
tbf.f = nil
|
||||
}
|
150
app/vmselect/netstorage/tmp_blocks_file_test.go
Normal file
|
@ -0,0 +1,150 @@
|
|||
package netstorage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
tmpDir := "TestTmpBlocks"
|
||||
InitTmpBlocksDir(tmpDir)
|
||||
statusCode := m.Run()
|
||||
if err := os.RemoveAll(tmpDir); err != nil {
|
||||
logger.Panicf("cannot remove %q: %s", tmpDir, err)
|
||||
}
|
||||
os.Exit(statusCode)
|
||||
}
|
||||
|
||||
func TestTmpBlocksFileSerial(t *testing.T) {
|
||||
if err := testTmpBlocksFile(); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTmpBlocksFileConcurrent(t *testing.T) {
|
||||
concurrency := 4
|
||||
ch := make(chan error, concurrency)
|
||||
for i := 0; i < concurrency; i++ {
|
||||
go func() {
|
||||
ch <- testTmpBlocksFile()
|
||||
}()
|
||||
}
|
||||
for i := 0; i < concurrency; i++ {
|
||||
select {
|
||||
case err := <-ch:
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
case <-time.After(30 * time.Second):
|
||||
t.Fatalf("timeout")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func testTmpBlocksFile() error {
|
||||
createBlock := func() *storage.Block {
|
||||
rowsCount := rand.Intn(8000) + 1
|
||||
var timestamps, values []int64
|
||||
ts := int64(rand.Intn(1023434))
|
||||
for i := 0; i < rowsCount; i++ {
|
||||
ts += int64(rand.Intn(1000) + 1)
|
||||
timestamps = append(timestamps, ts)
|
||||
values = append(values, int64(i*i+rand.Intn(20)))
|
||||
}
|
||||
tsid := &storage.TSID{
|
||||
MetricID: 234211,
|
||||
}
|
||||
scale := int16(rand.Intn(123))
|
||||
precisionBits := uint8(rand.Intn(63) + 1)
|
||||
var b storage.Block
|
||||
b.Init(tsid, timestamps, values, scale, precisionBits)
|
||||
_, _, _ = b.MarshalData(0, 0)
|
||||
return &b
|
||||
}
|
||||
for _, size := range []int{1024, 16 * 1024, maxInmemoryTmpBlocksFile / 2, 2 * maxInmemoryTmpBlocksFile} {
|
||||
err := func() error {
|
||||
tbf := getTmpBlocksFile()
|
||||
defer putTmpBlocksFile(tbf)
|
||||
|
||||
// Write blocks until their total size exceeds `size`.
|
||||
var addrs []tmpBlockAddr
|
||||
var blocks []*storage.Block
|
||||
for tbf.offset < uint64(size) {
|
||||
b := createBlock()
|
||||
addr, err := tbf.WriteBlock(b)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot write block at offset %d: %s", tbf.offset, err)
|
||||
}
|
||||
if addr.offset+uint64(addr.size) != tbf.offset {
|
||||
return fmt.Errorf("unexpected addr=%+v for offset %v", &addr, tbf.offset)
|
||||
}
|
||||
addrs = append(addrs, addr)
|
||||
blocks = append(blocks, b)
|
||||
}
|
||||
if err := tbf.Finalize(); err != nil {
|
||||
return fmt.Errorf("cannot finalize tbf: %s", err)
|
||||
}
|
||||
|
||||
// Read blocks in parallel and verify them
|
||||
concurrency := 3
|
||||
workCh := make(chan int)
|
||||
doneCh := make(chan error)
|
||||
for i := 0; i < concurrency; i++ {
|
||||
go func() {
|
||||
doneCh <- func() error {
|
||||
var b1 storage.Block
|
||||
for idx := range workCh {
|
||||
addr := addrs[idx]
|
||||
b := blocks[idx]
|
||||
if err := b.UnmarshalData(); err != nil {
|
||||
return fmt.Errorf("cannot unmarshal data from the original block: %s", err)
|
||||
}
|
||||
b1.Reset()
|
||||
tbf.MustReadBlockAt(&b1, addr)
|
||||
if err := b1.UnmarshalData(); err != nil {
|
||||
return fmt.Errorf("cannot unmarshal data from tbf: %s", err)
|
||||
}
|
||||
if b1.RowsCount() != b.RowsCount() {
|
||||
return fmt.Errorf("unexpected number of rows in tbf block; got %d; want %d", b1.RowsCount(), b.RowsCount())
|
||||
}
|
||||
if !reflect.DeepEqual(b1.Timestamps(), b.Timestamps()) {
|
||||
return fmt.Errorf("unexpected timestamps; got\n%v\nwant\n%v", b1.Timestamps(), b.Timestamps())
|
||||
}
|
||||
if !reflect.DeepEqual(b1.Values(), b.Values()) {
|
||||
return fmt.Errorf("unexpected values; got\n%v\nwant\n%v", b1.Values(), b.Values())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}()
|
||||
}()
|
||||
}
|
||||
for i := range addrs {
|
||||
workCh <- i
|
||||
}
|
||||
close(workCh)
|
||||
for i := 0; i < concurrency; i++ {
|
||||
select {
|
||||
case err := <-doneCh:
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case <-time.After(time.Second):
|
||||
return fmt.Errorf("timeout")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
11
app/vmselect/prometheus/error_response.qtpl
Normal file
|
@ -0,0 +1,11 @@
|
|||
{% stripspace %}
|
||||
ErrorResponse generates error response for /api/v1/query.
|
||||
See https://prometheus.io/docs/prometheus/latest/querying/api/#format-overview
|
||||
{% func ErrorResponse(statusCode int, err error) %}
|
||||
{
|
||||
"status":"error",
|
||||
"errorType":"{%d statusCode %}",
|
||||
"error": {%q= err.Error() %}
|
||||
}
|
||||
{% endfunc %}
|
||||
{% endstripspace %}
|
61
app/vmselect/prometheus/error_response.qtpl.go
Normal file
|
@ -0,0 +1,61 @@
|
|||
// Code generated by qtc from "error_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
// ErrorResponse generates error response for /api/v1/query.See https://prometheus.io/docs/prometheus/latest/querying/api/#format-overview
|
||||
|
||||
//line app/vmselect/prometheus/error_response.qtpl:4
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/error_response.qtpl:4
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/error_response.qtpl:4
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/error_response.qtpl:4
|
||||
func StreamErrorResponse(qw422016 *qt422016.Writer, statusCode int, err error) {
|
||||
//line app/vmselect/prometheus/error_response.qtpl:4
|
||||
qw422016.N().S(`{"status":"error","errorType":"`)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:7
|
||||
qw422016.N().D(statusCode)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:7
|
||||
qw422016.N().S(`","error":`)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:8
|
||||
qw422016.N().Q(err.Error())
|
||||
//line app/vmselect/prometheus/error_response.qtpl:8
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
func WriteErrorResponse(qq422016 qtio422016.Writer, statusCode int, err error) {
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
StreamErrorResponse(qw422016, statusCode, err)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
func ErrorResponse(statusCode int, err error) string {
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
WriteErrorResponse(qb422016, statusCode, err)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/error_response.qtpl:10
|
||||
}
|
96
app/vmselect/prometheus/export.qtpl
Normal file
|
@ -0,0 +1,96 @@
|
|||
{% import (
|
||||
"github.com/valyala/quicktemplate"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
) %}
|
||||
|
||||
{% stripspace %}
|
||||
|
||||
{% func ExportPrometheusLine(rs *netstorage.Result) %}
|
||||
{% if len(rs.Timestamps) == 0 %}{% return %}{% endif %}
|
||||
{% code bb := quicktemplate.AcquireByteBuffer() %}
|
||||
{% code writeprometheusMetricName(bb, &rs.MetricName) %}
|
||||
{% for i, ts := range rs.Timestamps %}
|
||||
{%z= bb.B %}{% space %}
|
||||
{%f= rs.Values[i] %}{% space %}
|
||||
{%d= int(ts) %}{% newline %}
|
||||
{% endfor %}
|
||||
{% code quicktemplate.ReleaseByteBuffer(bb) %}
|
||||
{% endfunc %}
|
||||
|
||||
{% func ExportJSONLine(rs *netstorage.Result) %}
|
||||
{% if len(rs.Timestamps) == 0 %}{% return %}{% endif %}
|
||||
{
|
||||
"metric":{%= metricNameObject(&rs.MetricName) %},
|
||||
"values":[
|
||||
{% if len(rs.Values) > 0 %}
|
||||
{% code values := rs.Values %}
|
||||
{%f= values[0] %}
|
||||
{% code values = values[1:] %}
|
||||
{% for _, v := range values %}
|
||||
,{%f= v %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
],
|
||||
"timestamps":[
|
||||
{% if len(rs.Timestamps) > 0 %}
|
||||
{% code timestamps := rs.Timestamps %}
|
||||
{%d= int(timestamps[0]) %}
|
||||
{% code timestamps = timestamps[1:] %}
|
||||
{% for _, ts := range timestamps %}
|
||||
,{%d= int(ts) %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
}{% newline %}
|
||||
{% endfunc %}
|
||||
|
||||
{% func ExportPromAPILine(rs *netstorage.Result) %}
|
||||
{
|
||||
"metric": {%= metricNameObject(&rs.MetricName) %},
|
||||
"values": {%= valuesWithTimestamps(rs.Values, rs.Timestamps) %}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% func ExportPromAPIResponse(resultsCh <-chan *quicktemplate.ByteBuffer) %}
|
||||
{
|
||||
"status":"success",
|
||||
"data":{
|
||||
"resultType":"matrix",
|
||||
"result":[
|
||||
{% code bb, ok := <-resultsCh %}
|
||||
{% if ok %}
|
||||
{%z= bb.B %}
|
||||
{% code quicktemplate.ReleaseByteBuffer(bb) %}
|
||||
{% for bb := range resultsCh %}
|
||||
,{%z= bb.B %}
|
||||
{% code quicktemplate.ReleaseByteBuffer(bb) %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% func ExportStdResponse(resultsCh <-chan *quicktemplate.ByteBuffer) %}
|
||||
{% for bb := range resultsCh %}
|
||||
{%z= bb.B %}
|
||||
{% code quicktemplate.ReleaseByteBuffer(bb) %}
|
||||
{% endfor %}
|
||||
{% endfunc %}
|
||||
|
||||
{% func prometheusMetricName(mn *storage.MetricName) %}
|
||||
{%z= mn.MetricGroup %}
|
||||
{% if len(mn.Tags) > 0 %}
|
||||
{
|
||||
{% code tags := mn.Tags %}
|
||||
{%z= tags[0].Key %}={%qz= tags[0].Value %}
|
||||
{% code tags = tags[1:] %}
|
||||
{% for i := range tags %}
|
||||
{% code tag := &tags[i] %}
|
||||
,{%z= tag.Key %}={%qz= tag.Value %}
|
||||
{% endfor %}
|
||||
}
|
||||
{% endif %}
|
||||
{% endfunc %}
|
||||
{% endstripspace %}
|
385
app/vmselect/prometheus/export.qtpl.go
Normal file
|
@ -0,0 +1,385 @@
|
|||
// Code generated by qtc from "export.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:1
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:1
|
||||
import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:9
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:9
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:9
|
||||
func StreamExportPrometheusLine(qw422016 *qt422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/export.qtpl:10
|
||||
if len(rs.Timestamps) == 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:10
|
||||
return
|
||||
//line app/vmselect/prometheus/export.qtpl:10
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:11
|
||||
bb := quicktemplate.AcquireByteBuffer()
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:12
|
||||
writeprometheusMetricName(bb, &rs.MetricName)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:13
|
||||
for i, ts := range rs.Timestamps {
|
||||
//line app/vmselect/prometheus/export.qtpl:14
|
||||
qw422016.N().Z(bb.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:14
|
||||
qw422016.N().S(` `)
|
||||
//line app/vmselect/prometheus/export.qtpl:15
|
||||
qw422016.N().F(rs.Values[i])
|
||||
//line app/vmselect/prometheus/export.qtpl:15
|
||||
qw422016.N().S(` `)
|
||||
//line app/vmselect/prometheus/export.qtpl:16
|
||||
qw422016.N().D(int(ts))
|
||||
//line app/vmselect/prometheus/export.qtpl:16
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmselect/prometheus/export.qtpl:17
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:18
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
func WriteExportPrometheusLine(qq422016 qtio422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
StreamExportPrometheusLine(qw422016, rs)
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
func ExportPrometheusLine(rs *netstorage.Result) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
WriteExportPrometheusLine(qb422016, rs)
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:19
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:21
|
||||
func StreamExportJSONLine(qw422016 *qt422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/export.qtpl:22
|
||||
if len(rs.Timestamps) == 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:22
|
||||
return
|
||||
//line app/vmselect/prometheus/export.qtpl:22
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:22
|
||||
qw422016.N().S(`{"metric":`)
|
||||
//line app/vmselect/prometheus/export.qtpl:24
|
||||
streammetricNameObject(qw422016, &rs.MetricName)
|
||||
//line app/vmselect/prometheus/export.qtpl:24
|
||||
qw422016.N().S(`,"values":[`)
|
||||
//line app/vmselect/prometheus/export.qtpl:26
|
||||
if len(rs.Values) > 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:27
|
||||
values := rs.Values
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:28
|
||||
qw422016.N().F(values[0])
|
||||
//line app/vmselect/prometheus/export.qtpl:29
|
||||
values = values[1:]
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:30
|
||||
for _, v := range values {
|
||||
//line app/vmselect/prometheus/export.qtpl:30
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/export.qtpl:31
|
||||
qw422016.N().F(v)
|
||||
//line app/vmselect/prometheus/export.qtpl:32
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:33
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:33
|
||||
qw422016.N().S(`],"timestamps":[`)
|
||||
//line app/vmselect/prometheus/export.qtpl:36
|
||||
if len(rs.Timestamps) > 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:37
|
||||
timestamps := rs.Timestamps
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:38
|
||||
qw422016.N().D(int(timestamps[0]))
|
||||
//line app/vmselect/prometheus/export.qtpl:39
|
||||
timestamps = timestamps[1:]
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:40
|
||||
for _, ts := range timestamps {
|
||||
//line app/vmselect/prometheus/export.qtpl:40
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/export.qtpl:41
|
||||
qw422016.N().D(int(ts))
|
||||
//line app/vmselect/prometheus/export.qtpl:42
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:43
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:43
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vmselect/prometheus/export.qtpl:45
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
func WriteExportJSONLine(qq422016 qtio422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
StreamExportJSONLine(qw422016, rs)
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
func ExportJSONLine(rs *netstorage.Result) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
WriteExportJSONLine(qb422016, rs)
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:46
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:48
|
||||
func StreamExportPromAPILine(qw422016 *qt422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/export.qtpl:48
|
||||
qw422016.N().S(`{"metric":`)
|
||||
//line app/vmselect/prometheus/export.qtpl:50
|
||||
streammetricNameObject(qw422016, &rs.MetricName)
|
||||
//line app/vmselect/prometheus/export.qtpl:50
|
||||
qw422016.N().S(`,"values":`)
|
||||
//line app/vmselect/prometheus/export.qtpl:51
|
||||
streamvaluesWithTimestamps(qw422016, rs.Values, rs.Timestamps)
|
||||
//line app/vmselect/prometheus/export.qtpl:51
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
func WriteExportPromAPILine(qq422016 qtio422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
StreamExportPromAPILine(qw422016, rs)
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
func ExportPromAPILine(rs *netstorage.Result) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
WriteExportPromAPILine(qb422016, rs)
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:53
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:55
|
||||
func StreamExportPromAPIResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
||||
//line app/vmselect/prometheus/export.qtpl:55
|
||||
qw422016.N().S(`{"status":"success","data":{"resultType":"matrix","result":[`)
|
||||
//line app/vmselect/prometheus/export.qtpl:61
|
||||
bb, ok := <-resultsCh
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:62
|
||||
if ok {
|
||||
//line app/vmselect/prometheus/export.qtpl:63
|
||||
qw422016.N().Z(bb.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:64
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:65
|
||||
for bb := range resultsCh {
|
||||
//line app/vmselect/prometheus/export.qtpl:65
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/export.qtpl:66
|
||||
qw422016.N().Z(bb.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:67
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:68
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:69
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:69
|
||||
qw422016.N().S(`]}}`)
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
func WriteExportPromAPIResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
StreamExportPromAPIResponse(qw422016, resultsCh)
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
func ExportPromAPIResponse(resultsCh <-chan *quicktemplate.ByteBuffer) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
WriteExportPromAPIResponse(qb422016, resultsCh)
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:73
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:75
|
||||
func StreamExportStdResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
||||
//line app/vmselect/prometheus/export.qtpl:76
|
||||
for bb := range resultsCh {
|
||||
//line app/vmselect/prometheus/export.qtpl:77
|
||||
qw422016.N().Z(bb.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:78
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:79
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
func WriteExportStdResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
StreamExportStdResponse(qw422016, resultsCh)
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
func ExportStdResponse(resultsCh <-chan *quicktemplate.ByteBuffer) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
WriteExportStdResponse(qb422016, resultsCh)
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:80
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:82
|
||||
func streamprometheusMetricName(qw422016 *qt422016.Writer, mn *storage.MetricName) {
|
||||
//line app/vmselect/prometheus/export.qtpl:83
|
||||
qw422016.N().Z(mn.MetricGroup)
|
||||
//line app/vmselect/prometheus/export.qtpl:84
|
||||
if len(mn.Tags) > 0 {
|
||||
//line app/vmselect/prometheus/export.qtpl:84
|
||||
qw422016.N().S(`{`)
|
||||
//line app/vmselect/prometheus/export.qtpl:86
|
||||
tags := mn.Tags
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:87
|
||||
qw422016.N().Z(tags[0].Key)
|
||||
//line app/vmselect/prometheus/export.qtpl:87
|
||||
qw422016.N().S(`=`)
|
||||
//line app/vmselect/prometheus/export.qtpl:87
|
||||
qw422016.N().QZ(tags[0].Value)
|
||||
//line app/vmselect/prometheus/export.qtpl:88
|
||||
tags = tags[1:]
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:89
|
||||
for i := range tags {
|
||||
//line app/vmselect/prometheus/export.qtpl:90
|
||||
tag := &tags[i]
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:90
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/export.qtpl:91
|
||||
qw422016.N().Z(tag.Key)
|
||||
//line app/vmselect/prometheus/export.qtpl:91
|
||||
qw422016.N().S(`=`)
|
||||
//line app/vmselect/prometheus/export.qtpl:91
|
||||
qw422016.N().QZ(tag.Value)
|
||||
//line app/vmselect/prometheus/export.qtpl:92
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:92
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/export.qtpl:94
|
||||
}
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
func writeprometheusMetricName(qq422016 qtio422016.Writer, mn *storage.MetricName) {
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
streamprometheusMetricName(qw422016, mn)
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
func prometheusMetricName(mn *storage.MetricName) string {
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
writeprometheusMetricName(qb422016, mn)
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/export.qtpl:95
|
||||
}
|
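
A quick illustration (not part of the commit) of the per-series encodings produced by the generated writers above. It assumes only what the templates expose: netstorage.Result with exported MetricName, Values and Timestamps fields, and MetricName.MetricGroup as a byte slice. The sketch would live in a _test.go file next to the generated code:

// export_formats_example_test.go - a hedged sketch, not shipped with this change.
package prometheus

import (
	"fmt"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
)

func TestExportLineFormatsSketch(t *testing.T) {
	var rs netstorage.Result
	rs.MetricName.MetricGroup = []byte("http_requests_total")
	rs.Values = []float64{1, 2, 3}
	rs.Timestamps = []int64{1000, 2000, 3000}

	// Default /api/v1/export encoding: one JSON object per series, roughly
	// {"metric":{"__name__":"http_requests_total"},"values":[1,2,3],"timestamps":[1000,2000,3000]}
	fmt.Println(ExportJSONLine(&rs))

	// format=prometheus encoding: plain-text exposition lines instead of JSON.
	fmt.Println(ExportPrometheusLine(&rs))
}
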
16
app/vmselect/prometheus/federate.qtpl
Normal file
|
@ -0,0 +1,16 @@
|
{% import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
) %}

{% stripspace %}

// Federate writes rs in /federate format.
// See https://prometheus.io/docs/prometheus/latest/federation/
{% func Federate(rs *netstorage.Result) %}
{% if len(rs.Timestamps) == 0 || len(rs.Values) == 0 %}{% return %}{% endif %}
{%= prometheusMetricName(&rs.MetricName) %}{% space %}
{%f= rs.Values[len(rs.Values)-1] %}{% space %}
{%d= int(rs.Timestamps[len(rs.Timestamps)-1]) %}{% newline %}
{% endfunc %}

{% endstripspace %}
75
app/vmselect/prometheus/federate.qtpl.go
Normal file
|
@ -0,0 +1,75 @@
|
|||
// Code generated by qtc from "federate.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:1
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:1
|
||||
import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
)
|
||||
|
||||
// Federate writes rs in /federate format. See https://prometheus.io/docs/prometheus/latest/federation/
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:9
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:9
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:9
|
||||
func StreamFederate(qw422016 *qt422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/federate.qtpl:10
|
||||
if len(rs.Timestamps) == 0 || len(rs.Values) == 0 {
|
||||
//line app/vmselect/prometheus/federate.qtpl:10
|
||||
return
|
||||
//line app/vmselect/prometheus/federate.qtpl:10
|
||||
}
|
||||
//line app/vmselect/prometheus/federate.qtpl:11
|
||||
streamprometheusMetricName(qw422016, &rs.MetricName)
|
||||
//line app/vmselect/prometheus/federate.qtpl:11
|
||||
qw422016.N().S(` `)
|
||||
//line app/vmselect/prometheus/federate.qtpl:12
|
||||
qw422016.N().F(rs.Values[len(rs.Values)-1])
|
||||
//line app/vmselect/prometheus/federate.qtpl:12
|
||||
qw422016.N().S(` `)
|
||||
//line app/vmselect/prometheus/federate.qtpl:13
|
||||
qw422016.N().D(int(rs.Timestamps[len(rs.Timestamps)-1]))
|
||||
//line app/vmselect/prometheus/federate.qtpl:13
|
||||
qw422016.N().S(`
|
||||
`)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
func WriteFederate(qq422016 qtio422016.Writer, rs *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
StreamFederate(qw422016, rs)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
func Federate(rs *netstorage.Result) string {
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
WriteFederate(qb422016, rs)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/federate.qtpl:14
|
||||
}
|
15
app/vmselect/prometheus/label_values_response.qtpl
Normal file
|
@ -0,0 +1,15 @@
|
{% stripspace %}
LabelValuesResponse generates response for /api/v1/label/<labelName>/values .
See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
{% func LabelValuesResponse(labelValues []string) %}
{
"status":"success",
"data":[
{% for i, labelValue := range labelValues %}
{%q= labelValue %}
{% if i+1 < len(labelValues) %},{% endif %}
{% endfor %}
]
}
{% endfunc %}
{% endstripspace %}
67
app/vmselect/prometheus/label_values_response.qtpl.go
Normal file
|
@ -0,0 +1,67 @@
|
|||
// Code generated by qtc from "label_values_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
// LabelValuesResponse generates response for /api/v1/label/<labelName>/values. See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
|
||||
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:4
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:4
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:4
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:4
|
||||
func StreamLabelValuesResponse(qw422016 *qt422016.Writer, labelValues []string) {
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:4
|
||||
qw422016.N().S(`{"status":"success","data":[`)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:8
|
||||
for i, labelValue := range labelValues {
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:9
|
||||
qw422016.N().Q(labelValue)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:10
|
||||
if i+1 < len(labelValues) {
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:10
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:10
|
||||
}
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:11
|
||||
}
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:11
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
func WriteLabelValuesResponse(qq422016 qtio422016.Writer, labelValues []string) {
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
StreamLabelValuesResponse(qw422016, labelValues)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
func LabelValuesResponse(labelValues []string) string {
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
WriteLabelValuesResponse(qb422016, labelValues)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/label_values_response.qtpl:14
|
||||
}
|
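
Because the generated LabelValuesResponse helper is a pure function of its input, its output shape is easy to pin down; a minimal sketch (an assumed _test.go file, not part of the commit):

package prometheus

import "testing"

func TestLabelValuesResponseSketch(t *testing.T) {
	got := LabelValuesResponse([]string{"dev", "prod"})
	want := `{"status":"success","data":["dev","prod"]}`
	if got != want {
		t.Fatalf("unexpected response: %s", got)
	}
}
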
15
app/vmselect/prometheus/labels_response.qtpl
Normal file
|
@ -0,0 +1,15 @@
|
{% stripspace %}
LabelsResponse generates response for /api/v1/labels .
See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names
{% func LabelsResponse(labels []string) %}
{
"status":"success",
"data":[
{% for i, label := range labels %}
{%q= label %}
{% if i+1 < len(labels) %},{% endif %}
{% endfor %}
]
}
{% endfunc %}
{% endstripspace %}
67
app/vmselect/prometheus/labels_response.qtpl.go
Normal file
|
@ -0,0 +1,67 @@
|
|||
// Code generated by qtc from "labels_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
// LabelsResponse generates response for /api/v1/labels. See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names
|
||||
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:4
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:4
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:4
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:4
|
||||
func StreamLabelsResponse(qw422016 *qt422016.Writer, labels []string) {
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:4
|
||||
qw422016.N().S(`{"status":"success","data":[`)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:8
|
||||
for i, label := range labels {
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:9
|
||||
qw422016.N().Q(label)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:10
|
||||
if i+1 < len(labels) {
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:10
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:10
|
||||
}
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:11
|
||||
}
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:11
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
func WriteLabelsResponse(qq422016 qtio422016.Writer, labels []string) {
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
StreamLabelsResponse(qw422016, labels)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
func LabelsResponse(labels []string) string {
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
WriteLabelsResponse(qb422016, labels)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/labels_response.qtpl:14
|
||||
}
|
550
app/vmselect/prometheus/prometheus.go
Normal file
|
@ -0,0 +1,550 @@
|
|||
package prometheus
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
"github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
var (
|
||||
maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum time for search query execution")
|
||||
maxQueryLen = flag.Int("search.maxQueryLen", 16*1024, "The maximum search query length in bytes")
|
||||
)
|
||||
|
||||
// Default step used if not set.
|
||||
const defaultStep = 5 * 60 * 1000
|
||||
|
||||
// Latency for data processing pipeline, i.e. the time between data is ingested
|
||||
// into the system and the time it becomes visible to search.
|
||||
const latencyOffset = 60 * 1000
|
||||
|
||||
// FederateHandler implements /federate . See https://prometheus.io/docs/prometheus/latest/federation/
|
||||
func FederateHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
startTime := time.Now()
|
||||
ct := currentTime()
|
||||
if err := r.ParseForm(); err != nil {
|
||||
return fmt.Errorf("cannot parse request form values: %s", err)
|
||||
}
|
||||
matches := r.Form["match[]"]
|
||||
maxLookback := getDuration(r, "max_lookback", defaultStep)
|
||||
start := getTime(r, "start", ct-maxLookback)
|
||||
end := getTime(r, "end", ct)
|
||||
deadline := getDeadline(r)
|
||||
if start >= end {
|
||||
start = end - defaultStep
|
||||
}
|
||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sq := &storage.SearchQuery{
|
||||
MinTimestamp: start,
|
||||
MaxTimestamp: end,
|
||||
TagFilterss: tagFilterss,
|
||||
}
|
||||
rss, err := netstorage.ProcessSearchQuery(sq, deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
|
||||
}
|
||||
|
||||
resultsCh := make(chan *quicktemplate.ByteBuffer)
|
||||
doneCh := make(chan error)
|
||||
go func() {
|
||||
err := rss.RunParallel(func(rs *netstorage.Result) {
|
||||
bb := quicktemplate.AcquireByteBuffer()
|
||||
WriteFederate(bb, rs)
|
||||
resultsCh <- bb
|
||||
})
|
||||
close(resultsCh)
|
||||
doneCh <- err
|
||||
}()
|
||||
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
for bb := range resultsCh {
|
||||
w.Write(bb.B)
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
}
|
||||
|
||||
err = <-doneCh
|
||||
if err != nil {
|
||||
return fmt.Errorf("error during data fetching: %s", err)
|
||||
}
|
||||
federateDuration.UpdateDuration(startTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
var federateDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/federate"}`)
|
||||
|
||||
// ExportHandler exports data in raw format from /api/v1/export.
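// The optional "format" query arg selects the encoding: the default is
// newline-delimited JSON objects, "prometheus" switches to plain-text
// exposition lines and "promapi" wraps the series in the same JSON envelope
// as /api/v1/query_range (illustrative request:
// GET /api/v1/export?match[]=up&format=prometheus).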
|
||||
func ExportHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
startTime := time.Now()
|
||||
ct := currentTime()
|
||||
if err := r.ParseForm(); err != nil {
|
||||
return fmt.Errorf("cannot parse request form values: %s", err)
|
||||
}
|
||||
matches := r.Form["match[]"]
|
||||
if len(matches) == 0 {
|
||||
// Maintain backwards compatibility
|
||||
match := r.FormValue("match")
|
||||
matches = []string{match}
|
||||
}
|
||||
start := getTime(r, "start", 0)
|
||||
end := getTime(r, "end", ct)
|
||||
format := r.FormValue("format")
|
||||
deadline := getDeadline(r)
|
||||
if start >= end {
|
||||
start = end - defaultStep
|
||||
}
|
||||
if err := exportHandler(w, matches, start, end, format, deadline); err != nil {
|
||||
return err
|
||||
}
|
||||
exportDuration.UpdateDuration(startTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
var exportDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export"}`)
|
||||
|
||||
func exportHandler(w http.ResponseWriter, matches []string, start, end int64, format string, deadline netstorage.Deadline) error {
|
||||
writeResponseFunc := WriteExportStdResponse
|
||||
writeLineFunc := WriteExportJSONLine
|
||||
contentType := "application/json"
|
||||
if format == "prometheus" {
|
||||
contentType = "text/plain"
|
||||
writeLineFunc = WriteExportPrometheusLine
|
||||
} else if format == "promapi" {
|
||||
writeResponseFunc = WriteExportPromAPIResponse
|
||||
writeLineFunc = WriteExportPromAPILine
|
||||
}
|
||||
|
||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sq := &storage.SearchQuery{
|
||||
MinTimestamp: start,
|
||||
MaxTimestamp: end,
|
||||
TagFilterss: tagFilterss,
|
||||
}
|
||||
rss, err := netstorage.ProcessSearchQuery(sq, deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
|
||||
}
|
||||
|
||||
resultsCh := make(chan *quicktemplate.ByteBuffer, runtime.GOMAXPROCS(-1))
|
||||
doneCh := make(chan error)
|
||||
go func() {
|
||||
err := rss.RunParallel(func(rs *netstorage.Result) {
|
||||
bb := quicktemplate.AcquireByteBuffer()
|
||||
writeLineFunc(bb, rs)
|
||||
resultsCh <- bb
|
||||
})
|
||||
close(resultsCh)
|
||||
doneCh <- err
|
||||
}()
|
||||
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
writeResponseFunc(w, resultsCh)
|
||||
|
||||
err = <-doneCh
|
||||
if err != nil {
|
||||
return fmt.Errorf("error during data fetching: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteHandler processes /api/v1/admin/tsdb/delete_series prometheus API request.
|
||||
//
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/api/#delete-series
|
||||
func DeleteHandler(r *http.Request) error {
|
||||
startTime := time.Now()
|
||||
if err := r.ParseForm(); err != nil {
|
||||
return fmt.Errorf("cannot parse request form values: %s", err)
|
||||
}
|
||||
if r.FormValue("start") != "" || r.FormValue("end") != "" {
|
||||
return fmt.Errorf("start and end aren't supported. Remove these args from the query in order to delete all the matching metrics")
|
||||
}
|
||||
matches := r.Form["match[]"]
|
||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sq := &storage.SearchQuery{
|
||||
TagFilterss: tagFilterss,
|
||||
}
|
||||
deletedCount, err := netstorage.DeleteSeries(sq)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot delete time series matching %q: %s", matches, err)
|
||||
}
|
||||
if deletedCount > 0 {
|
||||
promql.ResetRollupResultCache()
|
||||
}
|
||||
deleteDuration.UpdateDuration(startTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
var deleteDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/admin/tsdb/delete_series"}`)
|
||||
|
||||
// LabelValuesHandler processes /api/v1/label/<labelName>/values request.
|
||||
//
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/api/#querying-label-values
|
||||
func LabelValuesHandler(labelName string, w http.ResponseWriter, r *http.Request) error {
|
||||
startTime := time.Now()
|
||||
deadline := getDeadline(r)
|
||||
labelValues, err := netstorage.GetLabelValues(labelName, deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf(`cannot obtain label values for %q: %s`, labelName, err)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteLabelValuesResponse(w, labelValues)
|
||||
labelValuesDuration.UpdateDuration(startTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
var labelValuesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/label/{}/values"}`)
|
||||
|
||||
// LabelsHandler processes /api/v1/labels request.
|
||||
//
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/api/#getting-label-names
|
||||
func LabelsHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
startTime := time.Now()
|
||||
deadline := getDeadline(r)
|
||||
labels, err := netstorage.GetLabels(deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot obtain labels: %s", err)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteLabelsResponse(w, labels)
|
||||
labelsDuration.UpdateDuration(startTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
var labelsDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/labels"}`)
|
||||
|
||||
// SeriesCountHandler processes /api/v1/series/count request.
|
||||
func SeriesCountHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
startTime := time.Now()
|
||||
deadline := getDeadline(r)
|
||||
n, err := netstorage.GetSeriesCount(deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot obtain series count: %s", err)
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteSeriesCountResponse(w, n)
|
||||
seriesCountDuration.UpdateDuration(startTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
var seriesCountDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/series/count"}`)
|
||||
|
||||
// SeriesHandler processes /api/v1/series request.
|
||||
//
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers
|
||||
func SeriesHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
startTime := time.Now()
|
||||
ct := currentTime()
|
||||
|
||||
if err := r.ParseForm(); err != nil {
|
||||
return fmt.Errorf("cannot parse form values: %s", err)
|
||||
}
|
||||
matches := r.Form["match[]"]
|
||||
start := getTime(r, "start", ct-defaultStep)
|
||||
end := getTime(r, "end", ct)
|
||||
deadline := getDeadline(r)
|
||||
|
||||
tagFilterss, err := getTagFilterssFromMatches(matches)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if start >= end {
|
||||
start = end - defaultStep
|
||||
}
|
||||
sq := &storage.SearchQuery{
|
||||
MinTimestamp: start,
|
||||
MaxTimestamp: end,
|
||||
TagFilterss: tagFilterss,
|
||||
}
|
||||
rss, err := netstorage.ProcessSearchQuery(sq, deadline)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot fetch data for %q: %s", sq, err)
|
||||
}
|
||||
|
||||
resultsCh := make(chan *quicktemplate.ByteBuffer)
|
||||
doneCh := make(chan error)
|
||||
go func() {
|
||||
err := rss.RunParallel(func(rs *netstorage.Result) {
|
||||
bb := quicktemplate.AcquireByteBuffer()
|
||||
writemetricNameObject(bb, &rs.MetricName)
|
||||
resultsCh <- bb
|
||||
})
|
||||
close(resultsCh)
|
||||
doneCh <- err
|
||||
}()
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteSeriesResponse(w, resultsCh)
|
||||
|
||||
// Consume all the data from resultsCh in the event WriteSeriesResponse
|
||||
// fails to consume all the data.
|
||||
for bb := range resultsCh {
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
}
|
||||
|
||||
err = <-doneCh
|
||||
if err != nil {
|
||||
return fmt.Errorf("error during data fetching: %s", err)
|
||||
}
|
||||
seriesDuration.UpdateDuration(startTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
var seriesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/series"}`)
|
||||
|
||||
// QueryHandler processes /api/v1/query request.
|
||||
//
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
|
||||
func QueryHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
startTime := time.Now()
|
||||
ct := currentTime()
|
||||
|
||||
query := r.FormValue("query")
|
||||
start := getTime(r, "time", ct)
|
||||
step := getDuration(r, "step", latencyOffset)
|
||||
deadline := getDeadline(r)
|
||||
|
||||
if len(query) > *maxQueryLen {
|
||||
return fmt.Errorf(`too long query; got %d bytes; mustn't exceed %d bytes`, len(query), *maxQueryLen)
|
||||
}
|
||||
if ct-start < latencyOffset {
|
||||
start -= latencyOffset
|
||||
}
|
||||
if childQuery, windowStr, offsetStr := promql.IsMetricSelectorWithRollup(query); childQuery != "" {
|
||||
var window int64
|
||||
if len(windowStr) > 0 {
|
||||
var err error
|
||||
window, err = promql.DurationValue(windowStr, step)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
var offset int64
|
||||
if len(offsetStr) > 0 {
|
||||
var err error
|
||||
offset, err = promql.DurationValue(offsetStr, step)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
start -= offset
|
||||
end := start
|
||||
start = end - window
|
||||
if err := exportHandler(w, []string{childQuery}, start, end, "promapi", deadline); err != nil {
|
||||
return err
|
||||
}
|
||||
queryDuration.UpdateDuration(startTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
ec := promql.EvalConfig{
|
||||
Start: start,
|
||||
End: start,
|
||||
Step: step,
|
||||
Deadline: deadline,
|
||||
}
|
||||
result, err := promql.Exec(&ec, query)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot execute %q: %s", query, err)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteQueryResponse(w, result)
|
||||
queryDuration.UpdateDuration(startTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
var queryDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/query"}`)
|
||||
|
||||
// QueryRangeHandler processes /api/v1/query_range request.
|
||||
//
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
|
||||
func QueryRangeHandler(w http.ResponseWriter, r *http.Request) error {
|
||||
startTime := time.Now()
|
||||
ct := currentTime()
|
||||
|
||||
query := r.FormValue("query")
|
||||
start := getTime(r, "start", ct-defaultStep)
|
||||
end := getTime(r, "end", ct)
|
||||
step := getDuration(r, "step", defaultStep)
|
||||
deadline := getDeadline(r)
|
||||
mayCache := !getBool(r, "nocache")
|
||||
|
||||
// Validate input args.
|
||||
if len(query) > *maxQueryLen {
|
||||
return fmt.Errorf(`too long query; got %d bytes; mustn't exceed %d bytes`, len(query), *maxQueryLen)
|
||||
}
|
||||
if start > end {
|
||||
start = end
|
||||
}
|
||||
if err := promql.ValidateMaxPointsPerTimeseries(start, end, step); err != nil {
|
||||
return err
|
||||
}
|
||||
start, end = promql.AdjustStartEnd(start, end, step)
|
||||
|
||||
ec := promql.EvalConfig{
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
Deadline: deadline,
|
||||
MayCache: mayCache,
|
||||
}
|
||||
result, err := promql.Exec(&ec, query)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot execute %q: %s", query, err)
|
||||
}
|
||||
if ct-end < latencyOffset {
|
||||
adjustLastPoints(result)
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
WriteQueryRangeResponse(w, result)
|
||||
queryRangeDuration.UpdateDuration(startTime)
|
||||
return nil
|
||||
}
|
||||
|
||||
var queryRangeDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/query_range"}`)
|
||||
|
||||
// adjustLastPoints substitutes the last point values with the previous
|
||||
// point values, since the last points may contain garbage.
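// For example, a single series with Values = [5, 6, 7] has lastNonNaNIdx = 2,
// so only Values[2] is rewritten to Values[1] and the series becomes [5, 6, 6];
// the possibly incomplete most recent point then no longer distorts the graph tail.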
|
||||
func adjustLastPoints(tss []netstorage.Result) {
|
||||
if len(tss) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Search for the last non-NaN value across all the timeseries.
|
||||
lastNonNaNIdx := -1
|
||||
for i := range tss {
|
||||
r := &tss[i]
|
||||
j := len(r.Values) - 1
|
||||
for j >= 0 && math.IsNaN(r.Values[j]) {
|
||||
j--
|
||||
}
|
||||
if j > lastNonNaNIdx {
|
||||
lastNonNaNIdx = j
|
||||
}
|
||||
}
|
||||
if lastNonNaNIdx == -1 {
|
||||
// All timeseries contain only NaNs.
|
||||
return
|
||||
}
|
||||
|
||||
// Substitute last three values starting from lastNonNaNIdx
|
||||
// with the previous values for each timeseries.
|
||||
for i := range tss {
|
||||
r := &tss[i]
|
||||
for j := 0; j < 3; j++ {
|
||||
idx := lastNonNaNIdx + j
|
||||
if idx <= 0 || idx >= len(r.Values) {
|
||||
continue
|
||||
}
|
||||
r.Values[idx] = r.Values[idx-1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
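// getTime returns the time in unix milliseconds taken from the given request arg.
// The value may be either a floating-point unix timestamp in seconds
// (for example start=1562529662.678) or an RFC3339 string
// (for example start=2019-07-07T20:01:02Z); defaultValue is returned when the
// arg is missing, cannot be parsed or falls outside the supported range.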
func getTime(r *http.Request, argKey string, defaultValue int64) int64 {
|
||||
argValue := r.FormValue(argKey)
|
||||
if len(argValue) == 0 {
|
||||
return defaultValue
|
||||
}
|
||||
secs, err := strconv.ParseFloat(argValue, 64)
|
||||
if err != nil {
|
||||
// Try parsing string format
|
||||
t, err := time.Parse(time.RFC3339, argValue)
|
||||
if err != nil {
|
||||
return defaultValue
|
||||
}
|
||||
secs = float64(t.UnixNano()) / 1e9
|
||||
}
|
||||
msecs := int64(secs * 1e3)
|
||||
if msecs < minTimeMsecs || msecs > maxTimeMsecs {
|
||||
return defaultValue
|
||||
}
|
||||
return msecs
|
||||
}
|
||||
|
||||
const (
|
||||
// These values prevent overflow when storing msec-precision time in int64.
|
||||
minTimeMsecs = int64(-1<<63) / 1e6
|
||||
maxTimeMsecs = int64(1<<63-1) / 1e6
|
||||
)
|
||||
|
||||
func getDuration(r *http.Request, argKey string, defaultValue int64) int64 {
|
||||
argValue := r.FormValue(argKey)
|
||||
if len(argValue) == 0 {
|
||||
return defaultValue
|
||||
}
|
||||
secs, err := strconv.ParseFloat(argValue, 64)
|
||||
if err != nil {
|
||||
// Try parsing string format
|
||||
d, err := time.ParseDuration(argValue)
|
||||
if err != nil {
|
||||
return defaultValue
|
||||
}
|
||||
secs = d.Seconds()
|
||||
}
|
||||
msecs := int64(secs * 1e3)
|
||||
if msecs <= 0 || msecs > maxDurationMsecs {
|
||||
return defaultValue
|
||||
}
|
||||
return msecs
|
||||
}
|
||||
|
||||
const maxDurationMsecs = 100 * 365 * 24 * 3600 * 1000
|
||||
|
||||
func getDeadline(r *http.Request) netstorage.Deadline {
|
||||
d := getDuration(r, "timeout", 0)
|
||||
dMax := int64(maxQueryDuration.Seconds() * 1e3)
|
||||
if d <= 0 || d > dMax {
|
||||
d = dMax
|
||||
}
|
||||
timeout := time.Duration(d) * time.Millisecond
|
||||
return netstorage.NewDeadline(timeout)
|
||||
}
|
||||
|
||||
func getBool(r *http.Request, argKey string) bool {
|
||||
argValue := r.FormValue(argKey)
|
||||
switch strings.ToLower(argValue) {
|
||||
case "", "0", "f", "false", "no":
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func currentTime() int64 {
|
||||
return int64(time.Now().UTC().Unix()) * 1e3
|
||||
}
|
||||
|
||||
func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error) {
|
||||
tagFilterss := make([][]storage.TagFilter, 0, len(matches))
|
||||
for _, match := range matches {
|
||||
tagFilters, err := promql.ParseMetricSelector(match)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot parse %q: %s", match, err)
|
||||
}
|
||||
tagFilterss = append(tagFilterss, tagFilters)
|
||||
}
|
||||
return tagFilterss, nil
|
||||
}
|
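
All of the exported handlers above share the func(w http.ResponseWriter, r *http.Request) error shape (DeleteHandler takes only the request), so plugging them into a router needs a small error-reporting adapter. A minimal sketch under that assumption; the real vmselect routing lives elsewhere in this commit, and the port below is arbitrary:

package main

import (
	"log"
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/prometheus"
)

// wrap adapts the error-returning handlers to http.HandlerFunc.
func wrap(h func(http.ResponseWriter, *http.Request) error) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if err := h(w, r); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
}

func main() {
	http.Handle("/federate", wrap(prometheus.FederateHandler))
	http.Handle("/api/v1/export", wrap(prometheus.ExportHandler))
	http.Handle("/api/v1/query", wrap(prometheus.QueryHandler))
	http.Handle("/api/v1/query_range", wrap(prometheus.QueryRangeHandler))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
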
33
app/vmselect/prometheus/query_range_response.qtpl
Normal file
|
@ -0,0 +1,33 @@
|
{% import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
) %}

{% stripspace %}
QueryRangeResponse generates response for /api/v1/query_range.
See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
{% func QueryRangeResponse(rs []netstorage.Result) %}
{
"status":"success",
"data":{
"resultType":"matrix",
"result":[
{% if len(rs) > 0 %}
{%= queryRangeLine(&rs[0]) %}
{% code rs = rs[1:] %}
{% for i := range rs %}
,{%= queryRangeLine(&rs[i]) %}
{% endfor %}
{% endif %}
]
}
}
{% endfunc %}

{% func queryRangeLine(r *netstorage.Result) %}
{
"metric": {%= metricNameObject(&r.MetricName) %},
"values": {%= valuesWithTimestamps(r.Values, r.Timestamps) %}
}
{% endfunc %}

{% endstripspace %}
118
app/vmselect/prometheus/query_range_response.qtpl.go
Normal file
|
@ -0,0 +1,118 @@
|
|||
// Code generated by qtc from "query_range_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:1
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:1
|
||||
import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
)
|
||||
|
||||
// QueryRangeResponse generates response for /api/v1/query_range. See https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:8
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:8
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:8
|
||||
func StreamQueryRangeResponse(qw422016 *qt422016.Writer, rs []netstorage.Result) {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:8
|
||||
qw422016.N().S(`{"status":"success","data":{"resultType":"matrix","result":[`)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:14
|
||||
if len(rs) > 0 {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:15
|
||||
streamqueryRangeLine(qw422016, &rs[0])
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:16
|
||||
rs = rs[1:]
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:17
|
||||
for i := range rs {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:17
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:18
|
||||
streamqueryRangeLine(qw422016, &rs[i])
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:19
|
||||
}
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:20
|
||||
}
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:20
|
||||
qw422016.N().S(`]}}`)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
func WriteQueryRangeResponse(qq422016 qtio422016.Writer, rs []netstorage.Result) {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
StreamQueryRangeResponse(qw422016, rs)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
func QueryRangeResponse(rs []netstorage.Result) string {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
WriteQueryRangeResponse(qb422016, rs)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:24
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:26
|
||||
func streamqueryRangeLine(qw422016 *qt422016.Writer, r *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:26
|
||||
qw422016.N().S(`{"metric":`)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:28
|
||||
streammetricNameObject(qw422016, &r.MetricName)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:28
|
||||
qw422016.N().S(`,"values":`)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:29
|
||||
streamvaluesWithTimestamps(qw422016, r.Values, r.Timestamps)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:29
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
func writequeryRangeLine(qq422016 qtio422016.Writer, r *netstorage.Result) {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
streamqueryRangeLine(qw422016, r)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
func queryRangeLine(r *netstorage.Result) string {
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
writequeryRangeLine(qb422016, r)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/query_range_response.qtpl:31
|
||||
}
|
32
app/vmselect/prometheus/query_response.qtpl
Normal file
|
@ -0,0 +1,32 @@
|
{% import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
) %}

{% stripspace %}
QueryResponse generates response for /api/v1/query.
See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
{% func QueryResponse(rs []netstorage.Result) %}
{
"status":"success",
"data":{
"resultType":"vector",
"result":[
{% if len(rs) > 0 %}
{
"metric": {%= metricNameObject(&rs[0].MetricName) %},
"value": {%= metricRow(rs[0].Timestamps[0], rs[0].Values[0]) %}
}
{% code rs = rs[1:] %}
{% for i := range rs %}
{% code r := &rs[i] %}
,{
"metric": {%= metricNameObject(&r.MetricName) %},
"value": {%= metricRow(r.Timestamps[0], r.Values[0]) %}
}
{% endfor %}
{% endif %}
]
}
}
{% endfunc %}
{% endstripspace %}
94
app/vmselect/prometheus/query_response.qtpl.go
Normal file
|
@ -0,0 +1,94 @@
|
|||
// Code generated by qtc from "query_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:1
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:1
|
||||
import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
)
|
||||
|
||||
// QueryResponse generates response for /api/v1/query. See https://prometheus.io/docs/prometheus/latest/querying/api/#instant-queries
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:8
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:8
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:8
|
||||
func StreamQueryResponse(qw422016 *qt422016.Writer, rs []netstorage.Result) {
|
||||
//line app/vmselect/prometheus/query_response.qtpl:8
|
||||
qw422016.N().S(`{"status":"success","data":{"resultType":"vector","result":[`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:14
|
||||
if len(rs) > 0 {
|
||||
//line app/vmselect/prometheus/query_response.qtpl:14
|
||||
qw422016.N().S(`{"metric":`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:16
|
||||
streammetricNameObject(qw422016, &rs[0].MetricName)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:16
|
||||
qw422016.N().S(`,"value":`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:17
|
||||
streammetricRow(qw422016, rs[0].Timestamps[0], rs[0].Values[0])
|
||||
//line app/vmselect/prometheus/query_response.qtpl:17
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:19
|
||||
rs = rs[1:]
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:20
|
||||
for i := range rs {
|
||||
//line app/vmselect/prometheus/query_response.qtpl:21
|
||||
r := &rs[i]
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:21
|
||||
qw422016.N().S(`,{"metric":`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:23
|
||||
streammetricNameObject(qw422016, &r.MetricName)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:23
|
||||
qw422016.N().S(`,"value":`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:24
|
||||
streammetricRow(qw422016, r.Timestamps[0], r.Values[0])
|
||||
//line app/vmselect/prometheus/query_response.qtpl:24
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:26
|
||||
}
|
||||
//line app/vmselect/prometheus/query_response.qtpl:27
|
||||
}
|
||||
//line app/vmselect/prometheus/query_response.qtpl:27
|
||||
qw422016.N().S(`]}}`)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
func WriteQueryResponse(qq422016 qtio422016.Writer, rs []netstorage.Result) {
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
StreamQueryResponse(qw422016, rs)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
func QueryResponse(rs []netstorage.Result) string {
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
WriteQueryResponse(qb422016, rs)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/query_response.qtpl:31
|
||||
}
|
9
app/vmselect/prometheus/series_count_response.qtpl
Normal file
|
@ -0,0 +1,9 @@
|
{% stripspace %}
SeriesCountResponse generates response for /api/v1/series/count .
{% func SeriesCountResponse(n uint64) %}
{
"status":"success",
"data":[{%d int(n) %}]
}
{% endfunc %}
{% endstripspace %}
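
Since stripspace removes all template whitespace, the rendered payload is a single compact JSON object; a sketch of a testable example (an assumed _test.go file in the same package, not part of the commit):

package prometheus

import "fmt"

func ExampleSeriesCountResponse() {
	fmt.Println(SeriesCountResponse(3))
	// Output: {"status":"success","data":[3]}
}
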
57
app/vmselect/prometheus/series_count_response.qtpl.go
Normal file
|
@ -0,0 +1,57 @@
|
|||
// Code generated by qtc from "series_count_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
// SeriesCountResponse generates response for /api/v1/series/count .
|
||||
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:3
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:3
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:3
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:3
|
||||
func StreamSeriesCountResponse(qw422016 *qt422016.Writer, n uint64) {
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:3
|
||||
qw422016.N().S(`{"status":"success","data":[`)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:6
|
||||
qw422016.N().D(int(n))
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:6
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
func WriteSeriesCountResponse(qq422016 qtio422016.Writer, n uint64) {
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
StreamSeriesCountResponse(qw422016, n)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
func SeriesCountResponse(n uint64) string {
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
WriteSeriesCountResponse(qb422016, n)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/series_count_response.qtpl:8
|
||||
}
|
24
app/vmselect/prometheus/series_response.qtpl
Normal file
|
@ -0,0 +1,24 @@
|
{% import (
"github.com/valyala/quicktemplate"
) %}

{% stripspace %}
SeriesResponse generates response for /api/v1/series.
See https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers
{% func SeriesResponse(resultsCh <-chan *quicktemplate.ByteBuffer) %}
{
"status":"success",
"data":[
{% code bb, ok := <-resultsCh %}
{% if ok %}
{%z= bb.B %}
{% code quicktemplate.ReleaseByteBuffer(bb) %}
{% for bb := range resultsCh %}
,{%z= bb.B %}
{% code quicktemplate.ReleaseByteBuffer(bb) %}
{% endfor %}
{% endif %}
]
}
{% endfunc %}
{% endstripspace %}
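
The template consumes a channel of pre-rendered per-series buffers, which is how SeriesHandler streams results out of RunParallel without first collecting them in memory. A stand-alone sketch of that producer/consumer contract (illustrative values only, not part of the commit):

package prometheus

import (
	"fmt"

	"github.com/valyala/quicktemplate"
)

// seriesResponseSketch feeds two pre-rendered metric objects into SeriesResponse;
// the template writes them out comma-separated and releases every buffer it receives.
func seriesResponseSketch() {
	resultsCh := make(chan *quicktemplate.ByteBuffer, 2)
	for _, s := range []string{`{"__name__":"up","job":"api"}`, `{"__name__":"up","job":"db"}`} {
		bb := quicktemplate.AcquireByteBuffer()
		bb.B = append(bb.B, s...)
		resultsCh <- bb
	}
	close(resultsCh)

	// {"status":"success","data":[{"__name__":"up","job":"api"},{"__name__":"up","job":"db"}]}
	fmt.Println(SeriesResponse(resultsCh))
}
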
83
app/vmselect/prometheus/series_response.qtpl.go
Normal file
|
@ -0,0 +1,83 @@
|
|||
// Code generated by qtc from "series_response.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:1
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:1
|
||||
import (
|
||||
"github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
// SeriesResponse generates response for /api/v1/series. See https://prometheus.io/docs/prometheus/latest/querying/api/#finding-series-by-label-matchers
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:8
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:8
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:8
|
||||
func StreamSeriesResponse(qw422016 *qt422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
||||
//line app/vmselect/prometheus/series_response.qtpl:8
|
||||
qw422016.N().S(`{"status":"success","data":[`)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:12
|
||||
bb, ok := <-resultsCh
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:13
|
||||
if ok {
|
||||
//line app/vmselect/prometheus/series_response.qtpl:14
|
||||
qw422016.N().Z(bb.B)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:15
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:16
|
||||
for bb := range resultsCh {
|
||||
//line app/vmselect/prometheus/series_response.qtpl:16
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:17
|
||||
qw422016.N().Z(bb.B)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:18
|
||||
quicktemplate.ReleaseByteBuffer(bb)
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:19
|
||||
}
|
||||
//line app/vmselect/prometheus/series_response.qtpl:20
|
||||
}
|
||||
//line app/vmselect/prometheus/series_response.qtpl:20
|
||||
qw422016.N().S(`]}`)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
func WriteSeriesResponse(qq422016 qtio422016.Writer, resultsCh <-chan *quicktemplate.ByteBuffer) {
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
StreamSeriesResponse(qw422016, resultsCh)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
func SeriesResponse(resultsCh <-chan *quicktemplate.ByteBuffer) string {
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
WriteSeriesResponse(qb422016, resultsCh)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/series_response.qtpl:23
|
||||
}
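For illustration (not part of this commit), a minimal sketch of how the streaming SeriesResponse helper is driven by a channel of pre-marshaled series objects; the generated code releases each received ByteBuffer itself, so the producer must not. The file name and sample series are hypothetical.

// sketch_series_response_example_test.go (illustrative only)
package prometheus

import (
	"fmt"

	"github.com/valyala/quicktemplate"
)

func ExampleSeriesResponse() {
	resultsCh := make(chan *quicktemplate.ByteBuffer, 2)
	for _, s := range []string{`{"__name__":"up"}`, `{"__name__":"up","job":"node"}`} {
		bb := quicktemplate.AcquireByteBuffer()
		bb.B = append(bb.B[:0], s...)
		resultsCh <- bb // released inside SeriesResponse
	}
	close(resultsCh)
	fmt.Println(SeriesResponse(resultsCh))
	// Output: {"status":"success","data":[{"__name__":"up"},{"__name__":"up","job":"node"}]}
}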
|
47
app/vmselect/prometheus/util.qtpl
Normal file
|
@@ -0,0 +1,47 @@
|
|||
{% import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
) %}
|
||||
|
||||
{% stripspace %}
|
||||
|
||||
{% func metricNameObject(mn *storage.MetricName) %}
|
||||
{
|
||||
{% if len(mn.MetricGroup) > 0 %}
|
||||
"__name__":{%qz= mn.MetricGroup %}{% if len(mn.Tags) > 0 %},{% endif %}
|
||||
{% endif %}
|
||||
{% for j := range mn.Tags %}
|
||||
{% code tag := &mn.Tags[j] %}
|
||||
{%qz= tag.Key %}:{%qz= tag.Value %}{% if j+1 < len(mn.Tags) %},{% endif %}
|
||||
{% endfor %}
|
||||
}
|
||||
{% endfunc %}
|
||||
|
||||
{% func metricRow(timestamp int64, value float64) %}
|
||||
[{%f= float64(timestamp)/1e3 %},"{%f= value %}"]
|
||||
{% endfunc %}
|
||||
|
||||
{% func valuesWithTimestamps(values []float64, timestamps []int64) %}
|
||||
[
|
||||
{% if len(values) == 0 %}
|
||||
{% return %}
|
||||
{% endif %}
|
||||
{% code /* inline metricRow call here for the sake of performance optimization */ %}
|
||||
[{%f= float64(timestamps[0])/1e3 %},"{%f= values[0] %}"]
|
||||
{% code
|
||||
timestamps = timestamps[1:]
|
||||
values = values[1:]
|
||||
%}
|
||||
{% if len(values) > 0 %}
|
||||
{%code
|
||||
// Remove bounds check inside the loop below
|
||||
_ = timestamps[len(values)-1]
|
||||
%}
|
||||
{% for i, v := range values %}
|
||||
{% code /* inline metricRow call here for the sake of performance optimization */ %}
|
||||
,[{%f= float64(timestamps[i])/1e3 %},"{%f= v %}"]
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
]
|
||||
{% endfunc %}
|
||||
|
||||
{% endstripspace %}
|
215
app/vmselect/prometheus/util.qtpl.go
Normal file
|
@@ -0,0 +1,215 @@
|
|||
// Code generated by qtc from "util.qtpl". DO NOT EDIT.
|
||||
// See https://github.com/valyala/quicktemplate for details.
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:1
|
||||
package prometheus
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:1
|
||||
import (
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:7
|
||||
import (
|
||||
qtio422016 "io"
|
||||
|
||||
qt422016 "github.com/valyala/quicktemplate"
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:7
|
||||
var (
|
||||
_ = qtio422016.Copy
|
||||
_ = qt422016.AcquireByteBuffer
|
||||
)
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:7
|
||||
func streammetricNameObject(qw422016 *qt422016.Writer, mn *storage.MetricName) {
|
||||
//line app/vmselect/prometheus/util.qtpl:7
|
||||
qw422016.N().S(`{`)
|
||||
//line app/vmselect/prometheus/util.qtpl:9
|
||||
if len(mn.MetricGroup) > 0 {
|
||||
//line app/vmselect/prometheus/util.qtpl:9
|
||||
qw422016.N().S(`"__name__":`)
|
||||
//line app/vmselect/prometheus/util.qtpl:10
|
||||
qw422016.N().QZ(mn.MetricGroup)
|
||||
//line app/vmselect/prometheus/util.qtpl:10
|
||||
if len(mn.Tags) > 0 {
|
||||
//line app/vmselect/prometheus/util.qtpl:10
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/util.qtpl:10
|
||||
}
|
||||
//line app/vmselect/prometheus/util.qtpl:11
|
||||
}
|
||||
//line app/vmselect/prometheus/util.qtpl:12
|
||||
for j := range mn.Tags {
|
||||
//line app/vmselect/prometheus/util.qtpl:13
|
||||
tag := &mn.Tags[j]
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:14
|
||||
qw422016.N().QZ(tag.Key)
|
||||
//line app/vmselect/prometheus/util.qtpl:14
|
||||
qw422016.N().S(`:`)
|
||||
//line app/vmselect/prometheus/util.qtpl:14
|
||||
qw422016.N().QZ(tag.Value)
|
||||
//line app/vmselect/prometheus/util.qtpl:14
|
||||
if j+1 < len(mn.Tags) {
|
||||
//line app/vmselect/prometheus/util.qtpl:14
|
||||
qw422016.N().S(`,`)
|
||||
//line app/vmselect/prometheus/util.qtpl:14
|
||||
}
|
||||
//line app/vmselect/prometheus/util.qtpl:15
|
||||
}
|
||||
//line app/vmselect/prometheus/util.qtpl:15
|
||||
qw422016.N().S(`}`)
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
func writemetricNameObject(qq422016 qtio422016.Writer, mn *storage.MetricName) {
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
streammetricNameObject(qw422016, mn)
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
func metricNameObject(mn *storage.MetricName) string {
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
writemetricNameObject(qb422016, mn)
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/util.qtpl:17
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:19
|
||||
func streammetricRow(qw422016 *qt422016.Writer, timestamp int64, value float64) {
|
||||
//line app/vmselect/prometheus/util.qtpl:19
|
||||
qw422016.N().S(`[`)
|
||||
//line app/vmselect/prometheus/util.qtpl:20
|
||||
qw422016.N().F(float64(timestamp) / 1e3)
|
||||
//line app/vmselect/prometheus/util.qtpl:20
|
||||
qw422016.N().S(`,"`)
|
||||
//line app/vmselect/prometheus/util.qtpl:20
|
||||
qw422016.N().F(value)
|
||||
//line app/vmselect/prometheus/util.qtpl:20
|
||||
qw422016.N().S(`"]`)
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
func writemetricRow(qq422016 qtio422016.Writer, timestamp int64, value float64) {
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
streammetricRow(qw422016, timestamp, value)
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
func metricRow(timestamp int64, value float64) string {
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
writemetricRow(qb422016, timestamp, value)
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/util.qtpl:21
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:23
|
||||
func streamvaluesWithTimestamps(qw422016 *qt422016.Writer, values []float64, timestamps []int64) {
|
||||
//line app/vmselect/prometheus/util.qtpl:23
|
||||
qw422016.N().S(`[`)
|
||||
//line app/vmselect/prometheus/util.qtpl:25
|
||||
if len(values) == 0 {
|
||||
//line app/vmselect/prometheus/util.qtpl:26
|
||||
return
|
||||
//line app/vmselect/prometheus/util.qtpl:27
|
||||
}
|
||||
//line app/vmselect/prometheus/util.qtpl:28
|
||||
/* inline metricRow call here for the sake of performance optimization */
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:28
|
||||
qw422016.N().S(`[`)
|
||||
//line app/vmselect/prometheus/util.qtpl:29
|
||||
qw422016.N().F(float64(timestamps[0]) / 1e3)
|
||||
//line app/vmselect/prometheus/util.qtpl:29
|
||||
qw422016.N().S(`,"`)
|
||||
//line app/vmselect/prometheus/util.qtpl:29
|
||||
qw422016.N().F(values[0])
|
||||
//line app/vmselect/prometheus/util.qtpl:29
|
||||
qw422016.N().S(`"]`)
|
||||
//line app/vmselect/prometheus/util.qtpl:31
|
||||
timestamps = timestamps[1:]
|
||||
values = values[1:]
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:34
|
||||
if len(values) > 0 {
|
||||
//line app/vmselect/prometheus/util.qtpl:36
|
||||
// Remove bounds check inside the loop below
|
||||
_ = timestamps[len(values)-1]
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:39
|
||||
for i, v := range values {
|
||||
//line app/vmselect/prometheus/util.qtpl:40
|
||||
/* inline metricRow call here for the sake of performance optimization */
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:40
|
||||
qw422016.N().S(`,[`)
|
||||
//line app/vmselect/prometheus/util.qtpl:41
|
||||
qw422016.N().F(float64(timestamps[i]) / 1e3)
|
||||
//line app/vmselect/prometheus/util.qtpl:41
|
||||
qw422016.N().S(`,"`)
|
||||
//line app/vmselect/prometheus/util.qtpl:41
|
||||
qw422016.N().F(v)
|
||||
//line app/vmselect/prometheus/util.qtpl:41
|
||||
qw422016.N().S(`"]`)
|
||||
//line app/vmselect/prometheus/util.qtpl:42
|
||||
}
|
||||
//line app/vmselect/prometheus/util.qtpl:43
|
||||
}
|
||||
//line app/vmselect/prometheus/util.qtpl:43
|
||||
qw422016.N().S(`]`)
|
||||
//line app/vmselect/prometheus/util.qtpl:45
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:45
|
||||
func writevaluesWithTimestamps(qq422016 qtio422016.Writer, values []float64, timestamps []int64) {
|
||||
//line app/vmselect/prometheus/util.qtpl:45
|
||||
qw422016 := qt422016.AcquireWriter(qq422016)
|
||||
//line app/vmselect/prometheus/util.qtpl:45
|
||||
streamvaluesWithTimestamps(qw422016, values, timestamps)
|
||||
//line app/vmselect/prometheus/util.qtpl:45
|
||||
qt422016.ReleaseWriter(qw422016)
|
||||
//line app/vmselect/prometheus/util.qtpl:45
|
||||
}
|
||||
|
||||
//line app/vmselect/prometheus/util.qtpl:45
|
||||
func valuesWithTimestamps(values []float64, timestamps []int64) string {
|
||||
//line app/vmselect/prometheus/util.qtpl:45
|
||||
qb422016 := qt422016.AcquireByteBuffer()
|
||||
//line app/vmselect/prometheus/util.qtpl:45
|
||||
writevaluesWithTimestamps(qb422016, values, timestamps)
|
||||
//line app/vmselect/prometheus/util.qtpl:45
|
||||
qs422016 := string(qb422016.B)
|
||||
//line app/vmselect/prometheus/util.qtpl:45
|
||||
qt422016.ReleaseByteBuffer(qb422016)
|
||||
//line app/vmselect/prometheus/util.qtpl:45
|
||||
return qs422016
|
||||
//line app/vmselect/prometheus/util.qtpl:45
|
||||
}
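For reference, a minimal sketch (not part of this commit) of the JSON shape produced by the valuesWithTimestamps helper above: input timestamps are in milliseconds and are emitted as seconds, while values are quoted as strings. The exact float rendering depends on quicktemplate's F helper; the printed line below assumes its default formatting. The file name is hypothetical.

// sketch_util_example_test.go (illustrative only)
package prometheus

import "fmt"

func Example_valuesWithTimestamps() {
	s := valuesWithTimestamps([]float64{1.5, 2}, []int64{1546300800000, 1546300815000})
	fmt.Println(s)
	// Prints: [[1546300800,"1.5"],[1546300815,"2"]]
}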
|
469
app/vmselect/promql/aggr.go
Normal file
|
@@ -0,0 +1,469 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var aggrFuncs = map[string]aggrFunc{
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/operators/#aggregation-operators
|
||||
"sum": newAggrFunc(aggrFuncSum),
|
||||
"min": newAggrFunc(aggrFuncMin),
|
||||
"max": newAggrFunc(aggrFuncMax),
|
||||
"avg": newAggrFunc(aggrFuncAvg),
|
||||
"stddev": newAggrFunc(aggrFuncStddev),
|
||||
"stdvar": newAggrFunc(aggrFuncStdvar),
|
||||
"count": newAggrFunc(aggrFuncCount),
|
||||
"count_values": aggrFuncCountValues,
|
||||
"bottomk": newAggrFuncTopK(true),
|
||||
"topk": newAggrFuncTopK(false),
|
||||
"quantile": aggrFuncQuantile,
|
||||
|
||||
// Extended PromQL funcs
|
||||
"median": aggrFuncMedian,
|
||||
"limitk": aggrFuncLimitK,
|
||||
"distinct": newAggrFunc(aggrFuncDistinct),
|
||||
}
|
||||
|
||||
type aggrFunc func(afa *aggrFuncArg) ([]*timeseries, error)
|
||||
|
||||
type aggrFuncArg struct {
|
||||
args [][]*timeseries
|
||||
ae *aggrFuncExpr
|
||||
ec *EvalConfig
|
||||
}
|
||||
|
||||
func getAggrFunc(s string) aggrFunc {
|
||||
s = strings.ToLower(s)
|
||||
return aggrFuncs[s]
|
||||
}
|
||||
|
||||
func isAggrFunc(s string) bool {
|
||||
return getAggrFunc(s) != nil
|
||||
}
|
||||
|
||||
func isAggrFuncModifier(s string) bool {
|
||||
s = strings.ToLower(s)
|
||||
switch s {
|
||||
case "by", "without":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func newAggrFunc(afe func(tss []*timeseries) []*timeseries) aggrFunc {
|
||||
return func(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
args := afa.args
|
||||
if err := expectTransformArgsNum(args, 1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return aggrFuncExt(afe, args[0], &afa.ae.Modifier, false)
|
||||
}
|
||||
}
|
||||
|
||||
func aggrFuncExt(afe func(tss []*timeseries) []*timeseries, argOrig []*timeseries, modifier *modifierExpr, keepOriginal bool) ([]*timeseries, error) {
|
||||
arg := copyTimeseriesMetricNames(argOrig)
|
||||
|
||||
// Filter out superfluous tags.
|
||||
var groupTags []string
|
||||
groupOp := "by"
|
||||
if modifier.Op != "" {
|
||||
groupTags = modifier.Args
|
||||
groupOp = strings.ToLower(modifier.Op)
|
||||
}
|
||||
switch groupOp {
|
||||
case "by":
|
||||
for _, ts := range arg {
|
||||
ts.MetricName.RemoveTagsOn(groupTags)
|
||||
}
|
||||
case "without":
|
||||
for _, ts := range arg {
|
||||
ts.MetricName.RemoveTagsIgnoring(groupTags)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf(`unknown modifier: %q`, groupOp)
|
||||
}
|
||||
|
||||
// Perform grouping.
|
||||
m := make(map[string][]*timeseries)
|
||||
bb := bbPool.Get()
|
||||
for i, ts := range arg {
|
||||
bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
|
||||
if keepOriginal {
|
||||
ts = argOrig[i]
|
||||
}
|
||||
m[string(bb.B)] = append(m[string(bb.B)], ts)
|
||||
}
|
||||
bbPool.Put(bb)
|
||||
|
||||
rvs := make([]*timeseries, 0, len(m))
|
||||
for _, tss := range m {
|
||||
rv := afe(tss)
|
||||
rvs = append(rvs, rv...)
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
func aggrFuncSum(tss []*timeseries) []*timeseries {
|
||||
if len(tss) == 1 {
|
||||
// Fast path - nothing to sum.
|
||||
return tss
|
||||
}
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
sum := float64(0)
|
||||
count := 0
|
||||
for _, ts := range tss {
|
||||
if math.IsNaN(ts.Values[i]) {
|
||||
continue
|
||||
}
|
||||
sum += ts.Values[i]
|
||||
count++
|
||||
}
|
||||
if count == 0 {
|
||||
sum = nan
|
||||
}
|
||||
dst.Values[i] = sum
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncMin(tss []*timeseries) []*timeseries {
|
||||
if len(tss) == 1 {
|
||||
// Fast path - nothing to min.
|
||||
return tss
|
||||
}
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
min := dst.Values[i]
|
||||
for _, ts := range tss {
|
||||
if math.IsNaN(min) || ts.Values[i] < min {
|
||||
min = ts.Values[i]
|
||||
}
|
||||
}
|
||||
dst.Values[i] = min
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncMax(tss []*timeseries) []*timeseries {
|
||||
if len(tss) == 1 {
|
||||
// Fast path - nothing to max.
|
||||
return tss
|
||||
}
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
max := dst.Values[i]
|
||||
for _, ts := range tss {
|
||||
if math.IsNaN(max) || ts.Values[i] > max {
|
||||
max = ts.Values[i]
|
||||
}
|
||||
}
|
||||
dst.Values[i] = max
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncAvg(tss []*timeseries) []*timeseries {
|
||||
if len(tss) == 1 {
|
||||
// Fast path - nothing to avg.
|
||||
return tss
|
||||
}
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
// Do not use `Rapid calculation methods` at https://en.wikipedia.org/wiki/Standard_deviation,
|
||||
// since they are slower and bring no obvious benefit in precision.
|
||||
var sum float64
|
||||
count := 0
|
||||
for _, ts := range tss {
|
||||
v := ts.Values[i]
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
count++
|
||||
sum += v
|
||||
}
|
||||
avg := nan
|
||||
if count > 0 {
|
||||
avg = sum / float64(count)
|
||||
}
|
||||
dst.Values[i] = avg
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncStddev(tss []*timeseries) []*timeseries {
|
||||
if len(tss) == 1 {
|
||||
// Fast path - stddev over a single time series is zero
|
||||
values := tss[0].Values
|
||||
for i, v := range values {
|
||||
if !math.IsNaN(v) {
|
||||
values[i] = 0
|
||||
}
|
||||
}
|
||||
return tss
|
||||
}
|
||||
rvs := aggrFuncStdvar(tss)
|
||||
dst := rvs[0]
|
||||
for i, v := range dst.Values {
|
||||
dst.Values[i] = math.Sqrt(v)
|
||||
}
|
||||
return rvs
|
||||
}
|
||||
|
||||
func aggrFuncStdvar(tss []*timeseries) []*timeseries {
|
||||
if len(tss) == 1 {
|
||||
// Fast path - stdvar over a single time series is zero
|
||||
values := tss[0].Values
|
||||
for i, v := range values {
|
||||
if !math.IsNaN(v) {
|
||||
values[i] = 0
|
||||
}
|
||||
}
|
||||
return tss
|
||||
}
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
// See `Rapid calculation methods` at https://en.wikipedia.org/wiki/Standard_deviation
|
||||
var avg float64
|
||||
var count float64
|
||||
var q float64
|
||||
for _, ts := range tss {
|
||||
v := ts.Values[i]
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
count++
|
||||
avgNew := avg + (v-avg)/count
|
||||
q += (v - avg) * (v - avgNew)
|
||||
avg = avgNew
|
||||
}
|
||||
if count == 0 {
|
||||
q = nan
|
||||
}
|
||||
dst.Values[i] = q / count
|
||||
}
|
||||
return tss[:1]
|
||||
}
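// Editorial note (not part of this commit): the loop above is the standard Welford
// online update - `avg` tracks the running mean and `q` accumulates the sum of squared
// deviations, so q/count is the population variance. For the values 1, 2, 3 this yields
// avg=2, q=2 and stdvar = 2/3.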
|
||||
|
||||
func aggrFuncCount(tss []*timeseries) []*timeseries {
|
||||
dst := tss[0]
|
||||
for i := range dst.Values {
|
||||
count := 0
|
||||
for _, ts := range tss {
|
||||
if math.IsNaN(ts.Values[i]) {
|
||||
continue
|
||||
}
|
||||
count++
|
||||
}
|
||||
dst.Values[i] = float64(count)
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncDistinct(tss []*timeseries) []*timeseries {
|
||||
dst := tss[0]
|
||||
m := make(map[float64]struct{}, len(tss))
|
||||
for i := range dst.Values {
|
||||
for _, ts := range tss {
|
||||
v := ts.Values[i]
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
m[v] = struct{}{}
|
||||
}
|
||||
n := float64(len(m))
|
||||
if n == 0 {
|
||||
n = nan
|
||||
}
|
||||
dst.Values[i] = n
|
||||
for k := range m {
|
||||
delete(m, k)
|
||||
}
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
|
||||
func aggrFuncCountValues(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
args := afa.args
|
||||
if err := expectTransformArgsNum(args, 2); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dstLabel, err := getString(args[0], 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
afe := func(tss []*timeseries) []*timeseries {
|
||||
m := make(map[float64]bool)
|
||||
for _, ts := range tss {
|
||||
for _, v := range ts.Values {
|
||||
m[v] = true
|
||||
}
|
||||
}
|
||||
values := make([]float64, 0, len(m))
|
||||
for v := range m {
|
||||
values = append(values, v)
|
||||
}
|
||||
sort.Float64s(values)
|
||||
|
||||
var rvs []*timeseries
|
||||
for _, v := range values {
|
||||
var dst timeseries
|
||||
dst.CopyFrom(tss[0])
|
||||
dst.MetricName.RemoveTag(dstLabel)
|
||||
dst.MetricName.AddTag(dstLabel, strconv.FormatFloat(v, 'g', -1, 64))
|
||||
for i := range dst.Values {
|
||||
count := 0
|
||||
for _, ts := range tss {
|
||||
if ts.Values[i] == v {
|
||||
count++
|
||||
}
|
||||
}
|
||||
n := float64(count)
|
||||
if n == 0 {
|
||||
n = nan
|
||||
}
|
||||
dst.Values[i] = n
|
||||
}
|
||||
rvs = append(rvs, &dst)
|
||||
}
|
||||
return rvs
|
||||
}
|
||||
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, false)
|
||||
}
|
||||
|
||||
func newAggrFuncTopK(isReverse bool) aggrFunc {
|
||||
return func(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
args := afa.args
|
||||
if err := expectTransformArgsNum(args, 2); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ks, err := getScalar(args[0], 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
afe := func(tss []*timeseries) []*timeseries {
|
||||
rvs := tss
|
||||
for n := range rvs[0].Values {
|
||||
sort.Slice(rvs, func(i, j int) bool {
|
||||
a := rvs[i].Values[n]
|
||||
b := rvs[j].Values[n]
|
||||
cmp := lessWithNaNs(a, b)
|
||||
if isReverse {
|
||||
cmp = !cmp
|
||||
}
|
||||
return cmp
|
||||
})
|
||||
if math.IsNaN(ks[n]) {
|
||||
ks[n] = 0
|
||||
}
|
||||
k := int(ks[n])
|
||||
if k < 0 {
|
||||
k = 0
|
||||
}
|
||||
if k > len(rvs) {
|
||||
k = len(rvs)
|
||||
}
|
||||
for _, ts := range rvs[:len(rvs)-k] {
|
||||
ts.Values[n] = nan
|
||||
}
|
||||
}
|
||||
return rvs
|
||||
}
|
||||
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, true)
|
||||
}
|
||||
}
|
||||
|
||||
func aggrFuncLimitK(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
args := afa.args
|
||||
if err := expectTransformArgsNum(args, 2); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ks, err := getScalar(args[0], 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
maxK := 0
|
||||
for _, kf := range ks {
|
||||
k := int(kf)
|
||||
if k > maxK {
|
||||
maxK = k
|
||||
}
|
||||
}
|
||||
afe := func(tss []*timeseries) []*timeseries {
|
||||
if len(tss) > maxK {
|
||||
tss = tss[:maxK]
|
||||
}
|
||||
for i, kf := range ks {
|
||||
k := int(kf)
|
||||
if k < 0 {
|
||||
k = 0
|
||||
}
|
||||
for j := k; j < len(tss); j++ {
|
||||
tss[j].Values[i] = nan
|
||||
}
|
||||
}
|
||||
return tss
|
||||
}
|
||||
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, true)
|
||||
}
|
||||
|
||||
func aggrFuncQuantile(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
args := afa.args
|
||||
if err := expectTransformArgsNum(args, 2); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
phis, err := getScalar(args[0], 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
afe := newAggrQuantileFunc(phis)
|
||||
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, false)
|
||||
}
|
||||
|
||||
func aggrFuncMedian(afa *aggrFuncArg) ([]*timeseries, error) {
|
||||
args := afa.args
|
||||
if err := expectTransformArgsNum(args, 1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
phis := evalNumber(afa.ec, 0.5)[0].Values
|
||||
afe := newAggrQuantileFunc(phis)
|
||||
return aggrFuncExt(afe, args[0], &afa.ae.Modifier, false)
|
||||
}
|
||||
|
||||
func newAggrQuantileFunc(phis []float64) func(tss []*timeseries) []*timeseries {
|
||||
return func(tss []*timeseries) []*timeseries {
|
||||
dst := tss[0]
|
||||
for n := range dst.Values {
|
||||
sort.Slice(tss, func(i, j int) bool {
|
||||
a := tss[i].Values[n]
|
||||
b := tss[j].Values[n]
|
||||
return lessWithNaNs(a, b)
|
||||
})
|
||||
phi := phis[n]
|
||||
if math.IsNaN(phi) {
|
||||
phi = 1
|
||||
}
|
||||
if phi < 0 {
|
||||
phi = 0
|
||||
}
|
||||
if phi > 1 {
|
||||
phi = 1
|
||||
}
|
||||
idx := int(math.Round(float64(len(tss)-1) * phi))
|
||||
dst.Values[n] = tss[idx].Values[n]
|
||||
}
|
||||
return tss[:1]
|
||||
}
|
||||
}
|
||||
|
||||
func lessWithNaNs(a, b float64) bool {
|
||||
if math.IsNaN(a) {
|
||||
return !math.IsNaN(b)
|
||||
}
|
||||
return a < b
|
||||
}
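To make the quantile selection above concrete, here is a small self-contained sketch (not part of this commit) that mirrors the index arithmetic used by newAggrQuantileFunc on a plain slice: sort the values, clamp phi to [0, 1], then take the element at round((n-1)*phi).

// quantile_sketch.go (illustrative only)
package main

import (
	"fmt"
	"math"
	"sort"
)

// quantileSorted mirrors the index selection in newAggrQuantileFunc.
func quantileSorted(phi float64, values []float64) float64 {
	sort.Float64s(values)
	if math.IsNaN(phi) || phi > 1 {
		phi = 1
	}
	if phi < 0 {
		phi = 0
	}
	idx := int(math.Round(float64(len(values)-1) * phi))
	return values[idx]
}

func main() {
	fmt.Println(quantileSorted(0.5, []float64{3, 1, 4, 1, 5})) // 3
	fmt.Println(quantileSorted(0.9, []float64{3, 1, 4, 1, 5})) // 5
}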
|
30
app/vmselect/promql/aggr_test.go
Normal file
|
@@ -0,0 +1,30 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIsAggrFuncModifierSuccess(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if !isAggrFuncModifier(s) {
|
||||
t.Fatalf("expecting valid funcModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("by")
|
||||
f("BY")
|
||||
f("without")
|
||||
f("Without")
|
||||
}
|
||||
|
||||
func TestIsAggrFuncModifierError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if isAggrFuncModifier(s) {
|
||||
t.Fatalf("unexpected valid funcModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("byfix")
|
||||
f("on")
|
||||
f("ignoring")
|
||||
}
|
3
app/vmselect/promql/arch_amd64.go
Normal file
|
@@ -0,0 +1,3 @@
|
|||
package promql
|
||||
|
||||
const maxByteSliceLen = 1 << 40
|
3
app/vmselect/promql/arch_arm.go
Normal file
|
@@ -0,0 +1,3 @@
|
|||
package promql
|
||||
|
||||
const maxByteSliceLen = 1<<31 - 1
|
499
app/vmselect/promql/binary_op.go
Normal file
|
@@ -0,0 +1,499 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
|
||||
var binaryOpFuncs = map[string]binaryOpFunc{
|
||||
"+": newBinaryOpArithFunc(binaryOpPlus),
|
||||
"-": newBinaryOpArithFunc(binaryOpMinus),
|
||||
"*": newBinaryOpArithFunc(binaryOpMul),
|
||||
"/": newBinaryOpArithFunc(binaryOpDiv),
|
||||
"%": newBinaryOpArithFunc(binaryOpMod),
|
||||
"^": newBinaryOpArithFunc(binaryOpPow),
|
||||
|
||||
// cmp ops
|
||||
"==": newBinaryOpCmpFunc(binaryOpEq),
|
||||
"!=": newBinaryOpCmpFunc(binaryOpNeq),
|
||||
">": newBinaryOpCmpFunc(binaryOpGt),
|
||||
"<": newBinaryOpCmpFunc(binaryOpLt),
|
||||
">=": newBinaryOpCmpFunc(binaryOpGte),
|
||||
"<=": newBinaryOpCmpFunc(binaryOpLte),
|
||||
|
||||
// logical set ops
|
||||
"and": binaryOpAnd,
|
||||
"or": binaryOpOr,
|
||||
"unless": binaryOpUnless,
|
||||
|
||||
// New ops
|
||||
"if": newBinaryOpArithFunc(binaryOpIf),
|
||||
"ifnot": newBinaryOpArithFunc(binaryOpIfnot),
|
||||
"default": newBinaryOpArithFunc(binaryOpDefault),
|
||||
}
|
||||
|
||||
var binaryOpPriorities = map[string]int{
|
||||
"default": -1,
|
||||
|
||||
"if": 0,
|
||||
"ifnot": 0,
|
||||
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operator-precedence
|
||||
"or": 1,
|
||||
|
||||
"and": 2,
|
||||
"unless": 2,
|
||||
|
||||
"==": 3,
|
||||
"!=": 3,
|
||||
"<": 3,
|
||||
">": 3,
|
||||
"<=": 3,
|
||||
">=": 3,
|
||||
|
||||
"+": 4,
|
||||
"-": 4,
|
||||
|
||||
"*": 5,
|
||||
"/": 5,
|
||||
"%": 5,
|
||||
|
||||
"^": 6,
|
||||
}
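// Worked example (editorial, not part of this commit): higher numbers bind tighter,
// so `2 + 3 * 4 ^ 2` groups as `2 + (3 * (4 ^ 2))` = 50, and since `^` is
// right-associative (see isRightAssociativeBinaryOp below), `2 ^ 3 ^ 2` groups as
// `2 ^ (3 ^ 2)` = 512.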
|
||||
|
||||
func getBinaryOpFunc(op string) binaryOpFunc {
|
||||
op = strings.ToLower(op)
|
||||
return binaryOpFuncs[op]
|
||||
}
|
||||
|
||||
func isBinaryOp(op string) bool {
|
||||
return getBinaryOpFunc(op) != nil
|
||||
}
|
||||
|
||||
func binaryOpPriority(op string) int {
|
||||
op = strings.ToLower(op)
|
||||
return binaryOpPriorities[op]
|
||||
}
|
||||
|
||||
func scanBinaryOpPrefix(s string) int {
|
||||
n := 0
|
||||
for op := range binaryOpFuncs {
|
||||
if len(s) < len(op) {
|
||||
continue
|
||||
}
|
||||
ss := strings.ToLower(s[:len(op)])
|
||||
if ss == op && len(op) > n {
|
||||
n = len(op)
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
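// Editorial note (not part of this commit): the longest-match rule above means
// scanBinaryOpPrefix("<=5") returns 2 (matching "<=") rather than 1 (matching "<"),
// and scanBinaryOpPrefix("unless ...") returns 6.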
|
||||
|
||||
func isRightAssociativeBinaryOp(op string) bool {
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/operators/#binary-operator-precedence
|
||||
return op == "^"
|
||||
}
|
||||
|
||||
func isBinaryOpGroupModifier(s string) bool {
|
||||
s = strings.ToLower(s)
|
||||
switch s {
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/operators/#vector-matching
|
||||
case "on", "ignoring":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func isBinaryOpJoinModifier(s string) bool {
|
||||
s = strings.ToLower(s)
|
||||
switch s {
|
||||
case "group_left", "group_right":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func isBinaryOpBoolModifier(s string) bool {
|
||||
s = strings.ToLower(s)
|
||||
return s == "bool"
|
||||
}
|
||||
|
||||
func isBinaryOpCmp(op string) bool {
|
||||
switch op {
|
||||
case "==", "!=", ">", "<", ">=", "<=":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func isBinaryOpLogicalSet(op string) bool {
|
||||
op = strings.ToLower(op)
|
||||
switch op {
|
||||
case "and", "or", "unless":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func binaryOpConstants(op string, left, right float64, isBool bool) float64 {
|
||||
if isBinaryOpCmp(op) {
|
||||
evalCmp := func(cf func(left, right float64) bool) float64 {
|
||||
if isBool {
|
||||
if cf(left, right) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
if cf(left, right) {
|
||||
return left
|
||||
}
|
||||
return nan
|
||||
}
|
||||
switch op {
|
||||
case "==":
|
||||
left = evalCmp(binaryOpEq)
|
||||
case "!=":
|
||||
left = evalCmp(binaryOpNeq)
|
||||
case ">":
|
||||
left = evalCmp(binaryOpGt)
|
||||
case "<":
|
||||
left = evalCmp(binaryOpLt)
|
||||
case ">=":
|
||||
left = evalCmp(binaryOpGte)
|
||||
case "<=":
|
||||
left = evalCmp(binaryOpLte)
|
||||
default:
|
||||
logger.Panicf("BUG: unexpected comparison binaryOp: %q", op)
|
||||
}
|
||||
} else {
|
||||
switch op {
|
||||
case "+":
|
||||
left = binaryOpPlus(left, right)
|
||||
case "-":
|
||||
left = binaryOpMinus(left, right)
|
||||
case "*":
|
||||
left = binaryOpMul(left, right)
|
||||
case "/":
|
||||
left = binaryOpDiv(left, right)
|
||||
case "%":
|
||||
left = binaryOpMod(left, right)
|
||||
case "^":
|
||||
left = binaryOpPow(left, right)
|
||||
case "and":
|
||||
// Nothing to do
|
||||
case "or":
|
||||
// Nothing to do
|
||||
case "unless":
|
||||
left = nan
|
||||
case "default":
|
||||
left = binaryOpDefault(left, right)
|
||||
case "if":
|
||||
left = binaryOpIf(left, right)
|
||||
case "ifnot":
|
||||
left = binaryOpIfnot(left, right)
|
||||
default:
|
||||
logger.Panicf("BUG: unexpected non-comparison binaryOp: %q", op)
|
||||
}
|
||||
}
|
||||
return left
|
||||
}
|
||||
|
||||
type binaryOpFuncArg struct {
|
||||
be *binaryOpExpr
|
||||
left []*timeseries
|
||||
right []*timeseries
|
||||
}
|
||||
|
||||
type binaryOpFunc func(bfa *binaryOpFuncArg) ([]*timeseries, error)
|
||||
|
||||
func newBinaryOpCmpFunc(cf func(left, right float64) bool) binaryOpFunc {
|
||||
cfe := func(left, right float64, isBool bool) float64 {
|
||||
if !isBool {
|
||||
if cf(left, right) {
|
||||
return left
|
||||
}
|
||||
return nan
|
||||
}
|
||||
if cf(left, right) {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
return newBinaryOpFunc(cfe)
|
||||
}
|
||||
|
||||
func newBinaryOpArithFunc(af func(left, right float64) float64) binaryOpFunc {
|
||||
afe := func(left, right float64, isBool bool) float64 {
|
||||
return af(left, right)
|
||||
}
|
||||
return newBinaryOpFunc(afe)
|
||||
}
|
||||
|
||||
func newBinaryOpFunc(bf func(left, right float64, isBool bool) float64) binaryOpFunc {
|
||||
return func(bfa *binaryOpFuncArg) ([]*timeseries, error) {
|
||||
isBool := bfa.be.Bool
|
||||
left, right, dst, err := adjustBinaryOpTags(bfa.be, bfa.left, bfa.right)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(left) != len(right) || len(left) != len(dst) {
|
||||
logger.Panicf("BUG: len(left) must match len(right) and len(dst); got %d vs %d vs %d", len(left), len(right), len(dst))
|
||||
}
|
||||
for i, tsLeft := range left {
|
||||
leftValues := tsLeft.Values
|
||||
rightValues := right[i].Values
|
||||
dstValues := dst[i].Values
|
||||
if len(leftValues) != len(rightValues) || len(leftValues) != len(dstValues) {
|
||||
logger.Panicf("BUG: len(leftVaues) must match len(rightValues) and len(dstValues); got %d vs %d vs %d",
|
||||
len(leftValues), len(rightValues), len(dstValues))
|
||||
}
|
||||
for j, a := range leftValues {
|
||||
b := rightValues[j]
|
||||
dstValues[j] = bf(a, b, isBool)
|
||||
}
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
}
|
||||
|
||||
func adjustBinaryOpTags(be *binaryOpExpr, left, right []*timeseries) ([]*timeseries, []*timeseries, []*timeseries, error) {
|
||||
if len(be.GroupModifier.Op) == 0 && len(be.JoinModifier.Op) == 0 {
|
||||
if isScalar(left) {
|
||||
// Fast path: `scalar op vector`
|
||||
rvsLeft := make([]*timeseries, len(right))
|
||||
tsLeft := left[0]
|
||||
for i, tsRight := range right {
|
||||
tsRight.MetricName.ResetMetricGroup()
|
||||
rvsLeft[i] = tsLeft
|
||||
}
|
||||
return rvsLeft, right, right, nil
|
||||
}
|
||||
if isScalar(right) {
|
||||
// Fast path: `vector op scalar`
|
||||
rvsRight := make([]*timeseries, len(left))
|
||||
tsRight := right[0]
|
||||
for i, tsLeft := range left {
|
||||
tsLeft.MetricName.ResetMetricGroup()
|
||||
rvsRight[i] = tsRight
|
||||
}
|
||||
return left, rvsRight, left, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Slow path: `vector op vector` or `a op {on|ignoring} {group_left|group_right} b`
|
||||
ensureOneX := func(side string, tss []*timeseries) error {
|
||||
if len(tss) == 0 {
|
||||
logger.Panicf("BUG: tss must contain at least one value")
|
||||
}
|
||||
if len(tss) == 1 {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf(`duplicate timeseries on the %s side of %q: %s %s`, side, be.Op, stringMetricTags(&tss[0].MetricName), be.GroupModifier.AppendString(nil))
|
||||
}
|
||||
var rvsLeft, rvsRight []*timeseries
|
||||
mLeft, mRight := createTimeseriesMapByTagSet(be, left, right)
|
||||
joinOp := strings.ToLower(be.JoinModifier.Op)
|
||||
joinTags := be.JoinModifier.Args
|
||||
for k, tssLeft := range mLeft {
|
||||
tssRight := mRight[k]
|
||||
if len(tssRight) == 0 {
|
||||
continue
|
||||
}
|
||||
switch joinOp {
|
||||
case "group_left":
|
||||
if err := ensureOneX("right", tssRight); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
src := tssRight[0]
|
||||
for _, ts := range tssLeft {
|
||||
ts.MetricName.AddMissingTags(joinTags, &src.MetricName)
|
||||
rvsLeft = append(rvsLeft, ts)
|
||||
rvsRight = append(rvsRight, src)
|
||||
}
|
||||
case "group_right":
|
||||
if err := ensureOneX("left", tssLeft); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
src := tssLeft[0]
|
||||
for _, ts := range tssRight {
|
||||
ts.MetricName.AddMissingTags(joinTags, &src.MetricName)
|
||||
rvsLeft = append(rvsLeft, src)
|
||||
rvsRight = append(rvsRight, ts)
|
||||
}
|
||||
case "":
|
||||
if err := ensureOneX("left", tssLeft); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
if err := ensureOneX("right", tssRight); err != nil {
|
||||
return nil, nil, nil, err
|
||||
}
|
||||
tssLeft[0].MetricName.ResetMetricGroup()
|
||||
rvsLeft = append(rvsLeft, tssLeft[0])
|
||||
rvsRight = append(rvsRight, tssRight[0])
|
||||
default:
|
||||
return nil, nil, nil, fmt.Errorf(`unexpected join modifier %q`, joinOp)
|
||||
}
|
||||
}
|
||||
dst := rvsLeft
|
||||
if joinOp == "group_right" {
|
||||
dst = rvsRight
|
||||
}
|
||||
return rvsLeft, rvsRight, dst, nil
|
||||
}
|
||||
|
||||
func binaryOpPlus(left, right float64) float64 {
|
||||
return left + right
|
||||
}
|
||||
|
||||
func binaryOpMinus(left, right float64) float64 {
|
||||
return left - right
|
||||
}
|
||||
|
||||
func binaryOpMul(left, right float64) float64 {
|
||||
return left * right
|
||||
}
|
||||
|
||||
func binaryOpDiv(left, right float64) float64 {
|
||||
return left / right
|
||||
}
|
||||
|
||||
func binaryOpMod(left, right float64) float64 {
|
||||
return math.Mod(left, right)
|
||||
}
|
||||
|
||||
func binaryOpPow(left, right float64) float64 {
|
||||
return math.Pow(left, right)
|
||||
}
|
||||
|
||||
func binaryOpDefault(left, right float64) float64 {
|
||||
if math.IsNaN(left) {
|
||||
return right
|
||||
}
|
||||
return left
|
||||
}
|
||||
|
||||
func binaryOpIf(left, right float64) float64 {
|
||||
if math.IsNaN(right) {
|
||||
return nan
|
||||
}
|
||||
return left
|
||||
}
|
||||
|
||||
func binaryOpIfnot(left, right float64) float64 {
|
||||
if math.IsNaN(right) {
|
||||
return left
|
||||
}
|
||||
return nan
|
||||
}
|
||||
|
||||
func binaryOpEq(left, right float64) bool {
|
||||
return left == right
|
||||
}
|
||||
|
||||
func binaryOpNeq(left, right float64) bool {
|
||||
return left != right
|
||||
}
|
||||
|
||||
func binaryOpGt(left, right float64) bool {
|
||||
return left > right
|
||||
}
|
||||
|
||||
func binaryOpLt(left, right float64) bool {
|
||||
return left < right
|
||||
}
|
||||
|
||||
func binaryOpGte(left, right float64) bool {
|
||||
return left >= right
|
||||
}
|
||||
|
||||
func binaryOpLte(left, right float64) bool {
|
||||
return left <= right
|
||||
}
|
||||
|
||||
func binaryOpAnd(bfa *binaryOpFuncArg) ([]*timeseries, error) {
|
||||
mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
|
||||
var rvs []*timeseries
|
||||
for k := range mRight {
|
||||
if tss := mLeft[k]; tss != nil {
|
||||
rvs = append(rvs, tss...)
|
||||
}
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
func binaryOpOr(bfa *binaryOpFuncArg) ([]*timeseries, error) {
|
||||
mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
|
||||
var rvs []*timeseries
|
||||
for _, tss := range mLeft {
|
||||
rvs = append(rvs, tss...)
|
||||
}
|
||||
for k, tss := range mRight {
|
||||
if mLeft[k] == nil {
|
||||
rvs = append(rvs, tss...)
|
||||
}
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
func binaryOpUnless(bfa *binaryOpFuncArg) ([]*timeseries, error) {
|
||||
mLeft, mRight := createTimeseriesMapByTagSet(bfa.be, bfa.left, bfa.right)
|
||||
var rvs []*timeseries
|
||||
for k, tss := range mLeft {
|
||||
if mRight[k] == nil {
|
||||
rvs = append(rvs, tss...)
|
||||
}
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
func createTimeseriesMapByTagSet(be *binaryOpExpr, left, right []*timeseries) (map[string][]*timeseries, map[string][]*timeseries) {
|
||||
groupTags := be.GroupModifier.Args
|
||||
groupOp := strings.ToLower(be.GroupModifier.Op)
|
||||
if len(groupOp) == 0 {
|
||||
groupOp = "ignoring"
|
||||
}
|
||||
getTagsMap := func(arg []*timeseries) map[string][]*timeseries {
|
||||
bb := bbPool.Get()
|
||||
m := make(map[string][]*timeseries, len(arg))
|
||||
mn := storage.GetMetricName()
|
||||
for _, ts := range arg {
|
||||
mn.CopyFrom(&ts.MetricName)
|
||||
mn.ResetMetricGroup()
|
||||
switch groupOp {
|
||||
case "on":
|
||||
mn.RemoveTagsOn(groupTags)
|
||||
case "ignoring":
|
||||
mn.RemoveTagsIgnoring(groupTags)
|
||||
default:
|
||||
logger.Panicf("BUG: unexpected binary op modifier %q", groupOp)
|
||||
}
|
||||
bb.B = marshalMetricTagsSorted(bb.B[:0], mn)
|
||||
m[string(bb.B)] = append(m[string(bb.B)], ts)
|
||||
}
|
||||
storage.PutMetricName(mn)
|
||||
bbPool.Put(bb)
|
||||
return m
|
||||
}
|
||||
mLeft := getTagsMap(left)
|
||||
mRight := getTagsMap(right)
|
||||
return mLeft, mRight
|
||||
}
|
||||
|
||||
func isScalar(arg []*timeseries) bool {
|
||||
if len(arg) != 1 {
|
||||
return false
|
||||
}
|
||||
mn := &arg[0].MetricName
|
||||
if len(mn.MetricGroup) > 0 {
|
||||
return false
|
||||
}
|
||||
return len(mn.Tags) == 0
|
||||
}
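As an editorial aside, a minimal in-package sketch (not part of this commit) of the scalar semantics of the extended `default`, `if` and `ifnot` operators defined above. The file name is hypothetical.

// sketch_binary_op_example_test.go (illustrative only)
package promql

import (
	"fmt"
	"math"
)

func Example_extendedBinaryOps() {
	fmt.Println(binaryOpDefault(math.NaN(), 5)) // default: fall back to the right side when the left side is NaN
	fmt.Println(binaryOpIf(3, math.NaN()))      // if: keep the left side only when the right side is not NaN
	fmt.Println(binaryOpIfnot(3, math.NaN()))   // ifnot: keep the left side only when the right side is NaN
	// Output:
	// 5
	// NaN
	// 3
}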
|
125
app/vmselect/promql/binary_op_test.go
Normal file
|
@@ -0,0 +1,125 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIsBinaryOpSuccess(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if !isBinaryOp(s) {
|
||||
t.Fatalf("expecting valid binaryOp: %q", s)
|
||||
}
|
||||
}
|
||||
f("and")
|
||||
f("AND")
|
||||
f("unless")
|
||||
f("unleSS")
|
||||
f("==")
|
||||
f("!=")
|
||||
f(">=")
|
||||
f("<=")
|
||||
f("or")
|
||||
f("Or")
|
||||
f("+")
|
||||
f("-")
|
||||
f("*")
|
||||
f("/")
|
||||
f("%")
|
||||
f("^")
|
||||
f(">")
|
||||
f("<")
|
||||
}
|
||||
|
||||
func TestIsBinaryOpError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if isBinaryOp(s) {
|
||||
t.Fatalf("unexpected valid binaryOp: %q", s)
|
||||
}
|
||||
}
|
||||
f("foobar")
|
||||
f("=~")
|
||||
f("!~")
|
||||
f("=")
|
||||
f("<==")
|
||||
f("234")
|
||||
}
|
||||
|
||||
func TestIsBinaryOpGroupModifierSuccess(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if !isBinaryOpGroupModifier(s) {
|
||||
t.Fatalf("expecting valid binaryOpGroupModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("on")
|
||||
f("ON")
|
||||
f("oN")
|
||||
f("ignoring")
|
||||
f("IGnoring")
|
||||
}
|
||||
|
||||
func TestIsBinaryOpGroupModifierError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if isBinaryOpGroupModifier(s) {
|
||||
t.Fatalf("unexpected valid binaryOpGroupModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("off")
|
||||
f("by")
|
||||
f("without")
|
||||
f("123")
|
||||
}
|
||||
|
||||
func TestIsBinaryOpJoinModifierSuccess(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if !isBinaryOpJoinModifier(s) {
|
||||
t.Fatalf("expecting valid binaryOpJoinModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("group_left")
|
||||
f("group_right")
|
||||
f("group_LEft")
|
||||
f("GRoup_RighT")
|
||||
}
|
||||
|
||||
func TestIsBinaryOpJoinModifierError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if isBinaryOpJoinModifier(s) {
|
||||
t.Fatalf("unexpected valid binaryOpJoinModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("on")
|
||||
f("by")
|
||||
f("without")
|
||||
f("123")
|
||||
}
|
||||
|
||||
func TestIsBinaryOpBoolModifierSuccess(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if !isBinaryOpBoolModifier(s) {
|
||||
t.Fatalf("expecting valid binaryOpBoolModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("bool")
|
||||
f("bOOL")
|
||||
f("BOOL")
|
||||
}
|
||||
|
||||
func TestIsBinaryOpBoolModifierError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
if isBinaryOpBoolModifier(s) {
|
||||
t.Fatalf("unexpected valid binaryOpBoolModifier: %q", s)
|
||||
}
|
||||
}
|
||||
f("on")
|
||||
f("by")
|
||||
f("without")
|
||||
f("123")
|
||||
}
|
655
app/vmselect/promql/eval.go
Normal file
|
@@ -0,0 +1,655 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"math"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
maxPointsPerTimeseries = flag.Int("search.maxPointsPerTimeseries", 10e3, "The maximum number of points per single timeseries returned from the search")
|
||||
)
|
||||
|
||||
// The minimum number of points per timeseries for enabling time rounding.
|
||||
// This improves cache hit ratio for frequently requested queries over
|
||||
// big time ranges.
|
||||
const minTimeseriesPointsForTimeRounding = 50
|
||||
|
||||
// ValidateMaxPointsPerTimeseries checks the maximum number of points that
|
||||
// may be returned for each time series.
|
||||
//
|
||||
// The number mustn't exceed -search.maxPointsPerTimeseries.
|
||||
func ValidateMaxPointsPerTimeseries(start, end, step int64) error {
|
||||
points := (end-start)/step + 1
|
||||
if uint64(points) > uint64(*maxPointsPerTimeseries) {
|
||||
return fmt.Errorf(`too many points for the given step=%d, start=%d and end=%d: %d; cannot exceed %d points`,
|
||||
step, start, end, uint64(points), *maxPointsPerTimeseries)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// AdjustStartEnd adjusts start and end values, so response caching may be enabled.
|
||||
//
|
||||
// See EvalConfig.mayCache for details.
|
||||
func AdjustStartEnd(start, end, step int64) (int64, int64) {
|
||||
points := (end-start)/step + 1
|
||||
if points < minTimeseriesPointsForTimeRounding {
|
||||
// Too few points for rounding.
|
||||
return start, end
|
||||
}
|
||||
|
||||
// Round start and end to values divisible by step in order
|
||||
// to enable response caching (see EvalConfig.mayCache).
|
||||
|
||||
// Round start to the nearest smaller value divisible by step.
|
||||
start -= start % step
|
||||
// Round end to the nearest bigger value divisible by step.
|
||||
adjust := end % step
|
||||
if adjust > 0 {
|
||||
end += step - adjust
|
||||
}
|
||||
return start, end
|
||||
}
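// Worked example (editorial, not part of this commit): with step=15000 (15s),
// start=1546300802000 and end=1546301761000 span 64 points, which is above the
// 50-point threshold, so start is rounded down to 1546300800000 and end is rounded
// up to 1546301775000 - both divisible by step, allowing mayCache to return true.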
|
||||
|
||||
// EvalConfig is the configuration required for query evaluation via Exec
|
||||
type EvalConfig struct {
|
||||
Start int64
|
||||
End int64
|
||||
Step int64
|
||||
|
||||
Deadline netstorage.Deadline
|
||||
|
||||
MayCache bool
|
||||
|
||||
timestamps []int64
|
||||
timestampsOnce sync.Once
|
||||
}
|
||||
|
||||
// newEvalConfig returns a new EvalConfig copied from src.
|
||||
func newEvalConfig(src *EvalConfig) *EvalConfig {
|
||||
var ec EvalConfig
|
||||
ec.Start = src.Start
|
||||
ec.End = src.End
|
||||
ec.Step = src.Step
|
||||
ec.Deadline = src.Deadline
|
||||
ec.MayCache = src.MayCache
|
||||
|
||||
// do not copy src.timestamps - they must be generated again.
|
||||
return &ec
|
||||
}
|
||||
|
||||
func (ec *EvalConfig) validate() {
|
||||
if ec.Start > ec.End {
|
||||
logger.Panicf("BUG: start cannot exceed end; got %d vs %d", ec.Start, ec.End)
|
||||
}
|
||||
if ec.Step <= 0 {
|
||||
logger.Panicf("BUG: step must be greater than 0; got %d", ec.Step)
|
||||
}
|
||||
}
|
||||
|
||||
func (ec *EvalConfig) mayCache() bool {
|
||||
if !ec.MayCache {
|
||||
return false
|
||||
}
|
||||
if ec.Start%ec.Step != 0 {
|
||||
return false
|
||||
}
|
||||
if ec.End%ec.Step != 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (ec *EvalConfig) getSharedTimestamps() []int64 {
|
||||
ec.timestampsOnce.Do(ec.timestampsInit)
|
||||
return ec.timestamps
|
||||
}
|
||||
|
||||
func (ec *EvalConfig) timestampsInit() {
|
||||
ec.timestamps = getTimestamps(ec.Start, ec.End, ec.Step)
|
||||
}
|
||||
|
||||
func getTimestamps(start, end, step int64) []int64 {
|
||||
// Sanity checks.
|
||||
if step <= 0 {
|
||||
logger.Panicf("BUG: Step must be bigger than 0; got %d", step)
|
||||
}
|
||||
if start > end {
|
||||
logger.Panicf("BUG: Start cannot exceed End; got %d vs %d", start, end)
|
||||
}
|
||||
if err := ValidateMaxPointsPerTimeseries(start, end, step); err != nil {
|
||||
logger.Panicf("BUG: %s; this must be validated before the call to getTimestamps", err)
|
||||
}
|
||||
|
||||
// Prepare timestamps.
|
||||
points := 1 + (end-start)/step
|
||||
timestamps := make([]int64, points)
|
||||
for i := range timestamps {
|
||||
timestamps[i] = start
|
||||
start += step
|
||||
}
|
||||
return timestamps
|
||||
}
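// Editorial note (not part of this commit): getTimestamps(0, 40, 10) returns
// [0 10 20 30 40] - one timestamp per step, inclusive of both start and end.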
|
||||
|
||||
func evalExpr(ec *EvalConfig, e expr) ([]*timeseries, error) {
|
||||
if me, ok := e.(*metricExpr); ok {
|
||||
re := &rollupExpr{
|
||||
Expr: me,
|
||||
}
|
||||
rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, re)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`cannot evaluate %q: %s`, me.AppendString(nil), err)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
if re, ok := e.(*rollupExpr); ok {
|
||||
rv, err := evalRollupFunc(ec, "default_rollup", rollupDefault, re)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`cannot evaluate %q: %s`, re.AppendString(nil), err)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
if fe, ok := e.(*funcExpr); ok {
|
||||
nrf := getRollupFunc(fe.Name)
|
||||
if nrf == nil {
|
||||
args, err := evalExprs(ec, fe.Args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tf := getTransformFunc(fe.Name)
|
||||
if tf == nil {
|
||||
return nil, fmt.Errorf(`unknown func %q`, fe.Name)
|
||||
}
|
||||
tfa := &transformFuncArg{
|
||||
ec: ec,
|
||||
fe: fe,
|
||||
args: args,
|
||||
}
|
||||
rv, err := tf(tfa)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`cannot evaluate %q: %s`, fe.AppendString(nil), err)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
args, re, err := evalRollupFuncArgs(ec, fe)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rf, err := nrf(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rv, err := evalRollupFunc(ec, fe.Name, rf, re)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`cannot evaluate %q: %s`, fe.AppendString(nil), err)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
if ae, ok := e.(*aggrFuncExpr); ok {
|
||||
args, err := evalExprs(ec, ae.Args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
af := getAggrFunc(ae.Name)
|
||||
if af == nil {
|
||||
return nil, fmt.Errorf(`unknown func %q`, ae.Name)
|
||||
}
|
||||
afa := &aggrFuncArg{
|
||||
ae: ae,
|
||||
args: args,
|
||||
ec: ec,
|
||||
}
|
||||
rv, err := af(afa)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`cannot evaluate %q: %s`, ae.AppendString(nil), err)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
if be, ok := e.(*binaryOpExpr); ok {
|
||||
left, err := evalExpr(ec, be.Left)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
right, err := evalExpr(ec, be.Right)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
bf := getBinaryOpFunc(be.Op)
|
||||
if bf == nil {
|
||||
return nil, fmt.Errorf(`unknown binary op %q`, be.Op)
|
||||
}
|
||||
bfa := &binaryOpFuncArg{
|
||||
be: be,
|
||||
left: left,
|
||||
right: right,
|
||||
}
|
||||
rv, err := bf(bfa)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf(`cannot evaluate %q: %s`, be.AppendString(nil), err)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
if ne, ok := e.(*numberExpr); ok {
|
||||
rv := evalNumber(ec, ne.N)
|
||||
return rv, nil
|
||||
}
|
||||
if se, ok := e.(*stringExpr); ok {
|
||||
rv := evalString(ec, se.S)
|
||||
return rv, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected expression %q", e.AppendString(nil))
|
||||
}
|
||||
|
||||
func evalExprs(ec *EvalConfig, es []expr) ([][]*timeseries, error) {
|
||||
var rvs [][]*timeseries
|
||||
for _, e := range es {
|
||||
rv, err := evalExpr(ec, e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rvs = append(rvs, rv)
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
func evalRollupFuncArgs(ec *EvalConfig, fe *funcExpr) ([]interface{}, *rollupExpr, error) {
|
||||
var re *rollupExpr
|
||||
rollupArgIdx := getRollupArgIdx(fe.Name)
|
||||
args := make([]interface{}, len(fe.Args))
|
||||
for i, arg := range fe.Args {
|
||||
if i == rollupArgIdx {
|
||||
re = getRollupExprArg(arg)
|
||||
args[i] = re
|
||||
continue
|
||||
}
|
||||
ts, err := evalExpr(ec, arg)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("cannot evaluate arg #%d for %q: %s", i+1, fe.AppendString(nil), err)
|
||||
}
|
||||
args[i] = ts
|
||||
}
|
||||
return args, re, nil
|
||||
}
|
||||
|
||||
func getRollupExprArg(arg expr) *rollupExpr {
|
||||
re, ok := arg.(*rollupExpr)
|
||||
if !ok {
|
||||
// Wrap non-rollup arg into rollupExpr.
|
||||
return &rollupExpr{
|
||||
Expr: arg,
|
||||
}
|
||||
}
|
||||
if len(re.Step) == 0 && !re.InheritStep {
|
||||
// Return standard rollup if it doesn't set step.
|
||||
return re
|
||||
}
|
||||
me, ok := re.Expr.(*metricExpr)
|
||||
if !ok {
|
||||
// arg contains subquery.
|
||||
return re
|
||||
}
|
||||
// Convert me[w:step] -> default_rollup(me)[w:step]
|
||||
reNew := *re
|
||||
reNew.Expr = &funcExpr{
|
||||
Name: "default_rollup",
|
||||
Args: []expr{
|
||||
&rollupExpr{Expr: me},
|
||||
},
|
||||
}
|
||||
return &reNew
|
||||
}
|
||||
|
||||
func evalRollupFunc(ec *EvalConfig, name string, rf rollupFunc, re *rollupExpr) ([]*timeseries, error) {
|
||||
ecNew := ec
|
||||
var offset int64
|
||||
if len(re.Offset) > 0 {
|
||||
var err error
|
||||
offset, err = DurationValue(re.Offset, ec.Step)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ecNew = newEvalConfig(ec)
|
||||
ecNew.Start -= offset
|
||||
ecNew.End -= offset
|
||||
ecNew.Start, ecNew.End = AdjustStartEnd(ecNew.Start, ecNew.End, ecNew.Step)
|
||||
}
|
||||
var rvs []*timeseries
|
||||
var err error
|
||||
if me, ok := re.Expr.(*metricExpr); ok {
|
||||
if me.IsEmpty() {
|
||||
rvs = evalNumber(ecNew, nan)
|
||||
} else {
|
||||
var window int64
|
||||
if len(re.Window) > 0 {
|
||||
window, err = DurationValue(re.Window, ec.Step)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
rvs, err = evalRollupFuncWithMetricExpr(ecNew, name, rf, me, window)
|
||||
}
|
||||
} else {
|
||||
rvs, err = evalRollupFuncWithSubquery(ecNew, name, rf, re)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if offset != 0 && len(rvs) > 0 {
|
||||
// Make a copy of timestamps, since they may be shared by other time series.
|
||||
srcTimestamps := rvs[0].Timestamps
|
||||
dstTimestamps := append([]int64{}, srcTimestamps...)
|
||||
for i := range dstTimestamps {
|
||||
dstTimestamps[i] += offset
|
||||
}
|
||||
for _, ts := range rvs {
|
||||
ts.Timestamps = dstTimestamps
|
||||
}
|
||||
}
|
||||
return rvs, nil
|
||||
}
|
||||
|
||||
func evalRollupFuncWithSubquery(ec *EvalConfig, name string, rf rollupFunc, re *rollupExpr) ([]*timeseries, error) {
|
||||
// Do not use rollupResultCacheV here, since it works only with metricExpr.
|
||||
var step int64
|
||||
if len(re.Step) > 0 {
|
||||
var err error
|
||||
step, err = DurationValue(re.Step, ec.Step)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
step = ec.Step
|
||||
}
|
||||
var window int64
|
||||
if len(re.Window) > 0 {
|
||||
var err error
|
||||
window, err = DurationValue(re.Window, ec.Step)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
ecSQ := newEvalConfig(ec)
|
||||
ecSQ.Start -= window + maxSilenceInterval
|
||||
ecSQ.End += step
|
||||
ecSQ.Step = step
|
||||
if err := ValidateMaxPointsPerTimeseries(ecSQ.Start, ecSQ.End, ecSQ.Step); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ecSQ.Start, ecSQ.End = AdjustStartEnd(ecSQ.Start, ecSQ.End, ecSQ.Step)
|
||||
tssSQ, err := evalExpr(ecSQ, re.Expr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sharedTimestamps := getTimestamps(ec.Start, ec.End, ec.Step)
|
||||
preFunc, rcs := getRollupConfigs(name, rf, ec.Start, ec.End, ec.Step, window, sharedTimestamps)
|
||||
tss := make([]*timeseries, 0, len(tssSQ)*len(rcs))
|
||||
var tssLock sync.Mutex
|
||||
doParallel(tssSQ, func(tsSQ *timeseries, values []float64, timestamps []int64) ([]float64, []int64) {
|
||||
values, timestamps = removeNanValues(values[:0], timestamps[:0], tsSQ.Values, tsSQ.Timestamps)
|
||||
preFunc(values, timestamps)
|
||||
for _, rc := range rcs {
|
||||
var ts timeseries
|
||||
ts.MetricName.CopyFrom(&tsSQ.MetricName)
|
||||
if len(rc.TagValue) > 0 {
|
||||
ts.MetricName.AddTag("rollup", rc.TagValue)
|
||||
}
|
||||
ts.Values = rc.Do(ts.Values[:0], values, timestamps)
|
||||
ts.Timestamps = sharedTimestamps
|
||||
ts.denyReuse = true
|
||||
tssLock.Lock()
|
||||
tss = append(tss, &ts)
|
||||
tssLock.Unlock()
|
||||
}
|
||||
return values, timestamps
|
||||
})
|
||||
if !rollupFuncsKeepMetricGroup[name] {
|
||||
tss = copyTimeseriesMetricNames(tss)
|
||||
for _, ts := range tss {
|
||||
ts.MetricName.ResetMetricGroup()
|
||||
}
|
||||
}
|
||||
return tss, nil
|
||||
}
|
||||
|
||||
func doParallel(tss []*timeseries, f func(ts *timeseries, values []float64, timestamps []int64) ([]float64, []int64)) {
|
||||
concurrency := runtime.GOMAXPROCS(-1)
|
||||
if concurrency > len(tss) {
|
||||
concurrency = len(tss)
|
||||
}
|
||||
workCh := make(chan *timeseries, concurrency)
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(concurrency)
|
||||
for i := 0; i < concurrency; i++ {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
var tmpValues []float64
|
||||
var tmpTimestamps []int64
|
||||
for ts := range workCh {
|
||||
tmpValues, tmpTimestamps = f(ts, tmpValues, tmpTimestamps)
|
||||
}
|
||||
}()
|
||||
}
|
||||
for _, ts := range tss {
|
||||
workCh <- ts
|
||||
}
|
||||
close(workCh)
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
func removeNanValues(dstValues []float64, dstTimestamps []int64, values []float64, timestamps []int64) ([]float64, []int64) {
|
||||
hasNan := false
|
||||
for _, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
hasNan = true
|
||||
}
|
||||
}
|
||||
if !hasNan {
|
||||
// Fast path - no NaNs.
|
||||
dstValues = append(dstValues, values...)
|
||||
dstTimestamps = append(dstTimestamps, timestamps...)
|
||||
return dstValues, dstTimestamps
|
||||
}
|
||||
|
||||
// Slow path - remove NaNs.
|
||||
for i, v := range values {
|
||||
if math.IsNaN(v) {
|
||||
continue
|
||||
}
|
||||
dstValues = append(dstValues, v)
|
||||
dstTimestamps = append(dstTimestamps, timestamps[i])
|
||||
}
|
||||
return dstValues, dstTimestamps
|
||||
}
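// Illustrative example (not part of the original source): removeNanValues
// appends only the non-NaN points to dst, so values [1, NaN, 3] with
// timestamps [10, 20, 30] yield dstValues [1, 3] and dstTimestamps [10, 30].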
|
||||
|
||||
func getMaxPointsPerRollup() int {
|
||||
maxPointsPerRollupOnce.Do(func() {
|
||||
n := memory.Allowed() / 16 / 8
|
||||
if n <= 16 {
|
||||
n = 16
|
||||
}
|
||||
maxPointsPerRollup = n
|
||||
})
|
||||
return maxPointsPerRollup
|
||||
}
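// Illustrative note (not part of the original source): the limit works out to
// memory.Allowed()/128 with a floor of 16. For example, a 4GiB memory
// allowance yields 4*1024^3/128 = 33554432 points per rollup.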
|
||||
|
||||
var (
|
||||
maxPointsPerRollup int
|
||||
maxPointsPerRollupOnce sync.Once
|
||||
)
|
||||
|
||||
var (
|
||||
rollupResultCacheFullHits = metrics.NewCounter(`vm_rollup_result_cache_full_hits_total`)
|
||||
rollupResultCachePartialHits = metrics.NewCounter(`vm_rollup_result_cache_partial_hits_total`)
|
||||
rollupResultCacheMiss = metrics.NewCounter(`vm_rollup_result_cache_miss_total`)
|
||||
)
|
||||
|
||||
func evalRollupFuncWithMetricExpr(ec *EvalConfig, name string, rf rollupFunc, me *metricExpr, window int64) ([]*timeseries, error) {
|
||||
// Search for partial results in cache.
|
||||
tssCached, start := rollupResultCacheV.Get(name, ec, me, window)
|
||||
if start > ec.End {
|
||||
// The result is fully cached.
|
||||
rollupResultCacheFullHits.Inc()
|
||||
return tssCached, nil
|
||||
}
|
||||
if start > ec.Start {
|
||||
rollupResultCachePartialHits.Inc()
|
||||
} else {
|
||||
rollupResultCacheMiss.Inc()
|
||||
}
|
||||
|
||||
// Fetch the remaining part of the result.
|
||||
sq := &storage.SearchQuery{
|
||||
MinTimestamp: start - window - maxSilenceInterval,
|
||||
MaxTimestamp: ec.End + ec.Step,
|
||||
TagFilterss: [][]storage.TagFilter{me.TagFilters},
|
||||
}
|
||||
rss, err := netstorage.ProcessSearchQuery(sq, ec.Deadline)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rssLen := rss.Len()
|
||||
if rssLen == 0 {
|
||||
rss.Cancel()
|
||||
// Add missing points until ec.End.
|
||||
// Do not cache the result, since missing points
|
||||
// may be backfilled in the future.
|
||||
tss := mergeTimeseries(tssCached, nil, start, ec)
|
||||
return tss, nil
|
||||
}
|
||||
sharedTimestamps := getTimestamps(start, ec.End, ec.Step)
|
||||
preFunc, rcs := getRollupConfigs(name, rf, start, ec.End, ec.Step, window, sharedTimestamps)
|
||||
|
||||
// Verify timeseries fit available memory after the rollup.
|
||||
// Take into account points from tssCached.
|
||||
pointsPerTimeseries := 1 + (ec.End-ec.Start)/ec.Step
|
||||
if uint64(pointsPerTimeseries) > uint64(getMaxPointsPerRollup()/rssLen/len(rcs)) {
|
||||
rss.Cancel()
|
||||
return nil, fmt.Errorf("cannot process more than %d data points for %d time series with %d points in each time series; "+
|
||||
"possible solutions are: reducing the number of matching time series; switching to node with more RAM; increasing `step` query arg (%gs)",
|
||||
getMaxPointsPerRollup(), rssLen*len(rcs), pointsPerTimeseries, float64(ec.Step)/1e3)
|
||||
}
|
||||
|
||||
// Evaluate rollup
|
||||
tss := make([]*timeseries, 0, rssLen*len(rcs))
|
||||
var tssLock sync.Mutex
|
||||
err = rss.RunParallel(func(rs *netstorage.Result) {
|
||||
preFunc(rs.Values, rs.Timestamps)
|
||||
for _, rc := range rcs {
|
||||
var ts timeseries
|
||||
ts.MetricName.CopyFrom(&rs.MetricName)
|
||||
if len(rc.TagValue) > 0 {
|
||||
ts.MetricName.AddTag("rollup", rc.TagValue)
|
||||
}
|
||||
ts.Values = rc.Do(ts.Values[:0], rs.Values, rs.Timestamps)
|
||||
ts.Timestamps = sharedTimestamps
|
||||
ts.denyReuse = true
|
||||
|
||||
tssLock.Lock()
|
||||
tss = append(tss, &ts)
|
||||
tssLock.Unlock()
|
||||
}
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !rollupFuncsKeepMetricGroup[name] {
|
||||
tss = copyTimeseriesMetricNames(tss)
|
||||
for _, ts := range tss {
|
||||
ts.MetricName.ResetMetricGroup()
|
||||
}
|
||||
}
|
||||
tss = mergeTimeseries(tssCached, tss, start, ec)
|
||||
rollupResultCacheV.Put(name, ec, me, window, tss)
|
||||
|
||||
return tss, nil
|
||||
}
|
||||
|
||||
func getRollupConfigs(name string, rf rollupFunc, start, end, step, window int64, sharedTimestamps []int64) (func(values []float64, timestamps []int64), []*rollupConfig) {
|
||||
preFunc := func(values []float64, timestamps []int64) {}
|
||||
if rollupFuncsRemoveCounterResets[name] {
|
||||
preFunc = func(values []float64, timestamps []int64) {
|
||||
removeCounterResets(values)
|
||||
}
|
||||
}
|
||||
newRollupConfig := func(rf rollupFunc, tagValue string) *rollupConfig {
|
||||
return &rollupConfig{
|
||||
TagValue: tagValue,
|
||||
Func: rf,
|
||||
Start: start,
|
||||
End: end,
|
||||
Step: step,
|
||||
Window: window,
|
||||
Timestamps: sharedTimestamps,
|
||||
}
|
||||
}
|
||||
appendRollupConfigs := func(dst []*rollupConfig) []*rollupConfig {
|
||||
dst = append(dst, newRollupConfig(rollupMin, "min"))
|
||||
dst = append(dst, newRollupConfig(rollupMax, "max"))
|
||||
dst = append(dst, newRollupConfig(rollupAvg, "avg"))
|
||||
return dst
|
||||
}
|
||||
var rcs []*rollupConfig
|
||||
switch name {
|
||||
case "rollup":
|
||||
rcs = appendRollupConfigs(rcs)
|
||||
case "rollup_rate", "rollup_deriv":
|
||||
preFuncPrev := preFunc
|
||||
preFunc = func(values []float64, timestamps []int64) {
|
||||
preFuncPrev(values, timestamps)
|
||||
derivValues(values, timestamps)
|
||||
}
|
||||
rcs = appendRollupConfigs(rcs)
|
||||
case "rollup_increase", "rollup_delta":
|
||||
preFuncPrev := preFunc
|
||||
preFunc = func(values []float64, timestamps []int64) {
|
||||
preFuncPrev(values, timestamps)
|
||||
deltaValues(values)
|
||||
}
|
||||
rcs = appendRollupConfigs(rcs)
|
||||
default:
|
||||
rcs = append(rcs, newRollupConfig(rf, ""))
|
||||
}
|
||||
return preFunc, rcs
|
||||
}
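// Illustrative note (not part of the original source): for name="rollup" the
// returned rollupConfigs compute min, max and avg over each window and tag the
// resulting series with rollup="min", rollup="max" and rollup="avg".
// "rollup_rate"/"rollup_deriv" additionally run derivValues and
// "rollup_increase"/"rollup_delta" run deltaValues in preFunc before the
// per-window aggregation; any other name gets a single config with rf itself.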
|
||||
|
||||
var bbPool bytesutil.ByteBufferPool
|
||||
|
||||
func evalNumber(ec *EvalConfig, n float64) []*timeseries {
|
||||
var ts timeseries
|
||||
ts.denyReuse = true
|
||||
timestamps := ec.getSharedTimestamps()
|
||||
values := make([]float64, len(timestamps))
|
||||
for i := range timestamps {
|
||||
values[i] = n
|
||||
}
|
||||
ts.Values = values
|
||||
ts.Timestamps = timestamps
|
||||
return []*timeseries{&ts}
|
||||
}
|
||||
|
||||
func evalString(ec *EvalConfig, s string) []*timeseries {
|
||||
rv := evalNumber(ec, nan)
|
||||
rv[0].MetricName.MetricGroup = append(rv[0].MetricName.MetricGroup[:0], s...)
|
||||
return rv
|
||||
}
|
||||
|
||||
func evalTime(ec *EvalConfig) []*timeseries {
|
||||
rv := evalNumber(ec, nan)
|
||||
timestamps := rv[0].Timestamps
|
||||
values := rv[0].Values
|
||||
for i, ts := range timestamps {
|
||||
values[i] = float64(ts) * 1e-3
|
||||
}
|
||||
return rv
|
||||
}
|
216
app/vmselect/promql/exec.go
Normal file
@@ -0,0 +1,216 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
// ExpandWithExprs expands WITH expressions inside q and returns the resulting
|
||||
// PromQL without WITH expressions.
|
||||
func ExpandWithExprs(q string) (string, error) {
|
||||
e, err := parsePromQLWithCache(q)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf := e.AppendString(nil)
|
||||
return string(buf), nil
|
||||
}
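// Illustrative example (derived from the parser tests in this commit, not part
// of the original source):
//
//	q, err := ExpandWithExprs(`with (f(x) = x + 1) f(metric)`)
//	// on success q == "metric + 1"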
|
||||
|
||||
// Exec executes q for the given ec until the deadline.
|
||||
func Exec(ec *EvalConfig, q string) ([]netstorage.Result, error) {
|
||||
ec.validate()
|
||||
|
||||
e, err := parsePromQLWithCache(q)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Add an additional point to the end. This point is used
|
||||
// in calculating the last value for rate, deriv, increase
|
||||
// and delta funcs.
|
||||
ec.End += ec.Step
|
||||
|
||||
rv, err := evalExpr(ec, e)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remove the additional point at the end.
|
||||
for _, ts := range rv {
|
||||
ts.Values = ts.Values[:len(ts.Values)-1]
|
||||
|
||||
// ts.Timestamps may be shared between timeseries, so truncate it with len(ts.Values) instead of len(ts.Timestamps)-1
|
||||
ts.Timestamps = ts.Timestamps[:len(ts.Values)]
|
||||
}
|
||||
ec.End -= ec.Step
|
||||
|
||||
maySort := maySortResults(e, rv)
|
||||
result, err := timeseriesToResult(rv, maySort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, err
|
||||
}
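// Illustrative usage sketch (hypothetical values, not part of the original
// source; the remaining EvalConfig fields such as Deadline are assumed to be
// set appropriately):
//
//	ec := &EvalConfig{
//		Start: 1558000000000, // millisecond timestamps
//		End:   1558003600000,
//		Step:  30000,
//	}
//	results, err := Exec(ec, `sum(rate(http_requests_total[5m])) by (job)`)
//
// Exec temporarily extends ec.End by one step during evaluation and trims the
// extra point before returning the results.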
|
||||
|
||||
func maySortResults(e expr, tss []*timeseries) bool {
|
||||
if len(tss) > 100 {
|
||||
// There is no sense in sorting a lot of results
|
||||
return false
|
||||
}
|
||||
fe, ok := e.(*funcExpr)
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
switch fe.Name {
|
||||
case "sort", "sort_desc":
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
func timeseriesToResult(tss []*timeseries, maySort bool) ([]netstorage.Result, error) {
|
||||
tss = removeNaNs(tss)
|
||||
result := make([]netstorage.Result, len(tss))
|
||||
m := make(map[string]bool)
|
||||
bb := bbPool.Get()
|
||||
for i, ts := range tss {
|
||||
bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
|
||||
if m[string(bb.B)] {
|
||||
return nil, fmt.Errorf(`duplicate output timeseries: %s%s`, ts.MetricName.MetricGroup, stringMetricName(&ts.MetricName))
|
||||
}
|
||||
m[string(bb.B)] = true
|
||||
|
||||
rs := &result[i]
|
||||
rs.MetricNameMarshaled = append(rs.MetricNameMarshaled[:0], bb.B...)
|
||||
rs.MetricName.CopyFrom(&ts.MetricName)
|
||||
rs.Values = append(rs.Values[:0], ts.Values...)
|
||||
rs.Timestamps = append(rs.Timestamps[:0], ts.Timestamps...)
|
||||
}
|
||||
bbPool.Put(bb)
|
||||
|
||||
if maySort {
|
||||
sort.Slice(result, func(i, j int) bool {
|
||||
return string(result[i].MetricNameMarshaled) < string(result[j].MetricNameMarshaled)
|
||||
})
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func removeNaNs(tss []*timeseries) []*timeseries {
|
||||
rvs := tss[:0]
|
||||
for _, ts := range tss {
|
||||
nans := 0
|
||||
for _, v := range ts.Values {
|
||||
if math.IsNaN(v) {
|
||||
nans++
|
||||
}
|
||||
}
|
||||
if nans == len(ts.Values) {
|
||||
// Skip timeseries with all NaNs.
|
||||
continue
|
||||
}
|
||||
rvs = append(rvs, ts)
|
||||
}
|
||||
return rvs
|
||||
}
|
||||
|
||||
func parsePromQLWithCache(q string) (expr, error) {
|
||||
pcv := parseCacheV.Get(q)
|
||||
if pcv == nil {
|
||||
e, err := parsePromQL(q)
|
||||
pcv = &parseCacheValue{
|
||||
e: e,
|
||||
err: err,
|
||||
}
|
||||
parseCacheV.Put(q, pcv)
|
||||
}
|
||||
if pcv.err != nil {
|
||||
return nil, pcv.err
|
||||
}
|
||||
return pcv.e, nil
|
||||
}
|
||||
|
||||
var parseCacheV = func() *parseCache {
|
||||
pc := &parseCache{
|
||||
m: make(map[string]*parseCacheValue),
|
||||
}
|
||||
metrics.NewGauge(`vm_cache_requests_total{type="promql/parse"}`, func() float64 {
|
||||
return float64(pc.Requests())
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_misses_total{type="promql/parse"}`, func() float64 {
|
||||
return float64(pc.Misses())
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_entries{type="promql/parse"}`, func() float64 {
|
||||
return float64(pc.Len())
|
||||
})
|
||||
return pc
|
||||
}()
|
||||
|
||||
const parseCacheMaxLen = 10e3
|
||||
|
||||
type parseCacheValue struct {
|
||||
e expr
|
||||
err error
|
||||
}
|
||||
|
||||
type parseCache struct {
|
||||
m map[string]*parseCacheValue
|
||||
mu sync.RWMutex
|
||||
|
||||
requests uint64
|
||||
misses uint64
|
||||
}
|
||||
|
||||
func (pc *parseCache) Requests() uint64 {
|
||||
return atomic.LoadUint64(&pc.requests)
|
||||
}
|
||||
|
||||
func (pc *parseCache) Misses() uint64 {
|
||||
return atomic.LoadUint64(&pc.misses)
|
||||
}
|
||||
|
||||
func (pc *parseCache) Len() uint64 {
|
||||
pc.mu.RLock()
|
||||
n := len(pc.m)
|
||||
pc.mu.RUnlock()
|
||||
return uint64(n)
|
||||
}
|
||||
|
||||
func (pc *parseCache) Get(q string) *parseCacheValue {
|
||||
atomic.AddUint64(&pc.requests, 1)
|
||||
|
||||
pc.mu.RLock()
|
||||
pcv := pc.m[q]
|
||||
pc.mu.RUnlock()
|
||||
|
||||
if pcv == nil {
|
||||
atomic.AddUint64(&pc.misses, 1)
|
||||
}
|
||||
return pcv
|
||||
}
|
||||
|
||||
func (pc *parseCache) Put(q string, pcv *parseCacheValue) {
|
||||
pc.mu.Lock()
|
||||
overflow := len(pc.m) - parseCacheMaxLen
|
||||
if overflow > 0 {
|
||||
// Remove 10% of items from the cache.
|
||||
overflow = int(float64(len(pc.m)) * 0.1)
|
||||
for k := range pc.m {
|
||||
delete(pc.m, k)
|
||||
overflow--
|
||||
if overflow <= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
pc.m[q] = pcv
|
||||
pc.mu.Unlock()
|
||||
}
|
3593
app/vmselect/promql/exec_test.go
Normal file
File diff suppressed because it is too large
380
app/vmselect/promql/lexer.go
Normal file
@@ -0,0 +1,380 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type lexer struct {
|
||||
// Token contains the currently parsed token.
|
||||
// An empty token means EOF.
|
||||
Token string
|
||||
|
||||
prevTokens []string
|
||||
nextTokens []string
|
||||
|
||||
sOrig string
|
||||
sTail string
|
||||
|
||||
err error
|
||||
}
|
||||
|
||||
func (lex *lexer) Context() string {
|
||||
return fmt.Sprintf("%s%s", lex.Token, lex.sTail)
|
||||
}
|
||||
|
||||
func (lex *lexer) Init(s string) {
|
||||
lex.Token = ""
|
||||
lex.prevTokens = nil
|
||||
lex.nextTokens = nil
|
||||
lex.err = nil
|
||||
|
||||
lex.sOrig = s
|
||||
lex.sTail = s
|
||||
}
|
||||
|
||||
func (lex *lexer) Next() error {
|
||||
if lex.err != nil {
|
||||
return lex.err
|
||||
}
|
||||
lex.prevTokens = append(lex.prevTokens, lex.Token)
|
||||
if len(lex.nextTokens) > 0 {
|
||||
lex.Token = lex.nextTokens[len(lex.nextTokens)-1]
|
||||
lex.nextTokens = lex.nextTokens[:len(lex.nextTokens)-1]
|
||||
return nil
|
||||
}
|
||||
token, err := lex.next()
|
||||
if err != nil {
|
||||
lex.err = err
|
||||
return err
|
||||
}
|
||||
lex.Token = token
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lex *lexer) next() (string, error) {
|
||||
again:
|
||||
// Skip whitespace
|
||||
s := lex.sTail
|
||||
i := 0
|
||||
for i < len(s) && isSpaceChar(s[i]) {
|
||||
i++
|
||||
}
|
||||
s = s[i:]
|
||||
lex.sTail = s
|
||||
|
||||
if len(s) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
var token string
|
||||
var err error
|
||||
switch s[0] {
|
||||
case '#':
|
||||
// Skip comment till the end of string
|
||||
s = s[1:]
|
||||
n := strings.IndexByte(s, '\n')
|
||||
if n < 0 {
|
||||
return "", nil
|
||||
}
|
||||
lex.sTail = s[n+1:]
|
||||
goto again
|
||||
case '{', '}', '[', ']', '(', ')', ',':
|
||||
token = s[:1]
|
||||
goto tokenFoundLabel
|
||||
}
|
||||
if isIdentPrefix(s) {
|
||||
token, err = scanIdent(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
goto tokenFoundLabel
|
||||
}
|
||||
if isStringPrefix(s) {
|
||||
token, err = scanString(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
goto tokenFoundLabel
|
||||
}
|
||||
if n := scanBinaryOpPrefix(s); n > 0 {
|
||||
token = s[:n]
|
||||
goto tokenFoundLabel
|
||||
}
|
||||
if n := scanTagFilterOpPrefix(s); n > 0 {
|
||||
token = s[:n]
|
||||
goto tokenFoundLabel
|
||||
}
|
||||
if n := scanDuration(s); n > 0 {
|
||||
token = s[:n]
|
||||
goto tokenFoundLabel
|
||||
}
|
||||
if isPositiveNumberPrefix(s) {
|
||||
token, err = scanPositiveNumber(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
goto tokenFoundLabel
|
||||
}
|
||||
return "", fmt.Errorf("cannot recognize %q", s)
|
||||
|
||||
tokenFoundLabel:
|
||||
lex.sTail = s[len(token):]
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func scanString(s string) (string, error) {
|
||||
if len(s) < 2 {
|
||||
return "", fmt.Errorf("cannot find end of string in %q", s)
|
||||
}
|
||||
|
||||
quote := s[0]
|
||||
i := 1
|
||||
for {
|
||||
n := strings.IndexByte(s[i:], quote)
|
||||
if n < 0 {
|
||||
return "", fmt.Errorf("cannot find closing quote %ch for the string %q", quote, s)
|
||||
}
|
||||
i += n
|
||||
bs := 0
|
||||
for bs < i && s[i-bs-1] == '\\' {
|
||||
bs++
|
||||
}
|
||||
if bs%2 == 0 {
|
||||
token := s[:i+1]
|
||||
return token, nil
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
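// Illustrative example (not part of the original source): the returned token
// includes the surrounding quotes and any escaped quotes inside, e.g.
// scanString(`"foo\"bar" + x`) returns `"foo\"bar"`.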
|
||||
|
||||
func scanPositiveNumber(s string) (string, error) {
|
||||
if strings.HasPrefix(s, "Inf") {
|
||||
return "Inf", nil
|
||||
}
|
||||
if strings.HasPrefix(s, "NaN") {
|
||||
return "NaN", nil
|
||||
}
|
||||
// Scan integer part. It may be empty if fractional part exists.
|
||||
i := 0
|
||||
for i < len(s) && isDecimalChar(s[i]) {
|
||||
i++
|
||||
}
|
||||
|
||||
if i == len(s) {
|
||||
if i == 0 {
|
||||
return "", fmt.Errorf("number cannot be empty")
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
if s[i] != '.' && s[i] != 'e' && s[i] != 'E' {
|
||||
return s[:i], nil
|
||||
}
|
||||
|
||||
if s[i] == '.' {
|
||||
// Scan fractional part. It cannot be empty.
|
||||
i++
|
||||
j := i
|
||||
for j < len(s) && isDecimalChar(s[j]) {
|
||||
j++
|
||||
}
|
||||
if j == i {
|
||||
return "", fmt.Errorf("missing fractional part in %q", s)
|
||||
}
|
||||
i = j
|
||||
if i == len(s) {
|
||||
return s, nil
|
||||
}
|
||||
}
|
||||
|
||||
if s[i] != 'e' && s[i] != 'E' {
|
||||
return s[:i], nil
|
||||
}
|
||||
i++
|
||||
|
||||
// Scan exponent part.
|
||||
if i == len(s) {
|
||||
return "", fmt.Errorf("missing exponent part in %q", s)
|
||||
}
|
||||
if s[i] == '-' || s[i] == '+' {
|
||||
i++
|
||||
}
|
||||
j := i
|
||||
for j < len(s) && isDecimalChar(s[j]) {
|
||||
j++
|
||||
}
|
||||
if j == i {
|
||||
return "", fmt.Errorf("missing exponent part in %q", s)
|
||||
}
|
||||
return s[:j], nil
|
||||
}
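// Illustrative examples (not part of the original source):
// scanPositiveNumber("123abc") returns "123", scanPositiveNumber("1.5e-3+x")
// returns "1.5e-3", and scanPositiveNumber("Inf-1") returns "Inf".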
|
||||
|
||||
func scanIdent(s string) (string, error) {
|
||||
if len(s) == 0 {
|
||||
return "", fmt.Errorf("ident cannot be empty")
|
||||
}
|
||||
i := 0
|
||||
for i < len(s) && isIdentChar(s[i]) {
|
||||
i++
|
||||
}
|
||||
return s[:i], nil
|
||||
}
|
||||
|
||||
func (lex *lexer) Prev() {
|
||||
lex.nextTokens = append(lex.nextTokens, lex.Token)
|
||||
lex.Token = lex.prevTokens[len(lex.prevTokens)-1]
|
||||
lex.prevTokens = lex.prevTokens[:len(lex.prevTokens)-1]
|
||||
}
|
||||
|
||||
func isEOF(s string) bool {
|
||||
return len(s) == 0
|
||||
}
|
||||
|
||||
func scanTagFilterOpPrefix(s string) int {
|
||||
if len(s) >= 2 {
|
||||
switch s[:2] {
|
||||
case "=~", "!~", "!=":
|
||||
return 2
|
||||
}
|
||||
}
|
||||
if len(s) >= 1 {
|
||||
if s[0] == '=' {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
func isOffset(s string) bool {
|
||||
s = strings.ToLower(s)
|
||||
return s == "offset"
|
||||
}
|
||||
|
||||
func isStringPrefix(s string) bool {
|
||||
if len(s) == 0 {
|
||||
return false
|
||||
}
|
||||
switch s[0] {
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/basics/#string-literals
|
||||
case '"', '\'', '`':
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func isPositiveNumberPrefix(s string) bool {
|
||||
if len(s) == 0 {
|
||||
return false
|
||||
}
|
||||
if isDecimalChar(s[0]) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check for .234 numbers
|
||||
if s[0] != '.' || len(s) < 2 {
|
||||
return strings.HasPrefix(s, "Inf") || strings.HasPrefix(s, "NaN")
|
||||
}
|
||||
return isDecimalChar(s[1])
|
||||
}
|
||||
|
||||
func isDuration(s string) bool {
|
||||
n := scanDuration(s)
|
||||
return n == len(s)
|
||||
}
|
||||
|
||||
// DurationValue returns the duration in milliseconds for the given s
|
||||
// and the given step.
|
||||
func DurationValue(s string, step int64) (int64, error) {
|
||||
n := scanDuration(s)
|
||||
if n != len(s) {
|
||||
return 0, fmt.Errorf("cannot parse duration %q", s)
|
||||
}
|
||||
|
||||
f, err := strconv.ParseFloat(s[:len(s)-1], 64)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("cannot parse duration %q: %s", s, err)
|
||||
}
|
||||
|
||||
var mp float64
|
||||
switch s[len(s)-1] {
|
||||
case 's':
|
||||
mp = 1
|
||||
case 'm':
|
||||
mp = 60
|
||||
case 'h':
|
||||
mp = 60 * 60
|
||||
case 'd':
|
||||
mp = 24 * 60 * 60
|
||||
case 'w':
|
||||
mp = 7 * 24 * 60 * 60
|
||||
case 'y':
|
||||
mp = 365 * 24 * 60 * 60
|
||||
case 'i':
|
||||
mp = float64(step) / 1e3
|
||||
default:
|
||||
return 0, fmt.Errorf("invalid duration suffix in %q", s)
|
||||
}
|
||||
return int64(mp * f * 1e3), nil
|
||||
}
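// Illustrative examples (not part of the original source): with step=5000 (5s),
// DurationValue("5m", 5000) returns 300000, DurationValue("1.5h", 5000)
// returns 5400000 and DurationValue("3i", 5000) returns 15000, since the "i"
// suffix is measured in multiples of the current step.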
|
||||
|
||||
func scanDuration(s string) int {
|
||||
i := 0
|
||||
for i < len(s) && isDecimalChar(s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == 0 || i == len(s) {
|
||||
return -1
|
||||
}
|
||||
if s[i] == '.' {
|
||||
j := i
|
||||
i++
|
||||
for i < len(s) && isDecimalChar(s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == j || i == len(s) {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
switch s[i] {
|
||||
case 's', 'm', 'h', 'd', 'w', 'y', 'i':
|
||||
return i + 1
|
||||
default:
|
||||
return -1
|
||||
}
|
||||
}
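// Illustrative examples (not part of the original source): scanDuration("5m]")
// returns 2, scanDuration("1.5h offset") returns 4 and scanDuration("10x")
// returns -1, since "x" is not a recognized duration suffix.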
|
||||
|
||||
func isDecimalChar(ch byte) bool {
|
||||
return ch >= '0' && ch <= '9'
|
||||
}
|
||||
|
||||
func isIdentPrefix(s string) bool {
|
||||
if len(s) == 0 {
|
||||
return false
|
||||
}
|
||||
return isFirstIdentChar(s[0])
|
||||
}
|
||||
|
||||
func isFirstIdentChar(ch byte) bool {
|
||||
if ch >= 'a' && ch <= 'z' || ch >= 'A' && ch <= 'Z' {
|
||||
return true
|
||||
}
|
||||
return ch == '_' || ch == ':'
|
||||
}
|
||||
|
||||
func isIdentChar(ch byte) bool {
|
||||
if isFirstIdentChar(ch) {
|
||||
return true
|
||||
}
|
||||
return isDecimalChar(ch) || ch == ':' || ch == '.'
|
||||
}
|
||||
|
||||
func isSpaceChar(ch byte) bool {
|
||||
switch ch {
|
||||
case ' ', '\t', '\n', '\v', '\f', '\r':
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
295
app/vmselect/promql/lexer_test.go
Normal file
@@ -0,0 +1,295 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestLexerNextPrev(t *testing.T) {
|
||||
var lex lexer
|
||||
lex.Init("foo bar baz")
|
||||
if lex.Token != "" {
|
||||
t.Fatalf("unexpected token got: %q; want %q", lex.Token, "")
|
||||
}
|
||||
if err := lex.Next(); err != nil {
|
||||
t.Fatalf("unexpeted error: %s", err)
|
||||
}
|
||||
if lex.Token != "foo" {
|
||||
t.Fatalf("unexpected token got: %q; want %q", lex.Token, "foo")
|
||||
}
|
||||
|
||||
// Rewind before the first item.
|
||||
lex.Prev()
|
||||
if lex.Token != "" {
|
||||
t.Fatalf("unexpected token got: %q; want %q", lex.Token, "")
|
||||
}
|
||||
if err := lex.Next(); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if lex.Token != "foo" {
|
||||
t.Fatalf("unexpected token got: %q; want %q", lex.Token, "foo")
|
||||
}
|
||||
if err := lex.Next(); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if lex.Token != "bar" {
|
||||
t.Fatalf("unexpected token got: %q; want %q", lex.Token, "bar")
|
||||
}
|
||||
|
||||
// Rewind to the first item.
|
||||
lex.Prev()
|
||||
if lex.Token != "foo" {
|
||||
t.Fatalf("unexpected token got: %q; want %q", lex.Token, "foo")
|
||||
}
|
||||
if err := lex.Next(); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if lex.Token != "bar" {
|
||||
t.Fatalf("unexpected token got: %q; want %q", lex.Token, "bar")
|
||||
}
|
||||
if err := lex.Next(); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if lex.Token != "baz" {
|
||||
t.Fatalf("unexpected token got: %q; want %q", lex.Token, "baz")
|
||||
}
|
||||
|
||||
// Go beyond the token stream.
|
||||
if err := lex.Next(); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if lex.Token != "" {
|
||||
t.Fatalf("unexpected token got: %q; want %q", lex.Token, "")
|
||||
}
|
||||
if !isEOF(lex.Token) {
|
||||
t.Fatalf("expecting eof")
|
||||
}
|
||||
lex.Prev()
|
||||
if lex.Token != "baz" {
|
||||
t.Fatalf("unexpected token got: %q; want %q", lex.Token, "baz")
|
||||
}
|
||||
|
||||
// Call lex.Next() multiple times beyond the end of the token stream.
|
||||
if err := lex.Next(); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if lex.Token != "" {
|
||||
t.Fatalf("unexpected token got: %q; want %q", lex.Token, "")
|
||||
}
|
||||
if !isEOF(lex.Token) {
|
||||
t.Fatalf("expecting eof")
|
||||
}
|
||||
if err := lex.Next(); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if lex.Token != "" {
|
||||
t.Fatalf("unexpected token got: %q; want %q", lex.Token, "")
|
||||
}
|
||||
if !isEOF(lex.Token) {
|
||||
t.Fatalf("expecting eof")
|
||||
}
|
||||
lex.Prev()
|
||||
if lex.Token != "" {
|
||||
t.Fatalf("unexpected token got: %q; want %q", lex.Token, "")
|
||||
}
|
||||
if !isEOF(lex.Token) {
|
||||
t.Fatalf("expecting eof")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLexerSuccess(t *testing.T) {
|
||||
var s string
|
||||
var expectedTokens []string
|
||||
|
||||
// An empty string
|
||||
s = ""
|
||||
expectedTokens = nil
|
||||
testLexerSuccess(t, s, expectedTokens)
|
||||
|
||||
// String with whitespace
|
||||
s = " \n\t\r "
|
||||
expectedTokens = nil
|
||||
testLexerSuccess(t, s, expectedTokens)
|
||||
|
||||
// Just metric name
|
||||
s = "metric"
|
||||
expectedTokens = []string{"metric"}
|
||||
testLexerSuccess(t, s, expectedTokens)
|
||||
|
||||
// Metric name with spec chars
|
||||
s = ":foo.bar_"
|
||||
expectedTokens = []string{":foo.bar_"}
|
||||
testLexerSuccess(t, s, expectedTokens)
|
||||
|
||||
// Metric name with window
|
||||
s = "metric[5m] "
|
||||
expectedTokens = []string{"metric", "[", "5m", "]"}
|
||||
testLexerSuccess(t, s, expectedTokens)
|
||||
|
||||
// Metric name with tag filters
|
||||
s = ` metric:12.34{a="foo", b != "bar", c=~ "x.+y", d !~ "zzz"}`
|
||||
expectedTokens = []string{`metric:12.34`, `{`, `a`, `=`, `"foo"`, `,`, `b`, `!=`, `"bar"`, `,`, `c`, `=~`, `"x.+y"`, `,`, `d`, `!~`, `"zzz"`, `}`}
|
||||
testLexerSuccess(t, s, expectedTokens)
|
||||
|
||||
// Metric name with offset
|
||||
s = ` metric offset 10d `
|
||||
expectedTokens = []string{`metric`, `offset`, `10d`}
|
||||
testLexerSuccess(t, s, expectedTokens)
|
||||
|
||||
// Func call
|
||||
s = `sum ( metric{x="y" } [5m] offset 10h)`
|
||||
expectedTokens = []string{`sum`, `(`, `metric`, `{`, `x`, `=`, `"y"`, `}`, `[`, `5m`, `]`, `offset`, `10h`, `)`}
|
||||
testLexerSuccess(t, s, expectedTokens)
|
||||
|
||||
// Binary op
|
||||
s = `a+b or c % d and e unless f`
|
||||
expectedTokens = []string{`a`, `+`, `b`, `or`, `c`, `%`, `d`, `and`, `e`, `unless`, `f`}
|
||||
testLexerSuccess(t, s, expectedTokens)
|
||||
|
||||
// Numbers
|
||||
s = `3+1.2-.23+4.5e5-78e-6+1.24e+45-NaN+Inf`
|
||||
expectedTokens = []string{`3`, `+`, `1.2`, `-`, `.23`, `+`, `4.5e5`, `-`, `78e-6`, `+`, `1.24e+45`, `-`, `NaN`, `+`, `Inf`}
|
||||
testLexerSuccess(t, s, expectedTokens)
|
||||
|
||||
s = `12.34`
|
||||
expectedTokens = []string{`12.34`}
|
||||
testLexerSuccess(t, s, expectedTokens)
|
||||
|
||||
// Strings
|
||||
s = `""''` + "``" + `"\\" '\\' "\"" '\''"\\\"\\"`
|
||||
expectedTokens = []string{`""`, `''`, "``", `"\\"`, `'\\'`, `"\""`, `'\''`, `"\\\"\\"`}
|
||||
testLexerSuccess(t, s, expectedTokens)
|
||||
|
||||
s = " `foo\\\\\\`бар` "
|
||||
expectedTokens = []string{"`foo\\\\\\`бар`"}
|
||||
testLexerSuccess(t, s, expectedTokens)
|
||||
|
||||
s = `# comment # sdf
|
||||
foobar # comment
|
||||
baz
|
||||
# yet another comment`
|
||||
expectedTokens = []string{"foobar", "baz"}
|
||||
testLexerSuccess(t, s, expectedTokens)
|
||||
}
|
||||
|
||||
func testLexerSuccess(t *testing.T, s string, expectedTokens []string) {
|
||||
t.Helper()
|
||||
|
||||
var lex lexer
|
||||
lex.Init(s)
|
||||
|
||||
var tokens []string
|
||||
for {
|
||||
if err := lex.Next(); err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if isEOF(lex.Token) {
|
||||
break
|
||||
}
|
||||
tokens = append(tokens, lex.Token)
|
||||
}
|
||||
if !reflect.DeepEqual(tokens, expectedTokens) {
|
||||
t.Fatalf("unexected tokens\ngot\n%q\nwant\n%q", tokens, expectedTokens)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLexerError(t *testing.T) {
|
||||
// Invalid identifier
|
||||
testLexerError(t, ".foo")
|
||||
|
||||
// Incomplete string
|
||||
testLexerError(t, `"foobar`)
|
||||
testLexerError(t, `'`)
|
||||
testLexerError(t, "`")
|
||||
|
||||
// Unrecognized char
|
||||
testLexerError(t, "тест")
|
||||
|
||||
// Invalid numbers
|
||||
testLexerError(t, `.`)
|
||||
testLexerError(t, `123.`)
|
||||
testLexerError(t, `12e`)
|
||||
testLexerError(t, `1.2e`)
|
||||
testLexerError(t, `1.2E+`)
|
||||
testLexerError(t, `1.2E-`)
|
||||
}
|
||||
|
||||
func testLexerError(t *testing.T, s string) {
|
||||
t.Helper()
|
||||
|
||||
var lex lexer
|
||||
lex.Init(s)
|
||||
for {
|
||||
if err := lex.Next(); err != nil {
|
||||
// Expected error
|
||||
break
|
||||
}
|
||||
if isEOF(lex.Token) {
|
||||
t.Fatalf("expecting error during parse")
|
||||
}
|
||||
}
|
||||
|
||||
// Try calling Next again. It must return error.
|
||||
if err := lex.Next(); err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDurationSuccess(t *testing.T) {
|
||||
// Integer durations
|
||||
testDurationSuccess(t, "123s", 42, 123*1000)
|
||||
testDurationSuccess(t, "123m", 42, 123*60*1000)
|
||||
testDurationSuccess(t, "1h", 42, 1*60*60*1000)
|
||||
testDurationSuccess(t, "2d", 42, 2*24*60*60*1000)
|
||||
testDurationSuccess(t, "3w", 42, 3*7*24*60*60*1000)
|
||||
testDurationSuccess(t, "4y", 42, 4*365*24*60*60*1000)
|
||||
testDurationSuccess(t, "1i", 42*1000, 42*1000)
|
||||
testDurationSuccess(t, "3i", 42, 3*42)
|
||||
|
||||
// Float durations
|
||||
testDurationSuccess(t, "0.234s", 42, 234)
|
||||
testDurationSuccess(t, "1.5s", 42, 1.5*1000)
|
||||
testDurationSuccess(t, "1.5m", 42, 1.5*60*1000)
|
||||
testDurationSuccess(t, "1.2h", 42, 1.2*60*60*1000)
|
||||
testDurationSuccess(t, "1.1d", 42, 1.1*24*60*60*1000)
|
||||
testDurationSuccess(t, "1.1w", 42, 1.1*7*24*60*60*1000)
|
||||
testDurationSuccess(t, "1.3y", 42, 1.3*365*24*60*60*1000)
|
||||
testDurationSuccess(t, "0.1i", 12340, 0.1*12340)
|
||||
}
|
||||
|
||||
func testDurationSuccess(t *testing.T, s string, step, expectedD int64) {
|
||||
t.Helper()
|
||||
d, err := DurationValue(s, step)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
if d != expectedD {
|
||||
t.Fatalf("unexpected duration; got %d; want %d", d, expectedD)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDurationError(t *testing.T) {
|
||||
testDurationError(t, "")
|
||||
testDurationError(t, "foo")
|
||||
testDurationError(t, "m")
|
||||
testDurationError(t, "12")
|
||||
testDurationError(t, "1.23")
|
||||
testDurationError(t, "1.23mm")
|
||||
testDurationError(t, "123q")
|
||||
}
|
||||
|
||||
func testDurationError(t *testing.T, s string) {
|
||||
t.Helper()
|
||||
|
||||
if isDuration(s) {
|
||||
t.Fatalf("unexpected valud duration %q", s)
|
||||
}
|
||||
|
||||
d, err := DurationValue(s, 42)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error for duration %q", s)
|
||||
}
|
||||
if d != 0 {
|
||||
t.Fatalf("expecting zero duration; got %d", d)
|
||||
}
|
||||
}
|
1649
app/vmselect/promql/parser.go
Normal file
File diff suppressed because it is too large
677
app/vmselect/promql/parser_test.go
Normal file
@@ -0,0 +1,677 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParseMetricSelectorSuccess(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
tfs, err := ParseMetricSelector(s)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when parsing %q: %s", s, err)
|
||||
}
|
||||
if tfs == nil {
|
||||
t.Fatalf("expecting non-nil tfs when parsing %q", s)
|
||||
}
|
||||
}
|
||||
f("foo")
|
||||
f(":foo")
|
||||
f(" :fo:bar.baz")
|
||||
f(`a{}`)
|
||||
f(`{foo="bar"}`)
|
||||
f(`{:f:oo=~"bar.+"}`)
|
||||
f(`foo {bar != "baz"}`)
|
||||
f(` foo { bar !~ "^ddd(x+)$", a="ss", __name__="sffd"} `)
|
||||
f(`(foo)`)
|
||||
}
|
||||
|
||||
func TestParseMetricSelectorError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
tfs, err := ParseMetricSelector(s)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
}
|
||||
if tfs != nil {
|
||||
t.Fatalf("expecting nil tfs when parsing %q", s)
|
||||
}
|
||||
}
|
||||
f("")
|
||||
f(`{}`)
|
||||
f(`foo bar`)
|
||||
f(`foo+bar`)
|
||||
f(`sum(bar)`)
|
||||
f(`x{y}`)
|
||||
f(`x{y+z}`)
|
||||
f(`foo[5m]`)
|
||||
f(`foo offset 5m`)
|
||||
}
|
||||
|
||||
func TestParsePromQLSuccess(t *testing.T) {
|
||||
another := func(s string, sExpected string) {
|
||||
t.Helper()
|
||||
|
||||
e, err := parsePromQL(s)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when parsing %q: %s", s, err)
|
||||
}
|
||||
res := e.AppendString(nil)
|
||||
if string(res) != sExpected {
|
||||
t.Fatalf("unexpected string constructed;\ngot\n%q\nwant\n%q", res, sExpected)
|
||||
}
|
||||
}
|
||||
same := func(s string) {
|
||||
t.Helper()
|
||||
another(s, s)
|
||||
}
|
||||
|
||||
// metricExpr
|
||||
same(`{}`)
|
||||
same(`{}[5m]`)
|
||||
same(`{}[5m:]`)
|
||||
same(`{}[:]`)
|
||||
another(`{}[: ]`, `{}[:]`)
|
||||
same(`{}[:3s]`)
|
||||
another(`{}[: 3s ]`, `{}[:3s]`)
|
||||
same(`{}[5m:3s]`)
|
||||
another(`{}[ 5m : 3s ]`, `{}[5m:3s]`)
|
||||
same(`{} offset 5m`)
|
||||
same(`{}[5m] offset 10y`)
|
||||
same(`{}[5.3m:3.4s] offset 10y`)
|
||||
same(`{}[:3.4s] offset 10y`)
|
||||
same(`{Foo="bAR"}`)
|
||||
same(`{foo="bar"}`)
|
||||
same(`{foo="bar"}[5m]`)
|
||||
same(`{foo="bar"}[5m:]`)
|
||||
same(`{foo="bar"}[5m:3s]`)
|
||||
same(`{foo="bar"} offset 10y`)
|
||||
same(`{foo="bar"}[5m] offset 10y`)
|
||||
same(`{foo="bar"}[5m:3s] offset 10y`)
|
||||
another(`{foo="bar"}[5m] oFFSEt 10y`, `{foo="bar"}[5m] offset 10y`)
|
||||
same("METRIC")
|
||||
same("metric")
|
||||
same("m_e:tri44:_c123")
|
||||
another("-metric", "0 - metric")
|
||||
same(`metric offset 10h`)
|
||||
same("metric[5m]")
|
||||
same("metric[5m:3s]")
|
||||
same("metric[5m] offset 10h")
|
||||
same("metric[5m:3s] offset 10h")
|
||||
same("metric[5i:3i] offset 10i")
|
||||
same(`metric{foo="bar"}`)
|
||||
same(`metric{foo="bar"} offset 10h`)
|
||||
same(`metric{foo!="bar"}[2d]`)
|
||||
same(`metric{foo="bar"}[2d] offset 10h`)
|
||||
same(`metric{foo="bar", b="sdfsdf"}[2d:3h] offset 10h`)
|
||||
another(` metric { foo = "bar" } [ 2d ] offset 10h `, `metric{foo="bar"}[2d] offset 10h`)
|
||||
// metric name matching keywords
|
||||
same("rate")
|
||||
same("RATE")
|
||||
same("by")
|
||||
same("BY")
|
||||
same("bool")
|
||||
same("BOOL")
|
||||
same("unless")
|
||||
same("UNLESS")
|
||||
same("Ignoring")
|
||||
same("with")
|
||||
same("WITH")
|
||||
same("With")
|
||||
// Metric filters ending with comma
|
||||
another(`m{foo="bar",}`, `m{foo="bar"}`)
|
||||
// String concat in tag value
|
||||
another(`m{foo="bar" + "baz"}`, `m{foo="barbaz"}`)
|
||||
|
||||
// Valid regexp
|
||||
same(`foo{bar=~"x"}`)
|
||||
same(`foo{bar=~"^x"}`)
|
||||
same(`foo{bar=~"^x$"}`)
|
||||
same(`foo{bar=~"^(a[bc]|d)$"}`)
|
||||
same(`foo{bar!~"x"}`)
|
||||
same(`foo{bar!~"^x"}`)
|
||||
same(`foo{bar!~"^x$"}`)
|
||||
same(`foo{bar!~"^(a[bc]|d)$"}`)
|
||||
|
||||
// stringExpr
|
||||
same(`""`)
|
||||
same(`"\n\t\r 12:{}[]()44"`)
|
||||
another(`''`, `""`)
|
||||
another("``", `""`)
|
||||
another(" `foo\"b'ar` ", "\"foo\\\"b'ar\"")
|
||||
another(` 'foo\'bar"BAZ' `, `"foo'bar\"BAZ"`)
|
||||
// string concat
|
||||
another(`"foo"+'bar'`, `"foobar"`)
|
||||
|
||||
// numberExpr
|
||||
same(`1`)
|
||||
same(`1.23`)
|
||||
same(`0.23`)
|
||||
same(`1.2e+45`)
|
||||
same(`1.2e-45`)
|
||||
same(`-1`)
|
||||
same(`-1.23`)
|
||||
same(`-0.23`)
|
||||
same(`-1.2e+45`)
|
||||
same(`-1.2e-45`)
|
||||
same(`-1.2e-45`)
|
||||
another(`12.5E34`, `1.25e+35`)
|
||||
another(`-.2`, `-0.2`)
|
||||
another(`-.2E-2`, `-0.002`)
|
||||
same(`NaN`)
|
||||
another(`Inf`, `+Inf`)
|
||||
another(`+Inf`, `+Inf`)
|
||||
another(`-Inf`, `-Inf`)
|
||||
|
||||
// binaryOpExpr
|
||||
another(`NaN + 2 *3 * Inf`, `NaN`)
|
||||
another(`Inf - Inf`, `NaN`)
|
||||
another(`Inf + Inf`, `+Inf`)
|
||||
another(`-m`, `0 - m`)
|
||||
same(`m + ignoring () n[5m]`)
|
||||
another(`M + IGNORING () N[5m]`, `M + ignoring () N[5m]`)
|
||||
same(`m + on (foo) n[5m]`)
|
||||
another(`m + ON (Foo) n[5m]`, `m + on (Foo) n[5m]`)
|
||||
same(`m + ignoring (a, b) n[5m]`)
|
||||
another(`1 or 2`, `1`)
|
||||
another(`1 and 2`, `1`)
|
||||
another(`1 unless 2`, `NaN`)
|
||||
another(`1 default 2`, `1`)
|
||||
another(`1 default NaN`, `1`)
|
||||
another(`NaN default 2`, `2`)
|
||||
another(`1 > 2`, `NaN`)
|
||||
another(`1 > bool 2`, `0`)
|
||||
another(`3 >= 2`, `3`)
|
||||
another(`3 <= bool 2`, `0`)
|
||||
another(`1 + -2 - 3`, `-4`)
|
||||
another(`1 / 0 + 2`, `+Inf`)
|
||||
another(`2 + -1 / 0`, `-Inf`)
|
||||
another(`-1 ^ 0.5`, `NaN`)
|
||||
another(`512.5 - (1 + 3) * (2 ^ 2) ^ 3`, `256.5`)
|
||||
another(`1 == bool 1 != bool 24 < bool 4 > bool -1`, `1`)
|
||||
another(`1 == bOOl 1 != BOOL 24 < Bool 4 > booL -1`, `1`)
|
||||
another(`m1+on(foo)group_left m2`, `m1 + on (foo) group_left () m2`)
|
||||
another(`M1+ON(FOO)GROUP_left M2`, `M1 + on (FOO) group_left () M2`)
|
||||
same(`m1 + on (foo) group_right () m2`)
|
||||
same(`m1 + on (foo, bar) group_right (x, y) m2`)
|
||||
another(`m1 + on (foo, bar,) group_right (x, y,) m2`, `m1 + on (foo, bar) group_right (x, y) m2`)
|
||||
same(`m1 == bool on (foo, bar) group_right (x, y) m2`)
|
||||
another(`5 - 1 + 3 * 2 ^ 2 ^ 3 - 2 OR Metric {Bar= "Baz", aaa!="bb",cc=~"dd" ,zz !~"ff" } `,
|
||||
`770 or Metric{Bar="Baz", aaa!="bb", cc=~"dd", zz!~"ff"}`)
|
||||
same(`"foo" + bar()`)
|
||||
same(`"foo" + bar{x="y"}`)
|
||||
same(`("foo"[3s] + bar{x="y"})[5m:3s] offset 10s`)
|
||||
same(`("foo"[3s] + bar{x="y"})[5i:3i] offset 10i`)
|
||||
same(`bar + "foo" offset 3s`)
|
||||
same(`bar + "foo" offset 3i`)
|
||||
another(`1+2 if 2>3`, `NaN`)
|
||||
another(`1+4 if 2<3`, `5`)
|
||||
another(`2+6 default 3 if 2>3`, `8`)
|
||||
another(`2+6 if 2>3 default NaN`, `NaN`)
|
||||
another(`42 if 3>2 if 2+2<5`, `42`)
|
||||
another(`42 if 3>2 if 2+2>=5`, `NaN`)
|
||||
another(`1+2 ifnot 2>3`, `3`)
|
||||
another(`1+4 ifnot 2<3`, `NaN`)
|
||||
another(`2+6 default 3 ifnot 2>3`, `8`)
|
||||
another(`2+6 ifnot 2>3 default NaN`, `8`)
|
||||
another(`42 if 3>2 ifnot 2+2<5`, `NaN`)
|
||||
another(`42 if 3>2 ifnot 2+2>=5`, `42`)
|
||||
|
||||
// parensExpr
|
||||
another(`(-foo + ((bar) / (baz))) + ((23))`, `((0 - foo) + (bar / baz)) + 23`)
|
||||
another(`(FOO + ((Bar) / (baZ))) + ((23))`, `(FOO + (Bar / baZ)) + 23`)
|
||||
same(`(foo, bar)`)
|
||||
another(`1+(foo, bar,)`, `1 + (foo, bar)`)
|
||||
another(`((foo(bar,baz)), (1+(2)+(3,4)+()))`, `(foo(bar, baz), (3 + (3, 4)) + ())`)
|
||||
same(`()`)
|
||||
|
||||
// funcExpr
|
||||
same(`f()`)
|
||||
another(`f(x,)`, `f(x)`)
|
||||
another(`-f()-Ff()`, `(0 - f()) - Ff()`)
|
||||
same(`F()`)
|
||||
another(`+F()`, `F()`)
|
||||
another(`++F()`, `F()`)
|
||||
another(`--F()`, `0 - (0 - F())`)
|
||||
same(`f(http_server_request)`)
|
||||
same(`f(http_server_request)[4s:5m] offset 10m`)
|
||||
same(`f(http_server_request)[4i:5i] offset 10i`)
|
||||
same(`F(HttpServerRequest)`)
|
||||
same(`f(job, foo)`)
|
||||
same(`F(Job, Foo)`)
|
||||
another(` FOO (bar) + f ( m ( ),ff(1 + ( 2.5)) ,M[5m ] , "ff" )`, `FOO(bar) + f(m(), ff(3.5), M[5m], "ff")`)
|
||||
// funcName matching keywords
|
||||
same(`by(2)`)
|
||||
same(`BY(2)`)
|
||||
same(`or(2)`)
|
||||
same(`OR(2)`)
|
||||
same(`bool(2)`)
|
||||
same(`BOOL(2)`)
|
||||
same(`rate(rate(m))`)
|
||||
same(`rate(rate(m[5m]))`)
|
||||
same(`rate(rate(m[5m])[1h:])`)
|
||||
same(`rate(rate(m[5m])[1h:3s])`)
|
||||
|
||||
// aggrFuncExpr
|
||||
same(`sum(http_server_request) by ()`)
|
||||
same(`sum(http_server_request) by (job)`)
|
||||
same(`sum(http_server_request) without (job, foo)`)
|
||||
another(`sum(x,y,) without (a,b,)`, `sum(x, y) without (a, b)`)
|
||||
another(`sum by () (xx)`, `sum(xx) by ()`)
|
||||
another(`sum by (s) (xx)[5s]`, `(sum(xx) by (s))[5s]`)
|
||||
another(`SUM BY (ZZ, aa) (XX)`, `sum(XX) by (ZZ, aa)`)
|
||||
another(`sum without (a, b) (xx,2+2)`, `sum(xx, 4) without (a, b)`)
|
||||
another(`Sum WIthout (a, B) (XX,2+2)`, `sum(XX, 4) without (a, B)`)
|
||||
same(`sum(a) or sum(b)`)
|
||||
same(`sum(a) by () or sum(b) without (x, y)`)
|
||||
same(`sum(a) + sum(b)`)
|
||||
same(`sum(x) * (1 + sum(a))`)
|
||||
|
||||
// All the above
|
||||
another(`Sum(Ff(M) * M{X=""}[5m] Offset 7m - 123, 35) BY (X, y) * F2("Test")`,
|
||||
`sum((Ff(M) * M{X=""}[5m] offset 7m) - 123, 35) by (X, y) * F2("Test")`)
|
||||
another(`# comment
|
||||
Sum(Ff(M) * M{X=""}[5m] Offset 7m - 123, 35) BY (X, y) # yet another comment
|
||||
* F2("Test")`,
|
||||
`sum((Ff(M) * M{X=""}[5m] offset 7m) - 123, 35) by (X, y) * F2("Test")`)
|
||||
|
||||
// withExpr
|
||||
another(`with () x`, `x`)
|
||||
another(`with (x=1,) x`, `1`)
|
||||
another(`with (x = m offset 5h) x + x`, `m offset 5h + m offset 5h`)
|
||||
another(`with (x = m offset 5i) x + x`, `m offset 5i + m offset 5i`)
|
||||
another(`with (foo = bar{x="x"}) 1`, `1`)
|
||||
another(`with (foo = bar{x="x"}) "x"`, `"x"`)
|
||||
another(`with (f="x") f`, `"x"`)
|
||||
another(`with (foo = bar{x="x"}) x{x="y"}`, `x{x="y"}`)
|
||||
another(`with (foo = bar{x="x"}) 1+1`, `2`)
|
||||
another(`with (foo = bar{x="x"}) f()`, `f()`)
|
||||
another(`with (foo = bar{x="x"}) sum(x)`, `sum(x)`)
|
||||
another(`with (foo = bar{x="x"}) baz{foo="bar"}`, `baz{foo="bar"}`)
|
||||
another(`with (foo = bar) baz`, `baz`)
|
||||
another(`with (foo = bar) foo + foo{a="b"}`, `bar + bar{a="b"}`)
|
||||
another(`with (foo = bar, bar=baz + f()) test`, `test`)
|
||||
another(`with (ct={job="test"}) a{ct} + ct() + f({ct="x"})`, `(a{job="test"} + {job="test"}) + f({ct="x"})`)
|
||||
another(`with (ct={job="test", i="bar"}) ct + {ct, x="d"} + foo{ct, ct} + ctx(1)`,
|
||||
`(({job="test", i="bar"} + {job="test", i="bar", x="d"}) + foo{job="test", i="bar"}) + ctx(1)`)
|
||||
another(`with (foo = bar) {__name__=~"foo"}`, `{__name__=~"foo"}`)
|
||||
another(`with (foo = bar) {__name__="foo"}`, `bar`)
|
||||
another(`with (foo = bar) {__name__="foo", x="y"}`, `bar{x="y"}`)
|
||||
another(`with (foo(bar) = {__name__!="bar"}) foo(x)`, `{__name__!="bar"}`)
|
||||
another(`with (foo(bar) = {__name__="bar"}) foo(x)`, `x`)
|
||||
// override ttf to something new.
|
||||
another(`with (ttf = a) ttf + b`, `a + b`)
|
||||
// override ttf to ru
|
||||
another(`with (ttf = ru(m, n)) ttf`, `(clamp_min(n - clamp_min(m, 0), 0) / clamp_min(n, 0)) * 100`)
|
||||
|
||||
// Verify withExpr recursion and forward reference
|
||||
another(`with (x = x+y, y = x+x) y ^ 2`, `((x + y) + (x + y)) ^ 2`)
|
||||
another(`with (f1(x)=f2(x), f2(x)=f1(x)^2) f1(foobar)`, `f2(foobar)`)
|
||||
another(`with (f1(x)=f2(x), f2(x)=f1(x)^2) f2(foobar)`, `f2(foobar) ^ 2`)
|
||||
|
||||
// Verify withExpr funcs
|
||||
another(`with (x() = y+1) x`, `y + 1`)
|
||||
another(`with (x(foo) = foo+1) x(a)`, `a + 1`)
|
||||
another(`with (x(a, b) = a + b) x(foo, bar)`, `foo + bar`)
|
||||
another(`with (x(a, b) = a + b) x(foo, x(1, 2))`, `foo + 3`)
|
||||
another(`with (x(a) = sum(a) by (b)) x(xx) / x(y)`, `sum(xx) by (b) / sum(y) by (b)`)
|
||||
another(`with (f(a,f,x)=ff(x,f,a)) f(f(x,y,z),1,2)`, `ff(2, 1, ff(z, y, x))`)
|
||||
another(`with (f(x)=1+f(x)) f(foo{bar="baz"})`, `1 + f(foo{bar="baz"})`)
|
||||
another(`with (a=foo, y=bar, f(a)= a+a+y) f(x)`, `(x + x) + bar`)
|
||||
another(`with (f(a, b) = m{a, b}) f({a="x", b="y"}, {c="d"})`, `m{a="x", b="y", c="d"}`)
|
||||
another(`with (xx={a="x"}, f(a, b) = m{a, b}) f({xx, b="y"}, {c="d"})`, `m{a="x", b="y", c="d"}`)
|
||||
another(`with (x() = {b="c"}) foo{x}`, `foo{b="c"}`)
|
||||
another(`with (f(x)=x{foo="bar"} offset 5m) f(m offset 10m)`, `(m{foo="bar"} offset 10m) offset 5m`)
|
||||
another(`with (f(x)=x{foo="bar",bas="a"}[5m]) f(m[10m] offset 3s)`, `(m{foo="bar", bas="a"}[10m] offset 3s)[5m]`)
|
||||
another(`with (f(x)=x{foo="bar"}[5m] offset 10m) f(m{x="y"})`, `m{x="y", foo="bar"}[5m] offset 10m`)
|
||||
another(`with (f(x)=x{foo="bar"}[5m] offset 10m) f({x="y", foo="bar", foo="bar"})`, `{x="y", foo="bar"}[5m] offset 10m`)
|
||||
another(`with (f(m, x)=m{x}[5m] offset 10m) f(foo, {})`, `foo[5m] offset 10m`)
|
||||
another(`with (f(m, x)=m{x, bar="baz"}[5m] offset 10m) f(foo, {})`, `foo{bar="baz"}[5m] offset 10m`)
|
||||
another(`with (f(x)=x[5m] offset 3s) f(foo[3m]+bar)`, `(foo[3m] + bar)[5m] offset 3s`)
|
||||
another(`with (f(x)=x[5m:3s] oFFsEt 1.5m) f(sum(s) by (a,b))`, `(sum(s) by (a, b))[5m:3s] offset 1.5m`)
|
||||
another(`with (x="a", y=x) y+"bc"`, `"abc"`)
|
||||
another(`with (x="a", y="b"+x) "we"+y+"z"+f()`, `"webaz" + f()`)
|
||||
another(`with (f(x) = m{foo=x+"y", bar="y"+x, baz=x} + x) f("qwe")`, `m{foo="qwey", bar="yqwe", baz="qwe"} + "qwe"`)
|
||||
|
||||
// Verify withExpr for aggr func modifiers
|
||||
another(`with (f(x) = sum(m) by (x)) f(foo)`, `sum(m) by (foo)`)
|
||||
another(`with (f(x) = sum(m) by (x)) f((foo, bar, foo))`, `sum(m) by (foo, bar)`)
|
||||
another(`with (f(x) = sum(m) without (x,y)) f((a, b))`, `sum(m) without (a, b, y)`)
|
||||
another(`with (f(x) = sum(m) without (y,x)) f((a, y))`, `sum(m) without (y, a)`)
|
||||
another(`with (f(x,y) = a + on (x,y) group_left (y,bar) b) f(foo,())`, `a + on (foo) group_left (bar) b`)
|
||||
another(`with (f(x,y) = a + on (x,y) group_left (y,bar) b) f((foo),())`, `a + on (foo) group_left (bar) b`)
|
||||
another(`with (f(x,y) = a + on (x,y) group_left (y,bar) b) f((foo,xx),())`, `a + on (foo, xx) group_left (bar) b`)
|
||||
|
||||
// Verify nested with exprs
|
||||
another(`with (f(x) = (with(x=y) x) + x) f(z)`, `y + z`)
|
||||
another(`with (x=foo) f(a, with (y=x) y)`, `f(a, foo)`)
|
||||
another(`with (x=foo) a * x + (with (y=x) y) / y`, `(a * foo) + (foo / y)`)
|
||||
another(`with (x = with (y = foo) y + x) x/x`, `(foo + x) / (foo + x)`)
|
||||
another(`with (
|
||||
x = {foo="bar"},
|
||||
q = m{x, y="1"},
|
||||
f(x) =
|
||||
with (
|
||||
z(y) = x + y * q
|
||||
)
|
||||
z(foo) / f(x)
|
||||
)
|
||||
f(a)`, `(a + (foo * m{foo="bar", y="1"})) / f(a)`)
|
||||
|
||||
// complex withExpr
|
||||
another(`WITH (
|
||||
treshold = (0.9),
|
||||
commonFilters = {job="cacher", instance=~"1.2.3.4"},
|
||||
hits = rate(cache{type="hit", commonFilters}[5m]),
|
||||
miss = rate(cache{type="miss", commonFilters}[5m]),
|
||||
sumByInstance(arg) = sum(arg) by (instance),
|
||||
hitRatio = sumByInstance(hits) / sumByInstance(hits + miss)
|
||||
)
|
||||
hitRatio < treshold`,
|
||||
`(sum(rate(cache{type="hit", job="cacher", instance=~"1.2.3.4"}[5m])) by (instance) / sum(rate(cache{type="hit", job="cacher", instance=~"1.2.3.4"}[5m]) + rate(cache{type="miss", job="cacher", instance=~"1.2.3.4"}[5m])) by (instance)) < 0.9`)
|
||||
another(`WITH (
|
||||
x2(x) = x^2,
|
||||
f(x, y) = x2(x) + x*y + x2(y)
|
||||
)
|
||||
f(a, 3)
|
||||
`, `((a ^ 2) + (a * 3)) + 9`)
|
||||
another(`WITH (
|
||||
x2(x) = x^2,
|
||||
f(x, y) = x2(x) + x*y + x2(y)
|
||||
)
|
||||
f(2, 3)
|
||||
`, `19`)
|
||||
another(`WITH (
|
||||
commonFilters = {instance="foo"},
|
||||
timeToFuckup(currv, maxv) = (maxv - currv) / rate(currv)
|
||||
)
|
||||
timeToFuckup(diskUsage{commonFilters}, maxDiskSize{commonFilters})`,
|
||||
`(maxDiskSize{instance="foo"} - diskUsage{instance="foo"}) / rate(diskUsage{instance="foo"})`)
|
||||
another(`WITH (
|
||||
commonFilters = {job="foo", instance="bar"},
|
||||
sumRate(m, cf) = sum(rate(m{cf})) by (job, instance),
|
||||
hitRate(hits, misses) = sumRate(hits, commonFilters) / (sumRate(hits, commonFilters) + sumRate(misses, commonFilters))
|
||||
)
|
||||
hitRate(cacheHits, cacheMisses)`,
|
||||
`sum(rate(cacheHits{job="foo", instance="bar"})) by (job, instance) / (sum(rate(cacheHits{job="foo", instance="bar"})) by (job, instance) + sum(rate(cacheMisses{job="foo", instance="bar"})) by (job, instance))`)
|
||||
another(`with(y=123,z=5) union(with(y=3,f(x)=x*y) f(2) + f(3), with(x=5,y=2) x*y*z)`, `union(15, 50)`)
|
||||
}
|
||||
|
||||
func TestParsePromQLError(t *testing.T) {
|
||||
f := func(s string) {
|
||||
t.Helper()
|
||||
|
||||
e, err := parsePromQL(s)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error when parsing %q", s)
|
||||
}
|
||||
if e != nil {
|
||||
t.Fatalf("expecting nil expr when parsing %q", s)
|
||||
}
|
||||
}
|
||||
|
||||
// an empty string
|
||||
f("")
|
||||
f(" \t\b\r\n ")
|
||||
|
||||
// invalid metricExpr
|
||||
f(`{__name__="ff"} offset 55`)
|
||||
f(`{__name__="ff"} offset -5m`)
|
||||
f(`foo[55]`)
|
||||
f(`m[-5m]`)
|
||||
f(`{`)
|
||||
f(`foo{`)
|
||||
f(`foo{bar`)
|
||||
f(`foo{bar=`)
|
||||
f(`foo{bar="baz"`)
|
||||
f(`foo{bar="baz", `)
|
||||
f(`foo{123="23"}`)
|
||||
f(`foo{foo}`)
|
||||
f(`foo{,}`)
|
||||
f(`foo{,foo="bar"}`)
|
||||
f(`foo{foo=}`)
|
||||
f(`foo{foo="ba}`)
|
||||
f(`foo{"foo"="bar"}`)
|
||||
f(`foo{$`)
|
||||
f(`foo{a $`)
|
||||
f(`foo{a="b",$`)
|
||||
f(`foo{a="b"}$`)
|
||||
f(`[`)
|
||||
f(`[]`)
|
||||
f(`f[5m]$`)
|
||||
f(`[5m]`)
|
||||
f(`[5m] offset 4h`)
|
||||
f(`m[5m] offset $`)
|
||||
f(`m[5m] offset 5h $`)
|
||||
f(`m[]`)
|
||||
f(`m[-5m]`)
|
||||
f(`m[5m:`)
|
||||
f(`m[5m:-`)
|
||||
f(`m[5m:-1`)
|
||||
f(`m[5m:-1]`)
|
||||
f(`m[:`)
|
||||
f(`m[:-`)
|
||||
f(`m[:1]`)
|
||||
f(`m[:-1m]`)
|
||||
f(`m[5]`)
|
||||
f(`m[[5m]]`)
|
||||
f(`m[foo]`)
|
||||
f(`m["ff"]`)
|
||||
f(`m[10m`)
|
||||
f(`m[123`)
|
||||
f(`m["ff`)
|
||||
f(`m[(f`)
|
||||
f(`fd}`)
|
||||
f(`]`)
|
||||
f(`m $`)
|
||||
f(`m{,}`)
|
||||
f(`m{x=y}`)
|
||||
f(`m{x=y/5}`)
|
||||
f(`m{x=y+5}`)
|
||||
|
||||
// Invalid regexp
|
||||
f(`foo{bar=~"x["}`)
|
||||
f(`foo{bar=~"x("}`)
|
||||
f(`foo{bar=~"x)"}`)
|
||||
f(`foo{bar!~"x["}`)
|
||||
f(`foo{bar!~"x("}`)
|
||||
f(`foo{bar!~"x)"}`)
|
||||
|
||||
// invalid stringExpr
|
||||
f(`'`)
|
||||
f(`"`)
|
||||
f("`")
|
||||
f(`"foo`)
|
||||
f(`'foo`)
|
||||
f("`foo")
|
||||
f(`"foo\"bar`)
|
||||
f(`'foo\'bar`)
|
||||
f("`foo\\`bar")
|
||||
f(`"" $`)
|
||||
f(`"foo" +`)
|
||||
f(`n{"foo" + m`)
|
||||
|
||||
// invalid numberExpr
|
||||
f(`12.`)
|
||||
f(`1.2e`)
|
||||
f(`23e-`)
|
||||
f(`23E+`)
|
||||
f(`.`)
|
||||
f(`-12.`)
|
||||
f(`-1.2e`)
|
||||
f(`-23e-`)
|
||||
f(`-23E+`)
|
||||
f(`-.`)
|
||||
f(`-1$$`)
|
||||
f(`-$$`)
|
||||
f(`+$$`)
|
||||
f(`23 $$`)
|
||||
|
||||
// invalid binaryOpExpr
|
||||
f(`+`)
|
||||
f(`1 +`)
|
||||
f(`1 + 2.`)
|
||||
f(`3 unless`)
|
||||
f(`23 + on (foo)`)
|
||||
f(`m + on (,) m`)
|
||||
f(`3 * ignoring`)
|
||||
f(`m * on (`)
|
||||
f(`m * on (foo`)
|
||||
f(`m * on (foo,`)
|
||||
f(`m * on (foo,)`)
|
||||
f(`m * on (,foo)`)
|
||||
f(`m * on (,)`)
|
||||
f(`m == bool (bar) baz`)
|
||||
f(`m == bool () baz`)
|
||||
f(`m * by (baz) n`)
|
||||
f(`m + bool group_left m2`)
|
||||
f(`m + on () group_left (`)
|
||||
f(`m + on () group_left (,`)
|
||||
f(`m + on () group_left (,foo`)
|
||||
f(`m + on () group_left (foo,)`)
|
||||
f(`m + on () group_left (,foo)`)
|
||||
f(`m + on () group_left (foo)`)
|
||||
f(`m + on () group_right (foo) (m`)
|
||||
f(`m or ignoring () group_left () n`)
|
||||
f(`1 + bool 2`)
|
||||
f(`m % bool n`)
|
||||
f(`m * bool baz`)
|
||||
f(`M * BOoL BaZ`)
|
||||
f(`foo unless ignoring (bar) group_left xxx`)
|
||||
f(`foo or bool bar`)
|
||||
f(`foo == bool $$`)
|
||||
f(`"foo" + bar`)
|
||||
|
||||
// invalid parensExpr
|
||||
f(`(`)
|
||||
f(`($`)
|
||||
f(`(+`)
|
||||
f(`(1`)
|
||||
f(`(m+`)
|
||||
f(`1)`)
|
||||
f(`(,)`)
|
||||
f(`(1)$`)
|
||||
|
||||
// invalid funcExpr
|
||||
f(`f $`)
|
||||
f(`f($)`)
|
||||
f(`f[`)
|
||||
f(`f()$`)
|
||||
f(`f(`)
|
||||
f(`f(foo`)
|
||||
f(`f(f,`)
|
||||
f(`f(,`)
|
||||
f(`f(,)`)
|
||||
f(`f(,foo)`)
|
||||
f(`f(,foo`)
|
||||
f(`f(foo,$`)
|
||||
f(`f() by (a)`)
|
||||
f(`f without (x) (y)`)
|
||||
f(`f() foo (a)`)
|
||||
f(`f bar (x) (b)`)
|
||||
f(`f bar (x)`)
|
||||
|
||||
// invalid aggrFuncExpr
|
||||
f(`sum(`)
|
||||
f(`sum $`)
|
||||
f(`sum [`)
|
||||
f(`sum($)`)
|
||||
f(`sum()$`)
|
||||
f(`sum(foo) ba`)
|
||||
f(`sum(foo) ba()`)
|
||||
f(`sum(foo) by`)
|
||||
f(`sum(foo) without x`)
|
||||
f(`sum(foo) aaa`)
|
||||
f(`sum(foo) aaa x`)
|
||||
f(`sum() by $`)
|
||||
f(`sum() by (`)
|
||||
f(`sum() by ($`)
|
||||
f(`sum() by (a`)
|
||||
f(`sum() by (a $`)
|
||||
f(`sum() by (a ]`)
|
||||
f(`sum() by (a)$`)
|
||||
f(`sum() by (,`)
|
||||
f(`sum() by (a,$`)
|
||||
f(`sum() by (,)`)
|
||||
f(`sum() by (,a`)
|
||||
f(`sum() by (,a)`)
|
||||
f(`sum() on (b)`)
|
||||
f(`sum() bool`)
|
||||
f(`sum() group_left`)
|
||||
f(`sum() group_right(x)`)
|
||||
f(`sum ba`)
|
||||
f(`sum ba ()`)
|
||||
f(`sum by (`)
|
||||
f(`sum by (a`)
|
||||
f(`sum by (,`)
|
||||
f(`sum by (,)`)
|
||||
f(`sum by (,a`)
|
||||
f(`sum by (,a)`)
|
||||
f(`sum by (a)`)
|
||||
f(`sum by (a) (`)
|
||||
f(`sum by (a) [`)
|
||||
f(`sum by (a) {`)
|
||||
f(`sum by (a) (b`)
|
||||
f(`sum by (a) (b,`)
|
||||
f(`sum by (a) (,)`)
|
||||
f(`avg by (a) (,b)`)
|
||||
f(`sum by (x) (y) by (z)`)
|
||||
f(`sum(m) by (1)`)
|
||||
|
||||
// invalid withExpr
|
||||
f(`with $`)
|
||||
f(`with a`)
|
||||
f(`with a=b c`)
|
||||
f(`with (`)
|
||||
f(`with (x=b)$`)
|
||||
f(`with ($`)
|
||||
f(`with (foo`)
|
||||
f(`with (foo $`)
|
||||
f(`with (x y`)
|
||||
f(`with (x =`)
|
||||
f(`with (x = $`)
|
||||
f(`with (x= y`)
|
||||
f(`with (x= y $`)
|
||||
f(`with (x= y)`)
|
||||
f(`with (x=(`)
|
||||
f(`with (x=[)`)
|
||||
f(`with (x=() x)`)
|
||||
f(`with ($$)`)
|
||||
f(`with (x $$`)
|
||||
f(`with (x = $$)`)
|
||||
f(`with (x = foo) bar{x}`)
|
||||
f(`with (x = {foo="bar"}[5m]) bar{x}`)
|
||||
f(`with (x = {foo="bar"} offset 5m) bar{x}`)
|
||||
f(`with (x = a, x = b) c`)
|
||||
f(`with (x(a, a) = b) c`)
|
||||
f(`with (x=m{f="x"}) foo{x}`)
|
||||
f(`with (sum = x) y`)
|
||||
f(`with (rate(a) = b) c`)
|
||||
f(`with (clamp_min=x) y`)
|
||||
f(`with (f()`)
|
||||
f(`with (a=b c=d) e`)
|
||||
f(`with (f(x)=x^2) m{x}`)
|
||||
f(`with (f(x)=ff()) m{x}`)
|
||||
f(`with (f(x`)
|
||||
f(`with (x=m) a{x} + b`)
|
||||
f(`with (x=m) b + a{x}`)
|
||||
f(`with (x=m) f(b, a{x})`)
|
||||
f(`with (x=m) sum(a{x})`)
|
||||
f(`with (x=m) (a{x})`)
|
||||
f(`with (f(a)=a) f`)
|
||||
f(`with (f(x)=x{foo="bar"}) f(1)`)
|
||||
f(`with (f(x)=x{foo="bar"}) f(m + n)`)
|
||||
f(`with (f = with`)
|
||||
f(`with (,)`)
|
||||
f(`with (1) 2`)
|
||||
f(`with (f(1)=2) 3`)
|
||||
f(`with (f(,)=x) x`)
|
||||
f(`with (x(a) = {b="c"}) foo{x}`)
|
||||
f(`with (f(x) = m{foo=xx}) f("qwe")`)
|
||||
f(`a + with(f(x)=x) f`)
|
||||
f(`with (f(x) = x, y = sum(m) by (f)) y`)
|
||||
f(`with (f(x) = sum(m) by (x)) f({foo="bar"})`)
|
||||
f(`with (f(x) = sum(m) by (x)) f((xx(), {foo="bar"}))`)
|
||||
f(`with (f(x) = m + on (x) n) f(xx())`)
|
||||
f(`with (f(x) = m + on (a) group_right (x) n) f(xx())`)
|
||||
}
|
102
app/vmselect/promql/regexp_cache.go
Normal file
@@ -0,0 +1,102 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
func compileRegexpAnchored(re string) (*regexp.Regexp, error) {
|
||||
rcv := regexpCacheV.Get(re)
|
||||
if rcv != nil {
|
||||
return rcv.r, rcv.err
|
||||
}
|
||||
regexAnchored := fmt.Sprintf("^(?:%s)$", re)
|
||||
r, err := regexp.Compile(regexAnchored)
|
||||
rcv = &regexpCacheValue{
|
||||
r: r,
|
||||
err: err,
|
||||
}
|
||||
regexpCacheV.Put(re, rcv)
|
||||
return rcv.r, rcv.err
|
||||
}
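A minimal usage sketch, not part of this commit: since the pattern is wrapped into ^(?:...)$, the cached regexp only accepts whole-string matches, mirroring Prometheus label-matching semantics. The test name below is hypothetical and the snippet assumes it sits in the same promql package, so it can reach the unexported function.

package promql

import "testing"

// Hypothetical sketch: compileRegexpAnchored wraps the pattern in ^(?:...)$,
// so the compiled regexp must match the entire input string.
func TestCompileRegexpAnchoredSketch(t *testing.T) {
	r, err := compileRegexpAnchored("foo.*")
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !r.MatchString("foobar") {
		t.Fatalf("expected a full-string match for %q", "foobar")
	}
	if r.MatchString("xfoobar") {
		t.Fatalf("anchoring must reject the partial match for %q", "xfoobar")
	}
}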
|
||||
|
||||
var regexpCacheV = func() *regexpCache {
|
||||
rc := &regexpCache{
|
||||
m: make(map[string]*regexpCacheValue),
|
||||
}
|
||||
metrics.NewGauge(`vm_cache_requests_total{type="promql/regexp"}`, func() float64 {
|
||||
return float64(rc.Requests())
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_misses_total{type="promql/regexp"}`, func() float64 {
|
||||
return float64(rc.Misses())
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_entries{type="promql/regexp"}`, func() float64 {
|
||||
return float64(rc.Len())
|
||||
})
|
||||
return rc
|
||||
}()
|
||||
|
||||
const regexpCacheMaxLen = 10e3
|
||||
|
||||
type regexpCacheValue struct {
|
||||
r *regexp.Regexp
|
||||
err error
|
||||
}
|
||||
|
||||
type regexpCache struct {
|
||||
m map[string]*regexpCacheValue
|
||||
mu sync.RWMutex
|
||||
|
||||
requests uint64
|
||||
misses uint64
|
||||
}
|
||||
|
||||
func (rc *regexpCache) Requests() uint64 {
|
||||
return atomic.LoadUint64(&rc.requests)
|
||||
}
|
||||
|
||||
func (rc *regexpCache) Misses() uint64 {
|
||||
return atomic.LoadUint64(&rc.misses)
|
||||
}
|
||||
|
||||
func (rc *regexpCache) Len() uint64 {
|
||||
rc.mu.RLock()
|
||||
n := len(rc.m)
|
||||
rc.mu.RUnlock()
|
||||
return uint64(n)
|
||||
}
|
||||
|
||||
func (rc *regexpCache) Get(regexp string) *regexpCacheValue {
|
||||
atomic.AddUint64(&rc.requests, 1)
|
||||
|
||||
rc.mu.RLock()
|
||||
rcv := rc.m[regexp]
|
||||
rc.mu.RUnlock()
|
||||
|
||||
if rcv == nil {
|
||||
atomic.AddUint64(&rc.misses, 1)
|
||||
}
|
||||
return rcv
|
||||
}
|
||||
|
||||
func (rc *regexpCache) Put(regexp string, rcv *regexpCacheValue) {
|
||||
rc.mu.Lock()
|
||||
overflow := len(rc.m) - regexpCacheMaxLen
|
||||
if overflow > 0 {
|
||||
// Remove 10% of items from the cache.
|
||||
overflow = int(float64(len(rc.m)) * 0.1)
|
||||
for k := range rc.m {
|
||||
delete(rc.m, k)
|
||||
overflow--
|
||||
if overflow <= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
rc.m[regexp] = rcv
|
||||
rc.mu.Unlock()
|
||||
}
|
763
app/vmselect/promql/rollup.go
Normal file
|
@ -0,0 +1,763 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/valyala/histogram"
|
||||
)
|
||||
|
||||
var rollupFuncs = map[string]newRollupFunc{
|
||||
"default_rollup": newRollupFuncOneArg(rollupDefault), // default rollup func
|
||||
|
||||
// Standard rollup funcs from PromQL.
|
||||
// See funcs accepting range-vector on https://prometheus.io/docs/prometheus/latest/querying/functions/ .
|
||||
"changes": newRollupFuncOneArg(rollupChanges),
|
||||
"delta": newRollupFuncOneArg(rollupDelta),
|
||||
"deriv": newRollupFuncOneArg(rollupDeriv),
|
||||
"holt_winters": newRollupHoltWinters,
|
||||
"idelta": newRollupFuncOneArg(rollupIdelta),
|
||||
"increase": newRollupFuncOneArg(rollupDelta), // + rollupFuncsRemoveCounterResets
|
||||
"irate": newRollupFuncOneArg(rollupIderiv), // + rollupFuncsRemoveCounterResets
|
||||
"predict_linear": newRollupPredictLinear,
|
||||
"rate": newRollupFuncOneArg(rollupDeriv), // + rollupFuncsRemoveCounterResets
|
||||
"resets": newRollupFuncOneArg(rollupResets),
|
||||
"avg_over_time": newRollupFuncOneArg(rollupAvg),
|
||||
"min_over_time": newRollupFuncOneArg(rollupMin),
|
||||
"max_over_time": newRollupFuncOneArg(rollupMax),
|
||||
"sum_over_time": newRollupFuncOneArg(rollupSum),
|
||||
"count_over_time": newRollupFuncOneArg(rollupCount),
|
||||
"quantile_over_time": newRollupQuantile,
|
||||
"stddev_over_time": newRollupFuncOneArg(rollupStddev),
|
||||
"stdvar_over_time": newRollupFuncOneArg(rollupStdvar),
|
||||
|
||||
// Additional rollup funcs.
|
||||
"first_over_time": newRollupFuncOneArg(rollupFirst),
|
||||
"last_over_time": newRollupFuncOneArg(rollupLast),
|
||||
"distinct_over_time": newRollupFuncOneArg(rollupDistinct),
|
||||
"integrate": newRollupFuncOneArg(rollupIntegrate),
|
||||
"ideriv": newRollupFuncOneArg(rollupIderiv),
|
||||
"rollup": newRollupFuncOneArg(rollupFake),
|
||||
"rollup_rate": newRollupFuncOneArg(rollupFake), // + rollupFuncsRemoveCounterResets
|
||||
"rollup_deriv": newRollupFuncOneArg(rollupFake),
|
||||
"rollup_delta": newRollupFuncOneArg(rollupFake),
|
||||
"rollup_increase": newRollupFuncOneArg(rollupFake), // + rollupFuncsRemoveCounterResets
|
||||
}
|
||||
|
||||
var rollupFuncsRemoveCounterResets = map[string]bool{
|
||||
"increase": true,
|
||||
"irate": true,
|
||||
"rate": true,
|
||||
"rollup_rate": true,
|
||||
"rollup_increase": true,
|
||||
}
|
||||
|
||||
var rollupFuncsKeepMetricGroup = map[string]bool{
|
||||
"default_rollup": true,
|
||||
"avg_over_time": true,
|
||||
"min_over_time": true,
|
||||
"max_over_time": true,
|
||||
"quantile_over_time": true,
|
||||
"rollup": true,
|
||||
}
|
||||
|
||||
func getRollupArgIdx(funcName string) int {
|
||||
funcName = strings.ToLower(funcName)
|
||||
if rollupFuncs[funcName] == nil {
|
||||
logger.Panicf("BUG: getRollupArgIdx is called for non-rollup func %q", funcName)
|
||||
}
|
||||
if funcName == "quantile_over_time" {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func getRollupFunc(funcName string) newRollupFunc {
|
||||
funcName = strings.ToLower(funcName)
|
||||
return rollupFuncs[funcName]
|
||||
}
|
||||
|
||||
func isRollupFunc(funcName string) bool {
|
||||
return getRollupFunc(funcName) != nil
|
||||
}
|
||||
|
||||
type rollupFuncArg struct {
|
||||
prevValue float64
|
||||
prevTimestamp int64
|
||||
values []float64
|
||||
timestamps []int64
|
||||
|
||||
idx int
|
||||
step int64
|
||||
}
|
||||
|
||||
func (rfa *rollupFuncArg) reset() {
|
||||
rfa.prevValue = 0
|
||||
rfa.prevTimestamp = 0
|
||||
rfa.values = nil
|
||||
rfa.timestamps = nil
|
||||
rfa.idx = 0
|
||||
rfa.step = 0
|
||||
}
|
||||
|
||||
// rollupFunc must return rollup value for the given rfa.
|
||||
//
|
||||
// prevValue may be nan, values and timestamps may be empty.
|
||||
type rollupFunc func(rfa *rollupFuncArg) float64
|
||||
|
||||
type rollupConfig struct {
|
||||
// This tag value must be added to "rollup" tag if non-empty.
|
||||
TagValue string
|
||||
|
||||
Func rollupFunc
|
||||
Start int64
|
||||
End int64
|
||||
Step int64
|
||||
Window int64
|
||||
|
||||
Timestamps []int64
|
||||
}
|
||||
|
||||
var (
|
||||
nan = math.NaN()
|
||||
inf = math.Inf(1)
|
||||
)
|
||||
|
||||
// The maximum interval without previous rows.
|
||||
const maxSilenceInterval = 5 * 60 * 1000
|
||||
|
||||
// Do calculates rollups for the given timestamps and values, appends
|
||||
// them to dstValues and returns results.
|
||||
//
|
||||
// rc.Timestamps are used as timestamps for dstValues.
|
||||
//
|
||||
// timestamps must cover time range [rc.Start - rc.Window - maxSilenceInterval ... rc.End + rc.Step].
|
||||
//
|
||||
// Cannot be called from concurrent goroutines.
|
||||
func (rc *rollupConfig) Do(dstValues []float64, values []float64, timestamps []int64) []float64 {
|
||||
// Sanity checks.
|
||||
if rc.Step <= 0 {
|
||||
logger.Panicf("BUG: Step must be bigger than 0; got %d", rc.Step)
|
||||
}
|
||||
if rc.Start > rc.End {
|
||||
logger.Panicf("BUG: Start cannot exceed End; got %d vs %d", rc.Start, rc.End)
|
||||
}
|
||||
if rc.Window < 0 {
|
||||
logger.Panicf("BUG: Window must be non-negative; got %d", rc.Window)
|
||||
}
|
||||
if err := ValidateMaxPointsPerTimeseries(rc.Start, rc.End, rc.Step); err != nil {
|
||||
logger.Panicf("BUG: %s; this must be validated before the call to rollupConfig.Do", err)
|
||||
}
|
||||
|
||||
// Extend dstValues in order to remove mallocs below.
|
||||
dstValues = decimal.ExtendFloat64sCapacity(dstValues, len(rc.Timestamps))
|
||||
|
||||
maxPrevInterval := getMaxPrevInterval(timestamps)
|
||||
window := rc.Window
|
||||
if window <= 0 {
|
||||
window = rc.Step
|
||||
}
|
||||
if window < maxPrevInterval {
|
||||
window = maxPrevInterval
|
||||
}
|
||||
rfa := getRollupFuncArg()
|
||||
rfa.idx = 0
|
||||
rfa.step = rc.Step
|
||||
|
||||
i := 0
|
||||
j := 0
|
||||
for _, ts := range rc.Timestamps {
|
||||
tEnd := ts + rc.Step
|
||||
tStart := tEnd - window
|
||||
n := sort.Search(len(timestamps)-i, func(n int) bool {
|
||||
return timestamps[i+n] > tStart
|
||||
})
|
||||
i += n
|
||||
if j < i {
|
||||
j = i
|
||||
}
|
||||
n = sort.Search(len(timestamps)-j, func(n int) bool {
|
||||
return timestamps[j+n] > tEnd
|
||||
})
|
||||
j += n
|
||||
|
||||
rfa.prevValue = nan
|
||||
rfa.prevTimestamp = tStart - maxPrevInterval
|
||||
if i > 0 && timestamps[i-1] > rfa.prevTimestamp {
|
||||
rfa.prevValue = values[i-1]
|
||||
rfa.prevTimestamp = timestamps[i-1]
|
||||
}
|
||||
|
||||
rfa.values = values[i:j]
|
||||
rfa.timestamps = timestamps[i:j]
|
||||
value := rc.Func(rfa)
|
||||
rfa.idx++
|
||||
dstValues = append(dstValues, value)
|
||||
}
|
||||
putRollupFuncArg(rfa)
|
||||
|
||||
return dstValues
|
||||
}
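In other words (our notation, not taken from the source), every output point at timestamp ts is computed from the raw samples that fall into a half-open lookback window ending at ts + Step:

\[
V(ts) = \{(t_k, v_k) \mid ts + \mathrm{Step} - w < t_k \le ts + \mathrm{Step}\},
\qquad
w = \max(\mathrm{Window}\ \text{(or Step if unset)},\ \mathrm{maxPrevInterval}),
\]

and rc.Func is applied to V(ts) together with the last sample preceding the window (prevValue), provided it is recent enough.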
|
||||
|
||||
func getMaxPrevInterval(timestamps []int64) int64 {
|
||||
if len(timestamps) < 2 {
|
||||
return int64(maxSilenceInterval)
|
||||
}
|
||||
d := (timestamps[len(timestamps)-1] - timestamps[0]) / int64(len(timestamps)-1)
|
||||
if d <= 0 {
|
||||
return 1
|
||||
}
|
||||
// Slightly increase d in order to handle possible jitter in scrape interval.
|
||||
return d + (d / 16)
|
||||
}
|
||||
|
||||
func removeCounterResets(values []float64) {
|
||||
// There is no need in handling NaNs here, since they are impossible
|
||||
// on values from vmstorage.
|
||||
if len(values) == 0 {
|
||||
return
|
||||
}
|
||||
var correction float64
|
||||
prevValue := values[0]
|
||||
for i, v := range values {
|
||||
d := v - prevValue
|
||||
if d < 0 {
|
||||
if (-d * 8) < prevValue {
|
||||
// This is likely jitter from `Prometheus HA pairs`.
|
||||
// Just substitute v with prevValue.
|
||||
v = prevValue
|
||||
} else {
|
||||
correction += prevValue
|
||||
}
|
||||
}
|
||||
prevValue = v
|
||||
values[i] = v + correction
|
||||
}
|
||||
}
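The loop above can be restated compactly (our notation; c is the accumulated reset correction, initially zero):

\[
\text{if } v_i < v_{i-1}:\quad
\begin{cases}
v_i \leftarrow v_{i-1} & \text{if } 8\,(v_{i-1} - v_i) < v_{i-1} \quad\text{(small drop, treated as HA jitter)}\\
c \leftarrow c + v_{i-1} & \text{otherwise (counter reset)}
\end{cases}
\qquad
\tilde v_i = v_i + c
\]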
|
||||
|
||||
func deltaValues(values []float64) {
|
||||
// There is no need in handling NaNs here, since they are impossible
|
||||
// on values from vmstorage.
|
||||
if len(values) == 0 {
|
||||
return
|
||||
}
|
||||
prevValue := values[0]
|
||||
for i, v := range values[1:] {
|
||||
values[i] = v - prevValue
|
||||
prevValue = v
|
||||
}
|
||||
values[len(values)-1] = nan
|
||||
}
|
||||
|
||||
func derivValues(values []float64, timestamps []int64) {
|
||||
// There is no need in handling NaNs here, since they are impossible
|
||||
// on values from vmstorage.
|
||||
if len(values) == 0 {
|
||||
return
|
||||
}
|
||||
prevValue := values[0]
|
||||
prevTs := timestamps[0]
|
||||
for i, v := range values[1:] {
|
||||
ts := timestamps[i+1]
|
||||
dt := float64(ts-prevTs) * 1e-3
|
||||
values[i] = (v - prevValue) / dt
|
||||
prevValue = v
|
||||
prevTs = ts
|
||||
}
|
||||
values[len(values)-1] = nan
|
||||
}
|
||||
|
||||
type newRollupFunc func(args []interface{}) (rollupFunc, error)
|
||||
|
||||
func newRollupFuncOneArg(rf rollupFunc) newRollupFunc {
|
||||
return func(args []interface{}) (rollupFunc, error) {
|
||||
if err := expectRollupArgsNum(args, 1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rf, nil
|
||||
}
|
||||
}
|
||||
|
||||
func newRollupHoltWinters(args []interface{}) (rollupFunc, error) {
|
||||
if err := expectRollupArgsNum(args, 3); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sfs, err := getScalar(args[1], 1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tfs, err := getScalar(args[2], 2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rf := func(rfa *rollupFuncArg) float64 {
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
sf := sfs[rfa.idx]
|
||||
if sf <= 0 || sf >= 1 {
|
||||
return nan
|
||||
}
|
||||
tf := tfs[rfa.idx]
|
||||
if tf <= 0 || tf >= 1 {
|
||||
return nan
|
||||
}
|
||||
|
||||
// See https://en.wikipedia.org/wiki/Exponential_smoothing#Double_exponential_smoothing .
|
||||
// TODO: determine whether this shit really works.
|
||||
s0 := rfa.prevValue
|
||||
if math.IsNaN(s0) {
|
||||
s0 = values[0]
|
||||
values = values[1:]
|
||||
if len(values) == 0 {
|
||||
return s0
|
||||
}
|
||||
}
|
||||
b0 := values[0] - s0
|
||||
for _, v := range values {
|
||||
s1 := sf*v + (1-sf)*(s0+b0)
|
||||
b1 := tf*(s1-s0) + (1-tf)*b0
|
||||
s0 = s1
|
||||
b0 = b1
|
||||
}
|
||||
return s0
|
||||
}
|
||||
return rf, nil
|
||||
}
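For reference, the closure above iterates the standard double exponential smoothing recurrences, with sf acting as the level smoothing factor and tf as the trend smoothing factor (both must lie strictly inside (0, 1)):

\[
s_t = \mathit{sf}\cdot v_t + (1 - \mathit{sf})\,(s_{t-1} + b_{t-1}),
\qquad
b_t = \mathit{tf}\,(s_t - s_{t-1}) + (1 - \mathit{tf})\,b_{t-1},
\]

and the rollup value is the final level s_T.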
|
||||
|
||||
func newRollupPredictLinear(args []interface{}) (rollupFunc, error) {
|
||||
if err := expectRollupArgsNum(args, 2); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
secs, err := getScalar(args[1], 1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rf := func(rfa *rollupFuncArg) float64 {
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
timestamps := rfa.timestamps
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
|
||||
// See https://en.wikipedia.org/wiki/Simple_linear_regression#Numerical_example
|
||||
// TODO: determine whether this shit really works.
|
||||
tFirst := rfa.prevTimestamp
|
||||
vSum := rfa.prevValue
|
||||
if math.IsNaN(rfa.prevValue) {
|
||||
tFirst = timestamps[0]
|
||||
vSum = 0
|
||||
}
|
||||
tSum := float64(0)
|
||||
tvSum := float64(0)
|
||||
ttSum := float64(0)
|
||||
for i, v := range values {
|
||||
dt := float64(timestamps[i]-tFirst) * 1e-3
|
||||
vSum += v
|
||||
tSum += dt
|
||||
tvSum += dt * v
|
||||
ttSum += dt * dt
|
||||
}
|
||||
n := float64(len(values))
|
||||
k := (n*tvSum - tSum*vSum) / (n*ttSum - tSum*tSum)
|
||||
v := (vSum - k*tSum) / n
|
||||
sec := secs[rfa.idx]
|
||||
return v + k*sec
|
||||
}
|
||||
return rf, nil
|
||||
}
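The accumulation above is ordinary least squares over the pairs (dt_i, v_i), where dt_i is the sample offset in seconds relative to tFirst (up to how prevValue is folded into vSum, which slightly shifts the intercept):

\[
k = \frac{n\sum_i dt_i v_i - \sum_i dt_i \sum_i v_i}{\,n\sum_i dt_i^2 - \bigl(\sum_i dt_i\bigr)^2\,},
\qquad
v_0 = \frac{\sum_i v_i - k\sum_i dt_i}{n},
\qquad
\mathrm{predict\_linear}(\sigma) = v_0 + k\,\sigma,
\]

where σ is the prediction horizon in seconds taken from the second argument.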
|
||||
|
||||
func newRollupQuantile(args []interface{}) (rollupFunc, error) {
|
||||
if err := expectRollupArgsNum(args, 2); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
phis, err := getScalar(args[0], 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rf := func(rfa *rollupFuncArg) float64 {
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
hf := histogram.GetFast()
|
||||
for _, v := range values {
|
||||
hf.Update(v)
|
||||
}
|
||||
phi := phis[rfa.idx]
|
||||
qv := hf.Quantile(phi)
|
||||
histogram.PutFast(hf)
|
||||
return qv
|
||||
}
|
||||
return rf, nil
|
||||
}
|
||||
|
||||
func rollupAvg(rfa *rollupFuncArg) float64 {
|
||||
// Do not use `Rapid calculation methods` at https://en.wikipedia.org/wiki/Standard_deviation,
|
||||
// since it is slower and has no significant benefits in precision.
|
||||
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
var sum float64
|
||||
for _, v := range values {
|
||||
sum += v
|
||||
}
|
||||
return sum / float64(len(values))
|
||||
}
|
||||
|
||||
func rollupMin(rfa *rollupFuncArg) float64 {
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
minValue := values[0]
|
||||
for _, v := range values {
|
||||
if v < minValue {
|
||||
minValue = v
|
||||
}
|
||||
}
|
||||
return minValue
|
||||
}
|
||||
|
||||
func rollupMax(rfa *rollupFuncArg) float64 {
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
maxValue := values[0]
|
||||
for _, v := range values {
|
||||
if v > maxValue {
|
||||
maxValue = v
|
||||
}
|
||||
}
|
||||
return maxValue
|
||||
}
|
||||
|
||||
func rollupSum(rfa *rollupFuncArg) float64 {
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
var sum float64
|
||||
for _, v := range values {
|
||||
sum += v
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
func rollupCount(rfa *rollupFuncArg) float64 {
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
return float64(len(values))
|
||||
}
|
||||
|
||||
func rollupStddev(rfa *rollupFuncArg) float64 {
|
||||
stdvar := rollupStdvar(rfa)
|
||||
return math.Sqrt(stdvar)
|
||||
}
|
||||
|
||||
func rollupStdvar(rfa *rollupFuncArg) float64 {
|
||||
// See `Rapid calculation methods` at https://en.wikipedia.org/wiki/Standard_deviation
|
||||
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
var avg float64
|
||||
var count float64
|
||||
var q float64
|
||||
for _, v := range values {
|
||||
count++
|
||||
avgNew := avg + (v-avg)/count
|
||||
q += (v - avg) * (v - avgNew)
|
||||
avg = avgNew
|
||||
}
|
||||
return q / count
|
||||
}
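The loop above is Welford's online algorithm; after the k-th sample the running mean and the sum of squared deviations satisfy

\[
\bar v_k = \bar v_{k-1} + \frac{v_k - \bar v_{k-1}}{k},
\qquad
q_k = q_{k-1} + (v_k - \bar v_{k-1})(v_k - \bar v_k),
\qquad
\mathrm{stdvar} = \frac{q_n}{n},
\]

i.e. the population variance; rollupStddev returns its square root.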
|
||||
|
||||
func rollupDelta(rfa *rollupFuncArg) float64 {
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
prevValue := rfa.prevValue
|
||||
if math.IsNaN(prevValue) {
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
prevValue = values[0]
|
||||
values = values[1:]
|
||||
}
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
return values[len(values)-1] - prevValue
|
||||
}
|
||||
|
||||
func rollupIdelta(rfa *rollupFuncArg) float64 {
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
lastValue := values[len(values)-1]
|
||||
values = values[:len(values)-1]
|
||||
if len(values) == 0 {
|
||||
prevValue := rfa.prevValue
|
||||
if math.IsNaN(prevValue) {
|
||||
return nan
|
||||
}
|
||||
return lastValue - prevValue
|
||||
}
|
||||
return lastValue - values[len(values)-1]
|
||||
}
|
||||
|
||||
func rollupDeriv(rfa *rollupFuncArg) float64 {
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
timestamps := rfa.timestamps
|
||||
prevValue := rfa.prevValue
|
||||
prevTimestamp := rfa.prevTimestamp
|
||||
if math.IsNaN(prevValue) {
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
prevValue = values[0]
|
||||
prevTimestamp = timestamps[0]
|
||||
values = values[1:]
|
||||
timestamps = timestamps[1:]
|
||||
}
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
vEnd := values[len(values)-1]
|
||||
tEnd := timestamps[len(timestamps)-1]
|
||||
dv := vEnd - prevValue
|
||||
dt := float64(tEnd-prevTimestamp) * 1e-3
|
||||
return dv / dt
|
||||
}
|
||||
|
||||
func rollupIderiv(rfa *rollupFuncArg) float64 {
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
timestamps := rfa.timestamps
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
vEnd := values[len(values)-1]
|
||||
tEnd := timestamps[len(timestamps)-1]
|
||||
values = values[:len(values)-1]
|
||||
timestamps = timestamps[:len(timestamps)-1]
|
||||
prevValue := rfa.prevValue
|
||||
prevTimestamp := rfa.prevTimestamp
|
||||
if len(values) == 0 {
|
||||
if math.IsNaN(prevValue) {
|
||||
return nan
|
||||
}
|
||||
} else {
|
||||
prevValue = values[len(values)-1]
|
||||
prevTimestamp = timestamps[len(timestamps)-1]
|
||||
}
|
||||
dv := vEnd - prevValue
|
||||
dt := tEnd - prevTimestamp
|
||||
return dv / (float64(dt) / 1000)
|
||||
}
|
||||
|
||||
func rollupChanges(rfa *rollupFuncArg) float64 {
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
n := 0
|
||||
prevValue := rfa.prevValue
|
||||
if math.IsNaN(prevValue) {
|
||||
prevValue = values[0]
|
||||
}
|
||||
for _, v := range values {
|
||||
if v != prevValue {
|
||||
n++
|
||||
prevValue = v
|
||||
}
|
||||
}
|
||||
return float64(n)
|
||||
}
|
||||
|
||||
func rollupResets(rfa *rollupFuncArg) float64 {
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
prevValue := rfa.prevValue
|
||||
if math.IsNaN(prevValue) {
|
||||
prevValue = values[0]
|
||||
values = values[1:]
|
||||
}
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
n := 0
|
||||
for _, v := range values {
|
||||
if v < prevValue {
|
||||
n++
|
||||
}
|
||||
prevValue = v
|
||||
}
|
||||
return float64(n)
|
||||
}
|
||||
|
||||
func rollupFirst(rfa *rollupFuncArg) float64 {
|
||||
// See https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness
|
||||
v := rfa.prevValue
|
||||
if !math.IsNaN(v) {
|
||||
return v
|
||||
}
|
||||
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
return values[0]
|
||||
}
|
||||
|
||||
var rollupDefault = rollupFirst
|
||||
|
||||
func rollupLast(rfa *rollupFuncArg) float64 {
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
return values[len(values)-1]
|
||||
}
|
||||
|
||||
func rollupDistinct(rfa *rollupFuncArg) float64 {
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
m := make(map[float64]struct{})
|
||||
for _, v := range values {
|
||||
m[v] = struct{}{}
|
||||
}
|
||||
return float64(len(m))
|
||||
}
|
||||
|
||||
func rollupIntegrate(rfa *rollupFuncArg) float64 {
|
||||
prevTimestamp := rfa.prevTimestamp
|
||||
|
||||
// There is no need in handling NaNs here, since they must be cleaned up
|
||||
// before calling rollup funcs.
|
||||
values := rfa.values
|
||||
timestamps := rfa.timestamps
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
prevValue := rfa.prevValue
|
||||
if math.IsNaN(prevValue) {
|
||||
prevValue = values[0]
|
||||
prevTimestamp = timestamps[0]
|
||||
values = values[1:]
|
||||
timestamps = timestamps[1:]
|
||||
}
|
||||
if len(values) == 0 {
|
||||
return nan
|
||||
}
|
||||
|
||||
var sum float64
|
||||
for i, v := range values {
|
||||
timestamp := timestamps[i]
|
||||
dt := float64(timestamp-prevTimestamp) * 1e-3
|
||||
sum += 0.5 * (v + prevValue) * dt
|
||||
}
|
||||
return sum
|
||||
}
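The sum above is the trapezoidal rule applied to the raw samples, with dt converted from milliseconds to seconds:

\[
\mathrm{integrate} \approx \sum_i \tfrac12\,(v_i + v_{i-1})\,(t_i - t_{i-1}),
\]

where t_i are the sample timestamps expressed in seconds.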
|
||||
|
||||
func rollupFake(rfa *rollupFuncArg) float64 {
|
||||
logger.Panicf("BUG: rollupFake shouldn't be called")
|
||||
return 0
|
||||
}
|
||||
|
||||
func getScalar(arg interface{}, argNum int) ([]float64, error) {
|
||||
ts, ok := arg.([]*timeseries)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf(`unexpected type for arg #%d; got %T; want %T`, argNum+1, arg, ts)
|
||||
}
|
||||
if len(ts) != 1 {
|
||||
return nil, fmt.Errorf(`arg #%d must contain a single timeseries; got %d timeseries`, argNum+1, len(ts))
|
||||
}
|
||||
return ts[0].Values, nil
|
||||
}
|
||||
|
||||
func getString(tss []*timeseries, argNum int) (string, error) {
|
||||
if len(tss) != 1 {
|
||||
return "", fmt.Errorf(`arg #%d must contain a single timeseries; got %d timeseries`, argNum+1, len(tss))
|
||||
}
|
||||
ts := tss[0]
|
||||
for _, v := range ts.Values {
|
||||
if !math.IsNaN(v) {
|
||||
return "", fmt.Errorf(`arg #%d contains non-string timeseries`, argNum+1)
|
||||
}
|
||||
}
|
||||
return string(ts.MetricName.MetricGroup), nil
|
||||
}
|
||||
|
||||
func expectRollupArgsNum(args []interface{}, expectedNum int) error {
|
||||
if len(args) == expectedNum {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf(`unexpected number of args; got %d; want %d`, len(args), expectedNum)
|
||||
}
|
||||
|
||||
func getRollupFuncArg() *rollupFuncArg {
|
||||
v := rfaPool.Get()
|
||||
if v == nil {
|
||||
return &rollupFuncArg{}
|
||||
}
|
||||
return v.(*rollupFuncArg)
|
||||
}
|
||||
|
||||
func putRollupFuncArg(rfa *rollupFuncArg) {
|
||||
rfa.reset()
|
||||
rfaPool.Put(rfa)
|
||||
}
|
||||
|
||||
var rfaPool sync.Pool
|
496
app/vmselect/promql/rollup_result_cache.go
Normal file
|
@ -0,0 +1,496 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
|
||||
"github.com/VictoriaMetrics/fastcache"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var rollupResultCacheV = &rollupResultCache{
|
||||
fastcache.New(1024 * 1024), // This is a cache for testing.
|
||||
}
|
||||
var rollupResultCachePath string
|
||||
|
||||
func getRollupResultCacheSize() int {
|
||||
rollupResultCacheSizeOnce.Do(func() {
|
||||
n := memory.Allowed() / 16
|
||||
if n <= 0 {
|
||||
n = 1024 * 1024
|
||||
}
|
||||
rollupResultCacheSize = n
|
||||
})
|
||||
return rollupResultCacheSize
|
||||
}
|
||||
|
||||
var (
|
||||
rollupResultCacheSize int
|
||||
rollupResultCacheSizeOnce sync.Once
|
||||
)
|
||||
|
||||
// InitRollupResultCache initializes the rollupResult cache
|
||||
func InitRollupResultCache(cachePath string) {
|
||||
rollupResultCachePath = cachePath
|
||||
startTime := time.Now()
|
||||
var c *fastcache.Cache
|
||||
if len(rollupResultCachePath) > 0 {
|
||||
logger.Infof("loading rollupResult cache from %q...", rollupResultCachePath)
|
||||
c = fastcache.LoadFromFileOrNew(rollupResultCachePath, getRollupResultCacheSize())
|
||||
} else {
|
||||
c = fastcache.New(getRollupResultCacheSize())
|
||||
}
|
||||
stats := &fastcache.Stats{}
|
||||
var statsLock sync.Mutex
|
||||
var statsLastUpdate time.Time
|
||||
fcs := func() *fastcache.Stats {
|
||||
statsLock.Lock()
|
||||
defer statsLock.Unlock()
|
||||
|
||||
if time.Since(statsLastUpdate) < time.Second {
|
||||
return stats
|
||||
}
|
||||
var fcs fastcache.Stats
|
||||
c.UpdateStats(&fcs)
|
||||
stats = &fcs
|
||||
statsLastUpdate = time.Now()
|
||||
return stats
|
||||
}
|
||||
if len(rollupResultCachePath) > 0 {
|
||||
logger.Infof("loaded rollupResult cache from %q in %s; entriesCount: %d, bytesSize: %d",
|
||||
rollupResultCachePath, time.Since(startTime), fcs().EntriesCount, fcs().BytesSize)
|
||||
}
|
||||
|
||||
metrics.NewGauge(`vm_cache_entries{type="promql/rollupResult"}`, func() float64 {
|
||||
return float64(fcs().EntriesCount)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_size_bytes{type="promql/rollupResult"}`, func() float64 {
|
||||
return float64(fcs().BytesSize)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_requests_total{type="promql/rollupResult"}`, func() float64 {
|
||||
return float64(fcs().GetCalls)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_misses_total{type="promql/rollupResult"}`, func() float64 {
|
||||
return float64(fcs().Misses)
|
||||
})
|
||||
|
||||
rollupResultCacheV = &rollupResultCache{
|
||||
c: c,
|
||||
}
|
||||
}
|
||||
|
||||
// StopRollupResultCache closes the rollupResult cache.
|
||||
func StopRollupResultCache() {
|
||||
if len(rollupResultCachePath) == 0 {
|
||||
rollupResultCacheV.c.Reset()
|
||||
return
|
||||
}
|
||||
gomaxprocs := runtime.GOMAXPROCS(-1)
|
||||
logger.Infof("saving rollupResult cache to %q...", rollupResultCachePath)
|
||||
startTime := time.Now()
|
||||
if err := rollupResultCacheV.c.SaveToFileConcurrent(rollupResultCachePath, gomaxprocs); err != nil {
|
||||
logger.Errorf("cannot close rollupResult cache at %q: %s", rollupResultCachePath, err)
|
||||
} else {
|
||||
var fcs fastcache.Stats
|
||||
rollupResultCacheV.c.UpdateStats(&fcs)
|
||||
rollupResultCacheV.c.Reset()
|
||||
logger.Infof("saved rollupResult cache to %q in %s; entriesCount: %d, bytesSize: %d",
|
||||
rollupResultCachePath, time.Since(startTime), fcs.EntriesCount, fcs.BytesSize)
|
||||
}
|
||||
}
|
||||
|
||||
type rollupResultCache struct {
|
||||
c *fastcache.Cache
|
||||
}
|
||||
|
||||
var rollupResultCacheResets = metrics.NewCounter(`vm_cache_resets_total{type="promql/rollupResult"}`)
|
||||
|
||||
// ResetRollupResultCache resets rollup result cache.
|
||||
func ResetRollupResultCache() {
|
||||
rollupResultCacheResets.Inc()
|
||||
rollupResultCacheV.c.Reset()
|
||||
}
|
||||
|
||||
func (rrc *rollupResultCache) Get(funcName string, ec *EvalConfig, me *metricExpr, window int64) (tss []*timeseries, newStart int64) {
|
||||
if !ec.mayCache() {
|
||||
return nil, ec.Start
|
||||
}
|
||||
|
||||
// Obtain tss from the cache.
|
||||
bb := bbPool.Get()
|
||||
defer bbPool.Put(bb)
|
||||
|
||||
bb.B = marshalRollupResultCacheKey(bb.B[:0], funcName, me, window, ec.Step)
|
||||
metainfoBuf := rrc.c.Get(nil, bb.B)
|
||||
if len(metainfoBuf) == 0 {
|
||||
return nil, ec.Start
|
||||
}
|
||||
var mi rollupResultCacheMetainfo
|
||||
if err := mi.Unmarshal(metainfoBuf); err != nil {
|
||||
logger.Panicf("BUG: cannot unmarshal rollupResultCacheMetainfo: %s; it looks like it was improperly saved", err)
|
||||
}
|
||||
key := mi.GetBestKey(ec.Start, ec.End)
|
||||
if key.prefix == 0 && key.suffix == 0 {
|
||||
return nil, ec.Start
|
||||
}
|
||||
bb.B = key.Marshal(bb.B[:0])
|
||||
resultBuf := rrc.c.GetBig(nil, bb.B)
|
||||
if len(resultBuf) == 0 {
|
||||
mi.RemoveKey(key)
|
||||
metainfoBuf = mi.Marshal(metainfoBuf[:0])
|
||||
bb.B = marshalRollupResultCacheKey(bb.B[:0], funcName, me, window, ec.Step)
|
||||
rrc.c.Set(bb.B, metainfoBuf)
|
||||
return nil, ec.Start
|
||||
}
|
||||
tss, err := unmarshalTimeseriesFast(resultBuf)
|
||||
if err != nil {
|
||||
logger.Panicf("BUG: cannot unmarshal timeseries from rollupResultCache: %s; it looks like it was improperly saved", err)
|
||||
}
|
||||
|
||||
// Extract values for the matching timestamps
|
||||
timestamps := tss[0].Timestamps
|
||||
i := 0
|
||||
for i < len(timestamps) && timestamps[i] < ec.Start {
|
||||
i++
|
||||
}
|
||||
if i == len(timestamps) {
|
||||
// no matches.
|
||||
return nil, ec.Start
|
||||
}
|
||||
if timestamps[i] != ec.Start {
|
||||
// The cached range doesn't cover the requested range.
|
||||
return nil, ec.Start
|
||||
}
|
||||
|
||||
j := len(timestamps) - 1
|
||||
for j >= 0 && timestamps[j] > ec.End {
|
||||
j--
|
||||
}
|
||||
j++
|
||||
if j <= i {
|
||||
// no matches.
|
||||
return nil, ec.Start
|
||||
}
|
||||
|
||||
for _, ts := range tss {
|
||||
ts.Timestamps = ts.Timestamps[i:j]
|
||||
ts.Values = ts.Values[i:j]
|
||||
}
|
||||
|
||||
timestamps = tss[0].Timestamps
|
||||
newStart = timestamps[len(timestamps)-1] + ec.Step
|
||||
return tss, newStart
|
||||
}
|
||||
|
||||
func (rrc *rollupResultCache) Put(funcName string, ec *EvalConfig, me *metricExpr, window int64, tss []*timeseries) {
|
||||
if len(tss) == 0 || !ec.mayCache() {
|
||||
return
|
||||
}
|
||||
|
||||
// Remove values newer than currentTime - step - maxSilenceInterval,
|
||||
// since samples for these timestamps may still arrive later.
|
||||
timestamps := tss[0].Timestamps
|
||||
deadline := (time.Now().UnixNano() / 1e6) - ec.Step - maxSilenceInterval
|
||||
i := len(timestamps) - 1
|
||||
for i >= 0 && timestamps[i] > deadline {
|
||||
i--
|
||||
}
|
||||
i++
|
||||
if i == 0 {
|
||||
// Nothing to store in the cache.
|
||||
return
|
||||
}
|
||||
if i < len(timestamps) {
|
||||
timestamps = timestamps[:i]
|
||||
// Make a copy of tss and remove unfit values
|
||||
rvs := copyTimeseriesShallow(tss)
|
||||
for _, ts := range rvs {
|
||||
ts.Timestamps = ts.Timestamps[:i]
|
||||
ts.Values = ts.Values[:i]
|
||||
}
|
||||
tss = rvs
|
||||
}
|
||||
|
||||
// Store tss in the cache.
|
||||
maxMarshaledSize := getRollupResultCacheSize() / 4
|
||||
tssMarshaled := marshalTimeseriesFast(tss, maxMarshaledSize, ec.Step)
|
||||
if tssMarshaled == nil {
|
||||
tooBigRollupResults.Inc()
|
||||
return
|
||||
}
|
||||
|
||||
bb := bbPool.Get()
|
||||
defer bbPool.Put(bb)
|
||||
|
||||
var key rollupResultCacheKey
|
||||
key.prefix = rollupResultCacheKeyPrefix
|
||||
key.suffix = atomic.AddUint64(&rollupResultCacheKeySuffix, 1)
|
||||
bb.B = key.Marshal(bb.B[:0])
|
||||
rrc.c.SetBig(bb.B, tssMarshaled)
|
||||
|
||||
bb.B = marshalRollupResultCacheKey(bb.B[:0], funcName, me, window, ec.Step)
|
||||
metainfoBuf := rrc.c.Get(nil, bb.B)
|
||||
var mi rollupResultCacheMetainfo
|
||||
if len(metainfoBuf) > 0 {
|
||||
if err := mi.Unmarshal(metainfoBuf); err != nil {
|
||||
logger.Panicf("BUG: cannot unmarshal rollupResultCacheMetainfo: %s; it looks like it was improperly saved", err)
|
||||
}
|
||||
}
|
||||
mi.AddKey(key, timestamps[0], timestamps[len(timestamps)-1])
|
||||
metainfoBuf = mi.Marshal(metainfoBuf[:0])
|
||||
rrc.c.Set(bb.B, metainfoBuf)
|
||||
}
|
||||
|
||||
var (
|
||||
rollupResultCacheKeyPrefix = func() uint64 {
|
||||
var buf [8]byte
|
||||
if _, err := rand.Read(buf[:]); err != nil {
|
||||
// do not use logger.Panicf, since it isn't initialized yet.
|
||||
panic(fmt.Errorf("FATAL: cannot read random data for rollupResultCacheKeyPrefix: %s", err))
|
||||
}
|
||||
return encoding.UnmarshalUint64(buf[:])
|
||||
}()
|
||||
rollupResultCacheKeySuffix = uint64(time.Now().UnixNano())
|
||||
)
|
||||
|
||||
var tooBigRollupResults = metrics.NewCounter("vm_too_big_rollup_results_total")
|
||||
|
||||
// Increment this value every time the format of the cache changes.
|
||||
const rollupResultCacheVersion = 4
|
||||
|
||||
func marshalRollupResultCacheKey(dst []byte, funcName string, me *metricExpr, window, step int64) []byte {
|
||||
dst = append(dst, rollupResultCacheVersion)
|
||||
dst = encoding.MarshalUint64(dst, uint64(len(funcName)))
|
||||
dst = append(dst, funcName...)
|
||||
dst = encoding.MarshalInt64(dst, window)
|
||||
dst = encoding.MarshalInt64(dst, step)
|
||||
for i := range me.TagFilters {
|
||||
dst = me.TagFilters[i].Marshal(dst)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// mergeTimeseries concatenates b with a and returns the result.
|
||||
//
|
||||
// Preconditions:
|
||||
// - a mustn't intersect with b.
|
||||
// - a's timestamps must be smaller than b's timestamps.
|
||||
//
|
||||
// Postconditions:
|
||||
// - a and b cannot be used after returning from the call.
|
||||
func mergeTimeseries(a, b []*timeseries, bStart int64, ec *EvalConfig) []*timeseries {
|
||||
sharedTimestamps := ec.getSharedTimestamps()
|
||||
if bStart == ec.Start {
|
||||
// Nothing to merge - b covers the whole time range.
|
||||
// Verify b is correct.
|
||||
for _, tsB := range b {
|
||||
tsB.denyReuse = true
|
||||
tsB.Timestamps = sharedTimestamps
|
||||
if len(tsB.Values) != len(tsB.Timestamps) {
|
||||
logger.Panicf("BUG: unexpected number of values in b; got %d; want %d", len(tsB.Values), len(tsB.Timestamps))
|
||||
}
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
m := make(map[string]*timeseries, len(a))
|
||||
bb := bbPool.Get()
|
||||
defer bbPool.Put(bb)
|
||||
for _, ts := range a {
|
||||
bb.B = marshalMetricNameSorted(bb.B[:0], &ts.MetricName)
|
||||
m[string(bb.B)] = ts
|
||||
}
|
||||
|
||||
rvs := make([]*timeseries, 0, len(a))
|
||||
for _, tsB := range b {
|
||||
var tmp timeseries
|
||||
tmp.denyReuse = true
|
||||
tmp.Timestamps = sharedTimestamps
|
||||
tmp.Values = make([]float64, 0, len(tmp.Timestamps))
|
||||
// Do not use MetricName.CopyFrom for performance reasons.
|
||||
// It is safe to make a shallow copy, since tsB must no longer be used.
|
||||
tmp.MetricName = tsB.MetricName
|
||||
|
||||
bb.B = marshalMetricNameSorted(bb.B[:0], &tsB.MetricName)
|
||||
tsA := m[string(bb.B)]
|
||||
if tsA == nil {
|
||||
tStart := ec.Start
|
||||
for tStart < bStart {
|
||||
tmp.Values = append(tmp.Values, nan)
|
||||
tStart += ec.Step
|
||||
}
|
||||
} else {
|
||||
tmp.Values = append(tmp.Values, tsA.Values...)
|
||||
delete(m, string(bb.B))
|
||||
}
|
||||
tmp.Values = append(tmp.Values, tsB.Values...)
|
||||
if len(tmp.Values) != len(tmp.Timestamps) {
|
||||
logger.Panicf("BUG: unexpected values after merging new values; got %d; want %d", len(tmp.Values), len(tmp.Timestamps))
|
||||
}
|
||||
rvs = append(rvs, &tmp)
|
||||
}
|
||||
|
||||
// Copy the remaining timeseries from m.
|
||||
for _, tsA := range m {
|
||||
var tmp timeseries
|
||||
tmp.denyReuse = true
|
||||
tmp.Timestamps = sharedTimestamps
|
||||
// Do not use MetricName.CopyFrom for performance reasons.
|
||||
// It is safe to make a shallow copy, since tsA must no longer be used.
|
||||
tmp.MetricName = tsA.MetricName
|
||||
tmp.Values = append(tmp.Values, tsA.Values...)
|
||||
|
||||
tStart := bStart
|
||||
for tStart <= ec.End {
|
||||
tmp.Values = append(tmp.Values, nan)
|
||||
tStart += ec.Step
|
||||
}
|
||||
if len(tmp.Values) != len(tmp.Timestamps) {
|
||||
logger.Panicf("BUG: unexpected values in the result after adding cached values; got %d; want %d", len(tmp.Values), len(tmp.Timestamps))
|
||||
}
|
||||
rvs = append(rvs, &tmp)
|
||||
}
|
||||
return rvs
|
||||
}
|
||||
|
||||
type rollupResultCacheMetainfo struct {
|
||||
entries []rollupResultCacheMetainfoEntry
|
||||
}
|
||||
|
||||
func (mi *rollupResultCacheMetainfo) Marshal(dst []byte) []byte {
|
||||
dst = encoding.MarshalUint32(dst, uint32(len(mi.entries)))
|
||||
for i := range mi.entries {
|
||||
dst = mi.entries[i].Marshal(dst)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func (mi *rollupResultCacheMetainfo) Unmarshal(src []byte) error {
|
||||
if len(src) < 4 {
|
||||
return fmt.Errorf("cannot unmarshal len(etries) from %d bytes; need at least %d bytes", len(src), 4)
|
||||
}
|
||||
entriesLen := int(encoding.UnmarshalUint32(src))
|
||||
src = src[4:]
|
||||
if n := entriesLen - cap(mi.entries); n > 0 {
|
||||
mi.entries = append(mi.entries[:cap(mi.entries)], make([]rollupResultCacheMetainfoEntry, n)...)
|
||||
}
|
||||
mi.entries = mi.entries[:entriesLen]
|
||||
for i := 0; i < entriesLen; i++ {
|
||||
tail, err := mi.entries[i].Unmarshal(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cannot unmarshal entry #%d: %s", i, err)
|
||||
}
|
||||
src = tail
|
||||
}
|
||||
if len(src) > 0 {
|
||||
return fmt.Errorf("unexpected non-empty tail left; len(tail)=%d", len(src))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mi *rollupResultCacheMetainfo) GetBestKey(start, end int64) rollupResultCacheKey {
|
||||
if start > end {
|
||||
logger.Panicf("BUG: start cannot exceed end; got %d vs %d", start, end)
|
||||
}
|
||||
var bestKey rollupResultCacheKey
|
||||
bestD := int64(1<<63 - 1)
|
||||
for i := range mi.entries {
|
||||
e := &mi.entries[i]
|
||||
if start < e.start || end <= e.start {
|
||||
continue
|
||||
}
|
||||
d := start - e.start
|
||||
if d < bestD {
|
||||
bestD = d
|
||||
bestKey = e.key
|
||||
}
|
||||
}
|
||||
return bestKey
|
||||
}
|
||||
|
||||
func (mi *rollupResultCacheMetainfo) AddKey(key rollupResultCacheKey, start, end int64) {
|
||||
if start > end {
|
||||
logger.Panicf("BUG: start cannot exceed end; got %d vs %d", start, end)
|
||||
}
|
||||
mi.entries = append(mi.entries, rollupResultCacheMetainfoEntry{
|
||||
start: start,
|
||||
end: end,
|
||||
key: key,
|
||||
})
|
||||
if len(mi.entries) > 30 {
|
||||
// Remove old entries.
|
||||
mi.entries = append(mi.entries[:0], mi.entries[10:]...)
|
||||
}
|
||||
}
|
||||
|
||||
func (mi *rollupResultCacheMetainfo) RemoveKey(key rollupResultCacheKey) {
|
||||
for i := range mi.entries {
|
||||
if mi.entries[i].key == key {
|
||||
mi.entries = append(mi.entries[:i], mi.entries[i+1:]...)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type rollupResultCacheMetainfoEntry struct {
|
||||
start int64
|
||||
end int64
|
||||
key rollupResultCacheKey
|
||||
}
|
||||
|
||||
func (mie *rollupResultCacheMetainfoEntry) Marshal(dst []byte) []byte {
|
||||
dst = encoding.MarshalInt64(dst, mie.start)
|
||||
dst = encoding.MarshalInt64(dst, mie.end)
|
||||
dst = encoding.MarshalUint64(dst, mie.key.prefix)
|
||||
dst = encoding.MarshalUint64(dst, mie.key.suffix)
|
||||
return dst
|
||||
}
|
||||
|
||||
func (mie *rollupResultCacheMetainfoEntry) Unmarshal(src []byte) ([]byte, error) {
|
||||
if len(src) < 8 {
|
||||
return src, fmt.Errorf("cannot unmarshal start from %d bytes; need at least %d bytes", len(src), 8)
|
||||
}
|
||||
mie.start = encoding.UnmarshalInt64(src)
|
||||
src = src[8:]
|
||||
|
||||
if len(src) < 8 {
|
||||
return src, fmt.Errorf("cannot unmarshal end from %d bytes; need at least %d bytes", len(src), 8)
|
||||
}
|
||||
mie.end = encoding.UnmarshalInt64(src)
|
||||
src = src[8:]
|
||||
|
||||
if len(src) < 8 {
|
||||
return src, fmt.Errorf("cannot unmarshal key prefix from %d bytes; need at least %d bytes", len(src), 8)
|
||||
}
|
||||
mie.key.prefix = encoding.UnmarshalUint64(src)
|
||||
src = src[8:]
|
||||
|
||||
if len(src) < 8 {
|
||||
return src, fmt.Errorf("cannot unmarshal key suffix from %d bytes; need at least %d bytes", len(src), 8)
|
||||
}
|
||||
mie.key.suffix = encoding.UnmarshalUint64(src)
|
||||
src = src[8:]
|
||||
|
||||
return src, nil
|
||||
}
|
||||
|
||||
// rollupResultCacheKey must be globally unique across vmselect nodes,
|
||||
// so it has prefix and suffix.
|
||||
type rollupResultCacheKey struct {
|
||||
prefix uint64
|
||||
suffix uint64
|
||||
}
|
||||
|
||||
func (k *rollupResultCacheKey) Marshal(dst []byte) []byte {
|
||||
dst = append(dst, rollupResultCacheVersion)
|
||||
dst = encoding.MarshalUint64(dst, k.prefix)
|
||||
dst = encoding.MarshalUint64(dst, k.suffix)
|
||||
return dst
|
||||
}
|
368
app/vmselect/promql/rollup_result_cache_test.go
Normal file
|
@ -0,0 +1,368 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
|
||||
func TestRollupResultCache(t *testing.T) {
|
||||
ResetRollupResultCache()
|
||||
funcName := "foo"
|
||||
window := int64(456)
|
||||
ec := &EvalConfig{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Step: 200,
|
||||
|
||||
MayCache: true,
|
||||
}
|
||||
me := &metricExpr{
|
||||
TagFilters: []storage.TagFilter{{
|
||||
Key: []byte("aaa"),
|
||||
Value: []byte("xxx"),
|
||||
}},
|
||||
}
|
||||
|
||||
// Try obtaining an empty value.
|
||||
t.Run("empty", func(t *testing.T) {
|
||||
tss, newStart := rollupResultCacheV.Get(funcName, ec, me, window)
|
||||
if newStart != ec.Start {
|
||||
t.Fatalf("unexpected newStart; got %d; want %d", newStart, ec.Start)
|
||||
}
|
||||
if len(tss) != 0 {
|
||||
t.Fatalf("got %d timeseries, while expecting zero", len(tss))
|
||||
}
|
||||
})
|
||||
|
||||
// Store timeseries overlapping with start
|
||||
t.Run("start-overlap", func(t *testing.T) {
|
||||
ResetRollupResultCache()
|
||||
tss := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{800, 1000, 1200},
|
||||
Values: []float64{0, 1, 2},
|
||||
},
|
||||
}
|
||||
rollupResultCacheV.Put(funcName, ec, me, window, tss)
|
||||
tss, newStart := rollupResultCacheV.Get(funcName, ec, me, window)
|
||||
if newStart != 1400 {
|
||||
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1400)
|
||||
}
|
||||
tssExpected := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1000, 1200},
|
||||
Values: []float64{1, 2},
|
||||
},
|
||||
}
|
||||
testTimeseriesEqual(t, tss, tssExpected)
|
||||
})
|
||||
|
||||
// Store timeseries overlapping with end
|
||||
t.Run("end-overlap", func(t *testing.T) {
|
||||
ResetRollupResultCache()
|
||||
tss := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1800, 2000, 2200, 2400},
|
||||
Values: []float64{333, 0, 1, 2},
|
||||
},
|
||||
}
|
||||
rollupResultCacheV.Put(funcName, ec, me, window, tss)
|
||||
tss, newStart := rollupResultCacheV.Get(funcName, ec, me, window)
|
||||
if newStart != 1000 {
|
||||
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1000)
|
||||
}
|
||||
if len(tss) != 0 {
|
||||
t.Fatalf("got %d timeseries, while expecting zero", len(tss))
|
||||
}
|
||||
})
|
||||
|
||||
// Store timeseries covered by [start ... end]
|
||||
t.Run("full-cover", func(t *testing.T) {
|
||||
ResetRollupResultCache()
|
||||
tss := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1200, 1400, 1600},
|
||||
Values: []float64{0, 1, 2},
|
||||
},
|
||||
}
|
||||
rollupResultCacheV.Put(funcName, ec, me, window, tss)
|
||||
tss, newStart := rollupResultCacheV.Get(funcName, ec, me, window)
|
||||
if newStart != 1000 {
|
||||
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1000)
|
||||
}
|
||||
if len(tss) != 0 {
|
||||
t.Fatalf("got %d timeseries, while expecting zero", len(tss))
|
||||
}
|
||||
})
|
||||
|
||||
// Store timeseries below start
|
||||
t.Run("before-start", func(t *testing.T) {
|
||||
ResetRollupResultCache()
|
||||
tss := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{200, 400, 600},
|
||||
Values: []float64{0, 1, 2},
|
||||
},
|
||||
}
|
||||
rollupResultCacheV.Put(funcName, ec, me, window, tss)
|
||||
tss, newStart := rollupResultCacheV.Get(funcName, ec, me, window)
|
||||
if newStart != 1000 {
|
||||
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1000)
|
||||
}
|
||||
if len(tss) != 0 {
|
||||
t.Fatalf("got %d timeseries, while expecting zero", len(tss))
|
||||
}
|
||||
})
|
||||
|
||||
// Store timeseries after end
|
||||
t.Run("after-end", func(t *testing.T) {
|
||||
ResetRollupResultCache()
|
||||
tss := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{2200, 2400, 2600},
|
||||
Values: []float64{0, 1, 2},
|
||||
},
|
||||
}
|
||||
rollupResultCacheV.Put(funcName, ec, me, window, tss)
|
||||
tss, newStart := rollupResultCacheV.Get(funcName, ec, me, window)
|
||||
if newStart != 1000 {
|
||||
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1000)
|
||||
}
|
||||
if len(tss) != 0 {
|
||||
t.Fatalf("got %d timeseries, while expecting zero", len(tss))
|
||||
}
|
||||
})
|
||||
|
||||
// Store timeseries bigger than the interval [start ... end]
|
||||
t.Run("bigger-than-start-end", func(t *testing.T) {
|
||||
ResetRollupResultCache()
|
||||
tss := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{800, 1000, 1200, 1400, 1600, 1800, 2000, 2200},
|
||||
Values: []float64{0, 1, 2, 3, 4, 5, 6, 7},
|
||||
},
|
||||
}
|
||||
rollupResultCacheV.Put(funcName, ec, me, window, tss)
|
||||
tss, newStart := rollupResultCacheV.Get(funcName, ec, me, window)
|
||||
if newStart != 2200 {
|
||||
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 2200)
|
||||
}
|
||||
tssExpected := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1000, 1200, 1400, 1600, 1800, 2000},
|
||||
Values: []float64{1, 2, 3, 4, 5, 6},
|
||||
},
|
||||
}
|
||||
testTimeseriesEqual(t, tss, tssExpected)
|
||||
})
|
||||
|
||||
// Store timeseries matching the interval [start ... end]
|
||||
t.Run("start-end-match", func(t *testing.T) {
|
||||
ResetRollupResultCache()
|
||||
tss := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1000, 1200, 1400, 1600, 1800, 2000},
|
||||
Values: []float64{1, 2, 3, 4, 5, 6},
|
||||
},
|
||||
}
|
||||
rollupResultCacheV.Put(funcName, ec, me, window, tss)
|
||||
tss, newStart := rollupResultCacheV.Get(funcName, ec, me, window)
|
||||
if newStart != 2200 {
|
||||
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 2200)
|
||||
}
|
||||
tssExpected := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1000, 1200, 1400, 1600, 1800, 2000},
|
||||
Values: []float64{1, 2, 3, 4, 5, 6},
|
||||
},
|
||||
}
|
||||
testTimeseriesEqual(t, tss, tssExpected)
|
||||
})
|
||||
|
||||
// Store big timeseries, so their marshaled size exceeds 64Kb.
|
||||
t.Run("big-timeseries", func(t *testing.T) {
|
||||
ResetRollupResultCache()
|
||||
var tss []*timeseries
|
||||
for i := 0; i < 1000; i++ {
|
||||
ts := &timeseries{
|
||||
Timestamps: []int64{1000, 1200, 1400, 1600, 1800, 2000},
|
||||
Values: []float64{1, 2, 3, 4, 5, 6},
|
||||
}
|
||||
tss = append(tss, ts)
|
||||
}
|
||||
rollupResultCacheV.Put(funcName, ec, me, window, tss)
|
||||
tssResult, newStart := rollupResultCacheV.Get(funcName, ec, me, window)
|
||||
if newStart != 2200 {
|
||||
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 2200)
|
||||
}
|
||||
testTimeseriesEqual(t, tssResult, tss)
|
||||
})
|
||||
|
||||
// Store multiple time series
|
||||
t.Run("multi-timeseries", func(t *testing.T) {
|
||||
ResetRollupResultCache()
|
||||
tss1 := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{800, 1000, 1200},
|
||||
Values: []float64{0, 1, 2},
|
||||
},
|
||||
}
|
||||
tss2 := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1800, 2000, 2200, 2400},
|
||||
Values: []float64{333, 0, 1, 2},
|
||||
},
|
||||
}
|
||||
tss3 := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1200, 1400, 1600},
|
||||
Values: []float64{0, 1, 2},
|
||||
},
|
||||
}
|
||||
rollupResultCacheV.Put(funcName, ec, me, window, tss1)
|
||||
rollupResultCacheV.Put(funcName, ec, me, window, tss2)
|
||||
rollupResultCacheV.Put(funcName, ec, me, window, tss3)
|
||||
tss, newStart := rollupResultCacheV.Get(funcName, ec, me, window)
|
||||
if newStart != 1400 {
|
||||
t.Fatalf("unexpected newStart; got %d; want %d", newStart, 1400)
|
||||
}
|
||||
tssExpected := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1000, 1200},
|
||||
Values: []float64{1, 2},
|
||||
},
|
||||
}
|
||||
testTimeseriesEqual(t, tss, tssExpected)
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func TestMergeTimeseries(t *testing.T) {
|
||||
ec := &EvalConfig{
|
||||
Start: 1000,
|
||||
End: 2000,
|
||||
Step: 200,
|
||||
}
|
||||
bStart := int64(1400)
|
||||
|
||||
t.Run("bStart=ec.Start", func(t *testing.T) {
|
||||
a := []*timeseries{}
|
||||
b := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1000, 1200, 1400, 1600, 1800, 2000},
|
||||
Values: []float64{1, 2, 3, 4, 5, 6},
|
||||
},
|
||||
}
|
||||
tss := mergeTimeseries(a, b, 1000, ec)
|
||||
tssExpected := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1000, 1200, 1400, 1600, 1800, 2000},
|
||||
Values: []float64{1, 2, 3, 4, 5, 6},
|
||||
},
|
||||
}
|
||||
testTimeseriesEqual(t, tss, tssExpected)
|
||||
})
|
||||
t.Run("a-empty", func(t *testing.T) {
|
||||
a := []*timeseries{}
|
||||
b := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1400, 1600, 1800, 2000},
|
||||
Values: []float64{3, 4, 5, 6},
|
||||
},
|
||||
}
|
||||
tss := mergeTimeseries(a, b, bStart, ec)
|
||||
tssExpected := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1000, 1200, 1400, 1600, 1800, 2000},
|
||||
Values: []float64{nan, nan, 3, 4, 5, 6},
|
||||
},
|
||||
}
|
||||
testTimeseriesEqual(t, tss, tssExpected)
|
||||
})
|
||||
t.Run("b-empty", func(t *testing.T) {
|
||||
a := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1000, 1200},
|
||||
Values: []float64{2, 1},
|
||||
},
|
||||
}
|
||||
b := []*timeseries{}
|
||||
tss := mergeTimeseries(a, b, bStart, ec)
|
||||
tssExpected := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1000, 1200, 1400, 1600, 1800, 2000},
|
||||
Values: []float64{2, 1, nan, nan, nan, nan},
|
||||
},
|
||||
}
|
||||
testTimeseriesEqual(t, tss, tssExpected)
|
||||
})
|
||||
t.Run("non-empty", func(t *testing.T) {
|
||||
a := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1000, 1200},
|
||||
Values: []float64{2, 1},
|
||||
},
|
||||
}
|
||||
b := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1400, 1600, 1800, 2000},
|
||||
Values: []float64{3, 4, 5, 6},
|
||||
},
|
||||
}
|
||||
tss := mergeTimeseries(a, b, bStart, ec)
|
||||
tssExpected := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1000, 1200, 1400, 1600, 1800, 2000},
|
||||
Values: []float64{2, 1, 3, 4, 5, 6},
|
||||
},
|
||||
}
|
||||
testTimeseriesEqual(t, tss, tssExpected)
|
||||
})
|
||||
t.Run("non-empty-distinct-metric-names", func(t *testing.T) {
|
||||
a := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1000, 1200},
|
||||
Values: []float64{2, 1},
|
||||
},
|
||||
}
|
||||
a[0].MetricName.MetricGroup = []byte("bar")
|
||||
b := []*timeseries{
|
||||
&timeseries{
|
||||
Timestamps: []int64{1400, 1600, 1800, 2000},
|
||||
Values: []float64{3, 4, 5, 6},
|
||||
},
|
||||
}
|
||||
b[0].MetricName.MetricGroup = []byte("foo")
|
||||
tss := mergeTimeseries(a, b, bStart, ec)
|
||||
tssExpected := []*timeseries{
|
||||
&timeseries{
|
||||
MetricName: storage.MetricName{
|
||||
MetricGroup: []byte("foo"),
|
||||
},
|
||||
Timestamps: []int64{1000, 1200, 1400, 1600, 1800, 2000},
|
||||
Values: []float64{nan, nan, 3, 4, 5, 6},
|
||||
},
|
||||
&timeseries{
|
||||
MetricName: storage.MetricName{
|
||||
MetricGroup: []byte("bar"),
|
||||
},
|
||||
Timestamps: []int64{1000, 1200, 1400, 1600, 1800, 2000},
|
||||
Values: []float64{2, 1, nan, nan, nan, nan},
|
||||
},
|
||||
}
|
||||
testTimeseriesEqual(t, tss, tssExpected)
|
||||
})
|
||||
}
|
||||
|
||||
func testTimeseriesEqual(t *testing.T, tss, tssExpected []*timeseries) {
|
||||
t.Helper()
|
||||
if len(tss) != len(tssExpected) {
|
||||
t.Fatalf(`unexpected timeseries count; got %d; want %d`, len(tss), len(tssExpected))
|
||||
}
|
||||
for i, ts := range tss {
|
||||
tsExpected := tssExpected[i]
|
||||
testMetricNamesEqual(t, &ts.MetricName, &tsExpected.MetricName)
|
||||
testRowsEqual(t, ts.Values, ts.Timestamps, tsExpected.Values, tsExpected.Timestamps)
|
||||
}
|
||||
}
|
649
app/vmselect/promql/rollup_test.go
Normal file
|
@ -0,0 +1,649 @@
|
|||
package promql
|
||||
|
||||
import (
|
||||
"math"
|
||||
"testing"
|
||||
)
|
||||
|
||||
var (
|
||||
testValues = []float64{123, 34, 44, 21, 54, 34, 99, 12, 44, 32, 34, 34}
|
||||
testTimestamps = []int64{5, 15, 24, 36, 49, 60, 78, 80, 97, 115, 120, 130}
|
||||
)
|
||||
|
||||
func TestRemoveCounterResets(t *testing.T) {
|
||||
removeCounterResets(nil)
|
||||
|
||||
values := append([]float64{}, testValues...)
|
||||
removeCounterResets(values)
|
||||
valuesExpected := []float64{123, 157, 167, 188, 221, 255, 320, 332, 364, 396, 398, 398}
|
||||
testRowsEqual(t, values, testTimestamps, valuesExpected, testTimestamps)
|
||||
|
||||
// removeCounterResets doesn't expect negative values, so it doesn't work properly with them.
|
||||
values = []float64{-100, -200, -300, -400}
|
||||
removeCounterResets(values)
|
||||
valuesExpected = []float64{-100, -300, -600, -1000}
|
||||
timestampsExpected := []int64{0, 1, 2, 3}
|
||||
testRowsEqual(t, values, timestampsExpected, valuesExpected, timestampsExpected)
|
||||
|
||||
// verify how jitter from `Prometheus HA pairs` is handled
|
||||
values = []float64{100, 95, 120, 140, 137, 50}
|
||||
removeCounterResets(values)
|
||||
valuesExpected = []float64{100, 100, 120, 140, 140, 190}
|
||||
timestampsExpected = []int64{0, 1, 2, 3, 4, 5}
|
||||
testRowsEqual(t, values, timestampsExpected, valuesExpected, timestampsExpected)
|
||||
}
|
||||
|
||||
func TestDeltaValues(t *testing.T) {
|
||||
deltaValues(nil)
|
||||
|
||||
values := []float64{123}
|
||||
deltaValues(values)
|
||||
valuesExpected := []float64{nan}
|
||||
testRowsEqual(t, values, testTimestamps[:1], valuesExpected, testTimestamps[:1])
|
||||
|
||||
values = append([]float64{}, testValues...)
|
||||
deltaValues(values)
|
||||
valuesExpected = []float64{-89, 10, -23, 33, -20, 65, -87, 32, -12, 2, 0, nan}
|
||||
testRowsEqual(t, values, testTimestamps, valuesExpected, testTimestamps)
|
||||
|
||||
// remove counter resets
|
||||
values = append([]float64{}, testValues...)
|
||||
removeCounterResets(values)
|
||||
deltaValues(values)
|
||||
valuesExpected = []float64{34, 10, 21, 33, 34, 65, 12, 32, 32, 2, 0, nan}
|
||||
testRowsEqual(t, values, testTimestamps, valuesExpected, testTimestamps)
|
||||
}
|
||||
|
||||
func TestDerivValues(t *testing.T) {
|
||||
derivValues(nil, nil)
|
||||
|
||||
values := []float64{123}
|
||||
derivValues(values, testTimestamps[:1])
|
||||
valuesExpected := []float64{nan}
|
||||
testRowsEqual(t, values, testTimestamps[:1], valuesExpected, testTimestamps[:1])
|
||||
|
||||
values = append([]float64{}, testValues...)
|
||||
derivValues(values, testTimestamps)
|
||||
valuesExpected = []float64{-8900, 1111.111111111111, -1916.6666666666665, 2538.461538461538, -1818.1818181818182, 3611.111111111111,
|
||||
-43500, 1882.3529411764705, -666.6666666666666, 400, 0, nan}
|
||||
testRowsEqual(t, values, testTimestamps, valuesExpected, testTimestamps)
|
||||
|
||||
// remove counter resets
|
||||
values = append([]float64{}, testValues...)
|
||||
removeCounterResets(values)
|
||||
derivValues(values, testTimestamps)
|
||||
valuesExpected = []float64{3400, 1111.111111111111, 1750, 2538.461538461538, 3090.909090909091, 3611.111111111111,
|
||||
6000, 1882.3529411764705, 1777.7777777777776, 400, 0, nan}
|
||||
testRowsEqual(t, values, testTimestamps, valuesExpected, testTimestamps)
|
||||
}
|
||||
|
||||
func testRollupFunc(t *testing.T, funcName string, args []interface{}, meExpected *metricExpr, vExpected float64) {
|
||||
t.Helper()
|
||||
nrf := getRollupFunc(funcName)
|
||||
if nrf == nil {
|
||||
t.Fatalf("cannot obtain %q", funcName)
|
||||
}
|
||||
rf, err := nrf(args)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %s", err)
|
||||
}
|
||||
var rfa rollupFuncArg
|
||||
rfa.prevValue = nan
|
||||
rfa.prevTimestamp = 0
|
||||
rfa.values = append(rfa.values, testValues...)
|
||||
rfa.timestamps = append(rfa.timestamps, testTimestamps...)
|
||||
if rollupFuncsRemoveCounterResets[funcName] {
|
||||
removeCounterResets(rfa.values)
|
||||
}
|
||||
for i := 0; i < 5; i++ {
|
||||
v := rf(&rfa)
|
||||
if math.IsNaN(vExpected) {
|
||||
if !math.IsNaN(v) {
|
||||
t.Fatalf("unexpected value; got %v; want %v", v, vExpected)
|
||||
}
|
||||
} else {
|
||||
if v != vExpected {
|
||||
t.Fatalf("unexpected value; got %v; want %v", v, vExpected)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRollupQuantileOverTime(t *testing.T) {
|
||||
f := func(phi, vExpected float64) {
|
||||
t.Helper()
|
||||
phis := []*timeseries{{
|
||||
Values: []float64{phi},
|
||||
Timestamps: []int64{123},
|
||||
}}
|
||||
var me metricExpr
|
||||
args := []interface{}{phis, &rollupExpr{Expr: &me}}
|
||||
testRollupFunc(t, "quantile_over_time", args, &me, vExpected)
|
||||
}
|
||||
|
||||
f(-123, 12)
|
||||
f(-0.5, 12)
|
||||
f(0, 12)
|
||||
f(0.1, 21)
|
||||
f(0.5, 34)
|
||||
f(0.9, 99)
|
||||
f(1, 123)
|
||||
f(234, 123)
|
||||
}
|
||||
|
||||
func TestRollupPredictLinear(t *testing.T) {
|
||||
f := func(sec, vExpected float64) {
|
||||
t.Helper()
|
||||
secs := []*timeseries{{
|
||||
Values: []float64{sec},
|
||||
Timestamps: []int64{123},
|
||||
}}
|
||||
var me metricExpr
|
||||
args := []interface{}{&rollupExpr{Expr: &me}, secs}
|
||||
testRollupFunc(t, "predict_linear", args, &me, vExpected)
|
||||
}
|
||||
|
||||
f(0e-3, 63.739757761102624)
|
||||
f(50e-3, 50.39682764539959)
|
||||
f(100e-3, 37.053897529696556)
|
||||
f(200e-3, 10.368037298290488)
|
||||
}
|
||||
|
||||
func TestRollupHoltWinters(t *testing.T) {
|
||||
f := func(sf, tf, vExpected float64) {
|
||||
t.Helper()
|
||||
sfs := []*timeseries{{
|
||||
Values: []float64{sf},
|
||||
Timestamps: []int64{123},
|
||||
}}
|
||||
tfs := []*timeseries{{
|
||||
Values: []float64{tf},
|
||||
Timestamps: []int64{123},
|
||||
}}
|
||||
var me metricExpr
|
||||
args := []interface{}{&rollupExpr{Expr: &me}, sfs, tfs}
|
||||
testRollupFunc(t, "holt_winters", args, &me, vExpected)
|
||||
}
|
||||
|
||||
f(-1, 0.5, nan)
|
||||
f(0, 0.5, nan)
|
||||
f(1, 0.5, nan)
|
||||
f(2, 0.5, nan)
|
||||
f(0.5, -1, nan)
|
||||
f(0.5, 0, nan)
|
||||
f(0.5, 1, nan)
|
||||
f(0.5, 2, nan)
|
||||
f(0.5, 0.5, 34.97794532775879)
|
||||
f(0.1, 0.5, -131.30529492371622)
|
||||
f(0.1, 0.1, -397.3307790780296)
|
||||
f(0.5, 0.1, -5.791530520284198)
|
||||
f(0.5, 0.9, 25.498906408926757)
|
||||
f(0.9, 0.9, 33.99637566941818)
|
||||
}
|
||||
|
||||
func TestRollupNewRollupFuncSuccess(t *testing.T) {
|
||||
f := func(funcName string, vExpected float64) {
|
||||
t.Helper()
|
||||
var me metricExpr
|
||||
args := []interface{}{&rollupExpr{Expr: &me}}
|
||||
testRollupFunc(t, funcName, args, &me, vExpected)
|
||||
}
|
||||
|
||||
f("default_rollup", 123)
|
||||
f("changes", 10)
|
||||
f("delta", -89)
|
||||
f("deriv", -712)
|
||||
f("idelta", 0)
|
||||
f("increase", 275)
|
||||
f("irate", 0)
|
||||
f("rate", 2200)
|
||||
f("resets", 5)
|
||||
f("avg_over_time", 47.083333333333336)
|
||||
f("min_over_time", 12)
|
||||
f("max_over_time", 123)
|
||||
f("sum_over_time", 565)
|
||||
f("count_over_time", 12)
|
||||
f("stddev_over_time", 30.752935722554287)
|
||||
f("stdvar_over_time", 945.7430555555555)
|
||||
f("first_over_time", 123)
|
||||
f("last_over_time", 34)
|
||||
f("integrate", 61.0275)
|
||||
}
|
||||
|
||||
func TestRollupNewRollupFuncError(t *testing.T) {
|
||||
if nrf := getRollupFunc("non-existing-func"); nrf != nil {
|
||||
t.Fatalf("expecting nil func; got %p", nrf)
|
||||
}
|
||||
|
||||
f := func(funcName string, args []interface{}) {
|
||||
t.Helper()
|
||||
|
||||
nrf := getRollupFunc(funcName)
|
||||
rf, err := nrf(args)
|
||||
if err == nil {
|
||||
t.Fatalf("expecting non-nil error")
|
||||
}
|
||||
if rf != nil {
|
||||
t.Fatalf("expecting nil rf; got %p", rf)
|
||||
}
|
||||
}
|
||||
|
||||
// Invalid number of args
|
||||
f("default_rollup", nil)
|
||||
f("holt_winters", nil)
|
||||
f("predict_linear", nil)
|
||||
f("quantile_over_time", nil)
|
||||
|
||||
// Invalid arg type
|
||||
scalarTs := []*timeseries{{
|
||||
Values: []float64{321},
|
||||
Timestamps: []int64{123},
|
||||
}}
|
||||
me := &metricExpr{}
|
||||
f("holt_winters", []interface{}{123, 123, 321})
|
||||
f("holt_winters", []interface{}{me, 123, 321})
|
||||
f("holt_winters", []interface{}{me, scalarTs, 321})
|
||||
f("predict_linear", []interface{}{123, 123})
|
||||
f("predict_linear", []interface{}{me, 123})
|
||||
f("quantile_over_time", []interface{}{123, 123})
|
||||
}
|
||||
|
||||
func TestRollupNoWindowNoPoints(t *testing.T) {
|
||||
t.Run("beforeStart", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: 0,
|
||||
End: 4,
|
||||
Step: 1,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{nan, nan, nan, nan, 123}
|
||||
timestampsExpected := []int64{0, 1, 2, 3, 4}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("afterEnd", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupDelta,
|
||||
Start: 120,
|
||||
End: 144,
|
||||
Step: 4,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{2, 2, 0, 0, 0, nan, nan}
|
||||
timestampsExpected := []int64{120, 124, 128, 132, 136, 140, 144}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRollupWindowNoPoints(t *testing.T) {
|
||||
t.Run("beforeStart", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: 0,
|
||||
End: 4,
|
||||
Step: 1,
|
||||
Window: 3,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{nan, nan, nan, nan, 123}
|
||||
timestampsExpected := []int64{0, 1, 2, 3, 4}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("afterEnd", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: 141,
|
||||
End: 171,
|
||||
Step: 10,
|
||||
Window: 3,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{34, nan, nan, nan}
|
||||
timestampsExpected := []int64{141, 151, 161, 171}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRollupNoWindowPartialPoints(t *testing.T) {
|
||||
t.Run("beforeStart", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: 0,
|
||||
End: 20,
|
||||
Step: 5,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{123, 123, 123, 123, 123}
|
||||
timestampsExpected := []int64{0, 5, 10, 15, 20}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("afterEnd", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: 100,
|
||||
End: 160,
|
||||
Step: 20,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{44, 34, 34, nan}
|
||||
timestampsExpected := []int64{100, 120, 140, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("middle", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: -50,
|
||||
End: 150,
|
||||
Step: 50,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{nan, 123, 54, 44, nan}
|
||||
timestampsExpected := []int64{-50, 0, 50, 100, 150}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRollupWindowPartialPoints(t *testing.T) {
|
||||
t.Run("beforeStart", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupLast,
|
||||
Start: 0,
|
||||
End: 20,
|
||||
Step: 5,
|
||||
Window: 8,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{123, 123, 34, 34, 44}
|
||||
timestampsExpected := []int64{0, 5, 10, 15, 20}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("afterEnd", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupLast,
|
||||
Start: 100,
|
||||
End: 160,
|
||||
Step: 20,
|
||||
Window: 18,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{34, 34, nan, nan}
|
||||
timestampsExpected := []int64{100, 120, 140, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("middle", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupLast,
|
||||
Start: 0,
|
||||
End: 150,
|
||||
Step: 50,
|
||||
Window: 19,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{54, 44, nan, nan}
|
||||
timestampsExpected := []int64{0, 50, 100, 150}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
}
|
||||
|
||||
func TestRollupFuncsNoWindow(t *testing.T) {
|
||||
t.Run("first", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupFirst,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{123, 21, 12, 34, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("count", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupCount,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{4, 4, 3, 1, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("min", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupMin,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{21, 12, 32, 34, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("max", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupMax,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{123, 99, 44, 34, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("sum", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupSum,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{222, 199, 110, 34, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("delta", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupDelta,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{-102, -9, 22, 0, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("idelta", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupIdelta,
|
||||
Start: 10,
|
||||
End: 130,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{33, -87, 0, nan}
|
||||
timestampsExpected := []int64{10, 50, 90, 130}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("changes", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupChanges,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{3, 4, 3, 0, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("resets", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupResets,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{2, 2, 1, 0, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("avg", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupAvg,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{55.5, 49.75, 36.666666666666664, 34, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("deriv", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupDeriv,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{-3290.3225806451615, -204.54545454545456, 550, 0, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("ideriv", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupIderiv,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{-1916.6666666666665, -43500, 400, 0, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("stddev", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupStddev,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{39.81519810323691, 32.080952292598795, 5.2493385826745405, 0, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("integrate", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupIntegrate,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{4.6035, 4.3934999999999995, 2.166, 0.34, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
t.Run("distinct", func(t *testing.T) {
|
||||
rc := rollupConfig{
|
||||
Func: rollupDistinct,
|
||||
Start: 0,
|
||||
End: 160,
|
||||
Step: 40,
|
||||
Window: 0,
|
||||
}
|
||||
rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
|
||||
values := rc.Do(nil, testValues, testTimestamps)
|
||||
valuesExpected := []float64{4, 4, 3, 1, nan}
|
||||
timestampsExpected := []int64{0, 40, 80, 120, 160}
|
||||
testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
|
||||
})
|
||||
}
|
||||
|
||||
func testRowsEqual(t *testing.T, values []float64, timestamps []int64, valuesExpected []float64, timestampsExpected []int64) {
|
||||
t.Helper()
|
||||
if len(values) != len(valuesExpected) {
|
||||
t.Fatalf("unexpected len(values); got %d; want %d\nvalues=\n%v\nvaluesExpected=\n%v",
|
||||
len(values), len(valuesExpected), values, valuesExpected)
|
||||
}
|
||||
if len(timestamps) != len(timestampsExpected) {
|
||||
t.Fatalf("unexpected len(timestamps); got %d; want %d\ntimestamps=\n%v\ntimestampsExpected=\n%v",
|
||||
len(timestamps), len(timestampsExpected), timestamps, timestampsExpected)
|
||||
}
|
||||
if len(values) != len(timestamps) {
|
||||
t.Fatalf("len(values) doesn't match len(timestamps); got %d vs %d", len(values), len(timestamps))
|
||||
}
|
||||
for i, v := range values {
|
||||
ts := timestamps[i]
|
||||
tsExpected := timestampsExpected[i]
|
||||
if ts != tsExpected {
|
||||
t.Fatalf("unexpected timestamp at timestamps[%d]; got %d; want %d\ntimestamps=\n%v\ntimestampsExpected=\n%v",
|
||||
i, ts, tsExpected, timestamps, timestampsExpected)
|
||||
}
|
||||
vExpected := valuesExpected[i]
|
||||
if math.IsNaN(v) {
|
||||
if !math.IsNaN(vExpected) {
|
||||
t.Fatalf("unexpected nan value at values[%d]; want %f\nvalues=\n%v\nvaluesExpected=\n%v",
|
||||
i, vExpected, values, valuesExpected)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if v != vExpected {
|
||||
t.Fatalf("unexpected value at values[%d]; got %f; want %f\nvalues=\n%v\nvaluesExpected=\n%v",
|
||||
i, v, vExpected, values, valuesExpected)
|
||||
}
|
||||
}
|
||||
}
|
app/vmselect/promql/rollup_timing_test.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package promql
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkRollupAvg(b *testing.B) {
|
||||
rfa := &rollupFuncArg{
|
||||
values: benchValues,
|
||||
}
|
||||
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(benchValues)))
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var vSum float64
|
||||
for pb.Next() {
|
||||
vSum += rollupAvg(rfa)
|
||||
}
|
||||
SinkLock.Lock()
|
||||
Sink += vSum
|
||||
SinkLock.Unlock()
|
||||
})
|
||||
}
|
||||
|
||||
var (
|
||||
// Sink is a global sink for benchmarks.
|
||||
// It guarantees the compiler doesn't remove the code in benchmarks,
|
||||
// which writes data to the Sink.
|
||||
Sink float64
|
||||
|
||||
// SinkLock locks Sink.
|
||||
SinkLock sync.Mutex
|
||||
)
|
||||
|
||||
var benchValues = func() []float64 {
|
||||
values := make([]float64, 1000)
|
||||
for i := range values {
|
||||
values[i] = rand.Float64() * 100
|
||||
}
|
||||
return values
|
||||
}()
|
app/vmselect/promql/timeseries.go (new file, 383 lines)
@@ -0,0 +1,383 @@
package promql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"unsafe"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
|
||||
type timeseries struct {
|
||||
MetricName storage.MetricName
|
||||
Values []float64
|
||||
Timestamps []int64
|
||||
|
||||
// denyReuse, when set, prevents the timeseries from being re-used.
|
||||
// Timeseries may be re-used only if their members own values
|
||||
// they refer to.
|
||||
denyReuse bool
|
||||
}
|
||||
|
||||
func (ts *timeseries) Reset() {
|
||||
if ts.denyReuse {
|
||||
*ts = timeseries{}
|
||||
return
|
||||
}
|
||||
|
||||
ts.MetricName.Reset()
|
||||
ts.Values = ts.Values[:0]
|
||||
ts.Timestamps = ts.Timestamps[:0]
|
||||
}
|
||||
|
||||
func (ts *timeseries) String() string {
|
||||
return fmt.Sprintf("MetricName=%s, Values=%g, Timestamps=%d", &ts.MetricName, ts.Values, ts.Timestamps)
|
||||
}
|
||||
|
||||
func (ts *timeseries) CopyFrom(src *timeseries) {
|
||||
ts.Reset()
|
||||
ts.MetricName.CopyFrom(&src.MetricName)
|
||||
ts.Values = append(ts.Values[:0], src.Values...)
|
||||
ts.Timestamps = append(ts.Timestamps[:0], src.Timestamps...)
|
||||
}
|
||||
|
||||
func (ts *timeseries) CopyFromMetricNames(src *timeseries) {
|
||||
ts.Reset()
|
||||
ts.MetricName.CopyFrom(&src.MetricName)
|
||||
ts.Values = src.Values
|
||||
ts.Timestamps = src.Timestamps
|
||||
|
||||
ts.denyReuse = true
|
||||
}
|
||||
|
||||
func (ts *timeseries) CopyShallow(src *timeseries) {
|
||||
*ts = *src
|
||||
ts.denyReuse = true
|
||||
}
|
||||
|
||||
func marshalTimeseriesFast(tss []*timeseries, maxSize int, step int64) []byte {
|
||||
if len(tss) == 0 {
|
||||
logger.Panicf("BUG: tss cannot be empty")
|
||||
}
|
||||
|
||||
// Calculate the required size for marshaled tss.
|
||||
size := 0
|
||||
for _, ts := range tss {
|
||||
size += ts.marshaledFastSizeNoTimestamps()
|
||||
}
|
||||
// timestamps are stored only once for all the tss, since they are identical.
|
||||
assertIdenticalTimestamps(tss, step)
|
||||
size += 8 * len(tss[0].Timestamps)
|
||||
|
||||
if size > maxSize {
|
||||
// Do not marshal tss, since it would occupy too much space
|
||||
return nil
|
||||
}
|
||||
|
||||
// Allocate the buffer for the marshaled tss before marshaling them.
|
||||
// This should reduce memory fragmentation and memory usage.
|
||||
dst := make([]byte, 0, size)
|
||||
dst = marshalFastTimestamps(dst, tss[0].Timestamps)
|
||||
for _, ts := range tss {
|
||||
dst = ts.marshalFastNoTimestamps(dst)
|
||||
}
|
||||
return dst
|
||||
}
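// A minimal round-trip sketch (illustrative only; the function name and the sample
// data are assumptions, not part of the original code). It shows that timestamps
// are marshaled once and shared by every series unmarshaled from buf.
func exampleMarshalTimeseriesRoundTrip() {
	tss := []*timeseries{
		{
			MetricName: storage.MetricName{MetricGroup: []byte("foo")},
			Timestamps: []int64{10, 20, 30},
			Values:     []float64{1, 2, 3},
		},
		{
			MetricName: storage.MetricName{MetricGroup: []byte("bar")},
			Timestamps: []int64{10, 20, 30},
			Values:     []float64{4, 5, 6},
		},
	}
	buf := marshalTimeseriesFast(tss, 1<<20, 10)
	tssGot, err := unmarshalTimeseriesFast(buf)
	if err != nil {
		logger.Panicf("BUG: unexpected error: %s", err)
	}
	// The unmarshaled series reference buf, so buf must not be modified
	// while tssGot is in use.
	fmt.Printf("unmarshaled %d series\n", len(tssGot))
}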
|
||||
|
||||
// unmarshalTimeseriesFast unmarshals timeseries from src.
|
||||
//
|
||||
// The returned timeseries refer to src, so it is unsafe to modify it
|
||||
// while the timeseries are in use.
|
||||
func unmarshalTimeseriesFast(src []byte) ([]*timeseries, error) {
|
||||
tail, timestamps, err := unmarshalFastTimestamps(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
src = tail
|
||||
|
||||
var tss []*timeseries
|
||||
for len(src) > 0 {
|
||||
var ts timeseries
|
||||
ts.denyReuse = false
|
||||
ts.Timestamps = timestamps
|
||||
|
||||
tail, err := ts.unmarshalFastNoTimestamps(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
src = tail
|
||||
|
||||
tss = append(tss, &ts)
|
||||
}
|
||||
return tss, nil
|
||||
}
|
||||
|
||||
// marshaledFastSizeNoTimestamps returns the size of marshaled ts
|
||||
// returned from marshalFastNoTimestamps.
|
||||
func (ts *timeseries) marshaledFastSizeNoTimestamps() int {
|
||||
mn := &ts.MetricName
|
||||
n := 2 + len(mn.MetricGroup)
|
||||
n += 2 // Length of tags.
|
||||
for i := range mn.Tags {
|
||||
tag := &mn.Tags[i]
|
||||
n += 2 + len(tag.Key)
|
||||
n += 2 + len(tag.Value)
|
||||
}
|
||||
n += 8 * len(ts.Values)
|
||||
return n
|
||||
}
|
||||
|
||||
// marshalFastNoTimestamps appends marshaled ts to dst and returns the result.
|
||||
//
|
||||
// It doesn't marshal timestamps.
|
||||
//
|
||||
// The result must be unmarshaled with unmarshalFastNoTimestamps.
|
||||
func (ts *timeseries) marshalFastNoTimestamps(dst []byte) []byte {
|
||||
mn := &ts.MetricName
|
||||
dst = marshalBytesFast(dst, mn.MetricGroup)
|
||||
dst = encoding.MarshalUint16(dst, uint16(len(mn.Tags)))
|
||||
// There is no need to sort tags here - they must be sorted after unmarshaling.
|
||||
for i := range mn.Tags {
|
||||
tag := &mn.Tags[i]
|
||||
dst = marshalBytesFast(dst, tag.Key)
|
||||
dst = marshalBytesFast(dst, tag.Value)
|
||||
}
|
||||
|
||||
// Do not marshal len(ts.Values), since it is already encoded as len(ts.Timestamps)
|
||||
// during marshalFastTimestamps.
|
||||
var valuesBuf []byte
|
||||
if len(ts.Values) > 0 {
|
||||
valuesBuf = (*[maxByteSliceLen]byte)(unsafe.Pointer(&ts.Values[0]))[:len(ts.Values)*8]
|
||||
}
|
||||
dst = append(dst, valuesBuf...)
|
||||
return dst
|
||||
}
|
||||
|
||||
func marshalFastTimestamps(dst []byte, timestamps []int64) []byte {
|
||||
dst = encoding.MarshalUint32(dst, uint32(len(timestamps)))
|
||||
var timestampsBuf []byte
|
||||
if len(timestamps) > 0 {
|
||||
timestampsBuf = (*[maxByteSliceLen]byte)(unsafe.Pointer(×tamps[0]))[:len(timestamps)*8]
|
||||
}
|
||||
dst = append(dst, timestampsBuf...)
|
||||
return dst
|
||||
}
|
||||
|
||||
// It is unsafe to modify src while the returned timestamps are in use.
|
||||
func unmarshalFastTimestamps(src []byte) ([]byte, []int64, error) {
|
||||
if len(src) < 4 {
|
||||
return src, nil, fmt.Errorf("cannot decode len(timestamps); got %d bytes; want at least %d bytes", len(src), 4)
|
||||
}
|
||||
timestampsCount := int(encoding.UnmarshalUint32(src))
|
||||
src = src[4:]
|
||||
if timestampsCount == 0 {
|
||||
return src, nil, nil
|
||||
}
|
||||
|
||||
bufSize := timestampsCount * 8
|
||||
if len(src) < bufSize {
|
||||
return src, nil, fmt.Errorf("cannot unmarshal timestamps; got %d bytes; want at least %d bytes", len(src), bufSize)
|
||||
}
|
||||
timestamps := (*[maxByteSliceLen / 8]int64)(unsafe.Pointer(&src[0]))[:timestampsCount]
|
||||
timestamps = timestamps[:len(timestamps):len(timestamps)]
|
||||
src = src[bufSize:]
|
||||
|
||||
return src, timestamps, nil
|
||||
}
|
||||
|
||||
// unmarshalFastNoTimestamps unmarshals ts from src, so ts members reference src.
|
||||
//
|
||||
// It is expected that ts.Timestamps is already unmarshaled.
|
||||
//
|
||||
// It is unsafe to modify src while ts is in use.
|
||||
func (ts *timeseries) unmarshalFastNoTimestamps(src []byte) ([]byte, error) {
|
||||
// ts members point to src, so they cannot be re-used.
|
||||
ts.denyReuse = true
|
||||
|
||||
tail, err := unmarshalMetricNameFast(&ts.MetricName, src)
|
||||
if err != nil {
|
||||
return tail, fmt.Errorf("cannot unmarshal MetricName: %s", err)
|
||||
}
|
||||
src = tail
|
||||
|
||||
valuesCount := len(ts.Timestamps)
|
||||
if valuesCount == 0 {
|
||||
return src, nil
|
||||
}
|
||||
bufSize := valuesCount * 8
|
||||
if len(src) < bufSize {
|
||||
return src, fmt.Errorf("cannot unmarshal values; got %d bytes; want at least %d bytes", len(src), bufSize)
|
||||
}
|
||||
values := (*[maxByteSliceLen / 8]float64)(unsafe.Pointer(&src[0]))[:valuesCount]
|
||||
ts.Values = values[:len(values):len(values)]
|
||||
|
||||
return src[bufSize:], nil
|
||||
}
|
||||
|
||||
// unmarshalMetricNameFast unmarshals mn from src, so mn members
|
||||
// hold references to src.
|
||||
//
|
||||
// It is unsafe to modify src while mn is in use.
|
||||
func unmarshalMetricNameFast(mn *storage.MetricName, src []byte) ([]byte, error) {
|
||||
mn.Reset()
|
||||
|
||||
tail, metricGroup, err := unmarshalBytesFast(src)
|
||||
if err != nil {
|
||||
return tail, fmt.Errorf("cannot unmarshal MetricGroup: %s", err)
|
||||
}
|
||||
src = tail
|
||||
mn.MetricGroup = metricGroup[:len(metricGroup):len(metricGroup)]
|
||||
|
||||
if len(src) < 2 {
|
||||
return src, fmt.Errorf("not enough bytes for unmarshaling len(tags); need at least 2 bytes; got %d bytes", len(src))
|
||||
}
|
||||
tagsLen := encoding.UnmarshalUint16(src)
|
||||
src = src[2:]
|
||||
if n := int(tagsLen) - cap(mn.Tags); n > 0 {
|
||||
mn.Tags = append(mn.Tags[:cap(mn.Tags)], make([]storage.Tag, n)...)
|
||||
}
|
||||
mn.Tags = mn.Tags[:tagsLen]
|
||||
for i := range mn.Tags {
|
||||
tail, key, err := unmarshalBytesFast(src)
|
||||
if err != nil {
|
||||
return tail, fmt.Errorf("cannot unmarshal key for tag[%d]: %s", i, err)
|
||||
}
|
||||
src = tail
|
||||
|
||||
tail, value, err := unmarshalBytesFast(src)
|
||||
if err != nil {
|
||||
return tail, fmt.Errorf("cannot unmarshal value for tag[%d]: %s", i, err)
|
||||
}
|
||||
src = tail
|
||||
|
||||
tag := &mn.Tags[i]
|
||||
tag.Key = key[:len(key):len(key)]
|
||||
tag.Value = value[:len(value):len(value)]
|
||||
}
|
||||
return src, nil
|
||||
}
|
||||
|
||||
func marshalMetricTagsFast(dst []byte, tags []storage.Tag) []byte {
|
||||
for i := range tags {
|
||||
tag := &tags[i]
|
||||
dst = marshalBytesFast(dst, tag.Key)
|
||||
dst = marshalBytesFast(dst, tag.Value)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func marshalMetricNameSorted(dst []byte, mn *storage.MetricName) []byte {
|
||||
// Do not marshal AccountID and ProjectID, since they are unused.
|
||||
dst = marshalBytesFast(dst, mn.MetricGroup)
|
||||
sortMetricTags(mn.Tags)
|
||||
dst = marshalMetricTagsFast(dst, mn.Tags)
|
||||
return dst
|
||||
}
|
||||
|
||||
func marshalMetricTagsSorted(dst []byte, mn *storage.MetricName) []byte {
|
||||
sortMetricTags(mn.Tags)
|
||||
return marshalMetricTagsFast(dst, mn.Tags)
|
||||
}
|
||||
|
||||
func sortMetricTags(tags []storage.Tag) {
|
||||
less := func(i, j int) bool {
|
||||
return string(tags[i].Key) < string(tags[j].Key)
|
||||
}
|
||||
if sort.SliceIsSorted(tags, less) {
|
||||
return
|
||||
}
|
||||
sort.Slice(tags, less)
|
||||
}
|
||||
|
||||
func marshalBytesFast(dst []byte, s []byte) []byte {
|
||||
dst = encoding.MarshalUint16(dst, uint16(len(s)))
|
||||
dst = append(dst, s...)
|
||||
return dst
|
||||
}
|
||||
|
||||
func unmarshalBytesFast(src []byte) ([]byte, []byte, error) {
|
||||
if len(src) < 2 {
|
||||
return src, nil, fmt.Errorf("cannot decode size form src=%X; it must be at least 2 bytes", src)
|
||||
}
|
||||
n := encoding.UnmarshalUint16(src)
|
||||
src = src[2:]
|
||||
if len(src) < int(n) {
|
||||
return src, nil, fmt.Errorf("too short src=%X; it must be at least %d bytes", src, n)
|
||||
}
|
||||
return src[n:], src[:n], nil
|
||||
}
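// A small sketch (the helper name is an assumption) of the 2-byte length-prefixed
// encoding implemented by marshalBytesFast and unmarshalBytesFast above.
func exampleBytesFastRoundTrip() {
	var buf []byte
	buf = marshalBytesFast(buf, []byte("hello"))
	tail, s, err := unmarshalBytesFast(buf)
	if err != nil {
		logger.Panicf("BUG: unexpected error: %s", err)
	}
	if len(tail) != 0 || string(s) != "hello" {
		logger.Panicf("BUG: unexpected round-trip result; tail=%X, s=%q", tail, s)
	}
}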
|
||||
|
||||
func stringMetricName(mn *storage.MetricName) string {
|
||||
var dst []byte
|
||||
dst = append(dst, mn.MetricGroup...)
|
||||
sortMetricTags(mn.Tags)
|
||||
dst = appendStringMetricTags(dst, mn.Tags)
|
||||
return string(dst)
|
||||
}
|
||||
|
||||
func stringMetricTags(mn *storage.MetricName) string {
|
||||
var dst []byte
|
||||
sortMetricTags(mn.Tags)
|
||||
dst = appendStringMetricTags(dst, mn.Tags)
|
||||
return string(dst)
|
||||
}
|
||||
|
||||
func appendStringMetricTags(dst []byte, tags []storage.Tag) []byte {
|
||||
dst = append(dst, '{')
|
||||
for i := range tags {
|
||||
tag := &tags[i]
|
||||
dst = append(dst, tag.Key...)
|
||||
dst = append(dst, '=')
|
||||
value := bytesutil.ToUnsafeString(tag.Value)
|
||||
dst = strconv.AppendQuote(dst, value)
|
||||
if i+1 < len(tags) {
|
||||
dst = append(dst, ", "...)
|
||||
}
|
||||
}
|
||||
dst = append(dst, '}')
|
||||
return dst
|
||||
}
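// An illustrative sketch (names and sample values are assumptions) of the output
// produced by stringMetricName: tags are sorted by key and values are quoted.
func exampleStringMetricName() {
	var mn storage.MetricName
	mn.MetricGroup = []byte("http_requests_total")
	mn.Tags = []storage.Tag{
		{Key: []byte("job"), Value: []byte("api")},
		{Key: []byte("instance"), Value: []byte("host1")},
	}
	// Prints http_requests_total{instance="host1", job="api"}
	fmt.Println(stringMetricName(&mn))
}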
|
||||
|
||||
func assertIdenticalTimestamps(tss []*timeseries, step int64) {
|
||||
if len(tss) == 0 {
|
||||
return
|
||||
}
|
||||
tsGolden := tss[0]
|
||||
if len(tsGolden.Values) != len(tsGolden.Timestamps) {
|
||||
logger.Panicf("BUG: len(tsGolden.Values) must match len(tsGolden.Timestamps); got %d vs %d", len(tsGolden.Values), len(tsGolden.Timestamps))
|
||||
}
|
||||
if len(tsGolden.Timestamps) > 0 {
|
||||
prevTimestamp := tsGolden.Timestamps[0]
|
||||
for _, timestamp := range tsGolden.Timestamps[1:] {
|
||||
if timestamp-prevTimestamp != step {
|
||||
logger.Panicf("BUG: invalid step between timestamps; got %d; want %d; tsGolden.Timestamps=%d", timestamp-prevTimestamp, step, tsGolden.Timestamps)
|
||||
}
|
||||
prevTimestamp = timestamp
|
||||
}
|
||||
}
|
||||
for _, ts := range tss {
|
||||
if len(ts.Values) != len(tsGolden.Values) {
|
||||
logger.Panicf("BUG: unexpected len(ts.Values); got %d; want %d; ts.Values=%g", len(ts.Values), len(tsGolden.Values), ts.Values)
|
||||
}
|
||||
if len(ts.Timestamps) != len(tsGolden.Timestamps) {
|
||||
logger.Panicf("BUG: unexpected len(ts.Timestamps); got %d; want %d; ts.Timestamps=%d", len(ts.Timestamps), len(tsGolden.Timestamps), ts.Timestamps)
|
||||
}
|
||||
if len(ts.Timestamps) == 0 {
|
||||
continue
|
||||
}
|
||||
if &ts.Timestamps[0] == &tsGolden.Timestamps[0] {
|
||||
// Fast path - shared timestamps.
|
||||
continue
|
||||
}
|
||||
for i := range ts.Timestamps {
|
||||
if ts.Timestamps[i] != tsGolden.Timestamps[i] {
|
||||
logger.Panicf("BUG: timestamps mismatch at position %d; got %d; want %d; ts.Timestamps=%d, tsGolden.Timestamps=%d",
|
||||
i, ts.Timestamps[i], tsGolden.Timestamps[i], ts.Timestamps, tsGolden.Timestamps)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
app/vmselect/promql/timeseries_test.go (new file, 104 lines)
@@ -0,0 +1,104 @@
package promql
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
n := m.Run()
|
||||
os.Exit(n)
|
||||
}
|
||||
|
||||
func TestTimeseriesMarshalUnmarshalFast(t *testing.T) {
|
||||
t.Run("single", func(t *testing.T) {
|
||||
var tsOrig timeseries
|
||||
buf := tsOrig.marshalFastNoTimestamps(nil)
|
||||
n := tsOrig.marshaledFastSizeNoTimestamps()
|
||||
if n != len(buf) {
|
||||
t.Fatalf("unexpected marshaled size; got %d; want %d", n, len(buf))
|
||||
}
|
||||
|
||||
var tsGot timeseries
|
||||
tail, err := tsGot.unmarshalFastNoTimestamps(buf)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot unmarshal timeseries: %s", err)
|
||||
}
|
||||
if len(tail) > 0 {
|
||||
t.Fatalf("unexpected non-empty tail left: len(tail)=%d; tail=%X", len(tail), tail)
|
||||
}
|
||||
tsOrig.denyReuse = true
|
||||
tsOrig.MetricName.MetricGroup = []byte{}
|
||||
if !reflect.DeepEqual(&tsOrig, &tsGot) {
|
||||
t.Fatalf("unexpected ts\ngot:\n%s\nwant:\n%s", &tsGot, &tsOrig)
|
||||
}
|
||||
})
|
||||
t.Run("multiple", func(t *testing.T) {
|
||||
var dst []byte
|
||||
var tssOrig []*timeseries
|
||||
timestamps := []int64{2}
|
||||
for i := 0; i < 10; i++ {
|
||||
var ts timeseries
|
||||
ts.denyReuse = true
|
||||
ts.MetricName.MetricGroup = []byte(fmt.Sprintf("metricGroup %d", i))
|
||||
ts.MetricName.Tags = []storage.Tag{{
|
||||
Key: []byte(fmt.Sprintf("key %d", i)),
|
||||
Value: []byte(fmt.Sprintf("value %d", i)),
|
||||
}}
|
||||
ts.Values = []float64{float64(i) + 0.2}
|
||||
ts.Timestamps = timestamps
|
||||
|
||||
dstLen := len(dst)
|
||||
dst = ts.marshalFastNoTimestamps(dst)
|
||||
n := ts.marshaledFastSizeNoTimestamps()
|
||||
if n != len(dst)-dstLen {
|
||||
t.Fatalf("unexpected marshaled size on iteration %d; got %d; want %d", i, n, len(dst)-dstLen)
|
||||
}
|
||||
|
||||
var tsGot timeseries
|
||||
tsGot.Timestamps = ts.Timestamps
|
||||
tail, err := tsGot.unmarshalFastNoTimestamps(dst[dstLen:])
|
||||
if err != nil {
|
||||
t.Fatalf("cannot unmarshal timeseries on iteration %d: %s", i, err)
|
||||
}
|
||||
if len(tail) > 0 {
|
||||
t.Fatalf("unexpected non-empty tail left on iteration %d: len(tail)=%d; tail=%x", i, len(tail), tail)
|
||||
}
|
||||
if !reflect.DeepEqual(&ts, &tsGot) {
|
||||
t.Fatalf("unexpected ts on iteration %d\ngot:\n%s\nwant:\n%s", i, &tsGot, &ts)
|
||||
}
|
||||
|
||||
tssOrig = append(tssOrig, &ts)
|
||||
}
|
||||
buf := marshalTimeseriesFast(tssOrig, 1e6, 123)
|
||||
tssGot, err := unmarshalTimeseriesFast(buf)
|
||||
if err != nil {
|
||||
t.Fatalf("error in unmarshalTimeseriesFast: %s", err)
|
||||
}
|
||||
if !reflect.DeepEqual(tssOrig, tssGot) {
|
||||
t.Fatalf("unexpected unmarshaled timeseries\ngot:\n%s\nwant:\n%s", tssGot, tssOrig)
|
||||
}
|
||||
|
||||
src := dst
|
||||
for i := 0; i < 10; i++ {
|
||||
tsOrig := tssOrig[i]
|
||||
var ts timeseries
|
||||
ts.Timestamps = tsOrig.Timestamps
|
||||
tail, err := ts.unmarshalFastNoTimestamps(src)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot unmarshal timeseries[%d]: %s", i, err)
|
||||
}
|
||||
src = tail
|
||||
if !reflect.DeepEqual(tsOrig, &ts) {
|
||||
t.Fatalf("unexpected ts on iteration %d:\n%+v\nwant:\n%+v", i, &ts, tsOrig)
|
||||
}
|
||||
}
|
||||
if len(src) > 0 {
|
||||
t.Fatalf("unexpected tail left; len(tail)=%d; tail=%X", len(src), src)
|
||||
}
|
||||
})
|
||||
}
|
app/vmselect/promql/transform.go (new file, 1200 lines; diff suppressed because it is too large)
app/vmstorage/README.md (new file, 5 lines)
@@ -0,0 +1,5 @@
`vmstorage` performs the following tasks:
|
||||
|
||||
- Accepts inserts from `vminsert` and stores them in local storage.
|
||||
|
||||
- Serves select requests from `vmselect`.
|
app/vmstorage/main.go (new file, 455 lines)
@@ -0,0 +1,455 @@
package vmstorage
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/syncwg"
|
||||
"github.com/VictoriaMetrics/metrics"
|
||||
)
|
||||
|
||||
var (
|
||||
retentionPeriod = flag.Int("retentionPeriod", 1, "Retention period in months")
|
||||
snapshotAuthKey = flag.String("snapshotAuthKey", "", "authKey, which must be passed in query string to /snapshot* pages")
|
||||
|
||||
precisionBits = flag.Int("precisionBits", 64, "The number of precision bits to store per value. Lower precision bits improve data compression at the cost of precision loss")
|
||||
|
||||
// DataPath is a path to storage data.
|
||||
DataPath = flag.String("storageDataPath", "victoria-metrics-data", "Path to storage data")
|
||||
)
|
||||
|
||||
// Init initializes vmstorage.
|
||||
func Init() {
|
||||
if err := encoding.CheckPrecisionBits(uint8(*precisionBits)); err != nil {
|
||||
logger.Fatalf("invalid `-precisionBits`: %s", err)
|
||||
}
|
||||
logger.Infof("opening storage at %q with retention period %d months", *DataPath, *retentionPeriod)
|
||||
startTime := time.Now()
|
||||
strg, err := storage.OpenStorage(*DataPath, *retentionPeriod)
|
||||
if err != nil {
|
||||
logger.Fatalf("cannot open a storage at %s with retention period %d months: %s", *DataPath, *retentionPeriod, err)
|
||||
}
|
||||
Storage = strg
|
||||
|
||||
var m storage.Metrics
|
||||
Storage.UpdateMetrics(&m)
|
||||
tm := &m.TableMetrics
|
||||
partsCount := tm.SmallPartsCount + tm.BigPartsCount
|
||||
blocksCount := tm.SmallBlocksCount + tm.BigBlocksCount
|
||||
rowsCount := tm.SmallRowsCount + tm.BigRowsCount
|
||||
logger.Infof("successfully opened storage %q in %s; partsCount: %d; blocksCount: %d; rowsCount: %d",
|
||||
*DataPath, time.Since(startTime), partsCount, blocksCount, rowsCount)
|
||||
|
||||
registerStorageMetrics(Storage)
|
||||
}
|
||||
|
||||
// Storage is a storage.
|
||||
//
|
||||
// Every storage call must be wrapped into WG.Add(1) ... WG.Done()
|
||||
// for proper graceful shutdown when Stop is called.
|
||||
var Storage *storage.Storage
|
||||
|
||||
// WG must be incremented before Storage call.
|
||||
//
|
||||
// Use syncwg instead of sync, since Add is called from concurrent goroutines.
|
||||
var WG syncwg.WaitGroup
|
||||
|
||||
// AddRows adds mrs to the storage.
|
||||
func AddRows(mrs []storage.MetricRow) error {
|
||||
WG.Add(1)
|
||||
err := Storage.AddRows(mrs, uint8(*precisionBits))
|
||||
WG.Done()
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteMetrics deletes metrics matching tfss.
|
||||
//
|
||||
// Returns the number of deleted metrics.
|
||||
func DeleteMetrics(tfss []*storage.TagFilters) (int, error) {
|
||||
WG.Add(1)
|
||||
n, err := Storage.DeleteMetrics(tfss)
|
||||
WG.Done()
|
||||
return n, err
|
||||
}
|
||||
|
||||
// SearchTagKeys searches for tag keys
|
||||
func SearchTagKeys(maxTagKeys int) ([]string, error) {
|
||||
WG.Add(1)
|
||||
keys, err := Storage.SearchTagKeys(maxTagKeys)
|
||||
WG.Done()
|
||||
return keys, err
|
||||
}
|
||||
|
||||
// SearchTagValues searches for tag values for the given tagKey
|
||||
func SearchTagValues(tagKey []byte, maxTagValues int) ([]string, error) {
|
||||
WG.Add(1)
|
||||
values, err := Storage.SearchTagValues(tagKey, maxTagValues)
|
||||
WG.Done()
|
||||
return values, err
|
||||
}
|
||||
|
||||
// GetSeriesCount returns the number of time series in the storage.
|
||||
func GetSeriesCount() (uint64, error) {
|
||||
WG.Add(1)
|
||||
n, err := Storage.GetSeriesCount()
|
||||
WG.Done()
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Stop stops the vmstorage
|
||||
func Stop() {
|
||||
logger.Infof("gracefully closing the storage at %s", *DataPath)
|
||||
startTime := time.Now()
|
||||
WG.WaitAndBlock()
|
||||
Storage.MustClose()
|
||||
logger.Infof("successfully closed the storage in %s", time.Since(startTime))
|
||||
|
||||
logger.Infof("the storage has been stopped")
|
||||
}
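// A minimal sketch of the expected shutdown order in the embedding application
// (the function name and the stopHTTP callback are assumptions, not part of this
// package's API): stop accepting requests first, so no new Storage calls are
// started after WG begins blocking inside Stop.
func gracefulShutdownSketch(stopHTTP func() error) {
	if err := stopHTTP(); err != nil {
		logger.Fatalf("cannot stop the http server: %s", err)
	}
	Stop()
}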
|
||||
|
||||
// RequestHandler is a storage request handler.
|
||||
func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
|
||||
path := r.URL.Path
|
||||
prometheusCompatibleResponse := false
|
||||
if path == "/api/v1/admin/tsdb/snapshot" {
|
||||
// Handle Prometheus API - https://prometheus.io/docs/prometheus/latest/querying/api/#snapshot .
|
||||
prometheusCompatibleResponse = true
|
||||
path = "/snapshot/create"
|
||||
}
|
||||
if !strings.HasPrefix(path, "/snapshot") {
|
||||
return false
|
||||
}
|
||||
authKey := r.FormValue("authKey")
|
||||
if authKey != *snapshotAuthKey {
|
||||
httpserver.Errorf(w, "invalid authKey %q. It must match the value from -snapshotAuthKey command line flag", authKey)
|
||||
return true
|
||||
}
|
||||
path = path[len("/snapshot"):]
|
||||
|
||||
switch path {
|
||||
case "/create":
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
snapshotPath, err := Storage.CreateSnapshot()
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("cannot create snapshot: %s", err)
|
||||
logger.Errorf("%s", msg)
|
||||
fmt.Fprintf(w, `{"status":"error","msg":%q}`, msg)
|
||||
return true
|
||||
}
|
||||
if prometheusCompatibleResponse {
|
||||
fmt.Fprintf(w, `{"status":"success","data":{"name":%q}}`, snapshotPath)
|
||||
} else {
|
||||
fmt.Fprintf(w, `{"status":"ok","snapshot":%q}`, snapshotPath)
|
||||
}
|
||||
return true
|
||||
case "/list":
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
snapshots, err := Storage.ListSnapshots()
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("cannot list snapshots: %s", err)
|
||||
logger.Errorf("%s", msg)
|
||||
fmt.Fprintf(w, `{"status":"error","msg":%q}`, msg)
|
||||
return true
|
||||
}
|
||||
fmt.Fprintf(w, `{"status":"ok","snapshots":[`)
|
||||
if len(snapshots) > 0 {
|
||||
for _, snapshot := range snapshots[:len(snapshots)-1] {
|
||||
fmt.Fprintf(w, "\n%q,", snapshot)
|
||||
}
|
||||
fmt.Fprintf(w, "\n%q\n", snapshots[len(snapshots)-1])
|
||||
}
|
||||
fmt.Fprintf(w, `]}`)
|
||||
return true
|
||||
case "/delete":
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
snapshotName := r.FormValue("snapshot")
|
||||
if err := Storage.DeleteSnapshot(snapshotName); err != nil {
|
||||
msg := fmt.Sprintf("cannot delete snapshot %q: %s", snapshotName, err)
|
||||
logger.Errorf("%s", msg)
|
||||
fmt.Fprintf(w, `{"status":"error","msg":%q}`, msg)
|
||||
return true
|
||||
}
|
||||
fmt.Fprintf(w, `{"status":"ok"}`)
|
||||
return true
|
||||
case "/delete_all":
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
snapshots, err := Storage.ListSnapshots()
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("cannot list snapshots: %s", err)
|
||||
logger.Errorf("%s", msg)
|
||||
fmt.Fprintf(w, `{"status":"error","msg":%q}`, msg)
|
||||
return true
|
||||
}
|
||||
for _, snapshotName := range snapshots {
|
||||
if err := Storage.DeleteSnapshot(snapshotName); err != nil {
|
||||
msg := fmt.Sprintf("cannot delete snapshot %q: %s", snapshotName, err)
|
||||
logger.Errorf("%s", msg)
|
||||
fmt.Fprintf(w, `{"status":"error","msg":%q}`, msg)
|
||||
return true
|
||||
}
|
||||
}
|
||||
fmt.Fprintf(w, `{"status":"ok"}`)
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
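// A hedged usage sketch for the snapshot endpoints served above. The helper name,
// the server address and the authKey value are illustrative assumptions; it also
// assumes "io/ioutil" is added to the imports.
func createSnapshotSketch(addr, authKey string) error {
	resp, err := http.Get(addr + "/snapshot/create?authKey=" + authKey)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return err
	}
	// On success the response looks like {"status":"ok","snapshot":"<name>"}.
	fmt.Printf("%s\n", body)
	return nil
}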
|
||||
|
||||
func registerStorageMetrics(strg *storage.Storage) {
|
||||
mCache := &storage.Metrics{}
|
||||
var mCacheLock sync.Mutex
|
||||
var lastUpdateTime time.Time
|
||||
|
||||
m := func() *storage.Metrics {
|
||||
mCacheLock.Lock()
|
||||
defer mCacheLock.Unlock()
|
||||
if time.Since(lastUpdateTime) < time.Second {
|
||||
return mCache
|
||||
}
|
||||
var mc storage.Metrics
|
||||
strg.UpdateMetrics(&mc)
|
||||
mCache = &mc
|
||||
lastUpdateTime = time.Now()
|
||||
return mCache
|
||||
}
|
||||
tm := func() *storage.TableMetrics {
|
||||
sm := m()
|
||||
return &sm.TableMetrics
|
||||
}
|
||||
idbm := func() *storage.IndexDBMetrics {
|
||||
sm := m()
|
||||
return &sm.IndexDBMetrics
|
||||
}
|
||||
|
||||
metrics.NewGauge(`vm_active_merges{type="storage/big"}`, func() float64 {
|
||||
return float64(tm().ActiveBigMerges)
|
||||
})
|
||||
metrics.NewGauge(`vm_active_merges{type="storage/small"}`, func() float64 {
|
||||
return float64(tm().ActiveSmallMerges)
|
||||
})
|
||||
metrics.NewGauge(`vm_active_merges{type="indexdb"}`, func() float64 {
|
||||
return float64(idbm().ActiveMerges)
|
||||
})
|
||||
|
||||
metrics.NewGauge(`vm_merges_total{type="storage/big"}`, func() float64 {
|
||||
return float64(tm().BigMergesCount)
|
||||
})
|
||||
metrics.NewGauge(`vm_merges_total{type="storage/small"}`, func() float64 {
|
||||
return float64(tm().SmallMergesCount)
|
||||
})
|
||||
metrics.NewGauge(`vm_merges_total{type="indexdb"}`, func() float64 {
|
||||
return float64(idbm().MergesCount)
|
||||
})
|
||||
|
||||
metrics.NewGauge(`vm_rows_merged_total{type="storage/big"}`, func() float64 {
|
||||
return float64(tm().BigRowsMerged)
|
||||
})
|
||||
metrics.NewGauge(`vm_rows_merged_total{type="storage/small"}`, func() float64 {
|
||||
return float64(tm().SmallRowsMerged)
|
||||
})
|
||||
metrics.NewGauge(`vm_rows_merged_total{type="indexdb"}`, func() float64 {
|
||||
return float64(idbm().ItemsMerged)
|
||||
})
|
||||
|
||||
metrics.NewGauge(`vm_rows_deleted_total{type="storage/big"}`, func() float64 {
|
||||
return float64(tm().BigRowsDeleted)
|
||||
})
|
||||
metrics.NewGauge(`vm_rows_deleted_total{type="storage/small"}`, func() float64 {
|
||||
return float64(tm().SmallRowsDeleted)
|
||||
})
|
||||
|
||||
metrics.NewGauge(`vm_references{type="storage/big", name="parts"}`, func() float64 {
|
||||
return float64(tm().BigPartsRefCount)
|
||||
})
|
||||
metrics.NewGauge(`vm_references{type="storage/small", name="parts"}`, func() float64 {
|
||||
return float64(tm().SmallPartsRefCount)
|
||||
})
|
||||
metrics.NewGauge(`vm_references{type="storage", name="partitions"}`, func() float64 {
|
||||
return float64(tm().PartitionsRefCount)
|
||||
})
|
||||
metrics.NewGauge(`vm_references{type="indexdb", name="objects"}`, func() float64 {
|
||||
return float64(idbm().IndexDBRefCount)
|
||||
})
|
||||
metrics.NewGauge(`vm_references{type="indexdb", name="parts"}`, func() float64 {
|
||||
return float64(idbm().PartsRefCount)
|
||||
})
|
||||
|
||||
metrics.NewGauge(`vm_missing_tsids_for_metric_id_total`, func() float64 {
|
||||
return float64(idbm().MissingTSIDsForMetricID)
|
||||
})
|
||||
|
||||
metrics.NewGauge(`vm_assisted_merges_total{type="storage/small"}`, func() float64 {
|
||||
return float64(tm().SmallAssistedMerges)
|
||||
})
|
||||
metrics.NewGauge(`vm_assisted_merges_total{type="indexdb"}`, func() float64 {
|
||||
return float64(idbm().AssistedMerges)
|
||||
})
|
||||
|
||||
metrics.NewGauge(`vm_pending_rows{type="storage"}`, func() float64 {
|
||||
return float64(tm().PendingRows)
|
||||
})
|
||||
metrics.NewGauge(`vm_pending_rows{type="indexdb"}`, func() float64 {
|
||||
return float64(idbm().PendingItems)
|
||||
})
|
||||
|
||||
metrics.NewGauge(`vm_parts{type="storage/big"}`, func() float64 {
|
||||
return float64(tm().BigPartsCount)
|
||||
})
|
||||
metrics.NewGauge(`vm_parts{type="storage/small"}`, func() float64 {
|
||||
return float64(tm().SmallPartsCount)
|
||||
})
|
||||
metrics.NewGauge(`vm_parts{type="indexdb"}`, func() float64 {
|
||||
return float64(idbm().PartsCount)
|
||||
})
|
||||
|
||||
metrics.NewGauge(`vm_blocks{type="storage/big"}`, func() float64 {
|
||||
return float64(tm().BigBlocksCount)
|
||||
})
|
||||
metrics.NewGauge(`vm_blocks{type="storage/small"}`, func() float64 {
|
||||
return float64(tm().SmallBlocksCount)
|
||||
})
|
||||
metrics.NewGauge(`vm_blocks{type="indexdb"}`, func() float64 {
|
||||
return float64(idbm().BlocksCount)
|
||||
})
|
||||
|
||||
metrics.NewGauge(`vm_rows{type="storage/big"}`, func() float64 {
|
||||
return float64(tm().BigRowsCount)
|
||||
})
|
||||
metrics.NewGauge(`vm_rows{type="storage/small"}`, func() float64 {
|
||||
return float64(tm().SmallRowsCount)
|
||||
})
|
||||
metrics.NewGauge(`vm_rows{type="indexdb"}`, func() float64 {
|
||||
return float64(idbm().ItemsCount)
|
||||
})
|
||||
|
||||
metrics.NewGauge(`vm_cache_entries{type="storage/tsid"}`, func() float64 {
|
||||
return float64(m().TSIDCacheSize)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_entries{type="storage/metricIDs"}`, func() float64 {
|
||||
return float64(m().MetricIDCacheSize)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_entries{type="storage/metricName"}`, func() float64 {
|
||||
return float64(m().MetricNameCacheSize)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_entries{type="storage/date_metricID"}`, func() float64 {
|
||||
return float64(m().DateMetricIDCacheSize)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_entries{type="storage/bigIndexBlocks"}`, func() float64 {
|
||||
return float64(tm().BigIndexBlocksCacheSize)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_entries{type="storage/smallIndexBlocks"}`, func() float64 {
|
||||
return float64(tm().SmallIndexBlocksCacheSize)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_entries{type="indexdb/dataBlocks"}`, func() float64 {
|
||||
return float64(idbm().DataBlocksCacheSize)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_entries{type="indexdb/indexBlocks"}`, func() float64 {
|
||||
return float64(idbm().IndexBlocksCacheSize)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_entries{type="indexdb/tagFilters"}`, func() float64 {
|
||||
return float64(idbm().TagCacheSize)
|
||||
})
|
||||
metrics.NewGauge(`vm_cache_entries{type="storage/regexps"}`, func() float64 {
|
||||
return float64(storage.RegexpCacheSize())
|
||||
})
|
||||
|
||||
metrics.NewGauge(`vm_cache_size_bytes{type="storage/tsid"}`, func() float64 {
|
||||
		return float64(m().TSIDCacheBytesSize)
	})
	metrics.NewGauge(`vm_cache_size_bytes{type="storage/metricIDs"}`, func() float64 {
		return float64(m().MetricIDCacheBytesSize)
	})
	metrics.NewGauge(`vm_cache_size_bytes{type="storage/metricName"}`, func() float64 {
		return float64(m().MetricNameCacheBytesSize)
	})
	metrics.NewGauge(`vm_cache_size_bytes{type="storage/date_metricID"}`, func() float64 {
		return float64(m().DateMetricIDCacheBytesSize)
	})
	metrics.NewGauge(`vm_cache_size_bytes{type="indexdb/tagFilters"}`, func() float64 {
		return float64(idbm().TagCacheBytesSize)
	})

	metrics.NewGauge(`vm_cache_requests_total{type="storage/tsid"}`, func() float64 {
		return float64(m().TSIDCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="storage/metricIDs"}`, func() float64 {
		return float64(m().MetricIDCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="storage/metricName"}`, func() float64 {
		return float64(m().MetricNameCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="storage/date_metricID"}`, func() float64 {
		return float64(m().DateMetricIDCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="storage/bigIndexBlocks"}`, func() float64 {
		return float64(tm().BigIndexBlocksCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="storage/smallIndexBlocks"}`, func() float64 {
		return float64(tm().SmallIndexBlocksCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="indexdb/dataBlocks"}`, func() float64 {
		return float64(idbm().DataBlocksCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="indexdb/indexBlocks"}`, func() float64 {
		return float64(idbm().IndexBlocksCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="indexdb/tagFilters"}`, func() float64 {
		return float64(idbm().TagCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="storage/regexps"}`, func() float64 {
		return float64(storage.RegexpCacheRequests())
	})

	metrics.NewGauge(`vm_cache_misses_total{type="storage/tsid"}`, func() float64 {
		return float64(m().TSIDCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="storage/metricIDs"}`, func() float64 {
		return float64(m().MetricIDCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="storage/metricName"}`, func() float64 {
		return float64(m().MetricNameCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="storage/date_metricID"}`, func() float64 {
		return float64(m().DateMetricIDCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="storage/bigIndexBlocks"}`, func() float64 {
		return float64(tm().BigIndexBlocksCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="storage/smallIndexBlocks"}`, func() float64 {
		return float64(tm().SmallIndexBlocksCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="indexdb/dataBlocks"}`, func() float64 {
		return float64(idbm().DataBlocksCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="indexdb/indexBlocks"}`, func() float64 {
		return float64(idbm().IndexBlocksCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="indexdb/tagFilters"}`, func() float64 {
		return float64(idbm().TagCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="storage/regexps"}`, func() float64 {
		return float64(storage.RegexpCacheMisses())
	})

	metrics.NewGauge(`vm_deleted_metrics_total{type="indexdb"}`, func() float64 {
		return float64(idbm().DeletedMetricsCount)
	})

	metrics.NewGauge(`vm_cache_collisions_total{type="storage/tsid"}`, func() float64 {
		return float64(m().TSIDCacheCollisions)
	})
	metrics.NewGauge(`vm_cache_collisions_total{type="storage/metricName"}`, func() float64 {
		return float64(m().MetricNameCacheCollisions)
	})
	metrics.NewGauge(`vm_cache_collisions_total{type="storage/date_metricID"}`, func() float64 {
		return float64(m().DateMetricIDCacheCollisions)
	})
}
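The block above registers every cache metric through callback-based gauges from github.com/VictoriaMetrics/metrics: each gauge is created once and its callback is evaluated on every scrape, so no explicit Set calls are needed. As a rough standalone illustration of that pattern only (the metric name, HTTP wiring and port below are assumptions, not part of this commit):

package main

import (
	"net/http"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	cache := make(map[string][]byte) // hypothetical cache whose size is exported

	// The callback is invoked each time the metrics are scraped.
	metrics.NewGauge(`vm_cache_size_bytes{type="example"}`, func() float64 {
		return float64(len(cache))
	})

	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		metrics.WritePrometheus(w, true)
	})
	http.ListenAndServe(":8428", nil)
}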
42
deployment/docker/Makefile
Normal file
@@ -0,0 +1,42 @@
DOCKER_NAMESPACE := valyala
BUILDER_IMAGE := local/builder:go1.12.5
CERTS_IMAGE := local/certs:1.0.2

package-certs:
	(docker image ls --format '{{.Repository}}:{{.Tag}}' | grep -q '$(CERTS_IMAGE)') \
		|| docker build -t $(CERTS_IMAGE) deployment/docker/certs

package-builder:
	(docker image ls --format '{{.Repository}}:{{.Tag}}' | grep -q '$(BUILDER_IMAGE)') \
		|| docker build -t $(BUILDER_IMAGE) deployment/docker/builder

app-via-docker: package-certs package-builder
	mkdir -p gocache-for-docker
	docker run --rm \
		--user $(shell id -u):$(shell id -g) \
		--mount type=bind,src="$(shell pwd)",dst=/VictoriaMetrics \
		-w /VictoriaMetrics \
		--mount type=bind,src="$(shell pwd)/gocache-for-docker",dst=/gocache \
		--env GOCACHE=/gocache \
		$(BUILDER_IMAGE) \
		go build $(RACE) -mod=vendor -ldflags "-s -w -extldflags '-static' $(GO_BUILDINFO)" -tags 'netgo osusergo' -o bin/$(APP_NAME)-prod $(PKG_PREFIX)/app/$(APP_NAME)

package-via-docker:
	(docker image ls --format '{{.Repository}}:{{.Tag}}' | grep -q '$(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(RACE)') || (\
		$(MAKE) app-via-docker && \
		docker build -t $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(RACE) -f app/$(APP_NAME)/deployment/Dockerfile .)

publish-via-docker: package-via-docker
	docker push $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(RACE)
	docker tag $(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(RACE) $(DOCKER_NAMESPACE)/$(APP_NAME):latest
	docker push $(DOCKER_NAMESPACE)/$(APP_NAME):latest

run-via-docker: package-via-docker
	docker run -it --rm \
		--user $(shell id -u):$(shell id -g) \
		--net host \
		$(DOCKER_OPTS) \
		$(DOCKER_NAMESPACE)/$(APP_NAME):$(PKG_TAG)$(RACE) $(ARGS)

remove-docker-images:
	docker image ls --format '{{.Repository}}\t{{.ID}}' | grep $(DOCKER_NAMESPACE)/ | grep -v /builder | awk '{print $$2}' | xargs docker image rm -f
1
deployment/docker/builder/Dockerfile
Normal file
@@ -0,0 +1 @@
FROM golang:1.12.5
3
deployment/docker/certs/Dockerfile
Normal file
@@ -0,0 +1,3 @@
# See https://medium.com/on-docker/use-multi-stage-builds-to-inject-ca-certs-ad1e8f01de1b
FROM alpine:3.9 as certs
RUN apk --update add ca-certificates
2
errcheck_excludes.txt
Normal file
@@ -0,0 +1,2 @@
fmt.Fprintf
(net/http.ResponseWriter).Write
16
go.mod
Normal file
@@ -0,0 +1,16 @@
module github.com/VictoriaMetrics/VictoriaMetrics

require (
	github.com/VictoriaMetrics/fastcache v1.5.0
	github.com/VictoriaMetrics/metrics v1.4.0
	github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18
	github.com/golang/snappy v0.0.1
	github.com/spaolacci/murmur3 v1.1.0 // indirect
	github.com/valyala/fastjson v1.4.1
	github.com/valyala/gozstd v1.5.0
	github.com/valyala/histogram v1.0.1
	github.com/valyala/quicktemplate v1.1.1
	golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82
)

go 1.12
48
go.sum
Normal file
@@ -0,0 +1,48 @@
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/OneOfOne/xxhash v1.2.5 h1:zl/OfRA6nftbBK9qTohYBJ5xvw6C/oNKizR7cZGl3cI=
github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
github.com/VictoriaMetrics/fastcache v1.5.0 h1:z8t2QV/CDXWVJ9vy9yRtGGDoOvk9W2aXQBijbLk0KCc=
github.com/VictoriaMetrics/fastcache v1.5.0/go.mod h1:+jv9Ckb+za/P1ZRg/sulP5Ni1v49daAVERr0H3CuscE=
github.com/VictoriaMetrics/metrics v1.4.0 h1:3+XdciC4E8sywx+0PStXhtIdWxXP2bdJ06Whw0mViQE=
github.com/VictoriaMetrics/metrics v1.4.0/go.mod h1:QZAL5yLaXvhSPeib0ahluGo9VK0HXDZHovKaKlpuWvs=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18 h1:pl4eWIqvFe/Kg3zkn7NxevNzILnZYWDCG7qbA1CJik0=
github.com/cespare/xxhash/v2 v2.0.1-0.20190104013014-3767db7a7e18/go.mod h1:HD5P3vAIAh+Y2GAxg0PrPN1P8WkepXGpjbUPDHJqqKM=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.0.1-0.20190317074736-539464a789e9/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
github.com/valyala/fastjson v1.4.1 h1:hrltpHpIpkaxll8QltMU8c3QZ5+qIiCL8yKqPFJI/yE=
github.com/valyala/fastjson v1.4.1/go.mod h1:nV6MsjxL2IMJQUoHDIrjEI7oLyeqK6aBD7EFWPsvP8o=
github.com/valyala/fastrand v1.0.0 h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI=
github.com/valyala/fastrand v1.0.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
github.com/valyala/gozstd v1.5.0 h1:OI7Z2e+GkvmmdRFiJeyuByhT/WMTLuRLa43Z2Tjzenw=
github.com/valyala/gozstd v1.5.0/go.mod h1:oYOS+oJovjw9ewtrwEYb9+ybolEXd6pHyLMuAWN5zts=
github.com/valyala/histogram v1.0.1 h1:FzA7n2Tz/wKRMejgu3PV1vw3htAklTjjuoI6z3d4KDg=
github.com/valyala/histogram v1.0.1/go.mod h1:lQy0xA4wUz2+IUnf97SivorsJIp8FxsnRd6x25q7Mto=
github.com/valyala/quicktemplate v1.1.1 h1:C58y/wN0FMTi2PR0n3onltemfFabany53j7M6SDDB8k=
github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82 h1:vsphBvatvfbhlb4PO1BYSr9dzugGxJ/SQHoNufZJq1w=
golang.org/x/sys v0.0.0-20190502175342-a43fa875dd82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
32
lib/buildinfo/version.go
Normal file
@@ -0,0 +1,32 @@
package buildinfo

import (
	"flag"
	"fmt"
	"os"
)

var version = flag.Bool("version", false, "Show VictoriaMetrics version")

// Version must be set via -ldflags '-X'
var Version string

// Init must be called after flag.Parse call.
func Init() {
	if *version {
		printVersion()
		os.Exit(0)
	}
}

func init() {
	oldUsage := flag.Usage
	flag.Usage = func() {
		printVersion()
		oldUsage()
	}
}

func printVersion() {
	fmt.Fprintf(flag.CommandLine.Output(), "%s\n", Version)
}
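For context, a minimal sketch of how a binary would wire this package in, assuming Version is injected with -ldflags (the exact -X package path and the main package below are assumptions for illustration, not taken from this commit):

//   go build -ldflags "-X github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo.Version=v1.0.0" ./app/example

package main

import (
	"flag"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
)

func main() {
	flag.Parse()
	buildinfo.Init() // prints the injected version and exits if -version was passed
	// ... start the service ...
}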
105
lib/bytesutil/bytebuffer.go
Normal file
@@ -0,0 +1,105 @@
package bytesutil

import (
	"io"
	"sync"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/filestream"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

var (
	// Verify ByteBuffer implements the given interfaces.
	_ io.Writer = &ByteBuffer{}
	_ fs.ReadAtCloser = &ByteBuffer{}

	// Verify reader implements filestream.ReadCloser interface.
	_ filestream.ReadCloser = &reader{}
)

// ByteBuffer implements a simple byte buffer.
type ByteBuffer struct {
	// B is the underlying byte slice.
	B []byte
}

// Reset resets bb.
func (bb *ByteBuffer) Reset() {
	bb.B = bb.B[:0]
}

// Write appends p to bb.
func (bb *ByteBuffer) Write(p []byte) (int, error) {
	bb.B = append(bb.B, p...)
	return len(p), nil
}

// ReadAt reads len(p) bytes starting from the given offset.
func (bb *ByteBuffer) ReadAt(p []byte, offset int64) {
	if offset < 0 {
		logger.Panicf("BUG: cannot read at negative offset=%d", offset)
	}
	if offset > int64(len(bb.B)) {
		logger.Panicf("BUG: too big offset=%d; cannot exceed len(bb.B)=%d", offset, len(bb.B))
	}
	if n := copy(p, bb.B[offset:]); n < len(p) {
		logger.Panicf("BUG: EOF occurred after reading %d bytes out of %d bytes at offset %d", n, len(p), offset)
	}
}

// MustClose closes bb for subsequent re-use.
func (bb *ByteBuffer) MustClose() {
	// Do nothing, since certain code relies on reading from bb after the MustClose call.
}

// NewReader returns a new reader for the given bb.
func (bb *ByteBuffer) NewReader() filestream.ReadCloser {
	return &reader{
		bb: bb,
	}
}

type reader struct {
	bb *ByteBuffer

	// readOffset is the offset in bb.B for read.
	readOffset int
}

// Read reads up to len(p) bytes from bb.
func (r *reader) Read(p []byte) (int, error) {
	var err error
	n := copy(p, r.bb.B[r.readOffset:])
	if n < len(p) {
		err = io.EOF
	}
	r.readOffset += n
	return n, err
}

// MustClose closes bb for subsequent re-use.
func (r *reader) MustClose() {
	r.bb = nil
	r.readOffset = 0
}

// ByteBufferPool is a pool of ByteBuffers.
type ByteBufferPool struct {
	p sync.Pool
}

// Get obtains a ByteBuffer from bbp.
func (bbp *ByteBufferPool) Get() *ByteBuffer {
	bbv := bbp.p.Get()
	if bbv == nil {
		return &ByteBuffer{}
	}
	return bbv.(*ByteBuffer)
}

// Put puts bb into bbp.
func (bbp *ByteBufferPool) Put(bb *ByteBuffer) {
	bb.Reset()
	bbp.p.Put(bb)
}
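A short usage sketch for the ByteBuffer/ByteBufferPool pair above (not from the commit; the format string and printed values are illustrative only). The pool avoids per-call allocations: Get returns a ready buffer, Write/Fprintf append to bb.B, and Put resets the buffer before returning it to the pool.

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)

var bbPool bytesutil.ByteBufferPool

func main() {
	bb := bbPool.Get()
	fmt.Fprintf(bb, "row %d: %s\n", 1, "foo") // ByteBuffer implements io.Writer
	fmt.Print(string(bb.B))                   // the accumulated bytes live in bb.B
	bbPool.Put(bb)                            // Put calls Reset, so the buffer comes back empty on the next Get
}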
217
lib/bytesutil/bytebuffer_test.go
Normal file
@@ -0,0 +1,217 @@
package bytesutil

import (
	"fmt"
	"io"
	"testing"
	"time"
)

func TestByteBuffer(t *testing.T) {
	var bb ByteBuffer

	n, err := bb.Write(nil)
	if err != nil {
		t.Fatalf("cannot write nil: %s", err)
	}
	if n != 0 {
		t.Fatalf("unexpected n when writing nil; got %d; want %d", n, 0)
	}
	if len(bb.B) != 0 {
		t.Fatalf("unexpected len(bb.B) after writing nil; got %d; want %d", len(bb.B), 0)
	}

	n, err = bb.Write([]byte{})
	if err != nil {
		t.Fatalf("cannot write empty slice: %s", err)
	}
	if n != 0 {
		t.Fatalf("unexpected n when writing empty slice; got %d; want %d", n, 0)
	}
	if len(bb.B) != 0 {
		t.Fatalf("unexpected len(bb.B) after writing empty slice; got %d; want %d", len(bb.B), 0)
	}

	data1 := []byte("123")
	n, err = bb.Write(data1)
	if err != nil {
		t.Fatalf("cannot write %q: %s", data1, err)
	}
	if n != len(data1) {
		t.Fatalf("unexpected n when writing %q; got %d; want %d", data1, n, len(data1))
	}
	if string(bb.B) != string(data1) {
		t.Fatalf("unexpected bb.B; got %q; want %q", bb.B, data1)
	}

	data2 := []byte("1")
	n, err = bb.Write(data2)
	if err != nil {
		t.Fatalf("cannot write %q: %s", data2, err)
	}
	if n != len(data2) {
		t.Fatalf("unexpected n when writing %q; got %d; want %d", data2, n, len(data2))
	}
	if string(bb.B) != string(data1)+string(data2) {
		t.Fatalf("unexpected bb.B; got %q; want %q", bb.B, string(data1)+string(data2))
	}

	bb.Reset()
	if string(bb.B) != "" {
		t.Fatalf("unexpected bb.B after reset; got %q; want %q", bb.B, "")
	}
	r := bb.NewReader().(*reader)
	if r.readOffset != 0 {
		t.Fatalf("unexpected r.readOffset after reset; got %d; want %d", r.readOffset, 0)
	}
}

func TestByteBufferRead(t *testing.T) {
	var bb ByteBuffer

	n, err := fmt.Fprintf(&bb, "foo, %s, baz", "bar")
	if err != nil {
		t.Fatalf("unexpected error after fmt.Fprintf: %s", err)
	}
	if n != len(bb.B) {
		t.Fatalf("unexpected len(bb.B); got %d; want %d", len(bb.B), n)
	}
	if string(bb.B) != "foo, bar, baz" {
		t.Fatalf("unexpected bb.B; got %q; want %q", bb.B, "foo, bar, baz")
	}
	r := bb.NewReader().(*reader)
	if r.readOffset != 0 {
		t.Fatalf("unexpected r.readOffset; got %d; want %q", r.readOffset, 0)
	}

	rCopy := bb.NewReader().(*reader)

	var bb1 ByteBuffer
	n1, err := io.Copy(&bb1, r)
	if err != nil {
		t.Fatalf("unexpected error after io.Copy: %s", err)
	}
	if int64(r.readOffset) != n1 {
		t.Fatalf("unexpected r.readOffset after io.Copy; got %d; want %d", r.readOffset, n1)
	}
	if n1 != int64(n) {
		t.Fatalf("unexpected number of bytes copied; got %d; want %d", n1, n)
	}
	if string(bb1.B) != "foo, bar, baz" {
		t.Fatalf("unexpected bb1.B; got %q; want %q", bb1.B, "foo, bar, baz")
	}

	// Make sure Read returns io.EOF.
	buf := make([]byte, n)
	n2, err := r.Read(buf)
	if err != io.EOF {
		t.Fatalf("unexpected error returned: got %q; want %q", err, io.EOF)
	}
	if n2 != 0 {
		t.Fatalf("unexpected n1 returned; got %d; want %d", n2, 0)
	}

	// Read data from rCopy
	if rCopy.readOffset != 0 {
		t.Fatalf("unexpected rCopy.readOffset; got %d; want %d", rCopy.readOffset, 0)
	}
	buf = make([]byte, n+13)
	n2, err = rCopy.Read(buf)
	if err != io.EOF {
		t.Fatalf("unexpected error when reading from rCopy: got %q; want %q", err, io.EOF)
	}
	if n2 != n {
		t.Fatalf("unexpected number of bytes read from rCopy; got %d; want %d", n2, n)
	}
	if string(buf[:n2]) != "foo, bar, baz" {
		t.Fatalf("unexpected data read: got %q; want %q", buf[:n2], "foo, bar, baz")
	}
	if rCopy.readOffset != n2 {
		t.Fatalf("unexpected rCopy.readOffset; got %d; want %d", rCopy.readOffset, n2)
	}
}

func TestByteBufferReadAt(t *testing.T) {
	testStr := "foobar baz"

	var bb ByteBuffer
	bb.B = append(bb.B, testStr...)

	// Try reading at negative offset
	p := make([]byte, 1)
	func() {
		defer func() {
			if r := recover(); r == nil {
				t.Fatalf("expecting non-nil error when reading at negative offset")
			}
		}()
		bb.ReadAt(p, -1)
	}()

	// Try reading past the end of buffer
	func() {
		defer func() {
			if r := recover(); r == nil {
				t.Fatalf("expecting non-nil error when reading past the end of buffer")
			}
		}()
		bb.ReadAt(p, int64(len(testStr))+1)
	}()

	// Try reading the first byte
	n := len(p)
	bb.ReadAt(p, 0)
	if string(p) != testStr[:n] {
		t.Fatalf("unexpected value read: %q; want %q", p, testStr[:n])
	}

	// Try reading the last byte
	bb.ReadAt(p, int64(len(testStr))-1)
	if string(p) != testStr[len(testStr)-1:] {
		t.Fatalf("unexpected value read: %q; want %q", p, testStr[len(testStr)-1:])
	}

	// Try reading non-full p
	func() {
		defer func() {
			if r := recover(); r == nil {
				t.Fatalf("expecting non-nil error when reading non-full p")
			}
		}()
		p := make([]byte, 10)
		bb.ReadAt(p, int64(len(testStr))-3)
	}()

	// Try reading multiple bytes from the middle
	p = make([]byte, 3)
	bb.ReadAt(p, 2)
	if string(p) != testStr[2:2+len(p)] {
		t.Fatalf("unexpected value read: %q; want %q", p, testStr[2:2+len(p)])
	}
}

func TestByteBufferReadAtParallel(t *testing.T) {
	ch := make(chan error, 10)
	var bb ByteBuffer
	bb.B = []byte("foo bar baz adsf adsf dsakjlkjlkj2l34324")
	for i := 0; i < cap(ch); i++ {
		go func() {
			p := make([]byte, 3)
			for i := 0; i < len(bb.B)-len(p); i++ {
				bb.ReadAt(p, int64(i))
			}
			ch <- nil
		}()
	}

	for i := 0; i < cap(ch); i++ {
		select {
		case err := <-ch:
			if err != nil {
				t.Fatalf("unexpected error: %s", err)
			}
		case <-time.After(3 * time.Second):
			t.Fatalf("timeout")
		}
	}
}
33
lib/bytesutil/bytesutil.go
Normal file
@@ -0,0 +1,33 @@
package bytesutil

import (
	"reflect"
	"unsafe"
)

// Resize resizes b to n bytes and returns b (which may be newly allocated).
func Resize(b []byte, n int) []byte {
	if nn := n - cap(b); nn > 0 {
		b = append(b[:cap(b)], make([]byte, nn)...)
	}
	return b[:n]
}

// ToUnsafeString converts b to string without memory allocations.
//
// The returned string is valid only while b is reachable and unmodified.
func ToUnsafeString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}

// ToUnsafeBytes converts s to a byte slice without memory allocations.
//
// The returned byte slice is valid only while s is reachable and unmodified.
func ToUnsafeBytes(s string) []byte {
	sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
	var slh reflect.SliceHeader
	slh.Data = sh.Data
	slh.Len = sh.Len
	slh.Cap = sh.Len
	return *(*[]byte)(unsafe.Pointer(&slh))
}
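A brief usage sketch for the helpers above (illustrative only, not part of the commit): Resize grows a slice to a requested length while reusing its backing array when possible, and ToUnsafeString aliases the slice memory instead of copying it, so the slice must not be mutated while the string is in use.

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
)

func main() {
	var buf []byte
	buf = bytesutil.Resize(buf, 8) // len(buf) == 8; allocation happens only if cap(buf) < 8

	copy(buf, "metrics!")
	s := bytesutil.ToUnsafeString(buf) // zero-copy: s aliases buf's memory
	fmt.Println(s, len(buf))
}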
22
lib/bytesutil/bytesutil_test.go
Normal file
@@ -0,0 +1,22 @@
package bytesutil

import (
	"testing"
)

func TestResize(t *testing.T) {
	for i := 0; i < 1000; i++ {
		b := Resize(nil, i)
		if len(b) != i {
			t.Fatalf("invalid b size; got %d; expecting %d", len(b), i)
		}
		b1 := Resize(b, i)
		if len(b1) != len(b) || (len(b) > 0 && &b1[0] != &b[0]) {
			t.Fatalf("invalid b1; got %x; expecting %x", b1, b)
		}
		b2 := Resize(b[:0], i)
		if len(b2) != len(b) || (len(b) > 0 && &b2[0] != &b[0]) {
			t.Fatalf("invalid b2; got %x; expecting %x", b2, b)
		}
	}
}
353
lib/decimal/decimal.go
Normal file
@@ -0,0 +1,353 @@
package decimal

import (
	"math"
	"sync"
)

// CalibrateScale calibrates a and b with the corresponding exponents ae, be
// and returns the resulting exponent e.
func CalibrateScale(a []int64, ae int16, b []int64, be int16) (e int16) {
	if ae == be {
		// Fast path - exponents are equal.
		return ae
	}
	if len(a) == 0 {
		return be
	}
	if len(b) == 0 {
		return ae
	}

	if ae < be {
		a, b = b, a
		ae, be = be, ae
	}

	upExp := ae - be
	downExp := int16(0)
	for _, v := range a {
		maxUpExp := maxUpExponent(v)
		if upExp-maxUpExp > downExp {
			downExp = upExp - maxUpExp
		}
	}
	upExp -= downExp
	for i, v := range a {
		adjExp := upExp
		for adjExp > 0 {
			v *= 10
			adjExp--
		}
		a[i] = v
	}
	if downExp > 0 {
		for i, v := range b {
			if v == vInfPos || v == vInfNeg {
				// Special case for these values - do not touch them.
				continue
			}
			adjExp := downExp
			for adjExp > 0 {
				v /= 10
				adjExp--
			}
			b[i] = v
		}
	}
	return be + downExp
}

// ExtendFloat64sCapacity extends dst capacity to hold additionalItems
// and returns the extended dst.
func ExtendFloat64sCapacity(dst []float64, additionalItems int) []float64 {
	dstLen := len(dst)
	if n := dstLen + additionalItems - cap(dst); n > 0 {
		dst = append(dst[:cap(dst)], make([]float64, n)...)
	}
	return dst[:dstLen]
}

// ExtendInt64sCapacity extends dst capacity to hold additionalItems
// and returns the extended dst.
func ExtendInt64sCapacity(dst []int64, additionalItems int) []int64 {
	dstLen := len(dst)
	if n := dstLen + additionalItems - cap(dst); n > 0 {
		dst = append(dst[:cap(dst)], make([]int64, n)...)
	}
	return dst[:dstLen]
}

// AppendDecimalToFloat converts each item in va to f=v*10^e, appends it
// to dst and returns the resulting dst.
func AppendDecimalToFloat(dst []float64, va []int64, e int16) []float64 {
	// Extend dst capacity in order to eliminate memory allocations below.
	dst = ExtendFloat64sCapacity(dst, len(va))

	e10 := math.Pow10(int(e))
	for _, v := range va {
		// Manually inline ToFloat here for better performance
		var f float64
		if v == vInfPos {
			f = infPos
		} else if v == vInfNeg {
			f = infNeg
		} else {
			f = float64(v) * e10
		}
		dst = append(dst, f)
	}
	return dst
}

// AppendFloatToDecimal converts each item in src to v*10^e and appends
// each v to dst returning it as va.
//
// It tries minimizing each item in dst.
func AppendFloatToDecimal(dst []int64, src []float64) (va []int64, e int16) {
	if len(src) == 0 {
		return dst, 0
	}

	// Extend dst capacity in order to eliminate memory allocations below.
	dst = ExtendInt64sCapacity(dst, len(src))

	vaev := vaeBufPool.Get()
	if vaev == nil {
		vaev = &vaeBuf{
			va: make([]int64, len(src)),
			ea: make([]int16, len(src)),
		}
	}
	vae := vaev.(*vaeBuf)
	vae.va = vae.va[:0]
	vae.ea = vae.ea[:0]

	// Determine the minimum exponent across all src items.
	v, exp := FromFloat(src[0])
	vae.va = append(vae.va, v)
	vae.ea = append(vae.ea, exp)
	minExp := exp
	for _, f := range src[1:] {
		v, exp := FromFloat(f)
		vae.va = append(vae.va, v)
		vae.ea = append(vae.ea, exp)
		if exp < minExp {
			minExp = exp
		}
	}

	// Determine whether all the src items may be upscaled to minExp.
	// If not, adjust minExp accordingly.
	downExp := int16(0)
	for i, v := range vae.va {
		exp := vae.ea[i]
		upExp := exp - minExp
		maxUpExp := maxUpExponent(v)
		if upExp-maxUpExp > downExp {
			downExp = upExp - maxUpExp
		}
	}
	minExp += downExp

	// Scale each item in src to minExp and append it to dst.
	for i, v := range vae.va {
		exp := vae.ea[i]
		adjExp := exp - minExp
		for adjExp > 0 {
			v *= 10
			adjExp--
		}
		for adjExp < 0 {
			v /= 10
			adjExp++
		}
		dst = append(dst, v)
	}

	vaeBufPool.Put(vae)

	return dst, minExp
}

type vaeBuf struct {
	va []int64
	ea []int16
}

var vaeBufPool sync.Pool

func maxUpExponent(v int64) int16 {
	if v == 0 {
		// Any exponent allowed.
		return 1024
	}
	if v < 0 {
		v = -v
	}
	if v < 0 {
		return 0
	}

	maxMultiplier := ((1 << 63) - 1) / uint64(v)
	switch {
	case maxMultiplier >= 1e19:
		return 19
	case maxMultiplier >= 1e18:
		return 18
	case maxMultiplier >= 1e17:
		return 17
	case maxMultiplier >= 1e16:
		return 16
	case maxMultiplier >= 1e15:
		return 15
	case maxMultiplier >= 1e14:
		return 14
	case maxMultiplier >= 1e13:
		return 13
	case maxMultiplier >= 1e12:
		return 12
	case maxMultiplier >= 1e11:
		return 11
	case maxMultiplier >= 1e10:
		return 10
	case maxMultiplier >= 1e9:
		return 9
	case maxMultiplier >= 1e8:
		return 8
	case maxMultiplier >= 1e7:
		return 7
	case maxMultiplier >= 1e6:
		return 6
	case maxMultiplier >= 1e5:
		return 5
	case maxMultiplier >= 1e4:
		return 4
	case maxMultiplier >= 1e3:
		return 3
	case maxMultiplier >= 1e2:
		return 2
	case maxMultiplier >= 1e1:
		return 1
	default:
		return 0
	}
}

// ToFloat returns f=v*10^e.
func ToFloat(v int64, e int16) float64 {
	if v == vInfPos {
		return infPos
	}
	if v == vInfNeg {
		return infNeg
	}
	return float64(v) * math.Pow10(int(e))
}

const (
	vInfPos = 1<<63 - 1
	vInfNeg = -1 << 63

	vMax = 1<<63 - 3
	vMin = -1<<63 + 1
)

var (
	infPos = math.Inf(1)
	infNeg = math.Inf(-1)
)

// FromFloat converts f to v*10^e.
//
// It tries minimizing v.
// For instance, for f = -1.234 it returns v = -1234, e = -3.
//
// FromFloat doesn't work properly with NaN values, so don't pass them here.
func FromFloat(f float64) (v int64, e int16) {
	if math.IsInf(f, 0) {
		// Special case for Inf
		if math.IsInf(f, 1) {
			return vInfPos, 0
		}
		return vInfNeg, 0
	}

	minus := false
	if f < 0 {
		f = -f
		minus = true
	}
	if f == 0 {
		// Special case for 0.0 and -0.0
		return 0, 0
	}
	v, e = positiveFloatToDecimal(f)
	if minus {
		v = -v
	}
	if v == 0 {
		e = 0
	} else if v > vMax {
		v = vMax
	} else if v < vMin {
		v = vMin
	}
	return v, e
}

func positiveFloatToDecimal(f float64) (int64, int16) {
	var scale int16
	v := int64(f)
	if f == float64(v) {
		// Fast path for integers.
		u := uint64(v)
		if u%10 != 0 {
			return v, 0
		}
		// Minimize v by converting trailing zeros to scale.
		u /= 10
		scale++
		for u != 0 && u%10 == 0 {
			u /= 10
			scale++
		}
		return int64(u), scale
	}

	// Slow path for floating point numbers.
	if f > 1e6 || f < 1e-6 {
		// Normalize f, so it is in the small range suitable
		// for the next loop.
		_, exp := math.Frexp(f)
		scale = int16(float64(exp) * math.Ln2 / math.Ln10)
		f *= math.Pow10(-int(scale))
	}

	// Multiply f by 100 until the fractional part becomes
	// too small comparing to integer part.
	for f < conversionPrecision {
		x, frac := math.Modf(f)
		if frac*conversionPrecision < x {
			f = x
			break
		}
		if (1-frac)*conversionPrecision < x {
			f = x + 1
			break
		}
		f *= 100
		scale -= 2
	}
	u := uint64(f)
	if u%10 != 0 {
		return int64(u), scale
	}

	// Minimize u by converting trailing zero to scale.
	u /= 10
	scale++
	return int64(u), scale
}

const conversionPrecision = 1e12
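A small worked example of the decimal representation above, grounded in the FromFloat doc comment (the surrounding main function is illustrative only): a float is split into an int64 mantissa v and a decimal exponent e such that f = v*10^e, with v minimized; ToFloat reverses the conversion.

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
)

func main() {
	v, e := decimal.FromFloat(-1.234)
	fmt.Println(v, e) // -1234 -3, as documented for FromFloat

	f := decimal.ToFloat(v, e) // reconstructs -1234 * 10^-3 = -1.234
	fmt.Println(f)
}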
317
lib/decimal/decimal_test.go
Normal file
@@ -0,0 +1,317 @@
package decimal

import (
	"math"
	"math/rand"
	"reflect"
	"testing"
)

func TestAppendDecimalToFloat(t *testing.T) {
	testAppendDecimalToFloat(t, []int64{}, 0, nil)
	testAppendDecimalToFloat(t, []int64{0}, 0, []float64{0})
	testAppendDecimalToFloat(t, []int64{0}, 10, []float64{0})
	testAppendDecimalToFloat(t, []int64{0}, -10, []float64{0})
	testAppendDecimalToFloat(t, []int64{-1, -10, 0, 100}, 2, []float64{-1e2, -1e3, 0, 1e4})
	testAppendDecimalToFloat(t, []int64{-1, -10, 0, 100}, -2, []float64{-1e-2, -1e-1, 0, 1})
}

func testAppendDecimalToFloat(t *testing.T, va []int64, e int16, fExpected []float64) {
	f := AppendDecimalToFloat(nil, va, e)
	if !reflect.DeepEqual(f, fExpected) {
		t.Fatalf("unexpected f for va=%d, e=%d: got\n%v; expecting\n%v", va, e, f, fExpected)
	}

	prefix := []float64{1, 2, 3, 4}
	f = AppendDecimalToFloat(prefix, va, e)
	if !reflect.DeepEqual(f[:len(prefix)], prefix) {
		t.Fatalf("unexpected prefix for va=%d, e=%d; got\n%v; expecting\n%v", va, e, f[:len(prefix)], prefix)
	}
	if fExpected == nil {
		fExpected = []float64{}
	}
	if !reflect.DeepEqual(f[len(prefix):], fExpected) {
		t.Fatalf("unexpected prefixed f for va=%d, e=%d: got\n%v; expecting\n%v", va, e, f[len(prefix):], fExpected)
	}
}

func TestCalibrateScale(t *testing.T) {
	testCalibrateScale(t, []int64{}, []int64{}, 0, 0, []int64{}, []int64{}, 0)
	testCalibrateScale(t, []int64{0}, []int64{0}, 0, 0, []int64{0}, []int64{0}, 0)
	testCalibrateScale(t, []int64{0}, []int64{1}, 0, 0, []int64{0}, []int64{1}, 0)
	testCalibrateScale(t, []int64{1, 0, 2}, []int64{5, -3}, 0, 1, []int64{1, 0, 2}, []int64{50, -30}, 0)
	testCalibrateScale(t, []int64{-1, 2}, []int64{5, 6, 3}, 2, -1, []int64{-1000, 2000}, []int64{5, 6, 3}, -1)
	testCalibrateScale(t, []int64{123, -456, 94}, []int64{-9, 4, -3, 45}, -3, -3, []int64{123, -456, 94}, []int64{-9, 4, -3, 45}, -3)
	testCalibrateScale(t, []int64{1e18, 1, 0}, []int64{3, 456}, 0, -2, []int64{1e18, 1, 0}, []int64{0, 4}, 0)
	testCalibrateScale(t, []int64{12345, 678}, []int64{12, -1e17, -3}, -3, 0, []int64{123, 6}, []int64{120, -1e18, -30}, -1)
	testCalibrateScale(t, []int64{1, 2}, nil, 12, 34, []int64{1, 2}, nil, 12)
	testCalibrateScale(t, nil, []int64{3, 1}, 12, 34, nil, []int64{3, 1}, 34)
	testCalibrateScale(t, []int64{923}, []int64{2, 3}, 100, -100, []int64{923e15}, []int64{0, 0}, 85)
	testCalibrateScale(t, []int64{923}, []int64{2, 3}, -100, 100, []int64{0}, []int64{2e18, 3e18}, 82)
	testCalibrateScale(t, []int64{123, 456, 789, 135}, []int64{}, -12, -10, []int64{123, 456, 789, 135}, []int64{}, -12)
	testCalibrateScale(t, []int64{123, 456, 789, 135}, []int64{}, -10, -12, []int64{123, 456, 789, 135}, []int64{}, -10)

	testCalibrateScale(t, []int64{vInfPos, 1200}, []int64{500, 100}, 0, 0, []int64{vInfPos, 1200}, []int64{500, 100}, 0)
	testCalibrateScale(t, []int64{vInfPos, 1200}, []int64{500, 100}, 0, 2, []int64{vInfPos, 1200}, []int64{500e2, 100e2}, 0)
	testCalibrateScale(t, []int64{vInfPos, 1200}, []int64{500, 100}, 0, -2, []int64{vInfPos, 1200}, []int64{5, 1}, 0)
	testCalibrateScale(t, []int64{vInfPos, 1200}, []int64{3500, 100}, 0, -3, []int64{vInfPos, 1200}, []int64{3, 0}, 0)
	testCalibrateScale(t, []int64{vInfPos, 1200}, []int64{35, 1}, 0, 40, []int64{vInfPos, 0}, []int64{35e17, 1e17}, 23)
	testCalibrateScale(t, []int64{vInfPos, 1200}, []int64{35, 1}, 40, 0, []int64{vInfPos, 1200}, []int64{0, 0}, 40)
	testCalibrateScale(t, []int64{vInfNeg, 1200}, []int64{35, 1}, 35, -5, []int64{vInfNeg, 1200}, []int64{0, 0}, 35)
	testCalibrateScale(t, []int64{vMax, vMin, 123}, []int64{100}, 0, 3, []int64{vMax, vMin, 123}, []int64{100e3}, 0)
	testCalibrateScale(t, []int64{vMax, vMin, 123}, []int64{100}, 3, 0, []int64{vMax, vMin, 123}, []int64{0}, 3)
	testCalibrateScale(t, []int64{vMax, vMin, 123}, []int64{100}, 0, 30, []int64{92233, -92233, 0}, []int64{100e16}, 14)
}

func testCalibrateScale(t *testing.T, a, b []int64, ae, be int16, aExpected, bExpected []int64, eExpected int16) {
	t.Helper()

	if a == nil {
		a = []int64{}
	}
	if b == nil {
		b = []int64{}
	}
	if aExpected == nil {
		aExpected = []int64{}
	}
	if bExpected == nil {
		bExpected = []int64{}
	}

	aCopy := append([]int64{}, a...)
	bCopy := append([]int64{}, b...)
	e := CalibrateScale(aCopy, ae, bCopy, be)
	if e != eExpected {
		t.Fatalf("unexpected e for a=%d, b=%d, ae=%d, be=%d; got %d; expecting %d", a, b, ae, be, e, eExpected)
	}
	if !reflect.DeepEqual(aCopy, aExpected) {
		t.Fatalf("unexpected a for b=%d, ae=%d, be=%d; got\n%d; expecting\n%d", b, ae, be, aCopy, aExpected)
	}
	if !reflect.DeepEqual(bCopy, bExpected) {
		t.Fatalf("unexpected b for a=%d, ae=%d, be=%d; got\n%d; expecting\n%d", a, ae, be, bCopy, bExpected)
	}

	// Try reverse args.
	aCopy = append([]int64{}, a...)
	bCopy = append([]int64{}, b...)
	e = CalibrateScale(bCopy, be, aCopy, ae)
	if e != eExpected {
		t.Fatalf("reverse: unexpected e for a=%d, b=%d, ae=%d, be=%d; got %d; expecting %d", a, b, ae, be, e, eExpected)
	}
	if !reflect.DeepEqual(aCopy, aExpected) {
		t.Fatalf("reverse: unexpected a for b=%d, ae=%d, be=%d; got\n%d; expecting\n%d", b, ae, be, aCopy, aExpected)
	}
	if !reflect.DeepEqual(bCopy, bExpected) {
		t.Fatalf("reverse: unexpected b for a=%d, ae=%d, be=%d; got\n%d; expecting\n%d", a, ae, be, bCopy, bExpected)
	}
}

func TestMaxUpExponent(t *testing.T) {
	testMaxUpExponent(t, 0, 1024)
	testMaxUpExponent(t, -1<<63, 0)
	testMaxUpExponent(t, (-1<<63)+1, 0)
	testMaxUpExponent(t, (1<<63)-1, 0)
	testMaxUpExponent(t, 1, 18)
	testMaxUpExponent(t, 12, 17)
	testMaxUpExponent(t, 123, 16)
	testMaxUpExponent(t, 1234, 15)
	testMaxUpExponent(t, 12345, 14)
	testMaxUpExponent(t, 123456, 13)
	testMaxUpExponent(t, 1234567, 12)
	testMaxUpExponent(t, 12345678, 11)
	testMaxUpExponent(t, 123456789, 10)
	testMaxUpExponent(t, 1234567890, 9)
	testMaxUpExponent(t, 12345678901, 8)
	testMaxUpExponent(t, 123456789012, 7)
	testMaxUpExponent(t, 1234567890123, 6)
	testMaxUpExponent(t, 12345678901234, 5)
	testMaxUpExponent(t, 123456789012345, 4)
	testMaxUpExponent(t, 1234567890123456, 3)
	testMaxUpExponent(t, 12345678901234567, 2)
	testMaxUpExponent(t, 123456789012345678, 1)
	testMaxUpExponent(t, 1234567890123456789, 0)
	testMaxUpExponent(t, 923456789012345678, 0)
	testMaxUpExponent(t, 92345678901234567, 1)
	testMaxUpExponent(t, 9234567890123456, 2)
	testMaxUpExponent(t, 923456789012345, 3)
	testMaxUpExponent(t, 92345678901234, 4)
	testMaxUpExponent(t, 9234567890123, 5)
	testMaxUpExponent(t, 923456789012, 6)
	testMaxUpExponent(t, 92345678901, 7)
	testMaxUpExponent(t, 9234567890, 8)
	testMaxUpExponent(t, 923456789, 9)
	testMaxUpExponent(t, 92345678, 10)
	testMaxUpExponent(t, 9234567, 11)
	testMaxUpExponent(t, 923456, 12)
	testMaxUpExponent(t, 92345, 13)
	testMaxUpExponent(t, 9234, 14)
	testMaxUpExponent(t, 923, 15)
	testMaxUpExponent(t, 92, 17)
	testMaxUpExponent(t, 9, 18)
}

func testMaxUpExponent(t *testing.T, v int64, eExpected int16) {
	t.Helper()

	e := maxUpExponent(v)
	if e != eExpected {
		t.Fatalf("unexpected e for v=%d; got %d; expecting %d", v, e, eExpected)
	}
	e = maxUpExponent(-v)
	if e != eExpected {
		t.Fatalf("unexpected e for v=%d; got %d; expecting %d", -v, e, eExpected)
	}
}

func TestAppendFloatToDecimal(t *testing.T) {
	// no-op
	testAppendFloatToDecimal(t, []float64{}, nil, 0)
	testAppendFloatToDecimal(t, []float64{0}, []int64{0}, 0)
	testAppendFloatToDecimal(t, []float64{0, 1, -1, 12345678, -123456789}, []int64{0, 1, -1, 12345678, -123456789}, 0)

	// upExp
	testAppendFloatToDecimal(t, []float64{-24, 0, 4.123, 0.3}, []int64{-24000, 0, 4123, 300}, -3)
	testAppendFloatToDecimal(t, []float64{0, 10.23456789, 1e2, 1e-3, 1e-4}, []int64{0, 1023456789, 1e10, 1e5, 1e4}, -8)

	// downExp
	testAppendFloatToDecimal(t, []float64{3e17, 7e-2, 5e-7, 45, 7e-1}, []int64{3e18, 0, 0, 450, 7}, -1)
	testAppendFloatToDecimal(t, []float64{3e18, 1, 0.1, 13}, []int64{3e18, 1, 0, 13}, 0)
}

func testAppendFloatToDecimal(t *testing.T, fa []float64, daExpected []int64, eExpected int16) {
	t.Helper()

	da, e := AppendFloatToDecimal(nil, fa)
	if e != eExpected {
		t.Fatalf("unexpected e for fa=%f; got %d; expecting %d", fa, e, eExpected)
	}
	if !reflect.DeepEqual(da, daExpected) {
		t.Fatalf("unexpected da for fa=%f; got\n%d; expecting\n%d", fa, da, daExpected)
	}

	daPrefix := []int64{1, 2, 3}
	da, e = AppendFloatToDecimal(daPrefix, fa)
	if e != eExpected {
		t.Fatalf("unexpected e for fa=%f; got %d; expecting %d", fa, e, eExpected)
	}
	if !reflect.DeepEqual(da[:len(daPrefix)], daPrefix) {
		t.Fatalf("unexpected daPrefix for fa=%f; got\n%d; expecting\n%d", fa, da[:len(daPrefix)], daPrefix)
	}
	if daExpected == nil {
		daExpected = []int64{}
	}
	if !reflect.DeepEqual(da[len(daPrefix):], daExpected) {
		t.Fatalf("unexpected da for fa=%f; got\n%d; expecting\n%d", fa, da[len(daPrefix):], daExpected)
	}
}

func TestFloatToDecimal(t *testing.T) {
	testFloatToDecimal(t, 0, 0, 0)
	testFloatToDecimal(t, 1, 1, 0)
	testFloatToDecimal(t, -1, -1, 0)
	testFloatToDecimal(t, 0.9, 9, -1)
	testFloatToDecimal(t, 0.99, 99, -2)
	testFloatToDecimal(t, 9, 9, 0)
	testFloatToDecimal(t, 99, 99, 0)
	testFloatToDecimal(t, 20, 2, 1)
	testFloatToDecimal(t, 100, 1, 2)
	testFloatToDecimal(t, 3000, 3, 3)

	testFloatToDecimal(t, 0.123, 123, -3)
	testFloatToDecimal(t, -0.123, -123, -3)
	testFloatToDecimal(t, 1.2345, 12345, -4)
	testFloatToDecimal(t, -1.2345, -12345, -4)
	testFloatToDecimal(t, 12000, 12, 3)
	testFloatToDecimal(t, -12000, -12, 3)
	testFloatToDecimal(t, 1e-30, 1, -30)
	testFloatToDecimal(t, -1e-30, -1, -30)
	testFloatToDecimal(t, 1e-260, 1, -260)
	testFloatToDecimal(t, -1e-260, -1, -260)
	testFloatToDecimal(t, 321e260, 321, 260)
	testFloatToDecimal(t, -321e260, -321, 260)
	testFloatToDecimal(t, 1234567890123, 1234567890123, 0)
	testFloatToDecimal(t, -1234567890123, -1234567890123, 0)
	testFloatToDecimal(t, 123e5, 123, 5)
	testFloatToDecimal(t, 15e18, 15, 18)

	testFloatToDecimal(t, math.Inf(1), vInfPos, 0)
	testFloatToDecimal(t, math.Inf(-1), vInfNeg, 0)
	testFloatToDecimal(t, 1<<63-1, 922337203685, 7)
	testFloatToDecimal(t, -1<<63, -922337203685, 7)
}

func testFloatToDecimal(t *testing.T, f float64, vExpected int64, eExpected int16) {
	t.Helper()

	v, e := FromFloat(f)
	if v != vExpected {
		t.Fatalf("unexpected v for f=%e; got %d; expecting %d", f, v, vExpected)
	}
	if e != eExpected {
		t.Fatalf("unexpected e for f=%e; got %d; expecting %d", f, e, eExpected)
	}
}

func TestFloatToDecimalRoundtrip(t *testing.T) {
	testFloatToDecimalRoundtrip(t, 0)
	testFloatToDecimalRoundtrip(t, 1)
	testFloatToDecimalRoundtrip(t, 0.123)
	testFloatToDecimalRoundtrip(t, 1.2345)
	testFloatToDecimalRoundtrip(t, 12000)
	testFloatToDecimalRoundtrip(t, 1e-30)
	testFloatToDecimalRoundtrip(t, 1e-260)
	testFloatToDecimalRoundtrip(t, 321e260)
	testFloatToDecimalRoundtrip(t, 1234567890123)
	testFloatToDecimalRoundtrip(t, 12.34567890125)
	testFloatToDecimalRoundtrip(t, 15e18)

	testFloatToDecimalRoundtrip(t, math.Inf(1))
	testFloatToDecimalRoundtrip(t, math.Inf(-1))
	testFloatToDecimalRoundtrip(t, 1<<63-1)
	testFloatToDecimalRoundtrip(t, -1<<63)

	for i := 0; i < 1e4; i++ {
		f := rand.NormFloat64()
		testFloatToDecimalRoundtrip(t, f)
		testFloatToDecimalRoundtrip(t, f*1e-6)
		testFloatToDecimalRoundtrip(t, f*1e6)

		testFloatToDecimalRoundtrip(t, roundFloat(f, 20))
		testFloatToDecimalRoundtrip(t, roundFloat(f, 10))
		testFloatToDecimalRoundtrip(t, roundFloat(f, 5))
		testFloatToDecimalRoundtrip(t, roundFloat(f, 0))
		testFloatToDecimalRoundtrip(t, roundFloat(f, -5))
		testFloatToDecimalRoundtrip(t, roundFloat(f, -10))
		testFloatToDecimalRoundtrip(t, roundFloat(f, -20))
	}
}

func roundFloat(f float64, exp int) float64 {
	f *= math.Pow10(-exp)
	return math.Trunc(f) * math.Pow10(exp)
}

func testFloatToDecimalRoundtrip(t *testing.T, f float64) {
	t.Helper()

	v, e := FromFloat(f)
	fNew := ToFloat(v, e)
	if !equalFloat(fNew, f) {
		t.Fatalf("unexpected fNew for v=%d, e=%d; got %g; expecting %g", v, e, fNew, f)
	}

	v, e = FromFloat(-f)
	fNew = ToFloat(v, e)
	if !equalFloat(fNew, -f) {
		t.Fatalf("unexpected fNew for v=%d, e=%d; got %g; expecting %g", v, e, fNew, -f)
	}
}

func equalFloat(f1, f2 float64) bool {
	if math.IsInf(f1, 0) {
		return math.IsInf(f1, 1) == math.IsInf(f2, 1) || math.IsInf(f1, -1) == math.IsInf(f2, -1)
	}
	eps := math.Abs(f1 - f2)
	return eps == 0 || eps*conversionPrecision < math.Abs(f1)+math.Abs(f2)
}
86
lib/decimal/decimal_timing_test.go
Normal file
@@ -0,0 +1,86 @@
package decimal

import (
	"fmt"
	"math/rand"
	"sync/atomic"
	"testing"
)

func BenchmarkAppendDecimalToFloat(b *testing.B) {
	b.ReportAllocs()
	b.SetBytes(int64(len(testVA)))
	b.RunParallel(func(pb *testing.PB) {
		var fa []float64
		for pb.Next() {
			fa = AppendDecimalToFloat(fa[:0], testVA, 0)
			atomic.AddUint64(&Sink, uint64(len(fa)))
		}
	})
}

func BenchmarkAppendFloatToDecimal(b *testing.B) {
	b.Run("RealFloat", func(b *testing.B) {
		benchmarkAppendFloatToDecimal(b, testFAReal)
	})
	b.Run("Integers", func(b *testing.B) {
		benchmarkAppendFloatToDecimal(b, testFAInteger)
	})
}

func benchmarkAppendFloatToDecimal(b *testing.B, fa []float64) {
	b.ReportAllocs()
	b.SetBytes(int64(len(fa)))
	b.RunParallel(func(pb *testing.PB) {
		var da []int64
		var e int16
		var sink uint64
		for pb.Next() {
			da, e = AppendFloatToDecimal(da[:0], fa)
			sink += uint64(len(da))
			sink += uint64(e)
		}
		atomic.AddUint64(&Sink, sink)
	})
}

var testFAReal = func() []float64 {
	fa := make([]float64, 8*1024)
	for i := 0; i < len(fa); i++ {
		fa[i] = rand.NormFloat64() * 1e6
	}
	return fa
}()

var testFAInteger = func() []float64 {
	fa := make([]float64, 8*1024)
	for i := 0; i < len(fa); i++ {
		fa[i] = float64(int(rand.NormFloat64() * 1e6))
	}
	return fa
}()

var testVA = func() []int64 {
	va, _ := AppendFloatToDecimal(nil, testFAReal)
	return va
}()

func BenchmarkFromFloat(b *testing.B) {
	for _, f := range []float64{0, 1234, 12334345, 12343.4344, 123.45678901e12, 12.3454435e30} {
		b.Run(fmt.Sprintf("%g", f), func(b *testing.B) {
			b.ReportAllocs()
			b.SetBytes(1)
			b.RunParallel(func(pb *testing.PB) {
				var sink uint64
				for pb.Next() {
					v, e := FromFloat(f)
					sink += uint64(v)
					sink += uint64(e)
				}
				atomic.AddUint64(&Sink, sink)
			})
		})
	}
}

var Sink uint64
27
lib/encoding/compress.go
Normal file
@@ -0,0 +1,27 @@
package encoding

import (
	"github.com/valyala/gozstd"
)

// CompressZSTD compresses src, appends the result to dst and returns
// the appended dst.
//
// src must be non-empty.
func CompressZSTD(dst, src []byte) []byte {
	return gozstd.CompressLevel(dst, src, 5)
}

// CompressZSTDLevel appends compressed src to dst and returns
// the appended dst.
//
// The given compressLevel is used for the compression.
func CompressZSTDLevel(dst, src []byte, compressLevel int) []byte {
	return gozstd.CompressLevel(dst, src, compressLevel)
}

// DecompressZSTD decompresses src, appends the result to dst and returns
// the appended dst.
func DecompressZSTD(dst, src []byte) ([]byte, error) {
	return gozstd.Decompress(dst, src)
}
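A minimal roundtrip sketch for the wrappers above (the payload string and main function are illustrative only): both helpers follow the append-to-dst convention used throughout the repo, so passing nil as dst allocates a fresh slice.

package main

import (
	"fmt"
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
)

func main() {
	src := []byte("some repetitive payload payload payload")

	compressed := encoding.CompressZSTD(nil, src) // default compression level 5

	plain, err := encoding.DecompressZSTD(nil, compressed)
	if err != nil {
		log.Fatalf("cannot decompress: %s", err)
	}
	fmt.Println(string(plain) == string(src)) // true
}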
48
lib/encoding/compress_test.go
Normal file
@@ -0,0 +1,48 @@
package encoding

import (
	"math/rand"
	"testing"
)

func TestCompressDecompressZSTD(t *testing.T) {
	testCompressDecompressZSTD(t, []byte("a"))
	testCompressDecompressZSTD(t, []byte("foobarbaz"))

	var b []byte
	for i := 0; i < 64*1024; i++ {
		b = append(b, byte(rand.Int31n(256)))
	}
	testCompressDecompressZSTD(t, b)
}

func testCompressDecompressZSTD(t *testing.T, b []byte) {
	bc := CompressZSTD(nil, b)
	bNew, err := DecompressZSTD(nil, bc)
	if err != nil {
		t.Fatalf("unexpected error when decompressing b=%x from bc=%x: %s", b, bc, err)
	}
	if string(bNew) != string(b) {
		t.Fatalf("invalid bNew; got\n%x; expecting\n%x", bNew, b)
	}

	prefix := []byte{1, 2, 33}
	bcNew := CompressZSTD(prefix, b)
	if string(bcNew[:len(prefix)]) != string(prefix) {
		t.Fatalf("invalid prefix for b=%x; got\n%x; expecting\n%x", b, bcNew[:len(prefix)], prefix)
	}
	if string(bcNew[len(prefix):]) != string(bc) {
		t.Fatalf("invalid prefixed bcNew for b=%x; got\n%x; expecting\n%x", b, bcNew[len(prefix):], bc)
	}

	bNew, err = DecompressZSTD(prefix, bc)
	if err != nil {
		t.Fatalf("unexpected error when decompressing b=%x from bc=%x with prefix: %s", b, bc, err)
	}
	if string(bNew[:len(prefix)]) != string(prefix) {
		t.Fatalf("invalid bNew prefix when decompressing bc=%x; got\n%x; expecting\n%x", bc, bNew[:len(prefix)], prefix)
	}
	if string(bNew[len(prefix):]) != string(b) {
		t.Fatalf("invalid prefixed bNew; got\n%x; expecting\n%x", bNew[len(prefix):], b)
	}
}
350
lib/encoding/encoding.go
Normal file
350
lib/encoding/encoding.go
Normal file
|
@ -0,0 +1,350 @@
|
|||
package encoding
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
)
|
||||
|
||||
// minCompressibleBlockSize is the minimum block size in bytes for trying compression.
|
||||
//
|
||||
// There is no sense in compressing smaller blocks.
|
||||
const minCompressibleBlockSize = 128
|
||||
|
||||
// MarshalType is the type used for the marshaling.
|
||||
type MarshalType byte
|
||||
|
||||
const (
|
||||
// MarshalTypeZSTDNearestDelta2 is used for marshaling counter
|
||||
// timeseries.
|
||||
MarshalTypeZSTDNearestDelta2 = MarshalType(1)
|
||||
|
||||
// MarshalTypeDeltaConst is used for marshaling constantly changed
|
||||
// time series with constant delta.
|
||||
MarshalTypeDeltaConst = MarshalType(2)
|
||||
|
||||
// MarshalTypeConst is used for marshaling time series containing only
|
||||
// a single constant.
|
||||
MarshalTypeConst = MarshalType(3)
|
||||
|
||||
// MarshalTypeZSTDNearestDelta is used for marshaling gauge timeseries.
|
||||
MarshalTypeZSTDNearestDelta = MarshalType(4)
|
||||
|
||||
// MarshalTypeNearestDelta2 is used instead of MarshalTypeZSTDNearestDelta2
|
||||
// if compression doesn't help.
|
||||
MarshalTypeNearestDelta2 = MarshalType(5)
|
||||
|
||||
// MarshalTypeNearestDelta is used instead of MarshalTypeZSTDNearestDelta
|
||||
// if compression doesn't help.
|
||||
MarshalTypeNearestDelta = MarshalType(6)
|
||||
)
|
||||
|
||||
// CheckMarshalType verifies whether the mt is valid.
|
||||
func CheckMarshalType(mt MarshalType) error {
|
||||
if mt < 0 || mt > 6 {
|
||||
return fmt.Errorf("MarshalType should be in range [0..6]; got %d", mt)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckPrecisionBits makes sure precisionBits is in the range [1..64].
|
||||
func CheckPrecisionBits(precisionBits uint8) error {
|
||||
if precisionBits < 1 || precisionBits > 64 {
|
||||
return fmt.Errorf("precisionBits must be in the range [1...64]; got %d", precisionBits)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalTimestamps marshals timestamps, appends the marshaled result
|
||||
// to dst and returns the dst.
|
||||
//
|
||||
// timestamps must contain non-decreasing values.
|
||||
//
|
||||
// precisionBits must be in the range [1...64], where 1 means 50% precision,
|
||||
// while 64 means 100% precision, i.e. lossless encoding.
|
||||
func MarshalTimestamps(dst []byte, timestamps []int64, precisionBits uint8) (result []byte, mt MarshalType, firstTimestamp int64) {
|
||||
return marshalInt64Array(dst, timestamps, precisionBits)
|
||||
}
|
||||
|
||||
// UnmarshalTimestamps unmarshals timestamps from src, appends them to dst
|
||||
// and returns the resulting dst.
|
||||
//
|
||||
// firstTimestamp must be the timestamp returned from MarshalTimestamps.
|
||||
func UnmarshalTimestamps(dst []int64, src []byte, mt MarshalType, firstTimestamp int64, itemsCount int) ([]int64, error) {
|
||||
dst, err := unmarshalInt64Array(dst, src, mt, firstTimestamp, itemsCount)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot unmarshal %d timestamps from len(src)=%d bytes: %s", itemsCount, len(src), err)
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
// MarshalValues marshals values, appends the marshaled result to dst
|
||||
// and returns the dst.
|
||||
//
|
||||
// precisionBits must be in the range [1...64], where 1 means 50% precision,
|
||||
// while 64 means 100% precision, i.e. lossless encoding.
|
||||
func MarshalValues(dst []byte, values []int64, precisionBits uint8) (result []byte, mt MarshalType, firstValue int64) {
|
||||
return marshalInt64Array(dst, values, precisionBits)
|
||||
}
|
||||
|
||||
// UnmarshalValues unmarshals values from src, appends them to dst and returns
|
||||
// the resulting dst.
|
||||
//
|
||||
// firstValue must be the value returned from MarshalValues.
|
||||
func UnmarshalValues(dst []int64, src []byte, mt MarshalType, firstValue int64, itemsCount int) ([]int64, error) {
|
||||
dst, err := unmarshalInt64Array(dst, src, mt, firstValue, itemsCount)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot unmarshal %d values from len(src)=%d bytes: %s", itemsCount, len(src), err)
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
|
||||
func marshalInt64Array(dst []byte, a []int64, precisionBits uint8) (result []byte, mt MarshalType, firstValue int64) {
|
||||
if len(a) == 0 {
|
||||
logger.Panicf("BUG: a must contain at least one item")
|
||||
}
|
||||
if isConst(a) {
|
||||
firstValue = a[0]
|
||||
return dst, MarshalTypeConst, firstValue
|
||||
}
|
||||
if isDeltaConst(a) {
|
||||
firstValue = a[0]
|
||||
dst = MarshalVarInt64(dst, a[1]-a[0])
|
||||
return dst, MarshalTypeDeltaConst, firstValue
|
||||
}
|
||||
|
||||
bb := bbPool.Get()
|
||||
if isGauge(a) {
|
||||
// Guage values are better compressed with delta encoding.
|
||||
mt = MarshalTypeZSTDNearestDelta
|
||||
pb := precisionBits
|
||||
if pb < 6 {
|
||||
// Increase precision bits for gauges, since they suffer more
|
||||
// from low precision bits comparing to counters.
|
||||
pb += 2
|
||||
}
|
||||
bb.B, firstValue = marshalInt64NearestDelta(bb.B[:0], a, pb)
|
||||
} else {
|
||||
// Non-gauge values, i.e. counters are better compressed with delta2 encoding.
|
||||
mt = MarshalTypeZSTDNearestDelta2
|
||||
bb.B, firstValue = marshalInt64NearestDelta2(bb.B[:0], a, precisionBits)
|
||||
}
|
||||
|
||||
// Try compressing the result.
|
||||
dstOrig := dst
|
||||
if len(bb.B) >= minCompressibleBlockSize {
|
||||
compressLevel := getCompressLevel(len(a))
|
||||
dst = CompressZSTDLevel(dst, bb.B, compressLevel)
|
||||
}
|
||||
if len(bb.B) < minCompressibleBlockSize || float64(len(dst)-len(dstOrig)) > 0.9*float64(len(bb.B)) {
|
||||
// Ineffective compression. Store plain data.
|
||||
switch mt {
|
||||
case MarshalTypeZSTDNearestDelta2:
|
||||
mt = MarshalTypeNearestDelta2
|
||||
case MarshalTypeZSTDNearestDelta:
|
||||
mt = MarshalTypeNearestDelta
|
||||
default:
|
||||
logger.Panicf("BUG: unexpected mt=%d", mt)
|
||||
}
|
||||
dst = append(dstOrig, bb.B...)
|
||||
}
|
||||
bbPool.Put(bb)
|
||||
|
||||
return dst, mt, firstValue
|
||||
}
|
||||
|
||||
func unmarshalInt64Array(dst []int64, src []byte, mt MarshalType, firstValue int64, itemsCount int) ([]int64, error) {
|
||||
// Extend dst capacity in order to eliminate memory allocations below.
|
||||
dst = decimal.ExtendInt64sCapacity(dst, itemsCount)
|
||||
|
||||
var err error
|
||||
switch mt {
|
||||
case MarshalTypeZSTDNearestDelta:
|
||||
bb := bbPool.Get()
|
||||
bb.B, err = DecompressZSTD(bb.B[:0], src)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot decompress zstd data of size %d: %s", len(src), err)
|
||||
}
|
||||
dst, err = unmarshalInt64NearestDelta(dst, bb.B, firstValue, itemsCount)
|
||||
bbPool.Put(bb)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot unmarshal nearest delta data after zstd decompression: %s", err)
|
||||
}
|
||||
return dst, nil
|
||||
case MarshalTypeZSTDNearestDelta2:
|
||||
bb := bbPool.Get()
|
||||
bb.B, err = DecompressZSTD(bb.B[:0], src)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot decompress zstd data of size %d: %s", len(src), err)
|
||||
}
|
||||
dst, err = unmarshalInt64NearestDelta2(dst, bb.B, firstValue, itemsCount)
|
||||
bbPool.Put(bb)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot unmarshal nearest delta2 data after zstd decompression: %s", err)
|
||||
}
|
||||
return dst, nil
|
||||
case MarshalTypeNearestDelta:
|
||||
dst, err = unmarshalInt64NearestDelta(dst, src, firstValue, itemsCount)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot unmarshal nearest delta data: %s", err)
|
||||
}
|
||||
return dst, nil
|
||||
case MarshalTypeNearestDelta2:
|
||||
dst, err = unmarshalInt64NearestDelta2(dst, src, firstValue, itemsCount)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot unmarshal nearest delta2 data: %s", err)
|
||||
}
|
||||
return dst, nil
|
||||
case MarshalTypeConst:
|
||||
if len(src) > 0 {
|
||||
return nil, fmt.Errorf("unexpected data left in const encoding: %d bytes", len(src))
|
||||
}
|
||||
for itemsCount > 0 {
|
||||
dst = append(dst, firstValue)
|
||||
itemsCount--
|
||||
}
|
||||
return dst, nil
|
||||
case MarshalTypeDeltaConst:
|
||||
v := firstValue
|
||||
tail, d, err := UnmarshalVarInt64(src)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("cannot unmarshal delta value for delta const: %s", err)
|
||||
}
|
||||
if len(tail) > 0 {
|
||||
return nil, fmt.Errorf("unexpected trailing data after delta const (d=%d): %d bytes", d, len(tail))
|
||||
}
|
||||
for itemsCount > 0 {
|
||||
dst = append(dst, v)
|
||||
itemsCount--
|
||||
v += d
|
||||
}
|
||||
return dst, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown MarshalType=%d", mt)
|
||||
}
|
||||
}
|
||||
|
||||
var bbPool bytesutil.ByteBufferPool
|
||||
|
||||
// EnsureNonDecreasingSequence makes sure the first item in a is vMin, the last
|
||||
// item in a is vMax and all the items in a are non-decreasing.
|
||||
//
|
||||
// If this isn't the case, a is fixed accordingly.
|
||||
func EnsureNonDecreasingSequence(a []int64, vMin, vMax int64) {
|
||||
if vMax < vMin {
|
||||
logger.Panicf("BUG: vMax cannot be smaller than vMin; got %d vs %d", vMax, vMin)
|
||||
}
|
||||
if len(a) == 0 {
|
||||
return
|
||||
}
|
||||
if a[0] != vMin {
|
||||
a[0] = vMin
|
||||
}
|
||||
vPrev := a[0]
|
||||
aa := a[1:]
|
||||
for i, v := range aa {
|
||||
if v < vPrev {
|
||||
aa[i] = vPrev
|
||||
v = vPrev
|
||||
}
|
||||
vPrev = v
|
||||
}
|
||||
i := len(a) - 1
|
||||
if a[i] != vMax {
|
||||
a[i] = vMax
|
||||
i--
|
||||
for i >= 0 && a[i] > vMax {
|
||||
a[i] = vMax
|
||||
i--
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
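// exampleEnsureNonDecreasingSequence is an illustrative sketch that is not
// part of the original file: the first item is clamped to vMin, the last to
// vMax, and any dips in between are raised to the preceding value.
func exampleEnsureNonDecreasingSequence() {
	a := []int64{0, -1, 10, 5, 6, 7}
	EnsureNonDecreasingSequence(a, 2, 8)
	_ = a // a is now {2, 2, 8, 8, 8, 8}
}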
// isConst returns true if a contains only equal values.
|
||||
func isConst(a []int64) bool {
|
||||
if len(a) == 0 {
|
||||
return false
|
||||
}
|
||||
v1 := a[0]
|
||||
for _, v := range a {
|
||||
if v != v1 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// isDeltaConst returns true if a contains a counter with a constant delta.
|
||||
func isDeltaConst(a []int64) bool {
|
||||
if len(a) < 2 {
|
||||
return false
|
||||
}
|
||||
d1 := a[1] - a[0]
|
||||
prev := a[1]
|
||||
for _, next := range a[2:] {
|
||||
if next-prev != d1 {
|
||||
return false
|
||||
}
|
||||
prev = next
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// isGauge returns true if a contains gauge values,
|
||||
// i.e. arbitrary changing values.
|
||||
//
|
||||
// It is OK if a few gauges aren't detected (i.e. detected as counters),
|
||||
// since misdetecting counters as gauges is a much worse condition.
|
||||
func isGauge(a []int64) bool {
|
||||
// Check all the items in a, since checking only a subset of items may lead
|
||||
// to incorrect gauge detection.
|
||||
|
||||
if len(a) < 2 {
|
||||
return false
|
||||
}
|
||||
|
||||
extremes := 0
|
||||
plus := a[0] <= a[1]
|
||||
v1 := a[1]
|
||||
for _, v2 := range a[2:] {
|
||||
if plus {
|
||||
if v2 < v1 {
|
||||
extremes++
|
||||
plus = false
|
||||
}
|
||||
} else {
|
||||
if v2 > v1 {
|
||||
extremes++
|
||||
plus = true
|
||||
}
|
||||
}
|
||||
v1 = v2
|
||||
}
|
||||
if extremes <= 2 {
|
||||
// Probably counter reset.
|
||||
return false
|
||||
}
|
||||
|
||||
// A few extremes may indicate counter resets.
|
||||
// Let it be a gauge if extremes exceed len(a)/32,
|
||||
// otherwise assume counter reset.
|
||||
return extremes > (len(a) >> 5)
|
||||
}
|
||||
|
||||
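// exampleIsGauge is an illustrative sketch that is not part of the original
// file: isGauge counts direction changes (extremes). A series with at most two
// extremes is treated as a counter (a single counter reset is tolerated), and
// it becomes a gauge only when extremes exceed both 2 and len(a)/32.
func exampleIsGauge() {
	_ = isGauge([]int64{1, 5, 2, 3})    // false: looks like one counter reset
	_ = isGauge([]int64{1, 5, 2, 3, 2}) // true: repeated direction changes
}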
func getCompressLevel(itemsCount int) int {
|
||||
if itemsCount <= 1<<6 {
|
||||
return 1
|
||||
}
|
||||
if itemsCount <= 1<<8 {
|
||||
return 2
|
||||
}
|
||||
if itemsCount <= 1<<10 {
|
||||
return 3
|
||||
}
|
||||
if itemsCount <= 1<<12 {
|
||||
return 4
|
||||
}
|
||||
return 5
|
||||
}
|
290
lib/encoding/encoding_test.go
Normal file
|
@@ -0,0 +1,290 @@
|
|||
package encoding
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIsConst(t *testing.T) {
|
||||
f := func(a []int64, okExpected bool) {
|
||||
t.Helper()
|
||||
ok := isConst(a)
|
||||
if ok != okExpected {
|
||||
t.Fatalf("unexpected isConst for a=%d; got %v; want %v", a, ok, okExpected)
|
||||
}
|
||||
}
|
||||
f([]int64{}, false)
|
||||
f([]int64{1}, true)
|
||||
f([]int64{1, 2}, false)
|
||||
f([]int64{1, 1}, true)
|
||||
f([]int64{1, 1, 1}, true)
|
||||
f([]int64{1, 1, 2}, false)
|
||||
}
|
||||
|
||||
func TestIsDeltaConst(t *testing.T) {
|
||||
f := func(a []int64, okExpected bool) {
|
||||
t.Helper()
|
||||
ok := isDeltaConst(a)
|
||||
if ok != okExpected {
|
||||
t.Fatalf("unexpected isDeltaConst for a=%d; got %v; want %v", a, ok, okExpected)
|
||||
}
|
||||
}
|
||||
f([]int64{}, false)
|
||||
f([]int64{1}, false)
|
||||
f([]int64{1, 2}, true)
|
||||
f([]int64{1, 2, 3}, true)
|
||||
f([]int64{3, 2, 1}, true)
|
||||
f([]int64{3, 2, 1, 0, -1, -2}, true)
|
||||
f([]int64{3, 2, 1, 0, -1, -2, 2}, false)
|
||||
f([]int64{1, 1}, true)
|
||||
f([]int64{1, 2, 1}, false)
|
||||
f([]int64{1, 2, 4}, false)
|
||||
}
|
||||
|
||||
func TestIsGauge(t *testing.T) {
|
||||
testIsGauge(t, []int64{}, false)
|
||||
testIsGauge(t, []int64{0}, false)
|
||||
testIsGauge(t, []int64{1, 2}, false)
|
||||
testIsGauge(t, []int64{0, 1, 2, 3, 4, 5}, false)
|
||||
testIsGauge(t, []int64{0, -1, -2, -3, -4}, false)
|
||||
testIsGauge(t, []int64{0, 0, 0, 0, 0, 0, 0}, false)
|
||||
testIsGauge(t, []int64{1, 1, 1, 1, 1}, false)
|
||||
testIsGauge(t, []int64{1, 1, 2, 2, 2, 2}, false)
|
||||
testIsGauge(t, []int64{1, 5, 2, 3}, false) // a single counter reset
|
||||
testIsGauge(t, []int64{1, 5, 2, 3, 2}, true)
|
||||
testIsGauge(t, []int64{-1, -5, -2, -3}, false) // a single counter reset
|
||||
testIsGauge(t, []int64{-1, -5, -2, -3, -2}, true)
|
||||
}
|
||||
|
||||
func testIsGauge(t *testing.T, a []int64, okExpected bool) {
|
||||
t.Helper()
|
||||
|
||||
ok := isGauge(a)
|
||||
if ok != okExpected {
|
||||
t.Fatalf("unexpected result for isGauge(%d); got %v; expecting %v", a, ok, okExpected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureNonDecreasingSequence(t *testing.T) {
|
||||
testEnsureNonDecreasingSequence(t, []int64{}, -1234, -34, []int64{})
|
||||
testEnsureNonDecreasingSequence(t, []int64{123}, -1234, -1234, []int64{-1234})
|
||||
testEnsureNonDecreasingSequence(t, []int64{123}, -1234, 345, []int64{345})
|
||||
testEnsureNonDecreasingSequence(t, []int64{-23, -14}, -23, -14, []int64{-23, -14})
|
||||
testEnsureNonDecreasingSequence(t, []int64{-23, -14}, -25, 0, []int64{-25, 0})
|
||||
testEnsureNonDecreasingSequence(t, []int64{0, -1, 10, 5, 6, 7}, 2, 8, []int64{2, 2, 8, 8, 8, 8})
|
||||
testEnsureNonDecreasingSequence(t, []int64{0, -1, 10, 5, 6, 7}, -2, 8, []int64{-2, -1, 8, 8, 8, 8})
|
||||
testEnsureNonDecreasingSequence(t, []int64{0, -1, 10, 5, 6, 7}, -2, 12, []int64{-2, -1, 10, 10, 10, 12})
|
||||
testEnsureNonDecreasingSequence(t, []int64{1, 2, 1, 3, 4, 5}, 1, 5, []int64{1, 2, 2, 3, 4, 5})
|
||||
}
|
||||
|
||||
func testEnsureNonDecreasingSequence(t *testing.T, a []int64, vMin, vMax int64, aExpected []int64) {
|
||||
t.Helper()
|
||||
|
||||
EnsureNonDecreasingSequence(a, vMin, vMax)
|
||||
if !reflect.DeepEqual(a, aExpected) {
|
||||
t.Fatalf("unexpected a; got\n%d; expecting\n%d", a, aExpected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalInt64Array(t *testing.T) {
|
||||
testMarshalUnmarshalInt64Array(t, []int64{1, 20, 234}, 4, MarshalTypeNearestDelta2)
|
||||
testMarshalUnmarshalInt64Array(t, []int64{1, 20, -2345, 678934, 342}, 4, MarshalTypeNearestDelta)
|
||||
testMarshalUnmarshalInt64Array(t, []int64{1, 20, 2345, 6789, 12342}, 4, MarshalTypeNearestDelta2)
|
||||
|
||||
// Constant encoding
|
||||
testMarshalUnmarshalInt64Array(t, []int64{1}, 4, MarshalTypeConst)
|
||||
testMarshalUnmarshalInt64Array(t, []int64{1, 2}, 4, MarshalTypeDeltaConst)
|
||||
testMarshalUnmarshalInt64Array(t, []int64{-1, 0, 1, 2, 3, 4, 5}, 4, MarshalTypeDeltaConst)
|
||||
testMarshalUnmarshalInt64Array(t, []int64{-10, -1, 8, 17, 26}, 4, MarshalTypeDeltaConst)
|
||||
testMarshalUnmarshalInt64Array(t, []int64{0, 0, 0, 0, 0, 0}, 4, MarshalTypeConst)
|
||||
testMarshalUnmarshalInt64Array(t, []int64{100, 100, 100, 100}, 4, MarshalTypeConst)
|
||||
|
||||
var va []int64
|
||||
var v int64
|
||||
|
||||
// Verify nearest delta encoding.
|
||||
va = va[:0]
|
||||
v = 0
|
||||
for i := 0; i < 8*1024; i++ {
|
||||
v += int64(rand.NormFloat64() * 1e6)
|
||||
va = append(va, v)
|
||||
}
|
||||
for precisionBits := uint8(1); precisionBits < 23; precisionBits++ {
|
||||
testMarshalUnmarshalInt64Array(t, va, precisionBits, MarshalTypeZSTDNearestDelta)
|
||||
}
|
||||
for precisionBits := uint8(23); precisionBits < 65; precisionBits++ {
|
||||
testMarshalUnmarshalInt64Array(t, va, precisionBits, MarshalTypeNearestDelta)
|
||||
}
|
||||
|
||||
// Verify nearest delta2 encoding.
|
||||
va = va[:0]
|
||||
v = 0
|
||||
for i := 0; i < 8*1024; i++ {
|
||||
v += 30e6 + int64(rand.NormFloat64()*1e6)
|
||||
va = append(va, v)
|
||||
}
|
||||
for precisionBits := uint8(1); precisionBits < 24; precisionBits++ {
|
||||
testMarshalUnmarshalInt64Array(t, va, precisionBits, MarshalTypeZSTDNearestDelta2)
|
||||
}
|
||||
for precisionBits := uint8(24); precisionBits < 65; precisionBits++ {
|
||||
testMarshalUnmarshalInt64Array(t, va, precisionBits, MarshalTypeNearestDelta2)
|
||||
}
|
||||
|
||||
// Verify nearest delta encoding.
|
||||
va = va[:0]
|
||||
v = 1000
|
||||
for i := 0; i < 6; i++ {
|
||||
v += int64(rand.NormFloat64() * 100)
|
||||
va = append(va, v)
|
||||
}
|
||||
for precisionBits := uint8(1); precisionBits < 65; precisionBits++ {
|
||||
testMarshalUnmarshalInt64Array(t, va, precisionBits, MarshalTypeNearestDelta)
|
||||
}
|
||||
|
||||
// Verify nearest delta2 encoding.
|
||||
va = va[:0]
|
||||
v = 0
|
||||
for i := 0; i < 6; i++ {
|
||||
v += 3000 + int64(rand.NormFloat64()*100)
|
||||
va = append(va, v)
|
||||
}
|
||||
for precisionBits := uint8(5); precisionBits < 65; precisionBits++ {
|
||||
testMarshalUnmarshalInt64Array(t, va, precisionBits, MarshalTypeNearestDelta2)
|
||||
}
|
||||
}
|
||||
|
||||
func testMarshalUnmarshalInt64Array(t *testing.T, va []int64, precisionBits uint8, mtExpected MarshalType) {
|
||||
t.Helper()
|
||||
|
||||
b, mt, firstValue := marshalInt64Array(nil, va, precisionBits)
|
||||
if mt != mtExpected {
|
||||
t.Fatalf("unexpected MarshalType for va=%d, precisionBits=%d: got %d; expecting %d", va, precisionBits, mt, mtExpected)
|
||||
}
|
||||
vaNew, err := unmarshalInt64Array(nil, b, mt, firstValue, len(va))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when unmarshaling va=%d, precisionBits=%d: %s", va, precisionBits, err)
|
||||
}
|
||||
if vaNew == nil && va != nil {
|
||||
vaNew = []int64{}
|
||||
}
|
||||
switch mt {
|
||||
case MarshalTypeZSTDNearestDelta, MarshalTypeZSTDNearestDelta2,
|
||||
MarshalTypeNearestDelta, MarshalTypeNearestDelta2:
|
||||
if err = checkPrecisionBits(va, vaNew, precisionBits); err != nil {
|
||||
t.Fatalf("too low precision for vaNew: %s", err)
|
||||
}
|
||||
default:
|
||||
if !reflect.DeepEqual(va, vaNew) {
|
||||
t.Fatalf("unexpected vaNew for va=%d, precisionBits=%d; got\n%d; expecting\n%d", va, precisionBits, vaNew, va)
|
||||
}
|
||||
}
|
||||
|
||||
bPrefix := []byte{1, 2, 3}
|
||||
bNew, mtNew, firstValueNew := marshalInt64Array(bPrefix, va, precisionBits)
|
||||
if firstValueNew != firstValue {
|
||||
t.Fatalf("unexpected firstValue for prefixed va=%d, precisionBits=%d; got %d; want %d", va, precisionBits, firstValueNew, firstValue)
|
||||
}
|
||||
if string(bNew[:len(bPrefix)]) != string(bPrefix) {
|
||||
t.Fatalf("unexpected prefix for va=%d, precisionBits=%d; got\n%d; expecting\n%d", va, precisionBits, bNew[:len(bPrefix)], bPrefix)
|
||||
}
|
||||
if string(bNew[len(bPrefix):]) != string(b) {
|
||||
t.Fatalf("unexpected b for prefixed va=%d, precisionBits=%d; got\n%d; expecting\n%d", va, precisionBits, bNew[len(bPrefix):], b)
|
||||
}
|
||||
if mtNew != mt {
|
||||
t.Fatalf("unexpected mt for prefixed va=%d, precisionBits=%d; got %d; expecting %d", va, precisionBits, mtNew, mt)
|
||||
}
|
||||
|
||||
vaPrefix := []int64{4, 5, 6, 8}
|
||||
vaNew, err = unmarshalInt64Array(vaPrefix, b, mt, firstValue, len(va))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when unmarshaling prefixed va=%d, precisionBits=%d: %s", va, precisionBits, err)
|
||||
}
|
||||
if !reflect.DeepEqual(vaNew[:len(vaPrefix)], vaPrefix) {
|
||||
t.Fatalf("unexpected prefix for va=%d, precisionBits=%d; got\n%d; expecting\n%d", va, precisionBits, vaNew[:len(vaPrefix)], vaPrefix)
|
||||
}
|
||||
if va == nil {
|
||||
va = []int64{}
|
||||
}
|
||||
switch mt {
|
||||
case MarshalTypeZSTDNearestDelta, MarshalTypeZSTDNearestDelta2,
|
||||
MarshalTypeNearestDelta, MarshalTypeNearestDelta2:
|
||||
if err = checkPrecisionBits(vaNew[len(vaPrefix):], va, precisionBits); err != nil {
|
||||
t.Fatalf("too low precision for prefixed vaNew: %s", err)
|
||||
}
|
||||
default:
|
||||
if !reflect.DeepEqual(vaNew[len(vaPrefix):], va) {
|
||||
t.Fatalf("unexpected prefixed vaNew for va=%d, precisionBits=%d; got\n%d; expecting\n%d", va, precisionBits, vaNew[len(vaPrefix):], va)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalTimestamps(t *testing.T) {
|
||||
const precisionBits = 3
|
||||
|
||||
var timestamps []int64
|
||||
v := int64(0)
|
||||
for i := 0; i < 8*1024; i++ {
|
||||
v += 30e3 * int64(rand.NormFloat64()*5e2)
|
||||
timestamps = append(timestamps, v)
|
||||
}
|
||||
result, mt, firstTimestamp := MarshalTimestamps(nil, timestamps, precisionBits)
|
||||
timestamps2, err := UnmarshalTimestamps(nil, result, mt, firstTimestamp, len(timestamps))
|
||||
if err != nil {
|
||||
t.Fatalf("cannot unmarshal timestamps: %s", err)
|
||||
}
|
||||
if err := checkPrecisionBits(timestamps, timestamps2, precisionBits); err != nil {
|
||||
t.Fatalf("too low precision for timestamps: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalValues(t *testing.T) {
|
||||
const precisionBits = 3
|
||||
|
||||
var values []int64
|
||||
v := int64(0)
|
||||
for i := 0; i < 8*1024; i++ {
|
||||
v += int64(rand.NormFloat64() * 1e2)
|
||||
values = append(values, v)
|
||||
}
|
||||
result, mt, firstValue := MarshalValues(nil, values, precisionBits)
|
||||
values2, err := UnmarshalValues(nil, result, mt, firstValue, len(values))
|
||||
if err != nil {
|
||||
t.Fatalf("cannot unmarshal values: %s", err)
|
||||
}
|
||||
if err := checkPrecisionBits(values, values2, precisionBits); err != nil {
|
||||
t.Fatalf("too low precision for values: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalInt64ArraySize(t *testing.T) {
|
||||
var va []int64
|
||||
v := int64(rand.Float64() * 1e9)
|
||||
for i := 0; i < 8*1024; i++ {
|
||||
va = append(va, v)
|
||||
v += 30e3 + int64(rand.NormFloat64()*1e3)
|
||||
}
|
||||
|
||||
testMarshalInt64ArraySize(t, va, 1, 500, 1300)
|
||||
testMarshalInt64ArraySize(t, va, 2, 600, 1400)
|
||||
testMarshalInt64ArraySize(t, va, 3, 900, 1800)
|
||||
testMarshalInt64ArraySize(t, va, 4, 1300, 2100)
|
||||
testMarshalInt64ArraySize(t, va, 5, 2000, 3200)
|
||||
testMarshalInt64ArraySize(t, va, 6, 3000, 4800)
|
||||
testMarshalInt64ArraySize(t, va, 7, 4000, 6400)
|
||||
testMarshalInt64ArraySize(t, va, 8, 6000, 8000)
|
||||
testMarshalInt64ArraySize(t, va, 9, 7000, 8800)
|
||||
testMarshalInt64ArraySize(t, va, 10, 8000, 10000)
|
||||
}
|
||||
|
||||
func testMarshalInt64ArraySize(t *testing.T, va []int64, precisionBits uint8, minSizeExpected, maxSizeExpected int) {
|
||||
t.Helper()
|
||||
|
||||
b, _, _ := marshalInt64Array(nil, va, precisionBits)
|
||||
if len(b) > maxSizeExpected {
|
||||
t.Fatalf("too big size for marshaled %d items with precisionBits %d: got %d; expecting %d", len(va), precisionBits, len(b), maxSizeExpected)
|
||||
}
|
||||
if len(b) < minSizeExpected {
|
||||
t.Fatalf("too small size for marshaled %d items with precisionBits %d: got %d; epxecting %d", len(va), precisionBits, len(b), minSizeExpected)
|
||||
}
|
||||
}
|
240
lib/encoding/encoding_timing_test.go
Normal file
|
@@ -0,0 +1,240 @@
|
|||
package encoding
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func BenchmarkMarshalGaugeArray(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(benchGaugeArray)))
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var dst []byte
|
||||
var mt MarshalType
|
||||
for pb.Next() {
|
||||
dst, mt, _ = marshalInt64Array(dst[:0], benchGaugeArray, 4)
|
||||
if mt != MarshalTypeZSTDNearestDelta {
|
||||
panic(fmt.Errorf("unexpected marshal type; got %d; expecting %d", mt, MarshalTypeZSTDNearestDelta))
|
||||
}
|
||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var Sink uint64
|
||||
|
||||
func BenchmarkUnmarshalGaugeArray(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(benchGaugeArray)))
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var dst []int64
|
||||
var err error
|
||||
for pb.Next() {
|
||||
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledGaugeArray, MarshalTypeZSTDNearestDelta, 0, len(benchGaugeArray))
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("cannot unmarshal gauge array: %s", err))
|
||||
}
|
||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var benchGaugeArray = func() []int64 {
|
||||
a := make([]int64, 8*1024)
|
||||
v := int64(0)
|
||||
for i := 0; i < len(a); i++ {
|
||||
v += int64(rand.NormFloat64() * 100)
|
||||
a[i] = v
|
||||
}
|
||||
return a
|
||||
}()
|
||||
|
||||
var benchMarshaledGaugeArray = func() []byte {
|
||||
b, _, _ := marshalInt64Array(nil, benchGaugeArray, 4)
|
||||
return b
|
||||
}()
|
||||
|
||||
func BenchmarkMarshalDeltaConstArray(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(benchDeltaConstArray)))
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var dst []byte
|
||||
var mt MarshalType
|
||||
for pb.Next() {
|
||||
dst, mt, _ = marshalInt64Array(dst[:0], benchDeltaConstArray, 4)
|
||||
if mt != MarshalTypeDeltaConst {
|
||||
panic(fmt.Errorf("unexpected marshal type; got %d; expecting %d", mt, MarshalTypeDeltaConst))
|
||||
}
|
||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalDeltaConstArray(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(benchDeltaConstArray)))
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var dst []int64
|
||||
var err error
|
||||
for pb.Next() {
|
||||
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledDeltaConstArray, MarshalTypeDeltaConst, 0, len(benchDeltaConstArray))
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("cannot unmarshal delta const array: %s", err))
|
||||
}
|
||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var benchDeltaConstArray = func() []int64 {
|
||||
a := make([]int64, 8*1024)
|
||||
v := int64(0)
|
||||
for i := 0; i < len(a); i++ {
|
||||
v += 12345
|
||||
a[i] = v
|
||||
}
|
||||
return a
|
||||
}()
|
||||
|
||||
var benchMarshaledDeltaConstArray = func() []byte {
|
||||
b, _, _ := marshalInt64Array(nil, benchDeltaConstArray, 4)
|
||||
return b
|
||||
}()
|
||||
|
||||
func BenchmarkMarshalConstArray(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(benchConstArray)))
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var dst []byte
|
||||
var mt MarshalType
|
||||
for pb.Next() {
|
||||
dst, mt, _ = marshalInt64Array(dst[:0], benchConstArray, 4)
|
||||
if mt != MarshalTypeConst {
|
||||
panic(fmt.Errorf("unexpected marshal type; got %d; expecting %d", mt, MarshalTypeConst))
|
||||
}
|
||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalConstArray(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(benchConstArray)))
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var dst []int64
|
||||
var err error
|
||||
for pb.Next() {
|
||||
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledConstArray, MarshalTypeConst, 0, len(benchConstArray))
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("cannot unmarshal const array: %s", err))
|
||||
}
|
||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var benchConstArray = func() []int64 {
|
||||
a := make([]int64, 8*1024)
|
||||
for i := 0; i < len(a); i++ {
|
||||
a[i] = 1234567890
|
||||
}
|
||||
return a
|
||||
}()
|
||||
|
||||
var benchMarshaledConstArray = func() []byte {
|
||||
b, _, _ := marshalInt64Array(nil, benchConstArray, 4)
|
||||
return b
|
||||
}()
|
||||
|
||||
func BenchmarkMarshalZeroConstArray(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(benchZeroConstArray)))
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var dst []byte
|
||||
var mt MarshalType
|
||||
for pb.Next() {
|
||||
dst, mt, _ = marshalInt64Array(dst[:0], benchZeroConstArray, 4)
|
||||
if mt != MarshalTypeConst {
|
||||
panic(fmt.Errorf("unexpected marshal type; got %d; expecting %d", mt, MarshalTypeConst))
|
||||
}
|
||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalZeroConstArray(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(benchZeroConstArray)))
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var dst []int64
|
||||
var err error
|
||||
for pb.Next() {
|
||||
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledZeroConstArray, MarshalTypeConst, 0, len(benchZeroConstArray))
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("cannot unmarshal zero const array: %s", err))
|
||||
}
|
||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var benchZeroConstArray = make([]int64, 8*1024)
|
||||
|
||||
var benchMarshaledZeroConstArray = func() []byte {
|
||||
b, _, _ := marshalInt64Array(nil, benchZeroConstArray, 4)
|
||||
return b
|
||||
}()
|
||||
|
||||
func BenchmarkMarshalInt64Array(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(benchInt64Array)))
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var dst []byte
|
||||
var mt MarshalType
|
||||
for pb.Next() {
|
||||
dst, mt, _ = marshalInt64Array(dst[:0], benchInt64Array, 4)
|
||||
if mt != benchMarshalType {
|
||||
panic(fmt.Errorf("unexpected marshal type; got %d; expecting %d", mt, benchMarshalType))
|
||||
}
|
||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BenchmarkUnmarshalInt64Array(b *testing.B) {
|
||||
b.ReportAllocs()
|
||||
b.SetBytes(int64(len(benchInt64Array)))
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
var dst []int64
|
||||
var err error
|
||||
for pb.Next() {
|
||||
dst, err = unmarshalInt64Array(dst[:0], benchMarshaledInt64Array, benchMarshalType, 0, len(benchInt64Array))
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("cannot unmarshal int64 array: %s", err))
|
||||
}
|
||||
atomic.AddUint64(&Sink, uint64(len(dst)))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
var benchMarshaledInt64Array = func() []byte {
|
||||
b, _, _ := marshalInt64Array(nil, benchInt64Array, 4)
|
||||
return b
|
||||
}()
|
||||
|
||||
var benchMarshalType = func() MarshalType {
|
||||
_, mt, _ := marshalInt64Array(nil, benchInt64Array, 4)
|
||||
return mt
|
||||
}()
|
||||
|
||||
var benchInt64Array = func() []int64 {
|
||||
var a []int64
|
||||
var v int64
|
||||
for i := 0; i < 8*1024; i++ {
|
||||
v += 30e3 + int64(rand.NormFloat64()*1e3)
|
||||
a = append(a, v)
|
||||
}
|
||||
return a
|
||||
}()
|
296
lib/encoding/int.go
Normal file
|
@@ -0,0 +1,296 @@
|
|||
package encoding
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// MarshalUint16 appends marshaled u to dst and returns the result.
|
||||
func MarshalUint16(dst []byte, u uint16) []byte {
|
||||
return append(dst, byte(u>>8), byte(u))
|
||||
}
|
||||
|
||||
// UnmarshalUint16 returns unmarshaled uint16 from src.
|
||||
func UnmarshalUint16(src []byte) uint16 {
|
||||
_ = src[1]
|
||||
return uint16(src[0])<<8 | uint16(src[1])
|
||||
}
|
||||
|
||||
// MarshalUint32 appends marshaled u to dst and returns the result.
|
||||
func MarshalUint32(dst []byte, u uint32) []byte {
|
||||
return append(dst, byte(u>>24), byte(u>>16), byte(u>>8), byte(u))
|
||||
}
|
||||
|
||||
// UnmarshalUint32 returns unmarshaled uint32 from src.
|
||||
func UnmarshalUint32(src []byte) uint32 {
|
||||
_ = src[3]
|
||||
return uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
|
||||
}
|
||||
|
||||
// MarshalUint64 appends marshaled u to dst and returns the result.
|
||||
func MarshalUint64(dst []byte, u uint64) []byte {
|
||||
return append(dst, byte(u>>56), byte(u>>48), byte(u>>40), byte(u>>32), byte(u>>24), byte(u>>16), byte(u>>8), byte(u))
|
||||
}
|
||||
|
||||
// UnmarshalUint64 returns unmarshaled uint64 from src.
|
||||
func UnmarshalUint64(src []byte) uint64 {
|
||||
_ = src[7]
|
||||
return uint64(src[0])<<56 | uint64(src[1])<<48 | uint64(src[2])<<40 | uint64(src[3])<<32 | uint64(src[4])<<24 | uint64(src[5])<<16 | uint64(src[6])<<8 | uint64(src[7])
|
||||
}
|
||||
|
||||
// MarshalInt16 appends marshaled v to dst and returns the result.
|
||||
func MarshalInt16(dst []byte, v int16) []byte {
|
||||
// Such encoding improves compression for negative v.
|
||||
v = (v << 1) ^ (v >> 15) // zig-zag encoding without branching.
|
||||
u := uint16(v)
|
||||
return append(dst, byte(u>>8), byte(u))
|
||||
}
|
||||
|
||||
// UnmarshalInt16 returns unmarshaled int16 from src.
|
||||
func UnmarshalInt16(src []byte) int16 {
|
||||
_ = src[1]
|
||||
u := uint16(src[0])<<8 | uint16(src[1])
|
||||
v := int16(u>>1) ^ (int16(u<<15) >> 15) // zig-zag decoding without branching.
|
||||
return v
|
||||
}
|
||||
|
||||
// MarshalInt64 appends marshaled v to dst and returns the result.
|
||||
func MarshalInt64(dst []byte, v int64) []byte {
|
||||
// Such encoding improves compression for negative v.
|
||||
v = (v << 1) ^ (v >> 63) // zig-zag encoding without branching.
|
||||
u := uint64(v)
|
||||
return append(dst, byte(u>>56), byte(u>>48), byte(u>>40), byte(u>>32), byte(u>>24), byte(u>>16), byte(u>>8), byte(u))
|
||||
}
|
||||
|
||||
// UnmarshalInt64 returns unmarshaled int64 from src.
|
||||
func UnmarshalInt64(src []byte) int64 {
|
||||
_ = src[7]
|
||||
u := uint64(src[0])<<56 | uint64(src[1])<<48 | uint64(src[2])<<40 | uint64(src[3])<<32 | uint64(src[4])<<24 | uint64(src[5])<<16 | uint64(src[6])<<8 | uint64(src[7])
|
||||
v := int64(u>>1) ^ (int64(u<<63) >> 63) // zig-zag decoding without branching.
|
||||
return v
|
||||
}
|
||||
|
||||
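// exampleZigZag is an illustrative sketch that is not part of the original
// file: zig-zag encoding maps values with small magnitude on either side of
// zero to small unsigned values (0->0, -1->1, 1->2, -2->3, ...), producing
// long runs of zero bytes in the fixed 8-byte form and short varints, both of
// which compress well.
func exampleZigZag() {
	b := MarshalInt64(nil, -3) // zig-zag(-3) = 5, so b = 00 00 00 00 00 00 00 05
	v := UnmarshalInt64(b)     // v == -3
	_ = v
}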
// MarshalVarInt64 appends marshaled v to dst and returns the result.
|
||||
func MarshalVarInt64(dst []byte, v int64) []byte {
|
||||
var tmp [1]int64
|
||||
tmp[0] = v
|
||||
return MarshalVarInt64s(dst, tmp[:])
|
||||
}
|
||||
|
||||
// MarshalVarInt64s appends marshaled vs to dst and returns the result.
|
||||
func MarshalVarInt64s(dst []byte, vs []int64) []byte {
|
||||
for _, v := range vs {
|
||||
if v < 0x40 && v > -0x40 {
|
||||
// Fast path
|
||||
c := int8(v)
|
||||
v := (c << 1) ^ (c >> 7) // zig-zag encoding without branching.
|
||||
dst = append(dst, byte(v))
|
||||
continue
|
||||
}
|
||||
|
||||
v = (v << 1) ^ (v >> 63) // zig-zag encoding without branching.
|
||||
u := uint64(v)
|
||||
for u > 0x7f {
|
||||
dst = append(dst, 0x80|byte(u))
|
||||
u >>= 7
|
||||
}
|
||||
dst = append(dst, byte(u))
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// UnmarshalVarInt64 returns the unmarshaled int64 from src along with
|
||||
// the remaining tail from src.
|
||||
func UnmarshalVarInt64(src []byte) ([]byte, int64, error) {
|
||||
var tmp [1]int64
|
||||
tail, err := UnmarshalVarInt64s(tmp[:], src)
|
||||
return tail, tmp[0], err
|
||||
}
|
||||
|
||||
// UnmarshalVarInt64s unmarshals len(dst) int64 values from src to dst
|
||||
// and returns the remaining tail from src.
|
||||
func UnmarshalVarInt64s(dst []int64, src []byte) ([]byte, error) {
|
||||
idx := uint(0)
|
||||
for i := range dst {
|
||||
if idx >= uint(len(src)) {
|
||||
return nil, fmt.Errorf("cannot unmarshal varint from empty data")
|
||||
}
|
||||
c := src[idx]
|
||||
idx++
|
||||
if c < 0x80 {
|
||||
// Fast path
|
||||
v := int8(c>>1) ^ (int8(c<<7) >> 7) // zig-zag decoding without branching.
|
||||
dst[i] = int64(v)
|
||||
continue
|
||||
}
|
||||
|
||||
// Slow path
|
||||
u := uint64(c & 0x7f)
|
||||
startIdx := idx - 1
|
||||
shift := uint8(0)
|
||||
for c >= 0x80 {
|
||||
if idx >= uint(len(src)) {
|
||||
return nil, fmt.Errorf("unexpected end of encoded varint at byte %d; src=%x", idx-startIdx, src[startIdx:])
|
||||
}
|
||||
if idx-startIdx > 9 {
|
||||
return src[idx:], fmt.Errorf("too long encoded varint; the maximum allowed length is 10 bytes; got %d bytes; src=%x",
|
||||
(idx-startIdx)+1, src[startIdx:])
|
||||
}
|
||||
c = src[idx]
|
||||
idx++
|
||||
shift += 7
|
||||
u |= uint64(c&0x7f) << shift
|
||||
}
|
||||
v := int64(u>>1) ^ (int64(u<<63) >> 63) // zig-zag decoding without branching.
|
||||
dst[i] = v
|
||||
}
|
||||
return src[idx:], nil
|
||||
}
|
||||
|
||||
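// exampleVarInt is an illustrative sketch that is not part of the original
// file: values in the range (-0x40, 0x40) take the single-byte fast path,
// while larger magnitudes spend one byte per 7 bits of the zig-zag encoded
// value.
func exampleVarInt() {
	b := MarshalVarInt64(nil, -100) // zig-zag(-100) = 199, encoded as 2 bytes
	tail, v, err := UnmarshalVarInt64(b)
	if err != nil || len(tail) != 0 || v != -100 {
		panic("unexpected varint round-trip result")
	}
}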
// MarshalVarUint64 appends marshaled u to dst and returns the result.
|
||||
func MarshalVarUint64(dst []byte, u uint64) []byte {
|
||||
var tmp [1]uint64
|
||||
tmp[0] = u
|
||||
return MarshalVarUint64s(dst, tmp[:])
|
||||
}
|
||||
|
||||
// MarshalVarUint64s appends marshaled us to dst and returns the result.
|
||||
func MarshalVarUint64s(dst []byte, us []uint64) []byte {
|
||||
for _, u := range us {
|
||||
if u < 0x80 {
|
||||
// Fast path
|
||||
dst = append(dst, byte(u))
|
||||
continue
|
||||
}
|
||||
for u > 0x7f {
|
||||
dst = append(dst, 0x80|byte(u))
|
||||
u >>= 7
|
||||
}
|
||||
dst = append(dst, byte(u))
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
// UnmarshalVarUint64 returns the unmarshaled uint64 from src along with
|
||||
// the remaining tail from src.
|
||||
func UnmarshalVarUint64(src []byte) ([]byte, uint64, error) {
|
||||
var tmp [1]uint64
|
||||
tail, err := UnmarshalVarUint64s(tmp[:], src)
|
||||
return tail, tmp[0], err
|
||||
}
|
||||
|
||||
// UnmarshalVarUint64s unmarshals len(dst) uint64 values from src to dst
|
||||
// and returns the remaining tail from src.
|
||||
func UnmarshalVarUint64s(dst []uint64, src []byte) ([]byte, error) {
|
||||
idx := uint(0)
|
||||
for i := range dst {
|
||||
if idx >= uint(len(src)) {
|
||||
return nil, fmt.Errorf("cannot unmarshal varuint from empty data")
|
||||
}
|
||||
c := src[idx]
|
||||
idx++
|
||||
if c < 0x80 {
|
||||
// Fast path
|
||||
dst[i] = uint64(c)
|
||||
continue
|
||||
}
|
||||
|
||||
// Slow path
|
||||
u := uint64(c & 0x7f)
|
||||
startIdx := idx - 1
|
||||
shift := uint8(0)
|
||||
for c >= 0x80 {
|
||||
if idx >= uint(len(src)) {
|
||||
return nil, fmt.Errorf("unexpected end of encoded varint at byte %d; src=%x", idx-startIdx, src[startIdx:])
|
||||
}
|
||||
if idx-startIdx > 9 {
|
||||
return src[idx:], fmt.Errorf("too long encoded varint; the maximum allowed length is 10 bytes; got %d bytes; src=%x",
|
||||
(idx-startIdx)+1, src[startIdx:])
|
||||
}
|
||||
c = src[idx]
|
||||
idx++
|
||||
shift += 7
|
||||
u |= uint64(c&0x7f) << shift
|
||||
}
|
||||
dst[i] = u
|
||||
}
|
||||
return src[idx:], nil
|
||||
}
|
||||
|
||||
// MarshalBytes appends marshaled b to dst and returns the result.
|
||||
func MarshalBytes(dst, b []byte) []byte {
|
||||
dst = MarshalVarUint64(dst, uint64(len(b)))
|
||||
dst = append(dst, b...)
|
||||
return dst
|
||||
}
|
||||
|
||||
// UnmarshalBytes returns unmarshaled bytes from src.
|
||||
func UnmarshalBytes(src []byte) ([]byte, []byte, error) {
|
||||
tail, n, err := UnmarshalVarUint64(src)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("cannot unmarshal string size: %d", err)
|
||||
}
|
||||
src = tail
|
||||
if uint64(len(src)) < n {
|
||||
return nil, nil, fmt.Errorf("src is too short for reading string with size %d; len(src)=%d", n, len(src))
|
||||
}
|
||||
return src[n:], src[:n], nil
|
||||
}
|
||||
|
||||
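// exampleMarshalBytes is an illustrative sketch that is not part of the
// original file: MarshalBytes writes a varuint length prefix followed by the
// raw bytes, and UnmarshalBytes reads them back, returning the remaining tail.
func exampleMarshalBytes() {
	b := MarshalBytes(nil, []byte("abc"))
	tail, data, err := UnmarshalBytes(b)
	if err != nil || len(tail) != 0 || string(data) != "abc" {
		panic("unexpected MarshalBytes round-trip result")
	}
}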
// GetInt64s returns an int64 slice with the given size.
|
||||
// The slice contents aren't initialized - they may contain garbage.
|
||||
func GetInt64s(size int) *Int64s {
|
||||
v := int64sPool.Get()
|
||||
if v == nil {
|
||||
return &Int64s{
|
||||
A: make([]int64, size),
|
||||
}
|
||||
}
|
||||
is := v.(*Int64s)
|
||||
if n := size - cap(is.A); n > 0 {
|
||||
is.A = append(is.A[:cap(is.A)], make([]int64, n)...)
|
||||
}
|
||||
is.A = is.A[:size]
|
||||
return is
|
||||
}
|
||||
|
||||
// PutInt64s returns is to the pool.
|
||||
func PutInt64s(is *Int64s) {
|
||||
int64sPool.Put(is)
|
||||
}
|
||||
|
||||
// Int64s holds an int64 slice
|
||||
type Int64s struct {
|
||||
A []int64
|
||||
}
|
||||
|
||||
var int64sPool sync.Pool
|
||||
|
||||
// GetUint64s returns a uint64 slice with the given size.
|
||||
// The slice contents aren't initialized - they may contain garbage.
|
||||
func GetUint64s(size int) *Uint64s {
|
||||
v := uint64sPool.Get()
|
||||
if v == nil {
|
||||
return &Uint64s{
|
||||
A: make([]uint64, size),
|
||||
}
|
||||
}
|
||||
is := v.(*Uint64s)
|
||||
if n := size - cap(is.A); n > 0 {
|
||||
is.A = append(is.A[:cap(is.A)], make([]uint64, n)...)
|
||||
}
|
||||
is.A = is.A[:size]
|
||||
return is
|
||||
}
|
||||
|
||||
// PutUint64s returns is to the pool.
|
||||
func PutUint64s(is *Uint64s) {
|
||||
uint64sPool.Put(is)
|
||||
}
|
||||
|
||||
// Uint64s holds a uint64 slice
|
||||
type Uint64s struct {
|
||||
A []uint64
|
||||
}
|
||||
|
||||
var uint64sPool sync.Pool
|
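// examplePooledInt64s is an illustrative sketch that is not part of the
// original file: GetInt64s returns a possibly recycled slice of the requested
// size whose contents must be overwritten before use, and PutInt64s hands it
// back to the pool once the caller is done with it.
func examplePooledInt64s() {
	is := GetInt64s(4)
	for i := range is.A {
		is.A[i] = int64(i) // the pooled slice may contain garbage, so initialize it
	}
	PutInt64s(is)
}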
315
lib/encoding/int_test.go
Normal file
|
@@ -0,0 +1,315 @@
|
|||
package encoding
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestMarshalUnmarshalUint16(t *testing.T) {
|
||||
testMarshalUnmarshalUint16(t, 0)
|
||||
testMarshalUnmarshalUint16(t, 1)
|
||||
testMarshalUnmarshalUint16(t, (1<<16)-1)
|
||||
testMarshalUnmarshalUint16(t, (1<<15)+1)
|
||||
testMarshalUnmarshalUint16(t, (1<<15)-1)
|
||||
testMarshalUnmarshalUint16(t, 1<<15)
|
||||
|
||||
for i := uint16(0); i < 1e4; i++ {
|
||||
testMarshalUnmarshalUint16(t, i)
|
||||
}
|
||||
}
|
||||
|
||||
func testMarshalUnmarshalUint16(t *testing.T, u uint16) {
|
||||
t.Helper()
|
||||
|
||||
b := MarshalUint16(nil, u)
|
||||
if len(b) != 2 {
|
||||
t.Fatalf("unexpected b length: %d; expecting %d", len(b), 2)
|
||||
}
|
||||
uNew := UnmarshalUint16(b)
|
||||
if uNew != u {
|
||||
t.Fatalf("unexpected uNew from b=%x; got %d; expecting %d", b, uNew, u)
|
||||
}
|
||||
|
||||
prefix := []byte{1, 2, 3}
|
||||
b1 := MarshalUint16(prefix, u)
|
||||
if string(b1[:len(prefix)]) != string(prefix) {
|
||||
t.Fatalf("unexpected prefix for u=%d; got\n%x; expecting\n%x", u, b1[:len(prefix)], prefix)
|
||||
}
|
||||
if string(b1[len(prefix):]) != string(b) {
|
||||
t.Fatalf("unexpected b for u=%d; got\n%x; expecting\n%x", u, b1[len(prefix):], b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalUint32(t *testing.T) {
|
||||
testMarshalUnmarshalUint32(t, 0)
|
||||
testMarshalUnmarshalUint32(t, 1)
|
||||
testMarshalUnmarshalUint32(t, (1<<32)-1)
|
||||
testMarshalUnmarshalUint32(t, (1<<31)+1)
|
||||
testMarshalUnmarshalUint32(t, (1<<31)-1)
|
||||
testMarshalUnmarshalUint32(t, 1<<31)
|
||||
|
||||
for i := uint32(0); i < 1e4; i++ {
|
||||
testMarshalUnmarshalUint32(t, i)
|
||||
}
|
||||
}
|
||||
|
||||
func testMarshalUnmarshalUint32(t *testing.T, u uint32) {
|
||||
t.Helper()
|
||||
|
||||
b := MarshalUint32(nil, u)
|
||||
if len(b) != 4 {
|
||||
t.Fatalf("unexpected b length: %d; expecting %d", len(b), 4)
|
||||
}
|
||||
uNew := UnmarshalUint32(b)
|
||||
if uNew != u {
|
||||
t.Fatalf("unexpected uNew from b=%x; got %d; expecting %d", b, uNew, u)
|
||||
}
|
||||
|
||||
prefix := []byte{1, 2, 3}
|
||||
b1 := MarshalUint32(prefix, u)
|
||||
if string(b1[:len(prefix)]) != string(prefix) {
|
||||
t.Fatalf("unexpected prefix for u=%d; got\n%x; expecting\n%x", u, b1[:len(prefix)], prefix)
|
||||
}
|
||||
if string(b1[len(prefix):]) != string(b) {
|
||||
t.Fatalf("unexpected b for u=%d; got\n%x; expecting\n%x", u, b1[len(prefix):], b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalUint64(t *testing.T) {
|
||||
testMarshalUnmarshalUint64(t, 0)
|
||||
testMarshalUnmarshalUint64(t, 1)
|
||||
testMarshalUnmarshalUint64(t, (1<<64)-1)
|
||||
testMarshalUnmarshalUint64(t, (1<<63)+1)
|
||||
testMarshalUnmarshalUint64(t, (1<<63)-1)
|
||||
testMarshalUnmarshalUint64(t, 1<<63)
|
||||
|
||||
for i := uint64(0); i < 1e4; i++ {
|
||||
testMarshalUnmarshalUint64(t, i)
|
||||
}
|
||||
}
|
||||
|
||||
func testMarshalUnmarshalUint64(t *testing.T, u uint64) {
|
||||
t.Helper()
|
||||
|
||||
b := MarshalUint64(nil, u)
|
||||
if len(b) != 8 {
|
||||
t.Fatalf("unexpected b length: %d; expecting %d", len(b), 8)
|
||||
}
|
||||
uNew := UnmarshalUint64(b)
|
||||
if uNew != u {
|
||||
t.Fatalf("unexpected uNew from b=%x; got %d; expecting %d", b, uNew, u)
|
||||
}
|
||||
|
||||
prefix := []byte{1, 2, 3}
|
||||
b1 := MarshalUint64(prefix, u)
|
||||
if string(b1[:len(prefix)]) != string(prefix) {
|
||||
t.Fatalf("unexpected prefix for u=%d; got\n%x; expecting\n%x", u, b1[:len(prefix)], prefix)
|
||||
}
|
||||
if string(b1[len(prefix):]) != string(b) {
|
||||
t.Fatalf("unexpected b for u=%d; got\n%x; expecting\n%x", u, b1[len(prefix):], b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalInt16(t *testing.T) {
|
||||
testMarshalUnmarshalInt16(t, 0)
|
||||
testMarshalUnmarshalInt16(t, 1)
|
||||
testMarshalUnmarshalInt16(t, -1)
|
||||
testMarshalUnmarshalInt16(t, -1<<15)
|
||||
testMarshalUnmarshalInt16(t, (-1<<15)+1)
|
||||
testMarshalUnmarshalInt16(t, (1<<15)-1)
|
||||
|
||||
for i := int16(0); i < 1e4; i++ {
|
||||
testMarshalUnmarshalInt16(t, i)
|
||||
testMarshalUnmarshalInt16(t, -i)
|
||||
}
|
||||
}
|
||||
|
||||
func testMarshalUnmarshalInt16(t *testing.T, v int16) {
|
||||
t.Helper()
|
||||
|
||||
b := MarshalInt16(nil, v)
|
||||
if len(b) != 2 {
|
||||
t.Fatalf("unexpected b length: %d; expecting %d", len(b), 2)
|
||||
}
|
||||
vNew := UnmarshalInt16(b)
|
||||
if vNew != v {
|
||||
t.Fatalf("unexpected vNew from b=%x; got %d; expecting %d", b, vNew, v)
|
||||
}
|
||||
|
||||
prefix := []byte{1, 2, 3}
|
||||
b1 := MarshalInt16(prefix, v)
|
||||
if string(b1[:len(prefix)]) != string(prefix) {
|
||||
t.Fatalf("unexpected prefix for v=%d; got\n%x; expecting\n%x", v, b1[:len(prefix)], prefix)
|
||||
}
|
||||
if string(b1[len(prefix):]) != string(b) {
|
||||
t.Fatalf("unexpected b for v=%d; got\n%x; expecting\n%x", v, b1[len(prefix):], b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalInt64(t *testing.T) {
|
||||
testMarshalUnmarshalInt64(t, 0)
|
||||
testMarshalUnmarshalInt64(t, 1)
|
||||
testMarshalUnmarshalInt64(t, -1)
|
||||
testMarshalUnmarshalInt64(t, -1<<63)
|
||||
testMarshalUnmarshalInt64(t, (-1<<63)+1)
|
||||
testMarshalUnmarshalInt64(t, (1<<63)-1)
|
||||
|
||||
for i := int64(0); i < 1e4; i++ {
|
||||
testMarshalUnmarshalInt64(t, i)
|
||||
testMarshalUnmarshalInt64(t, -i)
|
||||
}
|
||||
}
|
||||
|
||||
func testMarshalUnmarshalInt64(t *testing.T, v int64) {
|
||||
t.Helper()
|
||||
|
||||
b := MarshalInt64(nil, v)
|
||||
if len(b) != 8 {
|
||||
t.Fatalf("unexpected b length: %d; expecting %d", len(b), 8)
|
||||
}
|
||||
vNew := UnmarshalInt64(b)
|
||||
if vNew != v {
|
||||
t.Fatalf("unexpected vNew from b=%x; got %d; expecting %d", b, vNew, v)
|
||||
}
|
||||
|
||||
prefix := []byte{1, 2, 3}
|
||||
b1 := MarshalInt64(prefix, v)
|
||||
if string(b1[:len(prefix)]) != string(prefix) {
|
||||
t.Fatalf("unexpected prefix for v=%d; got\n%x; expecting\n%x", v, b1[:len(prefix)], prefix)
|
||||
}
|
||||
if string(b1[len(prefix):]) != string(b) {
|
||||
t.Fatalf("unexpected b for v=%d; got\n%x; expecting\n%x", v, b1[len(prefix):], b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalVarInt64(t *testing.T) {
|
||||
testMarshalUnmarshalVarInt64(t, 0)
|
||||
testMarshalUnmarshalVarInt64(t, 1)
|
||||
testMarshalUnmarshalVarInt64(t, -1)
|
||||
testMarshalUnmarshalVarInt64(t, -1<<63)
|
||||
testMarshalUnmarshalVarInt64(t, (-1<<63)+1)
|
||||
testMarshalUnmarshalVarInt64(t, (1<<63)-1)
|
||||
|
||||
for i := int64(0); i < 1e4; i++ {
|
||||
testMarshalUnmarshalVarInt64(t, i)
|
||||
testMarshalUnmarshalVarInt64(t, -i)
|
||||
testMarshalUnmarshalVarInt64(t, i<<8)
|
||||
testMarshalUnmarshalVarInt64(t, -i<<8)
|
||||
testMarshalUnmarshalVarInt64(t, i<<16)
|
||||
testMarshalUnmarshalVarInt64(t, -i<<16)
|
||||
testMarshalUnmarshalVarInt64(t, i<<23)
|
||||
testMarshalUnmarshalVarInt64(t, -i<<23)
|
||||
testMarshalUnmarshalVarInt64(t, i<<33)
|
||||
testMarshalUnmarshalVarInt64(t, -i<<33)
|
||||
testMarshalUnmarshalVarInt64(t, i<<43)
|
||||
testMarshalUnmarshalVarInt64(t, -i<<43)
|
||||
testMarshalUnmarshalVarInt64(t, i<<53)
|
||||
testMarshalUnmarshalVarInt64(t, -i<<53)
|
||||
}
|
||||
}
|
||||
|
||||
func testMarshalUnmarshalVarInt64(t *testing.T, v int64) {
|
||||
t.Helper()
|
||||
|
||||
b := MarshalVarInt64(nil, v)
|
||||
tail, vNew, err := UnmarshalVarInt64(b)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when unmarshaling v=%d from b=%x: %s", v, b, err)
|
||||
}
|
||||
if vNew != v {
|
||||
t.Fatalf("unexpected vNew from b=%x; got %d; expecting %d", b, vNew, v)
|
||||
}
|
||||
if len(tail) > 0 {
|
||||
t.Fatalf("unexpected data left after unmarshaling v=%d from b=%x: %x", v, b, tail)
|
||||
}
|
||||
|
||||
prefix := []byte{1, 2, 3}
|
||||
b1 := MarshalVarInt64(prefix, v)
|
||||
if string(b1[:len(prefix)]) != string(prefix) {
|
||||
t.Fatalf("unexpected prefix for v=%d; got\n%x; expecting\n%x", v, b1[:len(prefix)], prefix)
|
||||
}
|
||||
if string(b1[len(prefix):]) != string(b) {
|
||||
t.Fatalf("unexpected b for v=%d; got\n%x; expecting\n%x", v, b1[len(prefix):], b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalVarUint64(t *testing.T) {
|
||||
testMarshalUnmarshalVarUint64(t, 0)
|
||||
testMarshalUnmarshalVarUint64(t, 1)
|
||||
testMarshalUnmarshalVarUint64(t, (1<<63)-1)
|
||||
|
||||
for i := uint64(0); i < 1024; i++ {
|
||||
testMarshalUnmarshalVarUint64(t, i)
|
||||
testMarshalUnmarshalVarUint64(t, i<<8)
|
||||
testMarshalUnmarshalVarUint64(t, i<<16)
|
||||
testMarshalUnmarshalVarUint64(t, i<<23)
|
||||
testMarshalUnmarshalVarUint64(t, i<<33)
|
||||
testMarshalUnmarshalVarUint64(t, i<<41)
|
||||
testMarshalUnmarshalVarUint64(t, i<<49)
|
||||
testMarshalUnmarshalVarUint64(t, i<<54)
|
||||
}
|
||||
}
|
||||
|
||||
func testMarshalUnmarshalVarUint64(t *testing.T, u uint64) {
|
||||
t.Helper()
|
||||
|
||||
b := MarshalVarUint64(nil, u)
|
||||
tail, uNew, err := UnmarshalVarUint64(b)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when unmarshaling u=%d from b=%x: %s", u, b, err)
|
||||
}
|
||||
if uNew != u {
|
||||
t.Fatalf("unexpected uNew from b=%x; got %d; expecting %d", b, uNew, u)
|
||||
}
|
||||
if len(tail) > 0 {
|
||||
t.Fatalf("unexpected data left after unmarshaling u=%d from b=%x: %x", u, b, tail)
|
||||
}
|
||||
|
||||
prefix := []byte{1, 2, 3}
|
||||
b1 := MarshalVarUint64(prefix, u)
|
||||
if string(b1[:len(prefix)]) != string(prefix) {
|
||||
t.Fatalf("unexpected prefix for u=%d; got\n%x; expecting\n%x", u, b1[:len(prefix)], prefix)
|
||||
}
|
||||
if string(b1[len(prefix):]) != string(b) {
|
||||
t.Fatalf("unexpected b for u=%d; got\n%x; expecting\n%x", u, b1[len(prefix):], b)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalUnmarshalBytes(t *testing.T) {
|
||||
testMarshalUnmarshalBytes(t, "")
|
||||
testMarshalUnmarshalBytes(t, "x")
|
||||
testMarshalUnmarshalBytes(t, "xy")
|
||||
|
||||
var bb bytes.Buffer
|
||||
for i := 0; i < 100; i++ {
|
||||
fmt.Fprintf(&bb, " %d ", i)
|
||||
s := bb.String()
|
||||
testMarshalUnmarshalBytes(t, s)
|
||||
}
|
||||
}
|
||||
|
||||
func testMarshalUnmarshalBytes(t *testing.T, s string) {
|
||||
t.Helper()
|
||||
|
||||
b := MarshalBytes(nil, []byte(s))
|
||||
tail, bNew, err := UnmarshalBytes(b)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error when unmarshaling s=%q from b=%x: %s", s, b, err)
|
||||
}
|
||||
if string(bNew) != s {
|
||||
t.Fatalf("unexpected sNew from b=%x; got %q; expecting %q", b, bNew, s)
|
||||
}
|
||||
if len(tail) > 0 {
|
||||
t.Fatalf("unexepcted data left after unmarshaling s=%q from b=%x: %x", s, b, tail)
|
||||
}
|
||||
|
||||
prefix := []byte("abcde")
|
||||
b1 := MarshalBytes(prefix, []byte(s))
|
||||
if string(b1[:len(prefix)]) != string(prefix) {
|
||||
t.Fatalf("unexpected prefix for s=%q; got\n%x; expecting\n%x", s, b1[:len(prefix)], prefix)
|
||||
}
|
||||
if string(b1[len(prefix):]) != string(b) {
|
||||
t.Fatalf("unexpected b for s=%q; got\n%x; expecting\n%x", s, b1[len(prefix):], b)
|
||||
}
|
||||
}
|
Some files were not shown because too many files have changed in this diff.