Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Aliaksandr Valialkin 2023-03-12 01:01:41 -08:00
commit 3822d83276
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
35 changed files with 569 additions and 150 deletions


@ -17,7 +17,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@main
with:
go-version: 1.20.1
go-version: 1.20.2
id: go
- name: Code checkout
uses: actions/checkout@master


@ -57,7 +57,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.20.1
go-version: 1.20.2
check-latest: true
cache: true
if: ${{ matrix.language == 'go' }}


@ -32,7 +32,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: 1.20.1
go-version: 1.20.2
check-latest: true
cache: true
@ -56,7 +56,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: 1.20.1
go-version: 1.20.2
check-latest: true
cache: true
@ -81,7 +81,7 @@ jobs:
id: go
uses: actions/setup-go@v3
with:
go-version: 1.20.1
go-version: 1.20.2
check-latest: true
cache: true


@ -2,6 +2,7 @@
[![Latest Release](https://img.shields.io/github/release/VictoriaMetrics/VictoriaMetrics.svg?style=flat-square)](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
[![Docker Pulls](https://img.shields.io/docker/pulls/victoriametrics/victoria-metrics.svg?maxAge=604800)](https://hub.docker.com/r/victoriametrics/victoria-metrics)
[![victoriametrics](https://snapcraft.io/victoriametrics/badge.svg)](https://snapcraft.io/victoriametrics)
[![Slack](https://img.shields.io/badge/join%20slack-%23victoriametrics-brightgreen.svg)](https://slack.victoriametrics.com/)
[![GitHub license](https://img.shields.io/github/license/VictoriaMetrics/VictoriaMetrics.svg)](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)
[![Go Report](https://goreportcard.com/badge/github.com/VictoriaMetrics/VictoriaMetrics)](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)


@ -621,6 +621,10 @@ can read the same rules configuration as normal, evaluate them on the given time
results via remote write to the configured storage. vmalert supports any PromQL/MetricsQL compatible
data source for backfilling.
Please note that response caching may lead to unexpected results during and after the backfilling process.
To avoid this, reset the cache contents or disable caching when using backfilling,
as described in the [backfilling docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#backfilling).
See the blog post about [Rules backfilling via vmalert](https://victoriametrics.com/blog/rules-replay/).
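For illustration, below is a minimal sketch of automating that cache reset before a replay run. It assumes a single-node VictoriaMetrics instance reachable at `vmAddr` and the `/internal/resetRollupResultCache` handler described in the backfilling docs; verify the path against your VictoriaMetrics version:

```go
package main

import (
	"fmt"
	"net/http"
)

// resetRollupResultCache asks a single-node VictoriaMetrics instance to drop
// its rollup result cache, so a subsequent vmalert replay reads fresh data.
func resetRollupResultCache(vmAddr string) error {
	resp, err := http.Get(vmAddr + "/internal/resetRollupResultCache")
	if err != nil {
		return fmt.Errorf("cannot reset rollup result cache: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := resetRollupResultCache("http://localhost:8428"); err != nil {
		fmt.Println(err)
	}
}
```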
### How it works


@ -199,12 +199,32 @@ func (r *Rule) Validate() error {
// ValidateTplFn must validate the given annotations
type ValidateTplFn func(annotations map[string]string) error
// Parse parses rule configs from given file patterns
func Parse(pathPatterns []string, validateTplFn ValidateTplFn, validateExpressions bool) ([]Group, error) {
files, err := readFromFS(pathPatterns)
// ParseSilent parses rule configs from given file patterns without emitting logs
func ParseSilent(pathPatterns []string, validateTplFn ValidateTplFn, validateExpressions bool) ([]Group, error) {
files, err := readFromFS(pathPatterns, true)
if err != nil {
return nil, fmt.Errorf("failed to read from the config: %s", err)
}
return parse(files, validateTplFn, validateExpressions)
}
// Parse parses rule configs from given file patterns
func Parse(pathPatterns []string, validateTplFn ValidateTplFn, validateExpressions bool) ([]Group, error) {
files, err := readFromFS(pathPatterns, false)
if err != nil {
return nil, fmt.Errorf("failed to read from the config: %s", err)
}
groups, err := parse(files, validateTplFn, validateExpressions)
if err != nil {
return nil, fmt.Errorf("failed to parse %s: %s", pathPatterns, err)
}
if len(groups) < 1 {
logger.Warnf("no groups found in %s", strings.Join(pathPatterns, ";"))
}
return groups, nil
}
func parse(files map[string][]byte, validateTplFn ValidateTplFn, validateExpressions bool) ([]Group, error) {
errGroup := new(utils.ErrGroup)
var groups []Group
for file, data := range files {
@ -231,9 +251,12 @@ func Parse(pathPatterns []string, validateTplFn ValidateTplFn, validateExpressio
if err := errGroup.Err(); err != nil {
return nil, err
}
if len(groups) < 1 {
logger.Warnf("no groups found in %s", strings.Join(pathPatterns, ";"))
sort.SliceStable(groups, func(i, j int) bool {
if groups[i].File != groups[j].File {
return groups[i].File < groups[j].File
}
return groups[i].Name < groups[j].Name
})
return groups, nil
}
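A minimal usage sketch of the split entry points (the rule path below is hypothetical): `Parse` suits the initial load, where the per-pattern logs and the empty-groups warning are useful, while `ParseSilent` suits hot reloads, where they would be noise on every SIGHUP:

```go
package main

import (
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
)

func main() {
	patterns := []string{"/etc/vmalert/rules/*.yml"} // hypothetical rule paths
	noopTpl := func(annotations map[string]string) error { return nil }

	// Initial load: Parse logs how many files were found per pattern
	// and warns when no groups are discovered.
	groups, err := config.Parse(patterns, noopTpl, true)
	if err != nil {
		log.Fatalf("cannot load rules: %s", err)
	}
	log.Printf("loaded %d groups", len(groups))

	// Hot reload (e.g. on SIGHUP): ParseSilent reads the same patterns
	// without re-emitting the per-file discovery logs.
	if _, err := config.ParseSilent(patterns, noopTpl, true); err != nil {
		log.Printf("config reload failed: %s", err)
	}
}
```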


@ -2,9 +2,11 @@ package config
import (
"fmt"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config/fslocal"
"strings"
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config/fslocal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
// FS represent a file system abstract for reading files.
@ -15,10 +17,13 @@ type FS interface {
// String must return human-readable representation of FS.
String() string
// List returns the list of file names which will be read via Read fn
List() ([]string, error)
// Read returns a list of read files in form of a map
// where key is a file name and value is a content of read file.
// Read must be called only after the successful Init call.
Read() (map[string][]byte, error)
Read(files []string) (map[string][]byte, error)
}
var (
@ -31,9 +36,10 @@ var (
// readFromFS returns an error if at least one FS failed to init.
// The function can be called multiple times but each unique path
// will be inited only once.
// If silent == true, readFromFS will not emit any logs.
//
// It is allowed to mix different FS types in path list.
func readFromFS(paths []string) (map[string][]byte, error) {
func readFromFS(paths []string, silent bool) (map[string][]byte, error) {
var err error
result := make(map[string][]byte)
for _, path := range paths {
@ -54,7 +60,20 @@ func readFromFS(paths []string) (map[string][]byte, error) {
}
fsRegistryMu.Unlock()
files, err := fs.Read()
list, err := fs.List()
if err != nil {
return nil, fmt.Errorf("failed to list files from %q", fs)
}
if !silent {
logger.Infof("found %d files to read from %q", len(list), fs)
}
if len(list) < 1 {
continue
}
files, err := fs.Read(list)
if err != nil {
return nil, fmt.Errorf("error while reading files from %q: %w", fs, err)
}
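To make the new `List`/`Read` contract concrete, here is a hypothetical in-memory implementation (the interface is re-declared locally and `Init` is omitted for brevity); this is an editorial sketch, not part of the change:

```go
package main

import "fmt"

// FS mirrors the reading-related methods of vmalert's config.FS interface
// after this change.
type FS interface {
	String() string
	List() ([]string, error)
	Read(files []string) (map[string][]byte, error)
}

// memFS is a toy in-memory implementation showing the List-then-Read
// flow which readFromFS relies on.
type memFS struct {
	files map[string][]byte
}

func (fs *memFS) String() string { return "In-memory FS" }

func (fs *memFS) List() ([]string, error) {
	names := make([]string, 0, len(fs.files))
	for name := range fs.files {
		names = append(names, name)
	}
	return names, nil
}

func (fs *memFS) Read(files []string) (map[string][]byte, error) {
	result := make(map[string][]byte, len(files))
	for _, name := range files {
		data, ok := fs.files[name]
		if !ok {
			return nil, fmt.Errorf("file %q is missing", name)
		}
		result[name] = data
	}
	return result, nil
}

func main() {
	var fs FS = &memFS{files: map[string][]byte{"alerts.yml": []byte("groups: []")}}
	list, _ := fs.List()
	files, _ := fs.Read(list)
	fmt.Printf("read %d files from %q\n", len(files), fs)
}
```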


@ -25,15 +25,20 @@ func (fs *FS) String() string {
return fmt.Sprintf("Local FS{MatchPattern: %q}", fs.Pattern)
}
// Read returns a map of read files where
// key is the file name and value is file's content.
func (fs *FS) Read() (map[string][]byte, error) {
// List returns the list of file names which will be read via Read fn
func (fs *FS) List() ([]string, error) {
matches, err := filepath.Glob(fs.Pattern)
if err != nil {
return nil, fmt.Errorf("error while matching files via pattern %s: %w", fs.Pattern, err)
}
return matches, nil
}
// Read returns a map of read files where
// key is the file name and value is file's content.
func (fs *FS) Read(files []string) (map[string][]byte, error) {
result := make(map[string][]byte)
for _, path := range matches {
for _, path := range files {
data, err := os.ReadFile(path)
if err != nil {
return nil, fmt.Errorf("error while reading file %q: %w", path, err)


@ -345,7 +345,7 @@ func configReload(ctx context.Context, m *manager, groupsCfg []config.Group, sig
logger.Errorf("failed to load new templates: %s", err)
continue
}
newGroupsCfg, err := config.Parse(*rulePath, validateTplFn, *validateExpressions)
newGroupsCfg, err := config.ParseSilent(*rulePath, validateTplFn, *validateExpressions)
if err != nil {
configReloadErrors.Inc()
configSuccess.Set(0)


@ -788,6 +788,11 @@ Please note that each import request can load up to a single vCPU core on Victor
to allocated CPU resources of your VictoriaMetrics installation.
8. `vmctl` supports `--vm-native-src-headers` and `--vm-native-dst-headers`, which define headers to send with each request
to the corresponding source and destination addresses.
9. `vmctl` supports `--vm-native-disable-http-keep-alive`, which makes `vmctl` use non-persistent HTTP connections to avoid
the `use of closed network connection` error when running a long export.
10. Migrating data with a time range that overlaps data already present at the destination can produce duplicate series at the destination.
To avoid duplicates, set `-dedup.minScrapeInterval=1ms` for `vmselect` and `vmstorage` at the destination.
This will instruct `vmselect` and `vmstorage` to ignore duplicates with matching timestamps.
In this mode `vmctl` acts as a proxy between two VM instances, where time series filtering is done by "source" (`src`)
and processing is done by "destination" (`dst`). So no extra memory or CPU resources required on `vmctl` side. Only

app/vmctl/auth/auth.go (new file, 222 lines)

@ -0,0 +1,222 @@
package auth
import (
"encoding/base64"
"fmt"
"net/http"
"strings"
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
)
// HTTPClientConfig represents http client config.
type HTTPClientConfig struct {
BasicAuth *BasicAuthConfig
BearerToken string
Headers string
}
// NewConfig creates auth config for the given hcc.
func (hcc *HTTPClientConfig) NewConfig() (*Config, error) {
opts := &Options{
BasicAuth: hcc.BasicAuth,
BearerToken: hcc.BearerToken,
Headers: hcc.Headers,
}
return opts.NewConfig()
}
// BasicAuthConfig represents basic auth config.
type BasicAuthConfig struct {
Username string
Password string
PasswordFile string
}
// ConfigOptions options which helps build Config
type ConfigOptions func(config *HTTPClientConfig)
// Generate returns Config based on the given params
func Generate(filterOptions ...ConfigOptions) (*Config, error) {
authCfg := &HTTPClientConfig{}
for _, option := range filterOptions {
option(authCfg)
}
return authCfg.NewConfig()
}
// WithBasicAuth returns AuthConfigOptions and initialized BasicAuthConfig based on given params
func WithBasicAuth(username, password string) ConfigOptions {
return func(config *HTTPClientConfig) {
if username != "" || password != "" {
config.BasicAuth = &BasicAuthConfig{
Username: username,
Password: password,
}
}
}
}
// WithBearer returns AuthConfigOptions and set BearerToken or BearerTokenFile based on given params
func WithBearer(token string) ConfigOptions {
return func(config *HTTPClientConfig) {
if token != "" {
config.BearerToken = token
}
}
}
// WithHeaders returns AuthConfigOptions and set Headers based on the given params
func WithHeaders(headers string) ConfigOptions {
return func(config *HTTPClientConfig) {
if headers != "" {
config.Headers = headers
}
}
}
// Config is auth config.
type Config struct {
getAuthHeader func() string
authHeaderLock sync.Mutex
authHeader string
authHeaderDeadline uint64
headers []keyValue
authDigest string
}
// SetHeaders sets the configured ac headers to req.
func (ac *Config) SetHeaders(req *http.Request, setAuthHeader bool) {
reqHeaders := req.Header
for _, h := range ac.headers {
reqHeaders.Set(h.key, h.value)
}
if setAuthHeader {
if ah := ac.GetAuthHeader(); ah != "" {
reqHeaders.Set("Authorization", ah)
}
}
}
// GetAuthHeader returns optional `Authorization: ...` http header.
func (ac *Config) GetAuthHeader() string {
f := ac.getAuthHeader
if f == nil {
return ""
}
ac.authHeaderLock.Lock()
defer ac.authHeaderLock.Unlock()
if fasttime.UnixTimestamp() > ac.authHeaderDeadline {
ac.authHeader = f()
// Cache the authHeader for a second.
ac.authHeaderDeadline = fasttime.UnixTimestamp() + 1
}
return ac.authHeader
}
type authContext struct {
// getAuthHeader must return <value> for 'Authorization: <value>' http request header
getAuthHeader func() string
// authDigest must contain the digest for the used authorization
// The digest must be changed whenever the original config changes.
authDigest string
}
func (ac *authContext) initFromBasicAuthConfig(ba *BasicAuthConfig) error {
if ba.Username == "" {
return fmt.Errorf("missing `username` in `basic_auth` section")
}
if ba.Password != "" {
ac.getAuthHeader = func() string {
// See https://en.wikipedia.org/wiki/Basic_access_authentication
token := ba.Username + ":" + ba.Password
token64 := base64.StdEncoding.EncodeToString([]byte(token))
return "Basic " + token64
}
ac.authDigest = fmt.Sprintf("basic(username=%q, password=%q)", ba.Username, ba.Password)
return nil
}
return nil
}
func (ac *authContext) initFromBearerToken(bearerToken string) error {
ac.getAuthHeader = func() string {
return "Bearer " + bearerToken
}
ac.authDigest = fmt.Sprintf("bearer(token=%q)", bearerToken)
return nil
}
// Options contain options, which must be passed to NewConfig.
type Options struct {
// BasicAuth contains optional BasicAuthConfig.
BasicAuth *BasicAuthConfig
// BearerToken contains optional bearer token.
BearerToken string
// Headers contains optional http request headers in the form 'Foo: bar'.
Headers string
}
// NewConfig creates auth config from the given opts.
func (opts *Options) NewConfig() (*Config, error) {
var ac authContext
if opts.BasicAuth != nil {
if ac.getAuthHeader != nil {
return nil, fmt.Errorf("cannot use both `authorization` and `basic_auth`")
}
if err := ac.initFromBasicAuthConfig(opts.BasicAuth); err != nil {
return nil, err
}
}
if opts.BearerToken != "" {
if ac.getAuthHeader != nil {
return nil, fmt.Errorf("cannot simultaneously use `authorization`, `basic_auth` and `bearer_token`")
}
if err := ac.initFromBearerToken(opts.BearerToken); err != nil {
return nil, err
}
}
headers, err := parseHeaders(opts.Headers)
if err != nil {
return nil, err
}
c := &Config{
getAuthHeader: ac.getAuthHeader,
headers: headers,
authDigest: ac.authDigest,
}
return c, nil
}
type keyValue struct {
key string
value string
}
func parseHeaders(headers string) ([]keyValue, error) {
if len(headers) == 0 {
return nil, nil
}
var headersSplitByDelimiter = strings.Split(headers, "^^")
kvs := make([]keyValue, len(headersSplitByDelimiter))
for i, h := range headersSplitByDelimiter {
n := strings.IndexByte(h, ':')
if n < 0 {
return nil, fmt.Errorf(`missing ':' in header %q; expecting "key: value" format`, h)
}
kv := &kvs[i]
kv.key = strings.TrimSpace(h[:n])
kv.value = strings.TrimSpace(h[n+1:])
}
return kvs, nil
}
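Putting the new package together, a minimal usage sketch (the URL and credentials are placeholders); it mirrors how the `main.go` change below builds the source and destination configs:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/auth"
)

func main() {
	// Options with empty values are no-ops, so the same call site can
	// serve flags that may or may not be set.
	cfg, err := auth.Generate(
		auth.WithBasicAuth("user", "secret"),
		auth.WithBearer(""), // unset; basic auth is used instead
		auth.WithHeaders("X-Scope-OrgID:42^^X-Env:dev"), // '^^'-delimited
	)
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest(http.MethodGet, "http://localhost:8428/api/v1/export/native", nil)
	if err != nil {
		panic(err)
	}
	// SetHeaders applies the custom headers plus, when requested,
	// the cached Authorization header.
	cfg.SetHeaders(req, true)
	fmt.Println(req.Header.Get("Authorization")) // Basic dXNlcjpzZWNyZXQ=
}
```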


@ -325,15 +325,19 @@ const (
vmNativeFilterTimeEnd = "vm-native-filter-time-end"
vmNativeStepInterval = "vm-native-step-interval"
vmNativeDisableHTTPKeepAlive = "vm-native-disable-http-keep-alive"
vmNativeSrcAddr = "vm-native-src-addr"
vmNativeSrcUser = "vm-native-src-user"
vmNativeSrcPassword = "vm-native-src-password"
vmNativeSrcHeaders = "vm-native-src-headers"
vmNativeSrcBearerToken = "vm-native-src-bearer-token"
vmNativeDstAddr = "vm-native-dst-addr"
vmNativeDstUser = "vm-native-dst-user"
vmNativeDstPassword = "vm-native-dst-password"
vmNativeDstHeaders = "vm-native-dst-headers"
vmNativeDstBearerToken = "vm-native-dst-bearer-token"
)
var (
@ -358,6 +362,11 @@ var (
Name: vmNativeStepInterval,
Usage: fmt.Sprintf("Split export data into chunks. Requires setting --%s. Valid values are '%s','%s','%s','%s'.", vmNativeFilterTimeStart, stepper.StepMonth, stepper.StepDay, stepper.StepHour, stepper.StepMinute),
},
&cli.BoolFlag{
Name: vmNativeDisableHTTPKeepAlive,
Usage: "Disable HTTP persistent connections for requests made to VictoriaMetrics components during export",
Value: false,
},
&cli.StringFlag{
Name: vmNativeSrcAddr,
Usage: "VictoriaMetrics address to perform export from. \n" +
@ -381,6 +390,10 @@ var (
"For example, --vm-native-src-headers='My-Auth:foobar' would send 'My-Auth: foobar' HTTP header with every request to the corresponding source address. \n" +
"Multiple headers must be delimited by '^^': --vm-native-src-headers='header1:value1^^header2:value2'",
},
&cli.StringFlag{
Name: vmNativeSrcBearerToken,
Usage: "Optional bearer auth token to use for the corresponding `--vm-native-src-addr`",
},
&cli.StringFlag{
Name: vmNativeDstAddr,
Usage: "VictoriaMetrics address to perform import to. \n" +
@ -404,6 +417,10 @@ var (
"For example, --vm-native-dst-headers='My-Auth:foobar' would send 'My-Auth: foobar' HTTP header with every request to the corresponding destination address. \n" +
"Multiple headers must be delimited by '^^': --vm-native-dst-headers='header1:value1^^header2:value2'",
},
&cli.StringFlag{
Name: vmNativeDstBearerToken,
Usage: "Optional bearer auth token to use for the corresponding `--vm-native-dst-addr`",
},
&cli.StringSliceFlag{
Name: vmExtraLabel,
Value: nil,


@ -11,6 +11,7 @@ import (
"syscall"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/auth"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
@ -199,6 +200,26 @@ func main() {
return fmt.Errorf("flag %q can't be empty", vmNativeFilterMatch)
}
var srcExtraLabels []string
srcAddr := strings.Trim(c.String(vmNativeSrcAddr), "/")
srcAuthConfig, err := auth.Generate(
auth.WithBasicAuth(c.String(vmNativeSrcUser), c.String(vmNativeSrcPassword)),
auth.WithBearer(c.String(vmNativeSrcBearerToken)),
auth.WithHeaders(c.String(vmNativeSrcHeaders)))
if err != nil {
return fmt.Errorf("error initilize auth config for source: %s", srcAddr)
}
dstAddr := strings.Trim(c.String(vmNativeDstAddr), "/")
dstExtraLabels := c.StringSlice(vmExtraLabel)
dstAuthConfig, err := auth.Generate(
auth.WithBasicAuth(c.String(vmNativeDstUser), c.String(vmNativeDstPassword)),
auth.WithBearer(c.String(vmNativeDstBearerToken)),
auth.WithHeaders(c.String(vmNativeDstHeaders)))
if err != nil {
return fmt.Errorf("error initilize auth config for destination: %s", dstAddr)
}
p := vmNativeProcessor{
rateLimit: c.Int64(vmRateLimit),
interCluster: c.Bool(vmInterCluster),
@ -209,17 +230,16 @@ func main() {
Chunk: c.String(vmNativeStepInterval),
},
src: &native.Client{
Addr: strings.Trim(c.String(vmNativeSrcAddr), "/"),
User: c.String(vmNativeSrcUser),
Password: c.String(vmNativeSrcPassword),
Headers: c.String(vmNativeSrcHeaders),
AuthCfg: srcAuthConfig,
Addr: srcAddr,
ExtraLabels: srcExtraLabels,
DisableHTTPKeepAlive: c.Bool(vmNativeDisableHTTPKeepAlive),
},
dst: &native.Client{
Addr: strings.Trim(c.String(vmNativeDstAddr), "/"),
User: c.String(vmNativeDstUser),
Password: c.String(vmNativeDstPassword),
ExtraLabels: c.StringSlice(vmExtraLabel),
Headers: c.String(vmNativeDstHeaders),
AuthCfg: dstAuthConfig,
Addr: dstAddr,
ExtraLabels: dstExtraLabels,
DisableHTTPKeepAlive: c.Bool(vmNativeDisableHTTPKeepAlive),
},
backoff: backoff.New(),
cc: c.Int(vmConcurrency),


@ -6,7 +6,8 @@ import (
"fmt"
"io"
"net/http"
"strings"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/auth"
)
const (
@ -18,11 +19,10 @@ const (
// Client is an HTTP client for exporting and importing
// time series via native protocol.
type Client struct {
AuthCfg *auth.Config
Addr string
User string
Password string
ExtraLabels []string
Headers string
DisableHTTPKeepAlive bool
}
// LabelValues represents series from api/v1/series response
@ -92,15 +92,6 @@ func (c *Client) ImportPipe(ctx context.Context, dstURL string, pr *io.PipeReade
return fmt.Errorf("cannot create import request to %q: %s", c.Addr, err)
}
parsedHeaders, err := parseHeaders(c.Headers)
if err != nil {
return err
}
for _, header := range parsedHeaders {
req.Header.Set(header.key, header.value)
}
importResp, err := c.do(req, http.StatusNoContent)
if err != nil {
return fmt.Errorf("import request failed: %s", err)
@ -131,15 +122,6 @@ func (c *Client) ExportPipe(ctx context.Context, url string, f Filter) (io.ReadC
// disable compression since it is meaningless for native format
req.Header.Set("Accept-Encoding", "identity")
parsedHeaders, err := parseHeaders(c.Headers)
if err != nil {
return nil, err
}
for _, header := range parsedHeaders {
req.Header.Set(header.key, header.value)
}
resp, err := c.do(req, http.StatusOK)
if err != nil {
return nil, fmt.Errorf("export request failed: %w", err)
@ -164,15 +146,6 @@ func (c *Client) GetSourceTenants(ctx context.Context, f Filter) ([]string, erro
}
req.URL.RawQuery = params.Encode()
parsedHeaders, err := parseHeaders(c.Headers)
if err != nil {
return nil, err
}
for _, header := range parsedHeaders {
req.Header.Set(header.key, header.value)
}
resp, err := c.do(req, http.StatusOK)
if err != nil {
return nil, fmt.Errorf("tenants request failed: %s", err)
@ -193,10 +166,11 @@ func (c *Client) GetSourceTenants(ctx context.Context, f Filter) ([]string, erro
}
func (c *Client) do(req *http.Request, expSC int) (*http.Response, error) {
if c.User != "" {
req.SetBasicAuth(c.User, c.Password)
if c.AuthCfg != nil {
c.AuthCfg.SetHeaders(req, true)
}
resp, err := http.DefaultClient.Do(req)
var httpClient = &http.Client{Transport: &http.Transport{DisableKeepAlives: c.DisableHTTPKeepAlive}}
resp, err := httpClient.Do(req)
if err != nil {
return nil, fmt.Errorf("unexpected error when performing request: %w", err)
}
@ -210,28 +184,3 @@ func (c *Client) do(req *http.Request, expSC int) (*http.Response, error) {
}
return resp, err
}
type keyValue struct {
key string
value string
}
func parseHeaders(headers string) ([]keyValue, error) {
if len(headers) == 0 {
return nil, nil
}
var headersSplitByDelimiter = strings.Split(headers, "^^")
kvs := make([]keyValue, len(headersSplitByDelimiter))
for i, h := range headersSplitByDelimiter {
n := strings.IndexByte(h, ':')
if n < 0 {
return nil, fmt.Errorf(`missing ':' in header %q; expecting "key: value" format`, h)
}
kv := &kvs[i]
kv.key = strings.TrimSpace(h[:n])
kv.value = strings.TrimSpace(h[n+1:])
}
return kvs, nil
}


@ -1266,8 +1266,10 @@ func ProcessSearchQuery(qt *querytracer.Tracer, sq *storage.SearchQuery, deadlin
putStorageSearch(sr)
return nil, fmt.Errorf("cannot write %d bytes to temporary file: %w", len(buf), err)
}
metricName := bytesutil.InternBytes(sr.MetricBlockRef.MetricName)
brs := m[metricName]
// Do not intern mb.MetricName, since it leads to increased memory usage.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3692
metricName := sr.MetricBlockRef.MetricName
brs := m[string(metricName)]
if brs == nil {
brs = &blockRefs{}
brs.brs = brs.brsPrealloc[:0]
@ -1277,8 +1279,9 @@ func ProcessSearchQuery(qt *querytracer.Tracer, sq *storage.SearchQuery, deadlin
addr: addr,
})
if len(brs.brs) == 1 {
orderedMetricNames = append(orderedMetricNames, metricName)
m[metricName] = brs
metricNameStr := string(metricName)
orderedMetricNames = append(orderedMetricNames, metricNameStr)
m[metricNameStr] = brs
}
}
if err := sr.Error(); err != nil {
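The replacement works because of a Go compiler optimization worth spelling out: a `[]byte`-to-`string` conversion used directly as a map index does not allocate, so only the first insertion of each unique metric name pays for a string copy. A standalone sketch:

```go
package main

import "fmt"

func main() {
	m := map[string]int{}
	key := []byte(`metric{instance="a"}`)

	// Lookup: the compiler recognizes m[string(b)] and performs the
	// lookup without allocating a new string.
	if _, ok := m[string(key)]; !ok {
		// Store: this conversion does allocate, but only once per
		// unique metric name instead of interning every name seen.
		m[string(key)] = 1
	}
	fmt.Println(len(m)) // 1
}
```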


@ -1,4 +1,4 @@
FROM golang:1.20.1 as build-web-stage
FROM golang:1.20.2 as build-web-stage
COPY build /build
WORKDIR /build


@ -76,12 +76,24 @@
"uid": "$ds"
},
"enable": true,
"expr": "sum(vm_app_version{job=~\"$job\"}) by(short_version) unless (sum(vm_app_version{job=~\"$job\"} offset 20m) by(short_version))",
"expr": "sum(vm_app_version{job=~\"$job\", instance=~\"$instance\"}) by(short_version) unless (sum(vm_app_version{job=~\"$job\", instance=~\"$instance\"} offset 20m) by(short_version))",
"hide": true,
"iconColor": "dark-blue",
"name": "version change",
"textFormat": "{{short_version}}",
"titleFormat": "Version change"
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"enable": true,
"expr": "sum(changes(vm_app_start_timestamp{job=~\"$job\", instance=~\"$instance\"})) by(job)",
"hide": true,
"iconColor": "dark-yellow",
"name": "restarts",
"textFormat": "{{job}} restarted"
}
]
},
@ -4642,7 +4654,7 @@
},
"editorMode": "code",
"exemplar": false,
"expr": "median(\n rate(process_cpu_seconds_total{job=~\"$job_storage\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job_storage\", instance=~\"$instance\"}\n)",
"expr": "quantile(0.5,\n rate(process_cpu_seconds_total{job=~\"$job_storage\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job_storage\", instance=~\"$instance\"}\n)",
"format": "time_series",
"hide": false,
"interval": "",
@ -4788,7 +4800,7 @@
},
"editorMode": "code",
"exemplar": true,
"expr": "median(\n max_over_time(process_resident_memory_bytes{job=~\"$job_storage\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_memory_bytes{job=~\"$job_storage\", instance=~\"$instance\"}\n)",
"expr": "quantile(0.5,\n max_over_time(process_resident_memory_bytes{job=~\"$job_storage\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_memory_bytes{job=~\"$job_storage\", instance=~\"$instance\"}\n)",
"format": "time_series",
"hide": false,
"interval": "",
@ -5381,7 +5393,7 @@
"uid": "$ds"
},
"editorMode": "code",
"expr": "median(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) /\n (\n sum(vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) +\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n ) \n)",
"expr": "quantile(0.5,\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) /\n (\n sum(vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) +\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n ) \n)",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@ -6255,7 +6267,7 @@
},
"editorMode": "code",
"exemplar": true,
"expr": "median(\n rate(process_cpu_seconds_total{job=~\"$job_select\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job_select\", instance=~\"$instance\"}\n)",
"expr": "quantile(0.5,\n rate(process_cpu_seconds_total{job=~\"$job_select\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job_select\", instance=~\"$instance\"}\n)",
"format": "time_series",
"hide": false,
"interval": "",
@ -6399,7 +6411,7 @@
},
"editorMode": "code",
"exemplar": true,
"expr": "median(\n max_over_time(process_resident_memory_bytes{job=~\"$job_select\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_memory_bytes{job=~\"$job_select\", instance=~\"$instance\"}\n)",
"expr": "quantile(0.5,\n max_over_time(process_resident_memory_bytes{job=~\"$job_select\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_memory_bytes{job=~\"$job_select\", instance=~\"$instance\"}\n)",
"format": "time_series",
"hide": false,
"interval": "",
@ -7364,7 +7376,7 @@
},
"editorMode": "code",
"exemplar": true,
"expr": "median(\n rate(process_cpu_seconds_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job_insert\", instance=~\"$instance\"}\n)",
"expr": "quantile(0.5,\n rate(process_cpu_seconds_total{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job_insert\", instance=~\"$instance\"}\n)",
"format": "time_series",
"hide": false,
"interval": "",
@ -7508,7 +7520,7 @@
},
"editorMode": "code",
"exemplar": true,
"expr": "median(\n max_over_time(process_resident_memory_bytes{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_memory_bytes{job=~\"$job_insert\", instance=~\"$instance\"}\n)",
"expr": "quantile(0.5,\n max_over_time(process_resident_memory_bytes{job=~\"$job_insert\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_memory_bytes{job=~\"$job_insert\", instance=~\"$instance\"}\n)",
"format": "time_series",
"hide": false,
"interval": "",


@ -65,12 +65,24 @@
"uid": "$ds"
},
"enable": true,
"expr": "sum(vm_app_version{job=~\"$job\"}) by(short_version) unless (sum(vm_app_version{job=~\"$job\"} offset 20m) by(short_version))",
"expr": "sum(vm_app_version{job=~\"$job\", instance=~\"$instance\"}) by(short_version) unless (sum(vm_app_version{job=~\"$job\", instance=~\"$instance\"} offset 20m) by(short_version))",
"hide": true,
"iconColor": "dark-blue",
"name": "version",
"textFormat": "{{short_version}}",
"titleFormat": "Version change"
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"enable": true,
"expr": "sum(changes(vm_app_start_timestamp{job=~\"$job\", instance=~\"$instance\"})) by(job, instance)",
"hide": true,
"iconColor": "dark-yellow",
"name": "restarts",
"textFormat": "{{job}}:{{instance}} restarted"
}
]
},


@ -59,12 +59,24 @@
"uid": "$ds"
},
"enable": true,
"expr": "sum(vm_app_version{job=~\"$job\"}) by(short_version) unless (sum(vm_app_version{job=~\"$job\"} offset 20m) by(short_version))",
"expr": "sum(vm_app_version{job=~\"$job\", instance=~\"$instance\"}) by(short_version) unless (sum(vm_app_version{job=~\"$job\", instance=~\"$instance\"} offset 20m) by(short_version))",
"hide": true,
"iconColor": "dark-blue",
"name": "version",
"textFormat": "{{short_version}}",
"titleFormat": "Version change"
},
{
"datasource": {
"type": "prometheus",
"uid": "$ds"
},
"enable": true,
"expr": "sum(changes(vm_app_start_timestamp{job=~\"$job\", instance=~\"$instance\"})) by(job, instance)",
"hide": true,
"iconColor": "dark-yellow",
"name": "restarts",
"textFormat": "{{job}}:{{instance}} restarted"
}
]
},


@ -4,7 +4,7 @@ DOCKER_NAMESPACE := victoriametrics
ROOT_IMAGE ?= alpine:3.17.2
CERTS_IMAGE := alpine:3.17.2
GO_BUILDER_IMAGE := golang:1.20.1-alpine
GO_BUILDER_IMAGE := golang:1.20.2-alpine
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
BASE_IMAGE := local/base:1.1.4-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)


@ -15,16 +15,25 @@ The following tip changes can be tested by building VictoriaMetrics components f
## tip
* SECURITY: upgrade Go builder from Go1.20.1 to Go1.20.2. See [the list of issues addressed in Go1.20.2](https://github.com/golang/go/issues?q=milestone%3AGo1.20.2+label%3ACherryPickApproved).
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): increase the default value for `--remote-read-http-timeout` command-line option from 30s (30 seconds) to 5m (5 minutes). This reduces the probability of timeout errors when migrating a big number of time series. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3879).
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): migrate series one-by-one in [vm-native mode](https://docs.victoriametrics.com/vmctl.html#native-protocol). This allows better tracking of the migration progress and resuming the migration process from the last migrated time series. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3859) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3600).
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add `--vm-native-src-headers` and `--vm-native-dst-headers` command-line flags, which can be used for setting custom HTTP headers during [vm-native migration mode](https://docs.victoriametrics.com/vmctl.html#native-protocol). Thanks to @baconmania for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3906).
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add `--vm-native-src-bearer-token` and `--vm-native-dst-bearer-token` command-line flags, which can be used for setting Bearer token headers for the source and the destination storage during [vm-native migration mode](https://docs.victoriametrics.com/vmctl.html#native-protocol). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3835).
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add `--vm-native-disable-http-keep-alive` command-line flag to allow `vmctl` to use non-persistent HTTP connections in [vm-native migration mode](https://docs.victoriametrics.com/vmctl.html#native-protocol). Thanks to @baconmania for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3909).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): log the number of configuration files found for each specified `-rule` command-line flag.
* FEATURE: [vmalert enterprise](https://docs.victoriametrics.com/vmalert.html): concurrently [read config files from S3, GCS or S3-compatible object storage](https://docs.victoriametrics.com/vmalert.html#reading-rules-from-object-storage). This significantly improves config load speed for cases when there are thousands of files to read from the object storage.
* BUGFIX: vmstorage: fix a bug, which could lead to incomplete or empty results for heavy queries selecting tens of thousands of time series. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3946).
* BUGFIX: vmselect: reduce memory usage and CPU usage when performing heavy queries. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3692).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): fix panic when [writing data to Kafka](https://docs.victoriametrics.com/vmagent.html#writing-metrics-to-kafka). The panic has been introduced in [v1.88.0](https://docs.victoriametrics.com/CHANGELOG.html#v1880).
* BUGFIX: prevent a possible `invalid memory address or nil pointer dereference` panic during [background merge](https://docs.victoriametrics.com/#storage). The issue has been introduced in [v1.85.0](https://docs.victoriametrics.com/CHANGELOG.html#v1850). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3897).
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): stop showing `Please enter a valid Query and execute it` error message on the first load of vmui.
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): properly process `Run in VMUI` button click in [VictoriaMetrics datasource plugin for Grafana](https://github.com/VictoriaMetrics/grafana-datasource).
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix the display of the selected value for dropdowns on `Explore` page.
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth.html): fix `cannot serve http` panic when a plain HTTP request is sent to `vmauth` configured to accept [proxy protocol](https://www.haproxy.org/download/2.3/doc/proxy-protocol.txt)-encoded requests (e.g. when `vmauth` runs with the `-httpListenAddr.useProxyProtocol` command-line flag). The issue has been introduced in [v1.87.0](https://docs.victoriametrics.com/CHANGELOG.html#v1870) when implementing [this feature](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3335).
* BUGFIX: [vmgateway](https://docs.victoriametrics.com/vmgateway.html): properly parse the RSA public key discovered via the JWK endpoint.
## [v1.88.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.88.1)


@ -3,6 +3,7 @@
[![Latest Release](https://img.shields.io/github/release/VictoriaMetrics/VictoriaMetrics.svg?style=flat-square)](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
[![Docker Pulls](https://img.shields.io/docker/pulls/victoriametrics/victoria-metrics.svg?maxAge=604800)](https://hub.docker.com/r/victoriametrics/victoria-metrics)
[![victoriametrics](https://snapcraft.io/victoriametrics/badge.svg)](https://snapcraft.io/victoriametrics)
[![Slack](https://img.shields.io/badge/join%20slack-%23victoriametrics-brightgreen.svg)](https://slack.victoriametrics.com/)
[![GitHub license](https://img.shields.io/github/license/VictoriaMetrics/VictoriaMetrics.svg)](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)
[![Go Report](https://goreportcard.com/badge/github.com/VictoriaMetrics/VictoriaMetrics)](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)


@ -6,6 +6,7 @@ sort: 1
[![Latest Release](https://img.shields.io/github/release/VictoriaMetrics/VictoriaMetrics.svg?style=flat-square)](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest)
[![Docker Pulls](https://img.shields.io/docker/pulls/victoriametrics/victoria-metrics.svg?maxAge=604800)](https://hub.docker.com/r/victoriametrics/victoria-metrics)
[![victoriametrics](https://snapcraft.io/victoriametrics/badge.svg)](https://snapcraft.io/victoriametrics)
[![Slack](https://img.shields.io/badge/join%20slack-%23victoriametrics-brightgreen.svg)](https://slack.victoriametrics.com/)
[![GitHub license](https://img.shields.io/github/license/VictoriaMetrics/VictoriaMetrics.svg)](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/LICENSE)
[![Go Report](https://goreportcard.com/badge/github.com/VictoriaMetrics/VictoriaMetrics)](https://goreportcard.com/report/github.com/VictoriaMetrics/VictoriaMetrics)

7 binary image files added (PNG screenshots, 335–490 KiB each); presumably the stream-aggregation-check-*.png images referenced by docs/stream-aggregation.md below.


@ -50,7 +50,7 @@ Stream aggregation can be used in the following cases:
### Statsd alternative
Stream aggregation can be used as [statsd](https://github.com/statsd/statsd) altnernative in the following cases:
Stream aggregation can be used as [statsd](https://github.com/statsd/statsd) alternative in the following cases:
* [Counting input samples](#counting-input-samples)
* [Summing input metrics](#summing-input-metrics)
@ -60,8 +60,8 @@ Stream aggregation can be used as [statsd](https://github.com/statsd/statsd) alt
### Recording rules alternative
Sometimes [alerting queries](https://docs.victoriametrics.com/vmalert.html#alerting-rules) may require non-trivial amounts of CPU, RAM,
disk IO and network bandwith at metrics storage side. For example, if `http_request_duration_seconds` histogram is generated by thousands
of app instances, then the alerting query `histogram_quantile(0.99, sum(increase(http_request_duration_seconds_bucket[5m])) without (instance)) > 0.5`
disk IO and network bandwidth at metrics storage side. For example, if `http_request_duration_seconds` histogram is generated by thousands
of application instances, then the alerting query `histogram_quantile(0.99, sum(increase(http_request_duration_seconds_bucket[5m])) without (instance)) > 0.5`
can become slow, since it needs to scan a very large number of unique [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series)
with `http_request_duration_seconds_bucket` name. This alerting query can be sped up by pre-calculating
the `sum(increase(http_request_duration_seconds_bucket[5m])) without (instance)` via [recording rule](https://docs.victoriametrics.com/vmalert.html#recording-rules).
@ -87,6 +87,8 @@ This query is executed much faster than the original query, because it needs to
See [the list of aggregate output](#aggregation-outputs), which can be specified at `output` field.
See also [aggregating by labels](#aggregating-by-labels).
It is recommended to set the `interval` field to a value at least several times higher than your metrics collection interval.
### Reducing the number of stored samples
@ -131,7 +133,7 @@ See also [aggregating by labels](#aggregating-by-labels).
### Reducing the number of stored series
Sometimes apps may generate too many [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series).
Sometimes applications may generate too many [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series).
For example, the `http_requests_total` metric may have a `path` or `user` label with a very large number of unique values.
In this case the following stream aggregation can be used for reducing the number of metrics stored in VictoriaMetrics:
@ -156,7 +158,7 @@ See [the list of aggregate output](#aggregation-outputs), which can be specified
### Counting input samples
If the monitored app generates event-based metrics, then it may be useful to count the number of such metrics
If the monitored application generates event-based metrics, then it may be useful to count the number of such metrics
at stream aggregation level.
For example, if an advertising server generates `hits{some="labels"} 1` and `clicks{some="labels"} 1` metrics
@ -183,7 +185,7 @@ See also [aggregating by labels](#aggregating-by-labels).
### Summing input metrics
If the monitored app calulates some events and then sends the calculated number of events to VictoriaMetrics
If the monitored application calculates some events and then sends the calculated number of events to VictoriaMetrics
at irregular intervals or at too high frequency, then stream aggregation can be used for summing such events
and writing the aggregate sums to the storage at regular intervals.
@ -210,10 +212,10 @@ See also [aggregating by labels](#aggregating-by-labels).
### Quantiles over input metrics
If the monitored app generates measurement metrics per each request, then it may be useful to calculate
If the monitored application generates measurement metrics per each request, then it may be useful to calculate
the pre-defined set of [percentiles](https://en.wikipedia.org/wiki/Percentile) over these measurements.
For example, if the monitored app generates `request_duration_seconds N` and `response_size_bytes M` metrics
For example, if the monitored application generates `request_duration_seconds N` and `response_size_bytes M` metrics
per each incoming request, then the following [stream aggregation config](#stream-aggregation-config)
can be used for calculating 50th and 99th percentiles for these metrics every 30 seconds:
@ -238,10 +240,10 @@ See also [histograms over input metrics](#histograms-over-input-metrics) and [ag
### Histograms over input metrics
If the monitored app generates measurement metrics per each request, then it may be useful to calculate
If the monitored application generates measurement metrics per each request, then it may be useful to calculate
a [histogram](https://docs.victoriametrics.com/keyConcepts.html#histogram) over these metrics.
For example, if the monitored app generates `request_duration_seconds N` and `response_size_bytes M` metrics
For example, if the monitored application generates `request_duration_seconds N` and `response_size_bytes M` metrics
per each incoming request, then the following [stream aggregation config](#stream-aggregation-config)
can be used for calculating [VictoriaMetrics histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350)
for these metrics every 60 seconds:
@ -313,7 +315,7 @@ Output metric names for stream aggregation are constructed according to the foll
- `<output>` is the aggregate used for constucting the output metric. The aggregate name is taken from the `outputs` list
at the corresponding [stream aggregation config](#stream-aggregation-config).
Both input and ouput metric names can be modified if needed via relabeling according to [these docs](#relabeling).
Both input and output metric names can be modified if needed via relabeling according to [these docs](#relabeling).
## Relabeling
@ -334,35 +336,128 @@ For example, the following config removes the `:1m_sum_samples` suffix added [to
## Aggregation outputs
The following aggregation outputs can be put in the `outputs` list at [stream aggregation config](#stream-aggregation-config):
* `total` generates output [counter](https://docs.victoriametrics.com/keyConcepts.html#counter) by summing the input counters.
The `total` handler properly handles input counter resets.
The `total` handler returns garbage when something other than [counter](https://docs.victoriametrics.com/keyConcepts.html#counter) is passed to the input.
* `increase` returns the increase of input [counters](https://docs.victoriametrics.com/keyConcepts.html#counter).
The `increase` handler properly handles the input counter resets.
The `increase` handler returns garbage when something other than [counter](https://docs.victoriametrics.com/keyConcepts.html#counter) is passed to the input.
* `count_series` counts the number of unique [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series).
* `count_samples` counts the number of input [samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
* `sum_samples` sums input [sample values](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
* `last` returns the last input [sample value](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
* `min` returns the minimum input [sample value](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
* `max` returns the maximum input [sample value](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
* `avg` returns the average input [sample value](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
* `stddev` returns [standard deviation](https://en.wikipedia.org/wiki/Standard_deviation) for the input [sample values](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
* `stdvar` returns [standard variance](https://en.wikipedia.org/wiki/Variance) for the input [sample values](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
* `histogram_bucket` returns [VictoriaMetrics histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350)
for the input [sample values](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
* `quantiles(phi1, ..., phiN)` returns [percentiles](https://en.wikipedia.org/wiki/Percentile) for the given `phi*`
over the input [sample values](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
The `phi` must be in the range `[0..1]`, where `0` means `0th` percentile, while `1` means `100th` percentile.
The aggregations are calculated during the `interval` specified in the [config](#stream-aggregation-config)
and then sent to the storage.
If `by` and `without` lists are specified in the [config](#stream-aggregation-config),
then the [aggregation by labels](#aggregating-by-labels) is performed additionally to aggregation by `interval`.
Below are aggregation functions that can be put in the `outputs` list at [stream aggregation config](#stream-aggregation-config).
### total
`total` generates output [counter](https://docs.victoriametrics.com/keyConcepts.html#counter) by summing the input counters.
`total` only makes sense for aggregating [counter](https://docs.victoriametrics.com/keyConcepts.html#counter) type metrics.
The result of `total` is equal to the `sum(some_counter)` query.
For example, see the time series below, produced by a config with aggregation interval `1m` and `by: ["instance"]`, alongside the regular query:
<img alt="total aggregation" src="stream-aggregation-check-total.png">
### increase
`increase` returns the increase of input [counters](https://docs.victoriametrics.com/keyConcepts.html#counter).
`increase` only makes sense for aggregating [counter](https://docs.victoriametrics.com/keyConcepts.html#counter) type metrics.
The result of `increase` with an aggregation interval of `1m` is equal to the `increase(some_counter[1m])` query.
For example, see the time series below, produced by a config with aggregation interval `1m` and `by: ["instance"]`, alongside the regular query:
<img alt="increase aggregation" src="stream-aggregation-check-increase.png">
### count_series
`count_series` counts the number of unique [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series).
The result of `count_series` is equal to the `count(some_metric)` query.
### count_samples
`count_samples` counts the number of input [samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
The result of `count_samples` with an aggregation interval of `1m` is equal to the `count_over_time(some_metric[1m])` query.
### sum_samples
`sum_samples` sums input [sample values](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
The result of `sum_samples` with an aggregation interval of `1m` is equal to the `sum_over_time(some_metric[1m])` query.
For example, see the time series below, produced by a config with aggregation interval `1m`, alongside the regular query:
<img alt="sum_samples aggregation" src="stream-aggregation-check-sum-samples.png">
### last
`last` returns the last input [sample value](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
The result of `last` with an aggregation interval of `1m` is equal to the `last_over_time(some_metric[1m])` query.
This aggregation output doesn't make much sense with `by` lists specified in the [config](#stream-aggregation-config).
The result of aggregation by labels in this case is non-deterministic, since it depends on the order in which the time series are processed.
### min
`min` returns the minimum input [sample value](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
The result of `min` with an aggregation interval of `1m` is equal to the `min_over_time(some_metric[1m])` query.
For example, see the time series below, produced by a config with aggregation interval `1m`, alongside the regular query:
<img alt="min aggregation" src="stream-aggregation-check-min.png">
### max
`max` returns the maximum input [sample value](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
The result of `max` with an aggregation interval of `1m` is equal to the `max_over_time(some_metric[1m])` query.
For example, see the time series below, produced by a config with aggregation interval `1m`, alongside the regular query:
<img alt="max aggregation" src="stream-aggregation-check-max.png">
### avg
`avg` returns the average input [sample value](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
The result of `avg` with an aggregation interval of `1m` is equal to the `avg_over_time(some_metric[1m])` query.
For example, see the time series below, produced by a config with aggregation interval `1m` and `by: ["instance"]`, alongside the regular query:
<img alt="avg aggregation" src="stream-aggregation-check-avg.png">
### stddev
`stddev` returns [standard deviation](https://en.wikipedia.org/wiki/Standard_deviation) for the input [sample values](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
The result of `stddev` with an aggregation interval of `1m` is equal to the `stddev_over_time(some_metric[1m])` query.
### stdvar
`stdvar` returns [standard variance](https://en.wikipedia.org/wiki/Variance) for the input [sample values](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
The result of `stdvar` with an aggregation interval of `1m` is equal to the `stdvar_over_time(some_metric[1m])` query.
For example, see the time series below, produced by a config with aggregation interval `1m`, alongside the regular query:
<img alt="stdvar aggregation" src="stream-aggregation-check-stdvar.png">
### histogram_bucket
`histogram_bucket` returns [VictoriaMetrics histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350)
for the input [sample values](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
The result of `histogram_bucket` with an aggregation interval of `1m` is equal to the `histogram_over_time(some_histogram_bucket[1m])` query.
### quantiles
`quantiles(phi1, ..., phiN)` returns [percentiles](https://en.wikipedia.org/wiki/Percentile) for the given `phi*`
over the input [sample values](https://docs.victoriametrics.com/keyConcepts.html#raw-samples).
The `phi` must be in the range `[0..1]`, where `0` means `0th` percentile, while `1` means `100th` percentile.
The result of `quantiles(phi1, ..., phiN)` with an aggregation interval of `1m`
is equal to the `quantiles_over_time("quantile", phi1, ..., phiN, some_histogram_bucket[1m])` query.
## Aggregating by labels


@ -625,6 +625,10 @@ can read the same rules configuration as normal, evaluate them on the given time
results via remote write to the configured storage. vmalert supports any PromQL/MetricsQL compatible
data source for backfilling.
Please note that response caching may lead to unexpected results during and after the backfilling process.
To avoid this, reset the cache contents or disable caching when using backfilling,
as described in the [backfilling docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#backfilling).
See the blog post about [Rules backfilling via vmalert](https://victoriametrics.com/blog/rules-replay/).
### How it works


@ -792,6 +792,11 @@ Please note that each import request can load up to a single vCPU core on Victor
to allocated CPU resources of your VictoriaMetrics installation.
8. `vmctl` supports `--vm-native-src-headers` and `--vm-native-dst-headers`, which define headers to send with each request
to the corresponding source and destination addresses.
9. `vmctl` supports `--vm-native-disable-http-keep-alive`, which makes `vmctl` use non-persistent HTTP connections to avoid
the `use of closed network connection` error when running a long export.
10. Migrating data with a time range that overlaps data already present at the destination can produce duplicate series at the destination.
To avoid duplicates, set `-dedup.minScrapeInterval=1ms` for `vmselect` and `vmstorage` at the destination.
This will instruct `vmselect` and `vmstorage` to ignore duplicates with matching timestamps.
In this mode `vmctl` acts as a proxy between two VM instances, where time series filtering is done by "source" (`src`)
and processing is done by "destination" (`dst`). So no extra memory or CPU resources required on `vmctl` side. Only


@ -1184,7 +1184,7 @@ func (s *Storage) prefetchMetricNames(qt *querytracer.Tracer, srcMetricIDs []uin
}
}
})
if err != nil {
if err != nil && err != io.EOF {
return err
}
qt.Printf("pre-fetch metric names for %d metric ids", len(metricIDs))


@ -1,4 +1,4 @@
GO_VERSION ?=1.20.1
GO_VERSION ?=1.20.2
SNAP_BUILDER_IMAGE := local/snap-builder:2.0.0-$(shell echo $(GO_VERSION) | tr :/ __)