Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Commit 2ce4d04d8e: 90 changed files with 444 additions and 305 deletions

@@ -9,6 +9,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/csvimport"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/csvimport/stream"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -25,7 +26,7 @@ func InsertHandler(at *auth.Token, req *http.Request) error {
 	if err != nil {
 		return err
 	}
-	return parser.ParseStream(req, func(rows []parser.Row) error {
+	return stream.Parse(req, func(rows []parser.Row) error {
 		return insertRows(at, rows, extraLabels)
 	})
 }
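The same pattern repeats across all ingestion protocols below: the `ParseStream` entry point moves out of each `lib/protoparser/*` parser package into a dedicated `stream` sub-package (`stream.Parse`), while row types such as `parser.Row` stay where they were. A minimal, hedged sketch of a post-rename handler wired into an HTTP server; the handler, the `insertRows` stub and the port are illustrative assumptions, only the import paths and the `stream.Parse` signature come from the diff:

```go
package main

import (
	"log"
	"net/http"

	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/csvimport"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/csvimport/stream"
)

// insertRows stands in for the per-protocol ingestion routine from the diff;
// here it only counts rows so the sketch stays self-contained.
func insertRows(rows []parser.Row) error {
	log.Printf("ingesting %d rows", len(rows))
	return nil
}

// handler mirrors the post-rename shape of InsertHandler: stream.Parse reads
// the request body in blocks and invokes the callback for every batch of rows.
func handler(w http.ResponseWriter, req *http.Request) {
	if err := stream.Parse(req, func(rows []parser.Row) error {
		return insertRows(rows)
	}); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
	}
}

func main() {
	http.HandleFunc("/api/v1/import/csv", handler)
	log.Fatal(http.ListenAndServe(":8480", nil))
}
```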
@@ -9,6 +9,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadog"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadog/stream"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -28,7 +29,7 @@ func InsertHandlerForHTTP(at *auth.Token, req *http.Request) error {
 		return err
 	}
 	ce := req.Header.Get("Content-Encoding")
-	return parser.ParseStream(req.Body, ce, func(series []parser.Series) error {
+	return stream.Parse(req.Body, ce, func(series []parser.Series) error {
 		return insertRows(at, series, extraLabels)
 	})
 }

@@ -7,6 +7,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite/stream"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -19,7 +20,7 @@ var (
 //
 // See https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol
 func InsertHandler(r io.Reader) error {
-	return parser.ParseStream(r, insertRows)
+	return stream.Parse(r, insertRows)
 }

 func insertRows(rows []parser.Row) error {

@@ -15,6 +15,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/influx"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/influx/stream"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -36,7 +37,7 @@ var (
 //
 // See https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener/
 func InsertHandlerForReader(r io.Reader, isGzipped bool) error {
-	return parser.ParseStream(r, isGzipped, "", "", func(db string, rows []parser.Row) error {
+	return stream.Parse(r, isGzipped, "", "", func(db string, rows []parser.Row) error {
 		return insertRows(nil, db, rows, nil)
 	})
 }
@@ -54,7 +55,7 @@ func InsertHandlerForHTTP(at *auth.Token, req *http.Request) error {
 	precision := q.Get("precision")
 	// Read db tag from https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint
 	db := q.Get("db")
-	return parser.ParseStream(req.Body, isGzipped, precision, db, func(db string, rows []parser.Row) error {
+	return stream.Parse(req.Body, isGzipped, precision, db, func(db string, rows []parser.Row) error {
 		return insertRows(at, db, rows, extraLabels)
 	})
 }

@@ -10,7 +10,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
-	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/native"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/native/stream"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -30,12 +30,12 @@ func InsertHandler(at *auth.Token, req *http.Request) error {
 		return err
 	}
 	isGzip := req.Header.Get("Content-Encoding") == "gzip"
-	return parser.ParseStream(req.Body, isGzip, func(block *parser.Block) error {
+	return stream.Parse(req.Body, isGzip, func(block *stream.Block) error {
 		return insertRows(at, block, extraLabels)
 	})
 }

-func insertRows(at *auth.Token, block *parser.Block, extraLabels []prompbmarshal.Label) error {
+func insertRows(at *auth.Token, block *stream.Block, extraLabels []prompbmarshal.Label) error {
 	ctx := common.GetPushCtx()
 	defer common.PutPushCtx(ctx)

@@ -7,6 +7,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdb"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdb/stream"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -19,7 +20,7 @@ var (
 //
 // See http://opentsdb.net/docs/build/html/api_telnet/put.html
 func InsertHandler(r io.Reader) error {
-	return parser.ParseStream(r, insertRows)
+	return stream.Parse(r, insertRows)
 }

 func insertRows(rows []parser.Row) error {

@@ -9,6 +9,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdbhttp"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdbhttp/stream"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -24,7 +25,7 @@ func InsertHandler(at *auth.Token, req *http.Request) error {
 	if err != nil {
 		return err
 	}
-	return parser.ParseStream(req, func(rows []parser.Row) error {
+	return stream.Parse(req, func(rows []parser.Row) error {
 		return insertRows(at, rows, extraLabels)
 	})
 }

@@ -10,6 +10,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus/stream"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -31,7 +32,7 @@ func InsertHandler(at *auth.Token, req *http.Request) error {
 		return err
 	}
 	isGzipped := req.Header.Get("Content-Encoding") == "gzip"
-	return parser.ParseStream(req.Body, defaultTimestamp, isGzipped, func(rows []parser.Row) error {
+	return stream.Parse(req.Body, defaultTimestamp, isGzipped, func(rows []parser.Row) error {
 		return insertRows(at, rows, extraLabels)
 	}, func(s string) {
 		httpserver.LogError(req, s)

@@ -10,7 +10,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
-	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/promremotewrite"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/promremotewrite/stream"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -27,7 +27,7 @@ func InsertHandler(at *auth.Token, req *http.Request) error {
 	if err != nil {
 		return err
 	}
-	return parser.ParseStream(req.Body, func(tss []prompb.TimeSeries) error {
+	return stream.Parse(req.Body, func(tss []prompb.TimeSeries) error {
 		return insertRows(at, tss, extraLabels)
 	})
 }

@@ -11,6 +11,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/vmimport"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/vmimport/stream"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -30,7 +31,7 @@ func InsertHandler(at *auth.Token, req *http.Request) error {
 		return err
 	}
 	isGzipped := req.Header.Get("Content-Encoding") == "gzip"
-	return parser.ParseStream(req.Body, isGzipped, func(rows []parser.Row) error {
+	return stream.Parse(req.Body, isGzipped, func(rows []parser.Row) error {
 		return insertRows(at, rows, extraLabels)
 	})
 }
@@ -725,31 +725,42 @@ a review to the dashboard.
 
 ## Troubleshooting
 
-vmalert executes configured rules within certain intervals. It is expected that at the moment when rule is executed,
-the data is already present in configured `-datasource.url`:
+### Data delay
+
+Data delay is one of the most common issues with rules execution.
+vmalert executes configured rules within certain intervals at specific timestamps.
+It expects that the data is already present in the configured `-datasource.url` at the moment when the rule is executed:
 
 <img alt="vmalert expected evaluation" src="vmalert_ts_normal.gif">
 
 Usually, troubles start to appear when data in `-datasource.url` is delayed or absent. In such cases, evaluations
-may get empty response from datasource and produce empty recording rules or reset alerts state:
+may get an empty response from the datasource and produce empty recording rules or reset alerts state:
 
 <img alt="vmalert evaluation when data is delayed" src="vmalert_ts_data_delay.gif">
 
-By default, recently written samples to VictoriaMetrics aren't visible for queries for up to 30s
-(see `-search.latencyOffset` command-line flag at vmselect). Such delay is needed to eliminate risk of incomplete
-data on the moment of querying, since metrics collectors won't be able to deliver the data in time.
+By default, recently written samples to VictoriaMetrics aren't visible for queries for up to 30s.
+This behavior is controlled by the `-search.latencyOffset` command-line flag and the `latency_offset` query arg at `vmselect`.
+Usually, this results in a 30s shift for recording rule results.
+Note that a too small value passed to `-search.latencyOffset` or to the `latency_offset` query arg may lead to incomplete query results.
 
-Try the following recommendations in such cases:
+Try the following recommendations to reduce the chance of hitting the data delay issue
+(a worked config sketch follows the list):
 
-* Always configure group's `evaluationInterval` to be bigger or equal to `scrape_interval` at which metrics
-are delivered to the datasource;
+* Always configure group's `evaluationInterval` to be bigger than or at least equal to
+[time series resolution](https://docs.victoriametrics.com/keyConcepts.html#time-series-resolution);
+* Ensure that the `[duration]` value is at least twice bigger than
+[time series resolution](https://docs.victoriametrics.com/keyConcepts.html#time-series-resolution). For example,
+if the expression is `rate(my_metric[2m]) > 0`, then ensure that `my_metric` resolution is at least `1m`, or better `30s`.
+If you use VictoriaMetrics as the datasource, `[duration]` can be omitted and VictoriaMetrics will adjust it automatically.
 * If you know in advance that data in the datasource is delayed - try changing vmalert's `-datasource.lookback`
-command-line flag to add a time shift for evaluations;
-* If time intervals between datapoints in datasource are irregular or `>=5min` - try changing vmalert's
-`-datasource.queryStep` command-line flag to specify how far search query can lookback for the recent datapoint.
-The recommendation is to have the step at least two times bigger than `scrape_interval`, since
-there are no guarantees that scrape will not fail.
+command-line flag to add a time shift for evaluations, or extend `[duration]` to tolerate the delay.
+For example, `max_over_time(errors_total[10m]) > 0` will stay active even if there is no data in the datasource for the last `9m`.
+* If [time series resolution](https://docs.victoriametrics.com/keyConcepts.html#time-series-resolution)
+in the datasource is inconsistent or `>=5min` - try changing vmalert's `-datasource.queryStep` command-line flag to specify
+how far the search query can look back for the most recent datapoint. The recommendation is to have the step
+at least two times bigger than the resolution.
 
+> Please note, data delay is inevitable in distributed systems. It is better to account for it than to ignore it.
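To ground the recommendations above, here is a hedged sketch of a vmalert group that follows them. The group name, metric names and durations are illustrative assumptions, not values from this commit; the group-level `interval` field corresponds to the `evaluationInterval` discussed above:

```yaml
groups:
  - name: data-delay-tolerant
    # Keep the evaluation interval at or above the 30s series resolution.
    interval: 1m
    rules:
      - record: job:errors:rate5m
        # [duration] is 10x the resolution, so a single delayed scrape
        # does not produce an empty result.
        expr: rate(errors_total[5m])
      - alert: RecentErrors
        # max_over_time keeps the alert active across data delays of up to ~10m.
        expr: max_over_time(errors_total[10m]) > 0
```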
 
+### Alerts state
+
 Sometimes, it is not clear why some specific alert fired or didn't fire. It is very important to remember that
 alerts with `for: 0` fire immediately when their expression becomes true. And alerts with `for > 0` will fire only

@@ -772,6 +783,8 @@ HTTP request sent by vmalert to the `-datasource.url` during evaluation. If spec
 no samples returned and curl command returns data - then it is very likely there was no data in datasource on the
 moment when rule was evaluated.
 
+### Debug mode
+
 vmalert allows configuring more detailed logging for a specific alerting rule. Just set `debug: true` in the rule's configuration
 and vmalert will start printing additional log messages:
 ```terminal
@@ -41,7 +41,7 @@ type Alert struct {
 	LastSent time.Time
 	// Value stores the value returned from evaluating expression from Expr field
 	Value float64
-	// ID is the unique identifer for the Alert
+	// ID is the unique identifier for the Alert
 	ID uint64
 	// Restored is true if Alert was restored after restart
 	Restored bool

@@ -29,7 +29,7 @@ type Config struct {
 	// ConsulSDConfigs contains list of settings for service discovery via Consul
 	// see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config
 	ConsulSDConfigs []consul.SDConfig `yaml:"consul_sd_configs,omitempty"`
-	// DNSSDConfigs ontains list of settings for service discovery via DNS.
+	// DNSSDConfigs contains list of settings for service discovery via DNS.
 	// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config
 	DNSSDConfigs []dns.SDConfig `yaml:"dns_sd_configs,omitempty"`

@@ -225,7 +225,7 @@ func templateFuncs() textTpl.FuncMap {
 	"toLower": strings.ToLower,
 
 	// crlfEscape replaces '\n' and '\r' chars with `\\n` and `\\r`.
-	// This funcion is deprectated.
+	// This function is deprecated.
 	//
 	// It is better to use quotesEscape, jsonEscape, queryEscape or pathEscape instead -
 	// these functions properly escape `\n` and `\r` chars according to their purpose.
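For illustration, a hedged sketch of the preferred helpers inside alert annotations; the annotation fields and the grafana.example.com URL are assumptions, only the function names come from the comment above:

```yaml
annotations:
  # quotesEscape safely quotes a label value, escaping `\n`/`\r` along the way.
  summary: Too many errors on {{ $labels.instance | quotesEscape }}
  # queryEscape URL-encodes a string for embedding into a link.
  dashboard: https://grafana.example.com/d/app?var-job={{ $labels.job | queryEscape }}
```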
@@ -110,7 +110,7 @@ type SrcPath struct {
 	re *regexp.Regexp
 }
 
-// URLPrefix represents pased `url_prefix`
+// URLPrefix represents passed `url_prefix`
 type URLPrefix struct {
 	n uint32
 	bus []*backendURL

@@ -50,7 +50,7 @@ func normalizeURL(uOrig *url.URL) *url.URL {
 	// Prevent from attacks with using `..` in r.URL.Path
 	u.Path = path.Clean(u.Path)
 	if !strings.HasSuffix(u.Path, "/") && strings.HasSuffix(uOrig.Path, "/") {
-		// The path.Clean() removes traling slash.
+		// The path.Clean() removes trailing slash.
 		// Return it back if needed.
 		// This should fix https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1752
 		u.Path += "/"

@@ -20,7 +20,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
-	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/native"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/native/stream"
 )
 
 func main() {
@@ -247,7 +247,7 @@ func main() {
 		return cli.Exit(fmt.Errorf("cannot open exported block at path=%q err=%w", blockPath, err), 1)
 	}
 	var blocksCount uint64
-	if err := parser.ParseStream(f, isBlockGzipped, func(block *parser.Block) error {
+	if err := stream.Parse(f, isBlockGzipped, func(block *stream.Block) error {
 		atomic.AddUint64(&blocksCount, 1)
 		return nil
 	}); err != nil {
@@ -146,7 +146,7 @@ func (ctx *InsertCtx) FlushBufs() error {
 	}
 	// There is no need in limiting the number of concurrent calls to vmstorage.AddRows() here,
 	// since the number of concurrent FlushBufs() calls should be already limited via writeconcurrencylimiter
-	// used at every ParseStream() call under lib/protoparser/*/streamparser.go
+	// used at every stream.Parse() call under lib/protoparser/*
 	err := vmstorage.AddRows(ctx.mrs)
 	ctx.Reset(0)
 	if err == nil {
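The comment above relies on ingestion concurrency being capped upstream. A minimal sketch of that limiting pattern, a buffered-channel semaphore; this illustrates the idea only and is not the actual lib/writeconcurrencylimiter code (all names here are illustrative):

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

// concurrencyCh acts as a semaphore: each stream.Parse-style call must acquire
// a slot before doing work, so downstream writers see bounded concurrency.
var concurrencyCh = make(chan struct{}, runtime.GOMAXPROCS(0))

func doWithConcurrencyLimit(f func()) {
	concurrencyCh <- struct{}{}        // acquire a slot (blocks when all are busy)
	defer func() { <-concurrencyCh }() // release the slot
	f()
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 16; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			doWithConcurrencyLimit(func() { fmt.Println("parsing request", i) })
		}(i)
	}
	wg.Wait()
}
```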
@@ -8,6 +8,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/csvimport"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/csvimport/stream"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -22,7 +23,7 @@ func InsertHandler(req *http.Request) error {
 	if err != nil {
 		return err
 	}
-	return parser.ParseStream(req, func(rows []parser.Row) error {
+	return stream.Parse(req, func(rows []parser.Row) error {
 		return insertRows(rows, extraLabels)
 	})
 }

@@ -8,6 +8,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadog"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadog/stream"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -25,7 +26,7 @@ func InsertHandlerForHTTP(req *http.Request) error {
 		return err
 	}
 	ce := req.Header.Get("Content-Encoding")
-	return parser.ParseStream(req.Body, ce, func(series []parser.Series) error {
+	return stream.Parse(req.Body, ce, func(series []parser.Series) error {
 		return insertRows(series, extraLabels)
 	})
 }

@@ -6,6 +6,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite/stream"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -18,7 +19,7 @@ var (
 //
 // See https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol
 func InsertHandler(r io.Reader) error {
-	return parser.ParseStream(r, insertRows)
+	return stream.Parse(r, insertRows)
 }

 func insertRows(rows []parser.Row) error {

@@ -14,6 +14,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/influx"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/influx/stream"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -34,7 +35,7 @@ var (
 //
 // See https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener/
 func InsertHandlerForReader(r io.Reader) error {
-	return parser.ParseStream(r, false, "", "", func(db string, rows []parser.Row) error {
+	return stream.Parse(r, false, "", "", func(db string, rows []parser.Row) error {
 		return insertRows(db, rows, nil)
 	})
 }
@@ -52,7 +53,7 @@ func InsertHandlerForHTTP(req *http.Request) error {
 	precision := q.Get("precision")
 	// Read db tag from https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint
 	db := q.Get("db")
-	return parser.ParseStream(req.Body, isGzipped, precision, db, func(db string, rows []parser.Row) error {
+	return stream.Parse(req.Body, isGzipped, precision, db, func(db string, rows []parser.Row) error {
 		return insertRows(db, rows, extraLabels)
 	})
 }

@@ -10,7 +10,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
-	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/native"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/native/stream"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -27,12 +27,12 @@ func InsertHandler(req *http.Request) error {
 		return err
 	}
 	isGzip := req.Header.Get("Content-Encoding") == "gzip"
-	return parser.ParseStream(req.Body, isGzip, func(block *parser.Block) error {
+	return stream.Parse(req.Body, isGzip, func(block *stream.Block) error {
 		return insertRows(block, extraLabels)
 	})
 }

-func insertRows(block *parser.Block, extraLabels []prompbmarshal.Label) error {
+func insertRows(block *stream.Block, extraLabels []prompbmarshal.Label) error {
 	ctx := getPushCtx()
 	defer putPushCtx(ctx)

@@ -6,6 +6,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdb"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdb/stream"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -18,7 +19,7 @@ var (
 //
 // See http://opentsdb.net/docs/build/html/api_telnet/put.html
 func InsertHandler(r io.Reader) error {
-	return parser.ParseStream(r, insertRows)
+	return stream.Parse(r, insertRows)
 }

 func insertRows(rows []parser.Row) error {

@@ -9,6 +9,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdbhttp"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdbhttp/stream"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -27,7 +28,7 @@ func InsertHandler(req *http.Request) error {
 	if err != nil {
 		return err
 	}
-	return parser.ParseStream(req, func(rows []parser.Row) error {
+	return stream.Parse(req, func(rows []parser.Row) error {
 		return insertRows(rows, extraLabels)
 	})
 	default:

@@ -9,6 +9,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus/stream"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -28,7 +29,7 @@ func InsertHandler(req *http.Request) error {
 		return err
 	}
 	isGzipped := req.Header.Get("Content-Encoding") == "gzip"
-	return parser.ParseStream(req.Body, defaultTimestamp, isGzipped, func(rows []parser.Row) error {
+	return stream.Parse(req.Body, defaultTimestamp, isGzipped, func(rows []parser.Row) error {
 		return insertRows(rows, extraLabels)
 	}, func(s string) {
 		httpserver.LogError(req, s)

@@ -21,7 +21,7 @@ func Push(wr *prompbmarshal.WriteRequest) {
 
 	tss := wr.Timeseries
 	for len(tss) > 0 {
-		// Process big tss in smaller blocks in order to reduce maxmimum memory usage
+		// Process big tss in smaller blocks in order to reduce maximum memory usage
 		samplesCount := 0
 		i := 0
 		for i < len(tss) {

@@ -8,7 +8,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
-	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/promremotewrite"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/promremotewrite/stream"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -23,7 +23,7 @@ func InsertHandler(req *http.Request) error {
 	if err != nil {
 		return err
 	}
-	return parser.ParseStream(req.Body, func(tss []prompb.TimeSeries) error {
+	return stream.Parse(req.Body, func(tss []prompb.TimeSeries) error {
 		return insertRows(tss, extraLabels)
 	})
 }

@@ -11,6 +11,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
 	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/vmimport"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/vmimport/stream"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 	"github.com/VictoriaMetrics/metrics"
 )
@@ -29,7 +30,7 @@ func InsertHandler(req *http.Request) error {
 		return err
 	}
 	isGzipped := req.Header.Get("Content-Encoding") == "gzip"
-	return parser.ParseStream(req.Body, isGzipped, func(rows []parser.Row) error {
+	return stream.Parse(req.Body, isGzipped, func(rows []parser.Row) error {
 		return insertRows(rows, extraLabels)
 	})
 }
@@ -164,7 +164,7 @@ func (p *parser) parseString() (*StringExpr, error) {
 	return se, nil
 }
 
-// StringExpr represents string contant.
+// StringExpr represents string constant.
 type StringExpr struct {
 	// S contains unquoted string contents.
 	S string

@@ -194,7 +194,7 @@ func getTmpResult() *result {
 func putTmpResult(r *result) {
 	currentTime := fasttime.UnixTimestamp()
 	if cap(r.rs.Values) > 1024*1024 && 4*len(r.rs.Values) < cap(r.rs.Values) && currentTime-r.lastResetTime > 10 {
-		// Reset r.rs in order to preseve memory usage after processing big time series with millions of rows.
+		// Reset r.rs in order to preserve memory usage after processing big time series with millions of rows.
 		r.rs = Result{}
 		r.lastResetTime = currentTime
 	}
@@ -1015,7 +1015,7 @@ func TagValueSuffixes(qt *querytracer.Tracer, tr storage.TimeRange, tagKey, tagV
 
 // TSDBStatus returns tsdb status according to https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats
 //
-// It accepts aribtrary filters on time series in sq.
+// It accepts arbitrary filters on time series in sq.
 func TSDBStatus(qt *querytracer.Tracer, sq *storage.SearchQuery, focusLabel string, topN int, deadline searchutils.Deadline) (*storage.TSDBStatus, error) {
 	qt = qt.NewChild("get tsdb stats: %s, focusLabel=%q, topN=%d", sq, focusLabel, topN)
 	defer qt.Done()

@@ -901,7 +901,7 @@ func quantileSorted(phi float64, values []float64) float64 {
 func aggrFuncMAD(tss []*timeseries) []*timeseries {
 	// Calculate medians for each point across tss.
 	medians := getPerPointMedians(tss)
-	// Calculate MAD values multipled by tolerance for each point across tss.
+	// Calculate MAD values multiplied by tolerance for each point across tss.
 	// See https://en.wikipedia.org/wiki/Median_absolute_deviation
 	mads := getPerPointMADs(tss, medians)
 	tss[0].Values = append(tss[0].Values[:0], mads...)
@@ -920,7 +920,7 @@ func aggrFuncOutliersMAD(afa *aggrFuncArg) ([]*timeseries, error) {
 	afe := func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
 		// Calculate medians for each point across tss.
 		medians := getPerPointMedians(tss)
-		// Calculate MAD values multipled by tolerance for each point across tss.
+		// Calculate MAD values multiplied by tolerance for each point across tss.
 		// See https://en.wikipedia.org/wiki/Median_absolute_deviation
 		mads := getPerPointMADs(tss, medians)
 		for n := range mads {
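For reference, the median absolute deviation these comments compute is, per the linked Wikipedia definition:

```latex
\mathrm{MAD}(t) = \mathrm{median}_i \left| v_i(t) - \mathrm{median}_j\, v_j(t) \right|
```

where v_i(t) is the value of the i-th input series at timestamp t. The outliers aggregation then flags series whose deviation from the per-point median at t exceeds tolerance times MAD(t), which is why the code pre-multiplies the MAD values by the tolerance.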
@@ -466,7 +466,7 @@ func execBinaryOpArgs(qt *querytracer.Tracer, ec *EvalConfig, exprFirst, exprSec
 // 1) execute the exprFirst
 // 2) get common label filters for series returned at step 1
 // 3) push down the found common label filters to exprSecond. This filters out unneeded series
-//    during exprSecond exection instead of spending compute resources on extracting and processing these series
+//    during exprSecond execution instead of spending compute resources on extracting and processing these series
 //    before they are dropped later when matching time series according to https://prometheus.io/docs/prometheus/latest/querying/operators/#vector-matching
 // 4) execute the exprSecond with possible additional filters found at step 3
 //

@@ -385,7 +385,7 @@ func getRollupFunc(funcName string) newRollupFunc {
 }
 
 type rollupFuncArg struct {
-	// The value preceeding values if it fits staleness interval.
+	// The value preceding values if it fits staleness interval.
 	prevValue float64
 
 	// The timestamp for prevValue.
@@ -397,7 +397,7 @@ type rollupFuncArg struct {
 	// Timestamps for values.
 	timestamps []int64
 
-	// Real value preceeding values without restrictions on staleness interval.
+	// Real value preceding values without restrictions on staleness interval.
 	realPrevValue float64
 
 	// Real value which goes after values.
@@ -587,7 +587,7 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
 	if window <= 0 {
 		window = rc.Step
 		if rc.MayAdjustWindow && window < maxPrevInterval {
-			// Adjust lookbehind window only if it isn't set explicilty, e.g. rate(foo).
+			// Adjust lookbehind window only if it isn't set explicitly, e.g. rate(foo).
 			// In the case of missing lookbehind window it should be adjusted in order to return non-empty graph
 			// when the window doesn't cover at least two raw samples (this is what most users expect).
 			//
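A condensed, hedged sketch of the adjustment this comment describes; the function and parameter names are simplified assumptions, with `maxPrevInterval` approximating the largest gap between recent raw samples:

```go
// adjustWindow widens an implicit lookbehind window (e.g. rate(foo) without
// an explicit [d]) so it covers at least two raw samples; explicitly set
// windows are left untouched.
func adjustWindow(window, step, maxPrevInterval int64, mayAdjust bool) int64 {
	if window <= 0 {
		window = step
		if mayAdjust && window < maxPrevInterval {
			window = maxPrevInterval
		}
	}
	return window
}
```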

@@ -25,6 +25,7 @@ The following tip changes can be tested by building VictoriaMetrics components f
 * FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): show `median` instead of `avg` in graph tooltip and line legend, since `median` is more tolerant against spikes. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3706).
 
 * BUGFIX: prevent from possible data ingestion slowdown and query performance slowdown during [background merges of big parts](https://docs.victoriametrics.com/#storage) on systems with small number of CPU cores (1 or 2 CPU cores). The issue has been introduced in [v1.85.0](https://docs.victoriametrics.com/CHANGELOG.html#v1850) when implementing [this feature](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3337). See also [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3790).
+* BUGFIX: properly parse timestamps in milliseconds when [ingesting data via OpenTSDB telnet put protocol](https://docs.victoriametrics.com/#sending-data-via-telnet-put-protocol). Previously timestamps in milliseconds were mistakenly multiplied by 1000. Thanks to @Droxenator for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3810).
 
 ## [v1.87.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.87.1)
@@ -79,6 +79,30 @@ requests_total{path="/", code="200"} 123 4567890
 - The `4567890` is an optional timestamp for the sample. If it is missing,
 then the current timestamp is used when storing the sample in VictoriaMetrics.
 
+#### Time series resolution
+
+Resolution is the minimum interval between [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples)
+of a [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series). Consider the following example:
+```
+----------------------------------------------------------------------
+|                <time series>               | <value> | <timestamp> |
+| requests_total{path="/health", code="200"} |    1    |  1676297640 |
+| requests_total{path="/health", code="200"} |    2    |  1676297670 |
+| requests_total{path="/health", code="200"} |    3    |  1676297700 |
+| requests_total{path="/health", code="200"} |    4    |  1676297730 |
+....
+```
+Here we have a time series `requests_total{path="/health", code="200"}` whose value is updated every `30s`.
+This means its resolution is also `30s`.
+
+> In terms of the [pull model](https://docs.victoriametrics.com/keyConcepts.html#pull-model), resolution is equal
+> to `scrape_interval` and is controlled by the monitoring system (server).
+> For the [push model](https://docs.victoriametrics.com/keyConcepts.html#push-model), resolution is the interval between
+> sample timestamps and is controlled by the client (metrics collector).
+
+Try to keep time series resolution consistent, since some [MetricsQL](#metricsql) functions may expect it to be so.
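+If you are unsure what resolution a series actually has, it can be estimated directly; a hedged example, assuming the MetricsQL `scrape_interval()` rollup function, which estimates the average interval between raw samples:
+```
+scrape_interval(requests_total{path="/health", code="200"}[1h])
+```
+For the series above this should return values close to `30s`.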
 
 ### Types of metrics
 
 Internally, VictoriaMetrics does not have the notion of a metric type. The concept of a metric
@@ -4,37 +4,19 @@ sort: 28
 
 # Managed VictoriaMetrics
 
-VictoriaMetrics is a fast and easy-to-use monitoring solution and time series database.
-It integrates well with existing monitoring systems such as Grafana, Prometheus, Graphite,
-InfluxDB, OpenTSDB and DataDog - see [these docs](https://docs.victoriametrics.com/#how-to-import-time-series-data) for details.
-
-The most common use cases for VictoriaMetrics are:
-* Long-term remote storage for Prometheus;
-* More efficient drop-in replacement for Prometheus and Graphite
-* Replacement for InfluxDB and OpenTSDB, which uses lower amounts of RAM, CPU and disk;
-* Cost-efficient alternative for DataDog.
-
-We are glad to announce the availability of Managed VictoriaMetrics
-at AWS Marketplace - [try it right now](https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc)!
-
-Managed VictoriaMetrics allows users running VictoriaMetrics at AWS without the need to perform typical
-DevOps tasks such as proper configuration, monitoring, logs collection, access protection, software updates,
-backups, etc.
-
-We run Managed VictoriaMetrics instances in our environment at AWS while providing easy-to-use endpoints
-for data ingestion and querying. And the VictoriaMetrics team takes care of optimal configuration and software
-maintenance.
-
-Managed VictoriaMetrics comes with the following features:
-
-* It can be used as a Managed Prometheus - just configure Prometheus or vmagent to write data to Managed VictoriaMetrics and then use the provided endpoint as a Prometheus datasource in Grafana;
-* Every Managed VictoriaMetrics instance runs in an isolated environment, so instances cannot interfere with each other;
-* Managed VictoriaMetrics instance can be scaled up or scaled down in a few clicks;
-* Automated backups;
-* Pay only for the actually used compute resources - instance type, disk and network.
-
-See more about Managed VictoriaMetrics in the following articles:
-* [Managed VictoriaMetrics announcement](https://victoriametrics.com/blog/managed-victoriametrics-announcement)
-* [Pricing comparison for Managed Prometheus](https://victoriametrics.com/blog/managed-prometheus-pricing/)
-* [Monitoring Proxmox VE via Managed VictoriaMetrics and vmagent](https://victoriametrics.com/blog/proxmox-monitoring-with-dbaas/)
+Product information:
+
+* [Overview of Managed VictoriaMetrics](/managed-victoriametrics/overview.html)
+* [User Management](/managed-victoriametrics/user-managment.html)
+
+Guides:
+
+* [Quick Start](/managed-victoriametrics/quickstart.html)
+* [Kubernetes Monitoring with Managed VictoriaMetrics](/managed-victoriametrics/how-to-monitor-k8s.html)
+* [Understand Your Setup Size](/guides/understand-your-setup-size.html)

@@ -1,3 +1,12 @@
+---
+sort: 3
+weight: 3
+title: Kubernetes Monitoring with Managed VictoriaMetrics
+menu:
+  docs:
+    parent: "managed"
+    weight: 3
+---
 # Kubernetes Monitoring with Managed VictoriaMetrics
 
 Monitoring a kubernetes cluster is necessary to build SLO/SLI, to analyze performance and cost-efficiency of your workloads.

docs/managed-victoriametrics/overview.md (new file, 40 lines)

@@ -0,0 +1,40 @@
+---
+sort: 1
+weight: 1
+title: Overview of Managed VictoriaMetrics
+menu:
+  docs:
+    parent: "managed"
+    weight: 1
+---
+
+# Overview of Managed VictoriaMetrics
+
+VictoriaMetrics is a fast and easy-to-use monitoring solution and time series database.
+It integrates well with existing monitoring systems such as Grafana, Prometheus, Graphite,
+InfluxDB, OpenTSDB and DataDog - see [these docs](https://docs.victoriametrics.com/#how-to-import-time-series-data) for details.
+
+The most common use cases for VictoriaMetrics are:
+* Long-term remote storage for Prometheus;
+* More efficient drop-in replacement for Prometheus and Graphite;
+* Replacement for InfluxDB and OpenTSDB, which uses lower amounts of RAM, CPU and disk;
+* Cost-efficient alternative for DataDog.
+
+We are glad to announce the availability of Managed VictoriaMetrics
+at AWS Marketplace - [try it right now](https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc)!
+
+Managed VictoriaMetrics allows users to run VictoriaMetrics at AWS without the need to perform typical
+DevOps tasks such as proper configuration, monitoring, logs collection, access protection, software updates,
+backups, etc.
+
+We run Managed VictoriaMetrics instances in our environment at AWS while providing easy-to-use endpoints
+for data ingestion and querying. The VictoriaMetrics team takes care of optimal configuration and software
+maintenance.
+
+Managed VictoriaMetrics comes with the following features:
+
+* It can be used as a Managed Prometheus - just configure Prometheus or vmagent to write data to Managed VictoriaMetrics and then use the provided endpoint as a Prometheus datasource in Grafana;
+* Every Managed VictoriaMetrics instance runs in an isolated environment, so instances cannot interfere with each other;
+* A Managed VictoriaMetrics instance can be scaled up or scaled down in a few clicks;
+* Automated backups;
+* Pay only for the compute resources actually used - instance type, disk and network.

@@ -1,3 +1,12 @@
+---
+sort: 2
+weight: 2
+title: Quick Start
+menu:
+  docs:
+    parent: "managed"
+    weight: 2
+---
 # Quick Start
 
 Managed VictoriaMetrics is a database-as-a-service platform where users can run the VictoriaMetrics

@@ -1,3 +1,12 @@
+---
+sort: 4
+weight: 4
+title: User Management in Managed VictoriaMetrics
+menu:
+  docs:
+    parent: "managed"
+    weight: 4
+---
 # User Management in Managed VictoriaMetrics
 
 The user management system enables admins to control user access and to onboard and offboard users to Managed VictoriaMetrics. It organizes users according to their needs and roles.
@@ -729,31 +729,42 @@ a review to the dashboard.
 
 ## Troubleshooting
 
-vmalert executes configured rules within certain intervals. It is expected that at the moment when rule is executed,
-the data is already present in configured `-datasource.url`:
+### Data delay
+
+Data delay is one of the most common issues with rules execution.
+vmalert executes configured rules within certain intervals at specific timestamps.
+It expects that the data is already present in the configured `-datasource.url` at the moment when the rule is executed:
 
 <img alt="vmalert expected evaluation" src="vmalert_ts_normal.gif">
 
 Usually, troubles start to appear when data in `-datasource.url` is delayed or absent. In such cases, evaluations
-may get empty response from datasource and produce empty recording rules or reset alerts state:
+may get an empty response from the datasource and produce empty recording rules or reset alerts state:
 
 <img alt="vmalert evaluation when data is delayed" src="vmalert_ts_data_delay.gif">
 
-By default, recently written samples to VictoriaMetrics aren't visible for queries for up to 30s
-(see `-search.latencyOffset` command-line flag at vmselect). Such delay is needed to eliminate risk of incomplete
-data on the moment of querying, since metrics collectors won't be able to deliver the data in time.
+By default, recently written samples to VictoriaMetrics aren't visible for queries for up to 30s.
+This behavior is controlled by the `-search.latencyOffset` command-line flag and the `latency_offset` query arg at `vmselect`.
+Usually, this results in a 30s shift for recording rule results.
+Note that a too small value passed to `-search.latencyOffset` or to the `latency_offset` query arg may lead to incomplete query results.
 
-Try the following recommendations in such cases:
+Try the following recommendations to reduce the chance of hitting the data delay issue:
 
-* Always configure group's `evaluationInterval` to be bigger or equal to `scrape_interval` at which metrics
-are delivered to the datasource;
+* Always configure group's `evaluationInterval` to be bigger than or at least equal to
+[time series resolution](https://docs.victoriametrics.com/keyConcepts.html#time-series-resolution);
+* Ensure that the `[duration]` value is at least twice bigger than
+[time series resolution](https://docs.victoriametrics.com/keyConcepts.html#time-series-resolution). For example,
+if the expression is `rate(my_metric[2m]) > 0`, then ensure that `my_metric` resolution is at least `1m`, or better `30s`.
+If you use VictoriaMetrics as the datasource, `[duration]` can be omitted and VictoriaMetrics will adjust it automatically.
 * If you know in advance that data in the datasource is delayed - try changing vmalert's `-datasource.lookback`
-command-line flag to add a time shift for evaluations;
-* If time intervals between datapoints in datasource are irregular or `>=5min` - try changing vmalert's
-`-datasource.queryStep` command-line flag to specify how far search query can lookback for the recent datapoint.
-The recommendation is to have the step at least two times bigger than `scrape_interval`, since
-there are no guarantees that scrape will not fail.
+command-line flag to add a time shift for evaluations, or extend `[duration]` to tolerate the delay.
+For example, `max_over_time(errors_total[10m]) > 0` will stay active even if there is no data in the datasource for the last `9m`.
+* If [time series resolution](https://docs.victoriametrics.com/keyConcepts.html#time-series-resolution)
+in the datasource is inconsistent or `>=5min` - try changing vmalert's `-datasource.queryStep` command-line flag to specify
+how far the search query can look back for the most recent datapoint. The recommendation is to have the step
+at least two times bigger than the resolution.
 
+> Please note, data delay is inevitable in distributed systems. It is better to account for it than to ignore it.
 
+### Alerts state
+
 Sometimes, it is not clear why some specific alert fired or didn't fire. It is very important to remember that
 alerts with `for: 0` fire immediately when their expression becomes true. And alerts with `for > 0` will fire only

@@ -776,6 +787,8 @@ HTTP request sent by vmalert to the `-datasource.url` during evaluation. If spec
 no samples returned and curl command returns data - then it is very likely there was no data in datasource on the
 moment when rule was evaluated.
 
+### Debug mode
+
 vmalert allows configuring more detailed logging for a specific alerting rule. Just set `debug: true` in the rule's configuration
 and vmalert will start printing additional log messages:
 ```terminal
@@ -239,7 +239,7 @@ func (cfg *Config) getAPICredentials() (*credentials, error) {
 }
 
 // getECSRoleCredentialsByPath makes request to ecs metadata service
-// and retrieves instances credentails
+// and retrieves instances credentials
 // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html
 func getECSRoleCredentialsByPath(client *http.Client, path string) (*credentials, error) {
 	resp, err := client.Get(path)
@@ -329,7 +329,7 @@ func getMetadataByPath(client *http.Client, apiPath string) ([]byte, error) {
 	return readResponseBody(resp, apiURL)
 }
 
-// getRoleWebIdentityCredentials obtains credentials fo the given roleARN with webToken.
+// getRoleWebIdentityCredentials obtains credentials for the given roleARN with webToken.
 // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html
 // aws IRSA for kubernetes.
 // https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/
@@ -365,7 +365,7 @@ func (cfg *Config) getSTSAPIResponse(action string, reqBuilder func(apiURL strin
 	return readResponseBody(resp, apiURL)
 }
 
-// getRoleARNCredentials obtains credentials fo the given roleARN.
+// getRoleARNCredentials obtains credentials for the given roleARN.
 func (cfg *Config) getRoleARNCredentials(creds *credentials) (*credentials, error) {
 	data, err := cfg.getSTSAPIResponse("AssumeRole", func(apiURL string) (*http.Request, error) {
 		return newSignedGetRequest(apiURL, "sts", cfg.region, creds)

@@ -309,7 +309,7 @@ func (fs *FS) CreateFile(filePath string, data []byte) error {
 	return nil
 }
 
-// HasFile returns ture if filePath exists at fs.
+// HasFile returns true if filePath exists at fs.
 func (fs *FS) HasFile(filePath string) (bool, error) {
 	path := fs.Dir + filePath

@@ -250,7 +250,7 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
 		return false, nil
 	}
 	// Use os.RemoveAll() instead of os.Remove(), since the dir may contain special files such as flock.lock and restore-in-progress,
-	// which must be ingored.
+	// which must be ignored.
 	if err := os.RemoveAll(dir); err != nil {
 		return false, fmt.Errorf("cannot remove %q: %w", dir, err)
 	}

@@ -33,7 +33,7 @@ type FS struct {
 	// Directory in the bucket to write to.
 	Dir string
 
-	// Set for using S3-compatible enpoint such as MinIO etc.
+	// Set for using S3-compatible endpoint such as MinIO etc.
 	CustomEndpoint string
 
 	// Force to use path style for s3, true by default.

@@ -40,7 +40,7 @@ func WriteFileAndSync(path string, data []byte) error {
 	}
 	if _, err := f.Write(data); err != nil {
 		f.MustClose()
-		// Do not call MustRemoveAll(path), so the user could inpsect
+		// Do not call MustRemoveAll(path), so the user could inspect
 		// the file contents during investigation of the issue.
 		return fmt.Errorf("cannot write %d bytes to %q: %w", len(data), path, err)
 	}

@@ -20,7 +20,7 @@ func sysTotalMemory() int {
 	}
 	mem := cgroup.GetMemoryLimit()
 	if mem <= 0 || int64(int(mem)) != mem || int(mem) > totalMem {
-		// Try reading hierachical memory limit.
+		// Try reading hierarchical memory limit.
 		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/699
 		mem = cgroup.GetHierarchicalMemoryLimit()
 		if mem <= 0 || int64(int(mem)) != mem || int(mem) > totalMem {

@@ -157,7 +157,7 @@ func commonPrefixLen(a, b []byte) int {
 
 // Add adds x to the end of ib.
 //
-// false is returned if x isn't added to ib due to block size contraints.
+// false is returned if x isn't added to ib due to block size constraints.
 func (ib *inmemoryBlock) Add(x []byte) bool {
 	data := ib.data
 	if len(x)+len(data) > maxInmemoryBlockSize {

@@ -1665,7 +1665,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix, txnPath string) error {
 			srcPath, dstPath)
 	}
 
-	// Flush pathPrefix directory metadata to the underying storage.
+	// Flush pathPrefix directory metadata to the underlying storage.
 	fs.MustSyncPath(pathPrefix)
 
 	pendingTxnDeletionsWG.Add(1)

@@ -89,7 +89,7 @@ func TestParseProxyProtocolFail(t *testing.T) {
 	// unsupported command
 	f([]byte{0x0D, 0x0A, 0x0D, 0x0A, 0x00, 0x0D, 0x0A, 0x51, 0x55, 0x49, 0x54, 0x0A, 0x22, 0x11, 0x00, 0x0C,
 		0x7F, 0x00, 0x00, 0x01, 0, 0, 0, 0, 0, 80, 0, 0})
-	// mimatch ipv6 and ipv4
+	// mismatch ipv6 and ipv4
 	f([]byte{0x0D, 0x0A, 0x0D, 0x0A, 0x00, 0x0D, 0x0A, 0x51, 0x55, 0x49, 0x54, 0x0A, 0x21, 0x21, 0x00, 0x0C,
 		// ip data srcid,dstip,srcport
 		0x7F, 0x00, 0x00, 0x01, 0, 0, 0, 0, 0, 80, 0, 0})
@ -76,7 +76,7 @@ func appendGraphiteMatchTemplateParts(dst []string, s string) []string {
|
|||
// Match matches s against gmt.
|
||||
//
|
||||
// On success it adds matched captures to dst and returns it with true.
|
||||
// Of failre it returns false.
|
||||
// On failure it returns false.
|
||||
func (gmt *graphiteMatchTemplate) Match(dst []string, s string) ([]string, bool) {
|
||||
dst = append(dst, s)
|
||||
parts := gmt.parts
|
||||
|
|
|
@ -39,7 +39,7 @@ type client struct {
|
|||
// hc is the default client optimized for common case of scraping targets with moderate number of metrics.
|
||||
hc *fasthttp.HostClient
|
||||
|
||||
// sc (aka `stream client`) is used instead of hc if ScrapeWork.ParseStream is set.
|
||||
// sc (aka `stream client`) is used instead of hc if ScrapeWork.StreamParse is set.
|
||||
// It may be useful for scraping targets with millions of metrics per target.
|
||||
sc *http.Client
|
||||
|
||||
|
|
|
@ -119,7 +119,7 @@ func getToken(token *promauth.Secret) (string, error) {
|
|||
return string(data), nil
|
||||
}
|
||||
t := os.Getenv("CONSUL_HTTP_TOKEN")
|
||||
// Allow empty token - it shouls work if authorization is disabled in Consul
|
||||
// Allow empty token - it should work if authorization is disabled in Consul
|
||||
return t, nil
|
||||
}
|
||||
|
||||
|
@ -145,7 +145,7 @@ func maxWaitTime() time.Duration {
|
|||
// Consul adds random delay up to wait/16, so reduce the timeout in order to keep it below BlockingClientReadTimeout.
|
||||
// See https://www.consul.io/api-docs/features/blocking
|
||||
d -= d / 8
|
||||
// The timeout cannot exceed 10 minuntes. See https://www.consul.io/api-docs/features/blocking
|
||||
// The timeout cannot exceed 10 minutes. See https://www.consul.io/api-docs/features/blocking
|
||||
if d > 10*time.Minute {
|
||||
d = 10 * time.Minute
|
||||
}
|
||||
|
@ -155,7 +155,7 @@ func maxWaitTime() time.Duration {
|
|||
return d
|
||||
}
|
||||
|
||||
// getBlockingAPIResponse perfoms blocking request to Consul via client and returns response.
|
||||
// getBlockingAPIResponse performs blocking request to Consul via client and returns response.
|
||||
//
|
||||
// See https://www.consul.io/api-docs/features/blocking .
|
||||
func getBlockingAPIResponse(ctx context.Context, client *discoveryutils.Client, path string, index int64) ([]byte, int64, error) {
|
||||
|
|
|
@ -150,7 +150,7 @@ func addTasksLabels(tasks []task, nodesLabels, servicesLabels []*promutils.Label
|
|||
return ms
|
||||
}
|
||||
|
||||
// addLabels adds lables from src to dst if they contain the given `key: value` pair.
|
||||
// addLabels adds labels from src to dst if they contain the given `key: value` pair.
|
||||
func addLabels(dst *promutils.Labels, src []*promutils.Labels, key, value string) {
|
||||
for _, m := range src {
|
||||
if m.Get(key) != value {
|
||||
|
|
|
@ -22,7 +22,7 @@ type apiConfig struct {
|
|||
parseErrors *metrics.Counter
|
||||
}
|
||||
|
||||
// httpGroupTarget respresent prometheus GroupTarget
|
||||
// httpGroupTarget represent prometheus GroupTarget
|
||||
// https://prometheus.io/docs/prometheus/latest/http_sd/
|
||||
type httpGroupTarget struct {
|
||||
Targets []string `json:"targets"`
|
||||
|
|
|
@ -408,7 +408,7 @@ func (gw *groupWatcher) doRequest(requestURL string) (*http.Response, error) {
|
|||
requestURL = strings.Replace(requestURL, "/apis/networking.k8s.io/v1/", "/apis/networking.k8s.io/v1beta1/", 1)
|
||||
}
|
||||
if strings.Contains(requestURL, "/apis/discovery.k8s.io/v1/") && atomic.LoadUint32(&gw.useDiscoveryV1Beta1) == 1 {
|
||||
// Update discovery URL for old Kuberentes API, which supports only v1beta1 path.
|
||||
// Update discovery URL for old Kubernetes API, which supports only v1beta1 path.
|
||||
requestURL = strings.Replace(requestURL, "/apis/discovery.k8s.io/v1/", "/apis/discovery.k8s.io/v1beta1/", 1)
|
||||
}
|
||||
req, err := http.NewRequest("GET", requestURL, nil)
|
||||
|
|
|
@ -82,7 +82,7 @@ type NodeDaemonEndpoints struct {
|
|||
KubeletEndpoint DaemonEndpoint
|
||||
}
|
||||
|
||||
// getTargetLabels returs labels for the given n.
|
||||
// getTargetLabels returns labels for the given n.
|
||||
//
|
||||
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#node
|
||||
func (n *Node) getTargetLabels(gw *groupWatcher) []*promutils.Labels {
|
||||
|
|
|
@ -104,7 +104,7 @@ func maxWaitTime() time.Duration {
// Nomad adds random delay up to wait/16, so reduce the timeout in order to keep it below BlockingClientReadTimeout.
// See https://developer.hashicorp.com/nomad/api-docs#blocking-queries
d -= d / 16
// The timeout cannot exceed 10 minuntes. See https://developer.hashicorp.com/nomad/api-docs#blocking-queries
// The timeout cannot exceed 10 minutes. See https://developer.hashicorp.com/nomad/api-docs#blocking-queries

if d > 10*time.Minute {
d = 10 * time.Minute
}

@ -115,7 +115,7 @@ func maxWaitTime() time.Duration {
return d
}

// getBlockingAPIResponse perfoms blocking request to Nomad via client and returns response.
// getBlockingAPIResponse performs blocking request to Nomad via client and returns response.
// See https://developer.hashicorp.com/nomad/api-docs#blocking-queries .
func getBlockingAPIResponse(ctx context.Context, client *discoveryutils.Client, path string, index int64) ([]byte, int64, error) {
path += "&index=" + strconv.FormatInt(index, 10)

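Both the Consul and Nomad variants of getBlockingAPIResponse implement the blocking-query contract referenced in the comments: the caller passes the last seen index, and the server holds the request open until the watched data changes or maxWaitTime expires. A hypothetical polling loop over that contract; blockingFetch is a stand-in for the real client call, not the package's API:

package main

import (
	"context"
	"fmt"
	"time"
)

// blockingFetch stands in for getBlockingAPIResponse: it returns the payload
// plus the new index to pass on the next call.
type blockingFetch func(ctx context.Context, path string, index int64) ([]byte, int64, error)

func watch(ctx context.Context, fetch blockingFetch, path string) error {
	var index int64
	for {
		if err := ctx.Err(); err != nil {
			return err // stop when the caller cancels the watch
		}
		data, newIndex, err := fetch(ctx, path, index)
		if err != nil {
			return err
		}
		fmt.Printf("update: %d bytes, index=%d\n", len(data), newIndex)
		index = newIndex // the next request blocks until the index moves past this value
	}
}

func main() {
	// Fake server that "changes" once per call, for demonstration only.
	fetch := func(ctx context.Context, path string, index int64) ([]byte, int64, error) {
		time.Sleep(100 * time.Millisecond)
		return []byte("payload"), index + 1, nil
	}
	ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
	defer cancel()
	_ = watch(ctx, fetch, "/v1/catalog/services?wait=8m45s")
}
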
@ -26,6 +26,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus/stream"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
"github.com/VictoriaMetrics/metrics"

@ -575,7 +576,7 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
if err == nil {
bodyString = bytesutil.ToUnsafeString(sbr.body)
areIdenticalSeries = sw.areIdenticalSeries(lastScrape, bodyString)
err = parser.ParseStream(&sbr, scrapeTimestamp, false, func(rows []parser.Row) error {
err = stream.Parse(&sbr, scrapeTimestamp, false, func(rows []parser.Row) error {
mu.Lock()
defer mu.Unlock()
samplesScraped += len(rows)

@ -796,7 +797,7 @@ func (sw *scrapeWork) sendStaleSeries(lastScrape, currScrape string, timestamp i
// and https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3675
var mu sync.Mutex
br := bytes.NewBufferString(bodyString)
err := parser.ParseStream(br, timestamp, false, func(rows []parser.Row) error {
err := stream.Parse(br, timestamp, false, func(rows []parser.Row) error {
mu.Lock()
defer mu.Unlock()
for i := range rows {

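The mu.Lock()/defer mu.Unlock() pairs in both hunks exist because stream.Parse may invoke the callback from several unmarshal workers at once (its doc comment, shown further below, says as much). A self-contained sketch of that contract with a fake parser; the row type and worker count are purely illustrative:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var (
		mu             sync.Mutex
		samplesScraped int
	)
	// Mirrors the callbacks above: shared counters need the lock because
	// the parser may run the callback concurrently.
	callback := func(rows []int) error {
		mu.Lock()
		defer mu.Unlock()
		samplesScraped += len(rows)
		return nil
	}

	// Simulate the parser calling the callback from multiple workers.
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = callback(make([]int, 100))
		}()
	}
	wg.Wait()
	fmt.Println(samplesScraped) // always 400 thanks to the mutex
}
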
@ -49,7 +49,7 @@ func (x *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error {
return nil
}

// MarshalJSON returns JSON respresentation for x.
// MarshalJSON returns JSON representation for x.
func (x *Labels) MarshalJSON() ([]byte, error) {
m := x.ToMap()
return json.Marshal(m)

@ -235,7 +235,7 @@ func (x *Labels) RemoveDuplicates() {

// RemoveMetaLabels removes all the `__meta_` labels from x.
//
// See https://www.robustperception.io/life-of-a-label fo details.
// See https://www.robustperception.io/life-of-a-label for details.
func (x *Labels) RemoveMetaLabels() {
src := x.Labels
dst := x.Labels[:0]

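The src/dst := x.Labels[:0] lines in RemoveMetaLabels are Go's allocation-free filter-in-place idiom: the kept elements are appended over the backing array being read. A standalone sketch of the same idiom, assuming labels are simple name/value pairs rather than the real *promutils.Labels type:

package main

import (
	"fmt"
	"strings"
)

type Label struct{ Name, Value string }

// removeMetaLabels keeps only labels whose name lacks the __meta_ prefix,
// reusing the backing array of src instead of allocating a new slice.
func removeMetaLabels(src []Label) []Label {
	dst := src[:0]
	for _, lb := range src {
		if !strings.HasPrefix(lb.Name, "__meta_") {
			dst = append(dst, lb)
		}
	}
	return dst
}

func main() {
	labels := []Label{{"__meta_kubernetes_pod", "p"}, {"job", "node"}}
	fmt.Println(removeMetaLabels(labels)) // [{job node}]
}
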
@ -40,7 +40,7 @@ func StartUnmarshalWorkers() {

// StopUnmarshalWorkers stops unmarshal workers.
//
// No more calles to ScheduleUnmarshalWork are allowed after calling stopUnmarshalWorkers
// No more calls to ScheduleUnmarshalWork are allowed after calling stopUnmarshalWorkers
func StopUnmarshalWorkers() {
close(unmarshalWorkCh)
unmarshalWorkersWG.Wait()

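StopUnmarshalWorkers shows the canonical Go worker-pool shutdown: close the work channel so each worker's range loop drains and exits, then wait on the WaitGroup. A self-contained sketch with illustrative names, not the package's real internals beyond what the hunk shows:

package main

import (
	"fmt"
	"sync"
)

var (
	workCh = make(chan int, 16)
	wg     sync.WaitGroup
)

func startWorkers(n int) {
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for w := range workCh { // exits once workCh is closed and drained
				fmt.Println("processed", w)
			}
		}()
	}
}

func stopWorkers() {
	close(workCh) // no more sends are allowed after this point
	wg.Wait()     // block until every queued item has been processed
}

func main() {
	startWorkers(2)
	for i := 0; i < 4; i++ {
		workCh <- i
	}
	stopWorkers()
}
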
@ -1,4 +1,4 @@
package csvimport
package stream

import (
"bufio"

@ -12,6 +12,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/csvimport"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
)

@ -21,19 +22,19 @@ var (
"Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data")
)

// ParseStream parses csv from req and calls callback for the parsed rows.
// Parse parses csv from req and calls callback for the parsed rows.
//
// The callback can be called concurrently multiple times for streamed data from req.
//
// callback shouldn't hold rows after returning.
func ParseStream(req *http.Request, callback func(rows []Row) error) error {
func Parse(req *http.Request, callback func(rows []csvimport.Row) error) error {
wcr := writeconcurrencylimiter.GetReader(req.Body)
defer writeconcurrencylimiter.PutReader(wcr)
r := io.Reader(wcr)

q := req.URL.Query()
format := q.Get("format")
cds, err := ParseColumnDescriptors(format)
cds, err := csvimport.ParseColumnDescriptors(format)
if err != nil {
return fmt.Errorf("cannot parse the provided csv format: %w", err)
}

@ -149,10 +150,10 @@ var streamContextPool sync.Pool
var streamContextPoolCh = make(chan *streamContext, cgroup.AvailableCPUs())

type unmarshalWork struct {
rows Rows
rows csvimport.Rows
ctx *streamContext
callback func(rows []Row) error
cds []ColumnDescriptor
callback func(rows []csvimport.Row) error
cds []csvimport.ColumnDescriptor
reqBuf []byte
}

@ -164,7 +165,7 @@ func (uw *unmarshalWork) reset() {
uw.reqBuf = uw.reqBuf[:0]
}

func (uw *unmarshalWork) runCallback(rows []Row) {
func (uw *unmarshalWork) runCallback(rows []csvimport.Row) {
ctx := uw.ctx
if err := uw.callback(rows); err != nil {
ctx.callbackErrLock.Lock()

@ -1,4 +1,4 @@
package datadog
package stream

import (
"bufio"

@ -13,6 +13,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/datadog"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
)

@ -30,10 +31,10 @@ var (
"https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics")
)

// ParseStream parses DataDog POST request for /api/v1/series from reader and calls callback for the parsed request.
// Parse parses DataDog POST request for /api/v1/series from reader and calls callback for the parsed request.
//
// callback shouldn't hold series after returning.
func ParseStream(r io.Reader, contentEncoding string, callback func(series []Series) error) error {
func Parse(r io.Reader, contentEncoding string, callback func(series []datadog.Series) error) error {
wcr := writeconcurrencylimiter.GetReader(r)
defer writeconcurrencylimiter.PutReader(wcr)
r = wcr

@ -143,21 +144,21 @@ func putPushCtx(ctx *pushCtx) {
var pushCtxPool sync.Pool
var pushCtxPoolCh = make(chan *pushCtx, cgroup.AvailableCPUs())

func getRequest() *Request {
func getRequest() *datadog.Request {
v := requestPool.Get()
if v == nil {
return &Request{}
return &datadog.Request{}
}
return v.(*Request)
return v.(*datadog.Request)
}

func putRequest(req *Request) {
func putRequest(req *datadog.Request) {
requestPool.Put(req)
}

var requestPool sync.Pool

// sanitizeName performs DataDog-compatible santizing for metric names
// sanitizeName performs DataDog-compatible sanitizing for metric names
//
// See https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics
func sanitizeName(name string) string {

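getRequest/putRequest above is the usual typed wrapper around sync.Pool. A standalone sketch of the pattern with a stand-in Request type; the reset step in putRequest is a common safety measure and an assumption here, not something the hunk shows:

package main

import (
	"fmt"
	"sync"
)

type Request struct{ Series []string }

var requestPool sync.Pool

func getRequest() *Request {
	v := requestPool.Get()
	if v == nil {
		return &Request{} // pool is empty: allocate a fresh value
	}
	return v.(*Request)
}

func putRequest(req *Request) {
	req.Series = req.Series[:0] // assumed reset so no data leaks between users
	requestPool.Put(req)
}

func main() {
	req := getRequest()
	req.Series = append(req.Series, "cpu.user")
	fmt.Println(len(req.Series)) // 1
	putRequest(req)              // req must not be used after this point
}
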
@ -1,4 +1,4 @@
package datadog
package stream

import (
"testing"

@ -2,10 +2,7 @@ package graphite

import (
"reflect"
"strings"
"testing"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
)

func TestUnmarshalMetricAndTagsFailure(t *testing.T) {

@ -383,71 +380,3 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
}},
})
}

func Test_streamContext_Read(t *testing.T) {
f := func(s string, rowsExpected *Rows) {
t.Helper()
ctx := getStreamContext(strings.NewReader(s))
if !ctx.Read() {
t.Fatalf("expecting successful read")
}
uw := getUnmarshalWork()
callbackCalls := 0
uw.ctx = ctx
uw.callback = func(rows []Row) error {
callbackCalls++
if len(rows) != len(rowsExpected.Rows) {
t.Fatalf("different len of expected rows;\ngot\n%+v;\nwant\n%+v", rows, rowsExpected.Rows)
}
if !reflect.DeepEqual(rows, rowsExpected.Rows) {
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows, rowsExpected.Rows)
}
return nil
}
uw.reqBuf = append(uw.reqBuf[:0], ctx.reqBuf...)
ctx.wg.Add(1)
uw.Unmarshal()
if callbackCalls != 1 {
t.Fatalf("unexpected number of callback calls; got %d; want 1", callbackCalls)
}
}

// Full line without tags
f("aaa 1123 345", &Rows{
Rows: []Row{{
Metric: "aaa",
Value: 1123,
Timestamp: 345 * 1000,
}},
})
// Full line with tags
f("aaa;x=y 1123 345", &Rows{
Rows: []Row{{
Metric: "aaa",
Tags: []Tag{{
Key: "x",
Value: "y",
}},
Value: 1123,
Timestamp: 345 * 1000,
}},
})
// missing timestamp.
// Note that this test may be flaky due to timing issues. TODO: fix it
f("aaa 1123", &Rows{
Rows: []Row{{
Metric: "aaa",
Value: 1123,
Timestamp: int64(fasttime.UnixTimestamp()) * 1000,
}},
})
// -1 timestamp. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/610
// Note that this test may be flaky due to timing issues. TODO: fix it.
f("aaa 1123 -1", &Rows{
Rows: []Row{{
Metric: "aaa",
Value: 1123,
Timestamp: int64(fasttime.UnixTimestamp()) * 1000,
}},
})
}

@ -1,4 +1,4 @@
package graphite
package stream

import (
"bufio"

@ -12,6 +12,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
)

@ -21,12 +22,12 @@ var (
"Minimum practical duration is 1s. Higher duration (i.e. 1m) may be used for reducing disk space usage for timestamp data")
)

// ParseStream parses Graphite lines from r and calls callback for the parsed rows.
// Parse parses Graphite lines from r and calls callback for the parsed rows.
//
// The callback can be called concurrently multiple times for streamed data from r.
//
// callback shouldn't hold rows after returning.
func ParseStream(r io.Reader, callback func(rows []Row) error) error {
func Parse(r io.Reader, callback func(rows []graphite.Row) error) error {
wcr := writeconcurrencylimiter.GetReader(r)
defer writeconcurrencylimiter.PutReader(wcr)
r = wcr

@ -135,9 +136,9 @@ var streamContextPool sync.Pool
var streamContextPoolCh = make(chan *streamContext, cgroup.AvailableCPUs())

type unmarshalWork struct {
rows Rows
rows graphite.Rows
ctx *streamContext
callback func(rows []Row) error
callback func(rows []graphite.Row) error
reqBuf []byte
}

@ -148,7 +149,7 @@ func (uw *unmarshalWork) reset() {
uw.reqBuf = uw.reqBuf[:0]
}

func (uw *unmarshalWork) runCallback(rows []Row) {
func (uw *unmarshalWork) runCallback(rows []graphite.Row) {
ctx := uw.ctx
if err := uw.callback(rows); err != nil {
ctx.callbackErrLock.Lock()

78	lib/protoparser/graphite/stream/streamparser_test.go	Normal file

@ -0,0 +1,78 @@
package stream

import (
"reflect"
"strings"
"testing"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite"
)

func Test_streamContext_Read(t *testing.T) {
f := func(s string, rowsExpected *graphite.Rows) {
t.Helper()
ctx := getStreamContext(strings.NewReader(s))
if !ctx.Read() {
t.Fatalf("expecting successful read")
}
uw := getUnmarshalWork()
callbackCalls := 0
uw.ctx = ctx
uw.callback = func(rows []graphite.Row) error {
callbackCalls++
if len(rows) != len(rowsExpected.Rows) {
t.Fatalf("different len of expected rows;\ngot\n%+v;\nwant\n%+v", rows, rowsExpected.Rows)
}
if !reflect.DeepEqual(rows, rowsExpected.Rows) {
t.Fatalf("unexpected rows;\ngot\n%+v;\nwant\n%+v", rows, rowsExpected.Rows)
}
return nil
}
uw.reqBuf = append(uw.reqBuf[:0], ctx.reqBuf...)
ctx.wg.Add(1)
uw.Unmarshal()
if callbackCalls != 1 {
t.Fatalf("unexpected number of callback calls; got %d; want 1", callbackCalls)
}
}

// Full line without tags
f("aaa 1123 345", &graphite.Rows{
Rows: []graphite.Row{{
Metric: "aaa",
Value: 1123,
Timestamp: 345 * 1000,
}},
})
// Full line with tags
f("aaa;x=y 1123 345", &graphite.Rows{
Rows: []graphite.Row{{
Metric: "aaa",
Tags: []graphite.Tag{{
Key: "x",
Value: "y",
}},
Value: 1123,
Timestamp: 345 * 1000,
}},
})
// missing timestamp.
// Note that this test may be flaky due to timing issues. TODO: fix it
f("aaa 1123", &graphite.Rows{
Rows: []graphite.Row{{
Metric: "aaa",
Value: 1123,
Timestamp: int64(fasttime.UnixTimestamp()) * 1000,
}},
})
// -1 timestamp. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/610
// Note that this test may be flaky due to timing issues. TODO: fix it.
f("aaa 1123 -1", &graphite.Rows{
Rows: []graphite.Row{{
Metric: "aaa",
Value: 1123,
Timestamp: int64(fasttime.UnixTimestamp()) * 1000,
}},
})
}

@ -1,4 +1,4 @@
package influx
package stream

import (
"bufio"

@ -12,6 +12,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/influx"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
)

@ -22,12 +23,12 @@ var (
"Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data")
)

// ParseStream parses r with the given args and calls callback for the parsed rows.
// Parse parses r with the given args and calls callback for the parsed rows.
//
// The callback can be called concurrently multiple times for streamed data from r.
//
// callback shouldn't hold rows after returning.
func ParseStream(r io.Reader, isGzipped bool, precision, db string, callback func(db string, rows []Row) error) error {
func Parse(r io.Reader, isGzipped bool, precision, db string, callback func(db string, rows []influx.Row) error) error {
wcr := writeconcurrencylimiter.GetReader(r)
defer writeconcurrencylimiter.PutReader(wcr)
r = wcr

@ -162,9 +163,9 @@ var streamContextPool sync.Pool
var streamContextPoolCh = make(chan *streamContext, cgroup.AvailableCPUs())

type unmarshalWork struct {
rows Rows
rows influx.Rows
ctx *streamContext
callback func(db string, rows []Row) error
callback func(db string, rows []influx.Row) error
db string
tsMultiplier int64
reqBuf []byte

@ -179,7 +180,7 @@ func (uw *unmarshalWork) reset() {
uw.reqBuf = uw.reqBuf[:0]
}

func (uw *unmarshalWork) runCallback(rows []Row) {
func (uw *unmarshalWork) runCallback(rows []influx.Row) {
ctx := uw.ctx
if err := uw.callback(uw.db, rows); err != nil {
ctx.callbackErrLock.Lock()

@ -1,4 +1,4 @@
package influx
package stream

import (
"testing"

@ -1,4 +1,4 @@
package native
package stream

import (
"bufio"

@ -14,12 +14,12 @@ import (
"github.com/VictoriaMetrics/metrics"
)

// ParseStream parses /api/v1/import/native lines from req and calls callback for parsed blocks.
// Parse parses /api/v1/import/native lines from req and calls callback for parsed blocks.
//
// The callback can be called concurrently multiple times for streamed data from r.
//
// callback shouldn't hold block after returning.
func ParseStream(r io.Reader, isGzip bool, callback func(block *Block) error) error {
func Parse(r io.Reader, isGzip bool, callback func(block *Block) error) error {
wcr := writeconcurrencylimiter.GetReader(r)
defer writeconcurrencylimiter.PutReader(wcr)
r = wcr

@ -1,4 +1,4 @@
package opentsdb
package stream

import (
"bufio"

@ -12,6 +12,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdb"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
)

@ -21,12 +22,12 @@ var (
"Minimum practical duration is 1s. Higher duration (i.e. 1m) may be used for reducing disk space usage for timestamp data")
)

// ParseStream parses OpenTSDB lines from r and calls callback for the parsed rows.
// Parse parses OpenTSDB lines from r and calls callback for the parsed rows.
//
// The callback can be called concurrently multiple times for streamed data from r.
//
// callback shouldn't hold rows after returning.
func ParseStream(r io.Reader, callback func(rows []Row) error) error {
func Parse(r io.Reader, callback func(rows []opentsdb.Row) error) error {
wcr := writeconcurrencylimiter.GetReader(r)
defer writeconcurrencylimiter.PutReader(wcr)
r = wcr

@ -134,9 +135,9 @@ var streamContextPool sync.Pool
var streamContextPoolCh = make(chan *streamContext, cgroup.AvailableCPUs())

type unmarshalWork struct {
rows Rows
rows opentsdb.Rows
ctx *streamContext
callback func(rows []Row) error
callback func(rows []opentsdb.Row) error
reqBuf []byte
}

@ -147,7 +148,7 @@ func (uw *unmarshalWork) reset() {
uw.reqBuf = uw.reqBuf[:0]
}

func (uw *unmarshalWork) runCallback(rows []Row) {
func (uw *unmarshalWork) runCallback(rows []opentsdb.Row) {
ctx := uw.ctx
if err := uw.callback(rows); err != nil {
ctx.callbackErrLock.Lock()

@ -174,9 +175,13 @@ func (uw *unmarshalWork) Unmarshal() {
}
}

// Convert timestamps from seconds to milliseconds
// Convert timestamps in seconds to milliseconds if needed.
// See http://opentsdb.net/docs/javadoc/net/opentsdb/core/Const.html#SECOND_MASK
for i := range rows {
rows[i].Timestamp *= 1e3
r := &rows[i]
if r.Timestamp&secondMask == 0 {
r.Timestamp *= 1e3
}
}

// Trim timestamps if required.

@ -191,6 +196,8 @@ func (uw *unmarshalWork) Unmarshal() {
putUnmarshalWork(uw)
}

const secondMask int64 = 0x7FFFFFFF00000000

func getUnmarshalWork() *unmarshalWork {
v := unmarshalWorkPool.Get()
if v == nil {

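The secondMask check is easiest to see with concrete numbers: second-precision Unix timestamps fit in the low 31 bits, so ANDing with 0x7FFFFFFF00000000 yields zero and the value gets upscaled to milliseconds, while millisecond timestamps already set bits above bit 31 and pass through unchanged. A small worked example:

package main

import "fmt"

const secondMask int64 = 0x7FFFFFFF00000000

func normalizeTimestamp(ts int64) int64 {
	if ts&secondMask == 0 {
		return ts * 1e3 // second precision -> milliseconds
	}
	return ts // already millisecond precision
}

func main() {
	fmt.Println(normalizeTimestamp(1700000000))    // 1700000000000
	fmt.Println(normalizeTimestamp(1700000000000)) // unchanged
}
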
@ -4,17 +4,17 @@ import (
"github.com/valyala/fastjson"
)

// getJSONParser returns JSON parser.
// GetJSONParser returns JSON parser.
//
// The parser must be returned to the pool via putJSONParser when no longer needed.
func getJSONParser() *fastjson.Parser {
// The parser must be returned to the pool via PutJSONParser when no longer needed.
func GetJSONParser() *fastjson.Parser {
return parserPool.Get()
}

// putJSONParser returns p to the pool.
// PutJSONParser returns p to the pool.
//
// p cannot be used after returning to the pool.
func putJSONParser(p *fastjson.Parser) {
func PutJSONParser(p *fastjson.Parser) {
parserPool.Put(p)
}

@ -9,8 +9,8 @@ func TestRowsUnmarshalFailure(t *testing.T) {
f := func(s string) {
t.Helper()
var rows Rows
p := getJSONParser()
defer putJSONParser(p)
p := GetJSONParser()
defer PutJSONParser(p)
v, err := p.Parse(s)
if err != nil {
// Expected JSON parser error

@ -84,8 +84,8 @@ func TestRowsUnmarshalSuccess(t *testing.T) {
t.Helper()
var rows Rows

p := getJSONParser()
defer putJSONParser(p)
p := GetJSONParser()
defer PutJSONParser(p)
v, err := p.Parse(s)
if err != nil {
t.Fatalf("cannot parse json %s: %s", s, err)

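getJSONParser/putJSONParser are exported here (GetJSONParser/PutJSONParser) so the new stream subpackage can borrow parsers from the shared pool, as the next file shows. A minimal sketch of the underlying usage, assuming parserPool is a fastjson.ParserPool:

package main

import (
	"fmt"

	"github.com/valyala/fastjson"
)

var parserPool fastjson.ParserPool

func main() {
	p := parserPool.Get()
	defer parserPool.Put(p) // p cannot be used after returning to the pool

	v, err := p.Parse(`{"metric":"sys.cpu.user","value":42.5}`)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.GetFloat64("value")) // 42.5
}
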
@ -1,4 +1,4 @@
package opentsdbhttp
package stream

import (
"bufio"

@ -14,6 +14,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdbhttp"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
)

@ -24,12 +25,12 @@ var (
"Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data")
)

// ParseStream parses OpenTSDB http lines from req and calls callback for the parsed rows.
// Parse parses OpenTSDB http lines from req and calls callback for the parsed rows.
//
// The callback can be called concurrently multiple times for streamed data from req.
//
// callback shouldn't hold rows after returning.
func ParseStream(req *http.Request, callback func(rows []Row) error) error {
func Parse(req *http.Request, callback func(rows []opentsdbhttp.Row) error) error {
wcr := writeconcurrencylimiter.GetReader(req.Body)
defer writeconcurrencylimiter.PutReader(wcr)
r := io.Reader(req.Body)

@ -62,8 +63,8 @@ func ParseStream(req *http.Request, callback func(rows []Row) error) error {

// Process the request synchronously, since there is no sense in processing a single request asynchronously.
// Sync code is easier to read and understand.
p := getJSONParser()
defer putJSONParser(p)
p := opentsdbhttp.GetJSONParser()
defer opentsdbhttp.PutJSONParser(p)
v, err := p.ParseBytes(ctx.reqBuf.B)
if err != nil {
unmarshalErrors.Inc()

@ -155,15 +156,15 @@ func putStreamContext(ctx *streamContext) {
var streamContextPool sync.Pool
var streamContextPoolCh = make(chan *streamContext, cgroup.AvailableCPUs())

func getRows() *Rows {
func getRows() *opentsdbhttp.Rows {
v := rowsPool.Get()
if v == nil {
return &Rows{}
return &opentsdbhttp.Rows{}
}
return v.(*Rows)
return v.(*opentsdbhttp.Rows)
}

func putRows(rs *Rows) {
func putRows(rs *opentsdbhttp.Rows) {
rs.Reset()
rowsPool.Put(rs)
}

@ -1,4 +1,4 @@
package prometheus
package stream

import (
"bufio"

@ -10,16 +10,17 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
)

// ParseStream parses lines with Prometheus exposition format from r and calls callback for the parsed rows.
// Parse parses lines with Prometheus exposition format from r and calls callback for the parsed rows.
//
// The callback can be called concurrently multiple times for streamed data from r.
//
// callback shouldn't hold rows after returning.
func ParseStream(r io.Reader, defaultTimestamp int64, isGzipped bool, callback func(rows []Row) error, errLogger func(string)) error {
func Parse(r io.Reader, defaultTimestamp int64, isGzipped bool, callback func(rows []prometheus.Row) error, errLogger func(string)) error {
wcr := writeconcurrencylimiter.GetReader(r)
defer writeconcurrencylimiter.PutReader(wcr)
r = wcr

@ -137,9 +138,9 @@ var streamContextPool sync.Pool
var streamContextPoolCh = make(chan *streamContext, cgroup.AvailableCPUs())

type unmarshalWork struct {
rows Rows
rows prometheus.Rows
ctx *streamContext
callback func(rows []Row) error
callback func(rows []prometheus.Row) error
errLogger func(string)
defaultTimestamp int64
reqBuf []byte

@ -154,7 +155,7 @@ func (uw *unmarshalWork) reset() {
uw.reqBuf = uw.reqBuf[:0]
}

func (uw *unmarshalWork) runCallback(rows []Row) {
func (uw *unmarshalWork) runCallback(rows []prometheus.Row) {
ctx := uw.ctx
if err := uw.callback(rows); err != nil {
ctx.callbackErrLock.Lock()

@ -1,4 +1,4 @@
package prometheus
package stream

import (
"bytes"

@ -10,20 +10,21 @@ import (
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
)

func TestParseStream(t *testing.T) {
func TestParse(t *testing.T) {
common.StartUnmarshalWorkers()
defer common.StopUnmarshalWorkers()

const defaultTimestamp = 123
f := func(s string, rowsExpected []Row) {
f := func(s string, rowsExpected []prometheus.Row) {
t.Helper()
bb := bytes.NewBufferString(s)
var result []Row
var result []prometheus.Row
var lock sync.Mutex
doneCh := make(chan struct{})
err := ParseStream(bb, defaultTimestamp, false, func(rows []Row) error {
err := Parse(bb, defaultTimestamp, false, func(rows []prometheus.Row) error {
lock.Lock()
result = appendRowCopies(result, rows)
if len(result) == len(rowsExpected) {

@ -56,7 +57,7 @@ func TestParseStream(t *testing.T) {
}
result = nil
doneCh = make(chan struct{})
err = ParseStream(bb, defaultTimestamp, true, func(rows []Row) error {
err = Parse(bb, defaultTimestamp, true, func(rows []prometheus.Row) error {
lock.Lock()
result = appendRowCopies(result, rows)
if len(result) == len(rowsExpected) {

@ -79,12 +80,12 @@ func TestParseStream(t *testing.T) {
}
}

f("foo 123 456", []Row{{
f("foo 123 456", []prometheus.Row{{
Metric: "foo",
Value: 123,
Timestamp: 456000,
}})
f(`foo{bar="baz"} 1 2`+"\n"+`aaa{} 3 4`, []Row{
f(`foo{bar="baz"} 1 2`+"\n"+`aaa{} 3 4`, []prometheus.Row{
{
Metric: "aaa",
Value: 3,

@ -92,7 +93,7 @@ func TestParseStream(t *testing.T) {
},
{
Metric: "foo",
Tags: []Tag{{
Tags: []prometheus.Tag{{
Key: "bar",
Value: "baz",
}},

@ -100,29 +101,29 @@ func TestParseStream(t *testing.T) {
Timestamp: 2000,
},
})
f("foo 23", []Row{{
f("foo 23", []prometheus.Row{{
Metric: "foo",
Value: 23,
Timestamp: defaultTimestamp,
}})
}

func sortRows(rows []Row) {
func sortRows(rows []prometheus.Row) {
sort.Slice(rows, func(i, j int) bool {
a, b := rows[i], rows[j]
return a.Metric < b.Metric
})
}

func appendRowCopies(dst, src []Row) []Row {
func appendRowCopies(dst, src []prometheus.Row) []prometheus.Row {
for _, r := range src {
// Make a copy of r, since r may contain garbage after returning from the callback to ParseStream.
var rCopy Row
// Make a copy of r, since r may contain garbage after returning from the callback to Parse.
var rCopy prometheus.Row
rCopy.Metric = copyString(r.Metric)
rCopy.Value = r.Value
rCopy.Timestamp = r.Timestamp
for _, tag := range r.Tags {
rCopy.Tags = append(rCopy.Tags, Tag{
rCopy.Tags = append(rCopy.Tags, prometheus.Tag{
Key: copyString(tag.Key),
Value: copyString(tag.Value),
})

@ -1,4 +1,4 @@
package promremotewrite
package stream

import (
"bufio"

@ -18,10 +18,10 @@ import (

var maxInsertRequestSize = flagutil.NewBytes("maxInsertRequestSize", 32*1024*1024, "The maximum size in bytes of a single Prometheus remote_write API request")

// ParseStream parses Prometheus remote_write message from reader and calls callback for the parsed timeseries.
// Parse parses Prometheus remote_write message from reader and calls callback for the parsed timeseries.
//
// callback shouldn't hold tss after returning.
func ParseStream(r io.Reader, callback func(tss []prompb.TimeSeries) error) error {
func Parse(r io.Reader, callback func(tss []prompb.TimeSeries) error) error {
wcr := writeconcurrencylimiter.GetReader(r)
defer writeconcurrencylimiter.PutReader(wcr)
r = wcr

@ -32,7 +32,7 @@ func ParseStream(r io.Reader, callback func(tss []prompb.TimeSeries) error) erro
return err
}

// Synchronously process the request in order to properly return errors to ParseStream caller,
// Synchronously process the request in order to properly return errors to Parse caller,
// so it could properly return HTTP 503 status code in response.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/896
bb := bodyBufferPool.Get()

@ -1,4 +1,4 @@
package vmimport
package stream

import (
"bufio"

@ -10,6 +10,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/vmimport"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
"github.com/VictoriaMetrics/metrics"
)

@ -17,12 +18,12 @@ import (
var maxLineLen = flagutil.NewBytes("import.maxLineLen", 100*1024*1024, "The maximum length in bytes of a single line accepted by /api/v1/import; "+
"the line length can be limited with 'max_rows_per_line' query arg passed to /api/v1/export")

// ParseStream parses /api/v1/import lines from req and calls callback for the parsed rows.
// Parse parses /api/v1/import lines from req and calls callback for the parsed rows.
//
// The callback can be called concurrently multiple times for streamed data from reader.
//
// callback shouldn't hold rows after returning.
func ParseStream(r io.Reader, isGzipped bool, callback func(rows []Row) error) error {
func Parse(r io.Reader, isGzipped bool, callback func(rows []vmimport.Row) error) error {
wcr := writeconcurrencylimiter.GetReader(r)
defer writeconcurrencylimiter.PutReader(wcr)
r = wcr

@ -138,9 +139,9 @@ var streamContextPool sync.Pool
var streamContextPoolCh = make(chan *streamContext, cgroup.AvailableCPUs())

type unmarshalWork struct {
rows Rows
rows vmimport.Rows
ctx *streamContext
callback func(rows []Row) error
callback func(rows []vmimport.Row) error
reqBuf []byte
}

@ -151,7 +152,7 @@ func (uw *unmarshalWork) reset() {
uw.reqBuf = uw.reqBuf[:0]
}

func (uw *unmarshalWork) runCallback(rows []Row) {
func (uw *unmarshalWork) runCallback(rows []vmimport.Row) {
ctx := uw.ctx
if err := uw.callback(rows); err != nil {
ctx.callbackErrLock.Lock()

@ -65,7 +65,7 @@ func NewPromRegex(expr string) (*PromRegex, error) {
return pr, nil
}

// MatchString retruns true if s matches pr.
// MatchString returns true if s matches pr.
//
// The pr is automatically anchored to the beginning and to the end
// of the matching string with '^' and '$'.

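The anchoring described above means a PromRegex match must cover the whole input string. An equivalence sketch using the standard regexp package as a stand-in for PromRegex:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// PromRegex-style matching of `foo.*` behaves like wrapping the
	// expression in ^(?:...)$, i.e. a full-string match.
	re := regexp.MustCompile(`^(?:foo.*)$`)
	fmt.Println(re.MatchString("foobar"))  // true
	fmt.Println(re.MatchString("xfoobar")) // false: prefix before "foo" breaks the anchor
}
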
@ -79,7 +79,7 @@ func (bsw *blockStreamWriter) reset() {
bsw.prevTimestampsBlockOffset = 0
}

// InitFromInmemoryPart initialzes bsw from inmemory part.
// InitFromInmemoryPart initializes bsw from inmemory part.
func (bsw *blockStreamWriter) InitFromInmemoryPart(mp *inmemoryPart, compressLevel int) {
bsw.reset()

@ -4,7 +4,7 @@ func isDedupEnabled() bool {
return len(downsamplingPeriods) > 0
}

// DeduplicateSamples removes samples from src* if they are closer to each other than dedupInterval in millseconds.
// DeduplicateSamples removes samples from src* if they are closer to each other than dedupInterval in milliseconds.
func DeduplicateSamples(srcTimestamps []int64, srcValues []float64, dedupInterval int64) ([]int64, []float64) {
if !needsDedup(srcTimestamps, dedupInterval) {
// Fast path - nothing to deduplicate

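A hedged sketch of interval-based deduplication as the comment describes it: keep one sample per dedupInterval-aligned bucket (here, the last one seen). This illustrates the idea only; the exact selection policy of the real DeduplicateSamples may differ:

package main

import "fmt"

func deduplicateSamples(ts []int64, vs []float64, dedupInterval int64) ([]int64, []float64) {
	if dedupInterval <= 0 || len(ts) < 2 {
		return ts, vs
	}
	// Filter in place, reusing the backing arrays.
	dstTS := ts[:0]
	dstVS := vs[:0]
	for i := 0; i < len(ts); i++ {
		// Drop a sample when the next one falls into the same aligned interval.
		if i+1 < len(ts) && ts[i]/dedupInterval == ts[i+1]/dedupInterval {
			continue
		}
		dstTS = append(dstTS, ts[i])
		dstVS = append(dstVS, vs[i])
	}
	return dstTS, dstVS
}

func main() {
	ts := []int64{1000, 30000, 61000, 119000} // milliseconds
	vs := []float64{1, 2, 3, 4}
	outTS, outVS := deduplicateSamples(ts, vs, 60000)
	fmt.Println(outTS, outVS) // [30000 119000] [2 4]
}
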
@ -650,7 +650,7 @@ func generateTSID(dst *TSID, mn *MetricName) {
// This assumption is true because mn.Tags must be sorted with mn.sortTags() before calling generateTSID() function.
// This allows grouping data blocks for the same (job, instance) close to each other on disk.
// This reduces disk seeks and disk read IO when data blocks are read from disk for the same job and/or instance.
// For example, data blocks for time series matching `process_resident_memory_bytes{job="vmstorage"}` are physically adjancent on disk.
// For example, data blocks for time series matching `process_resident_memory_bytes{job="vmstorage"}` are physically adjacent on disk.
if len(mn.Tags) > 0 {
dst.JobID = uint32(xxhash.Sum64(mn.Tags[0].Value))
}

@ -2754,7 +2754,7 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(qt *querytracer.Tracer, dat
// Intersect metricIDs with the rest of filters.
//
// Do not run these tag filters in parallel, since this may result in CPU and RAM waste
// when the intial tag filters significantly reduce the number of found metricIDs,
// when the initial tag filters significantly reduce the number of found metricIDs,
// so the remaining filters could be performed via much faster metricName matching instead
// of slow selecting of matching metricIDs.
qtChild = qt.NewChild("intersect the remaining %d filters with the found %d metric ids", len(tfws), metricIDs.Len())

@ -57,7 +57,7 @@ const finalPartsToMerge = 3
// Higher number of shards reduces CPU contention and increases the max bandwidth on multi-core systems.
var rawRowsShardsPerPartition = (cgroup.AvailableCPUs() + 1) / 2

// The interval for flushing bufferred rows into parts, so they become visible to search.
// The interval for flushing buffered rows into parts, so they become visible to search.
const pendingRowsFlushInterval = time.Second

// The interval for guaranteed flush of recently ingested data from memory to on-disk parts,

@ -2144,7 +2144,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix1, pathPrefix2, txnPath str
}
}

// Flush pathPrefix* directory metadata to the underying storage,
// Flush pathPrefix* directory metadata to the underlying storage,
// so the moved files become visible there.
fs.MustSyncPath(pathPrefix1)
fs.MustSyncPath(pathPrefix2)

@ -8,7 +8,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

// rawRow reperesents raw timeseries row.
// rawRow represents raw timeseries row.
type rawRow struct {
// TSID is time series id.
TSID TSID

@ -100,7 +100,7 @@ type Search struct {

ts tableSearch

// tr contains time range used in the serach.
// tr contains time range used in the search.
tr TimeRange

// tfss contains tag filters used in the search.

@ -165,7 +165,7 @@ func (s *Search) Init(qt *querytracer.Tracer, storage *Storage, tfss []*TagFilte
}
// It is ok to call Init on non-nil err.
// Init must be called before returning because it will fail
// on Seach.MustClose otherwise.
// on Search.MustClose otherwise.
s.ts.Init(storage.tb, tsids, tr)
qt.Printf("search for parts with data for %d series", len(tsids))
if err != nil {

@ -1972,7 +1972,7 @@ func (s *Storage) updatePerDateData(rows []rawRow, mrs []*MetricRow) error {
s.pendingHourEntriesLock.Unlock()
}
if len(pendingDateMetricIDs) == 0 {
// Fast path - there are no new (date, metricID) entires in rows.
// Fast path - there are no new (date, metricID) entries in rows.
return nil
}

@ -425,7 +425,7 @@ func (tb *table) retentionWatcher() {
continue
}

// There are paritions to drop. Drop them.
// There are partitions to drop. Drop them.

// Remove table references from partitions, so they will be eventually
// closed and dropped after all the pending searches are done.

@ -51,7 +51,7 @@ func timestampToPartitionName(timestamp int64) string {
return t.Format("2006_01")
}

// fromPartitionName initializes tr from the given parition name.
// fromPartitionName initializes tr from the given partition name.
func (tr *TimeRange) fromPartitionName(name string) error {
t, err := time.Parse("2006_01", name)
if err != nil {

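The "2006_01" string in both functions is Go's reference-time layout, meaning partition names are year_month. A round-trip example of the layout:

package main

import (
	"fmt"
	"time"
)

func main() {
	ts := time.Date(2023, time.January, 15, 0, 0, 0, 0, time.UTC)
	name := ts.Format("2006_01") // reference-time layout: year_month
	fmt.Println(name)            // 2023_01

	t, err := time.Parse("2006_01", name)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Year(), int(t.Month())) // 2023 1
}
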
@ -199,7 +199,7 @@ type aggregator struct {

// suffix contains a suffix, which should be added to aggregate metric names
//
// It contains the interval, lables in (by, without), plus output name.
// It contains the interval, labels in (by, without), plus output name.
// For example, foo_bar metric name is transformed to foo_bar:1m_by_job
// for `interval: 1m`, `by: [job]`
suffix string
