Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2025-01-30 15:22:07 +00:00)

Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Commit 549d430907: 214 changed files with 19548 additions and 11833 deletions
Makefile | 2
@@ -29,7 +29,7 @@ all: \
 clean:
 	rm -rf bin/*

-publish: \
+publish: docker-scan \
 	publish-victoria-metrics \
 	publish-vmagent \
 	publish-vmalert \
README.md | 31
@@ -66,6 +66,7 @@ VictoriaMetrics has the following prominent features:
+* It can deal with [high cardinality](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues via [series limiter](#cardinality-limiter).
 * It ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://victoriametrics.com/products/enterprise/).
 * It has open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
 * It can store data on [NFS-based storages](https://en.wikipedia.org/wiki/Network_File_System) such as [Amazon EFS](https://aws.amazon.com/efs/) and [Google Filestore](https://cloud.google.com/filestore).

 See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).

@@ -1634,16 +1635,32 @@ See also [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting

 ## Push metrics

-All the VictoriaMetrics apps support pushing their metrics exposed at `/metrics` page to remote storage in Prometheus text exposition format. This can be done by specifying the following command-line flags:
+All the VictoriaMetrics components support pushing their metrics exposed at the `/metrics` page to remote storage in Prometheus text exposition format.
+This functionality may be used instead of [classic Prometheus-like metrics scraping](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter)
+if VictoriaMetrics components are located in isolated networks, so they cannot be scraped by a local [vmagent](https://docs.victoriametrics.com/vmagent.html).

-* `-pushmetrics.url` - the url to push metrics to. For example, `-pushmetrics.url=http://victoria-metrics:8428/api/v1/import/prometheus` instructs to push internal metrics to `/api/v1/import/prometheus` endpoint according to [these docs](#how-to-import-data-in-prometheus-exposition-format). The `-pushmetrics.url` can be specified multiple times. In this case metrics are pushed to all the specified urls. The url can contain basic auth params in the form http://user:pass@hostname/api/v1/import/prometheus .
+The following command-line flags are related to pushing metrics from VictoriaMetrics components:
+
+* `-pushmetrics.url` - the URL to push metrics to. For example, `-pushmetrics.url=http://victoria-metrics:8428/api/v1/import/prometheus` instructs
+  pushing internal metrics to the `/api/v1/import/prometheus` endpoint according to [these docs](#how-to-import-data-in-prometheus-exposition-format).
+  The `-pushmetrics.url` flag can be specified multiple times. In this case metrics are pushed to all the specified URLs.
+  The URL can contain basic auth params in the form `http://user:pass@hostname/api/v1/import/prometheus`.
+  Metrics are pushed to the provided `-pushmetrics.url` in compressed form with the `Content-Encoding: gzip` request header.
+  This reduces the network bandwidth required for pushing metrics.
+* `-pushmetrics.extraLabel` - labels to add to all the metrics before sending them to `-pushmetrics.url`. Each label must be specified in the format `label="value"`.
+  It is OK to specify multiple `-pushmetrics.extraLabel` command-line flags. In this case all the specified labels
+  are added to all the metrics before sending them to all the configured `-pushmetrics.url` addresses.
 * `-pushmetrics.interval` - the interval between pushes. By default it is set to 10 seconds.
-* `-pushmetrics.extraLabel` - label to add to all the metrics before sending them to `-pushmetrics.url`. The label must be specified in the format `label="value"`. It is OK to specify multiple `-pushmetrics.extraLabel` command-line flags. In this case all the specified labels are added to all the metrics sending them to `-pushmetrics.url`.

-For example, the following command instructs VictoriaMetrics to push metrics from `/metrics` page to `https://maas.victoriametrics.com/api/v1/import/prometheus` with `user:pass` [Basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication). The `instance="foobar"` and `job="vm"` labels are added to all the metrics before sending them to the remote storage:
+For example, the following command instructs VictoriaMetrics to push metrics from the `/metrics` page to `https://maas.victoriametrics.com/api/v1/import/prometheus`
+with `user:pass` [Basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication). The `instance="foobar"` and `job="vm"` labels
+are added to all the metrics before sending them to the remote storage:

 ```console
-/path/to/victoria-metrics -pushmetrics.url=https://user:pass@maas.victoriametrics.com/api/v1/import/prometheus -pushmetrics.extraLabel='instance="foobar",job="vm"'
+/path/to/victoria-metrics \
+  -pushmetrics.url=https://user:pass@maas.victoriametrics.com/api/v1/import/prometheus \
+  -pushmetrics.extraLabel='instance="foobar"' \
+  -pushmetrics.extraLabel='job="vm"'
 ```

 ## Cache removal

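For a Go application, the same push behavior can be wired up programmatically; a minimal sketch, assuming the `InitPush` helper from the `github.com/VictoriaMetrics/metrics` library that VictoriaMetrics components build on (the endpoint and labels below are illustrative):

```go
package main

import (
	"time"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	// Push all registered metrics to the given endpoint every 10 seconds
	// in Prometheus text exposition format, adding the extra labels to
	// every pushed metric. The endpoint and labels are illustrative.
	err := metrics.InitPush(
		"http://victoria-metrics:8428/api/v1/import/prometheus",
		10*time.Second,
		`instance="foobar",job="vm"`,
		true, // also push process metrics (CPU, memory, etc.)
	)
	if err != nil {
		panic(err)
	}
	select {} // block forever so the background pusher keeps running
}
```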
@@ -2100,8 +2117,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
 	Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed. See also -promscrape.suppressScrapeErrorsDelay
   -promscrape.suppressScrapeErrorsDelay duration
 	The delay for suppressing repeated scrape errors logging per each scrape targets. This may be used for reducing the number of log lines related to scrape errors. See also -promscrape.suppressScrapeErrors
-  -pushmetrics.extraLabels array
-	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
+  -pushmetrics.extraLabel array
+	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
 	Supports an array of values separated by comma or specified via multiple flags.
   -pushmetrics.interval duration
 	Interval for pushing metrics to -pushmetrics.url (default 10s)
@@ -44,9 +44,9 @@ func main() {
 	flag.CommandLine.SetOutput(os.Stdout)
 	flag.Usage = usage
 	envflag.Parse()
-	pushmetrics.Init()
 	buildinfo.Init()
 	logger.Init()
+	pushmetrics.Init()

 	if promscrape.IsDryRun() {
 		*dryRun = true
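The same `pushmetrics.Init()` move repeats in every component's `main()` below. The diff itself does not state the motivation; a plausible reading (an assumption, not confirmed by the source) is that `pushmetrics.Init()` starts pushing metrics immediately, so it should run only after `buildinfo.Init()` and `logger.Init()` have set up build-info metrics and logging.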
@@ -1079,8 +1079,8 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
 	Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed. See also -promscrape.suppressScrapeErrorsDelay
   -promscrape.suppressScrapeErrorsDelay duration
 	The delay for suppressing repeated scrape errors logging per each scrape targets. This may be used for reducing the number of log lines related to scrape errors. See also -promscrape.suppressScrapeErrors
-  -pushmetrics.extraLabels array
-	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
+  -pushmetrics.extraLabel array
+	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
 	Supports an array of values separated by comma or specified via multiple flags.
   -pushmetrics.interval duration
 	Interval for pushing metrics to -pushmetrics.url (default 10s)
@@ -77,10 +77,10 @@ func main() {
 	flag.CommandLine.SetOutput(os.Stdout)
 	flag.Usage = usage
 	envflag.Parse()
-	pushmetrics.Init()
 	remotewrite.InitSecretFlags()
 	buildinfo.Init()
 	logger.Init()
+	pushmetrics.Init()

 	if promscrape.IsDryRun() {
 		if err := promscrape.CheckConfig(); err != nil {
@@ -799,8 +799,8 @@ The shortlist of configuration flags is the following:
 	The maximum duration for waiting to perform API requests if more than -promscrape.discovery.concurrency requests are simultaneously performed (default 1m0s)
   -promscrape.dnsSDCheckInterval duration
 	Interval for checking for changes in dns. This works only if dns_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config for details (default 30s)
-  -pushmetrics.extraLabels array
-	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
+  -pushmetrics.extraLabel array
+	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
 	Supports an array of values separated by comma or specified via multiple flags.
   -pushmetrics.interval duration
 	Interval for pushing metrics to -pushmetrics.url (default 10s)
@@ -20,7 +20,7 @@ import (

 // AlertingRule is basic alert entity
 type AlertingRule struct {
-	Type    datasource.Type
+	Type    config.Type
 	RuleID  uint64
 	Name    string
 	Expr    string

@@ -72,7 +72,7 @@ func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule
 		GroupName:    group.Name,
 		EvalInterval: group.Interval,
 		q: qb.BuildWithParams(datasource.QuerierParams{
-			DataSourceType:     &group.Type,
+			DataSourceType:     group.Type.String(),
 			EvaluationInterval: group.Interval,
 			QueryParams:        group.Params,
 			Headers:            group.Headers,
@@ -12,8 +12,6 @@ import (

 	"gopkg.in/yaml.v2"

-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"

@@ -23,7 +21,7 @@ import (
 // Group contains list of Rules grouped into
 // entity with one name and evaluation interval
 type Group struct {
-	Type     datasource.Type `yaml:"type,omitempty"`
+	Type     Type `yaml:"type,omitempty"`
 	File     string
 	Name     string `yaml:"name"`
 	Interval *promutils.Duration `yaml:"interval,omitempty"`

@@ -39,7 +37,7 @@ type Group struct {
 	// Optional HTTP URL parameters added to each rule request
 	Params url.Values `yaml:"params"`
 	// Headers contains optional HTTP headers added to each rule request
-	Headers []datasource.Header `yaml:"headers,omitempty"`
+	Headers []Header `yaml:"headers,omitempty"`

 	// Catches all undefined fields and must be empty after parsing.
 	XXX map[string]interface{} `yaml:",inline"`

@@ -57,7 +55,7 @@ func (g *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	}
 	// change default value to prometheus datasource.
 	if g.Type.Get() == "" {
-		g.Type.Set(datasource.NewPrometheusType())
+		g.Type.Set(NewPrometheusType())
 	}

 	h := md5.New()

@@ -67,7 +65,7 @@ func (g *Group) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	}

 // Validate check for internal Group or Rule configuration errors
-func (g *Group) Validate(validateAnnotations, validateExpressions bool) error {
+func (g *Group) Validate(validateTplFn ValidateTplFn, validateExpressions bool) error {
 	if g.Name == "" {
 		return fmt.Errorf("group name must be set")
 	}

@@ -93,11 +91,11 @@ func (g *Group) Validate(validateAnnotations, validateExpressions bool) error {
 				return fmt.Errorf("invalid expression for rule %q.%q: %w", g.Name, ruleName, err)
 			}
 		}
-		if validateAnnotations {
-			if err := notifier.ValidateTemplates(r.Annotations); err != nil {
+		if validateTplFn != nil {
+			if err := validateTplFn(r.Annotations); err != nil {
 				return fmt.Errorf("invalid annotations for rule %q.%q: %w", g.Name, ruleName, err)
 			}
-			if err := notifier.ValidateTemplates(r.Labels); err != nil {
+			if err := validateTplFn(r.Labels); err != nil {
 				return fmt.Errorf("invalid labels for rule %q.%q: %w", g.Name, ruleName, err)
 			}
 		}
@@ -170,8 +168,11 @@ func (r *Rule) Validate() error {
 	return checkOverflow(r.XXX, "rule")
 }

+// ValidateTplFn must validate the given annotations
+type ValidateTplFn func(annotations map[string]string) error
+
 // Parse parses rule configs from given file patterns
-func Parse(pathPatterns []string, validateAnnotations, validateExpressions bool) ([]Group, error) {
+func Parse(pathPatterns []string, validateTplFn ValidateTplFn, validateExpressions bool) ([]Group, error) {
 	var fp []string
 	for _, pattern := range pathPatterns {
 		matches, err := filepath.Glob(pattern)

@@ -190,7 +191,7 @@ func Parse(pathPatterns []string, validateAnnotations, validateExpressions bool)
 			continue
 		}
 		for _, g := range gr {
-			if err := g.Validate(validateAnnotations, validateExpressions); err != nil {
+			if err := g.Validate(validateTplFn, validateExpressions); err != nil {
 				errGroup.Add(fmt.Errorf("invalid group %q in file %q: %w", g.Name, file, err))
 				continue
 			}
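The new `ValidateTplFn` parameter makes template validation pluggable: callers pass `notifier.ValidateTemplates` (as the tests below do), a custom function, or `nil` to skip validation. A minimal sketch with a hypothetical custom validator (`balancedBraces` is not part of the source):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
)

// balancedBraces is a hypothetical ValidateTplFn: any
// func(map[string]string) error fits the signature.
func balancedBraces(annotations map[string]string) error {
	for k, v := range annotations {
		if strings.Count(v, "{{") != strings.Count(v, "}}") {
			return fmt.Errorf("unbalanced template braces in %q", k)
		}
	}
	return nil
}

func main() {
	// Pass nil instead of balancedBraces to skip template validation.
	groups, err := config.Parse([]string{"rules/*.rules"}, balancedBraces, true)
	if err != nil {
		panic(err)
	}
	fmt.Printf("parsed %d groups\n", len(groups))
}
```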
@@ -9,7 +9,7 @@ import (

 	"gopkg.in/yaml.v2"

-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
 )

@@ -22,7 +22,7 @@ func TestMain(m *testing.M) {
 }

 func TestParseGood(t *testing.T) {
-	if _, err := Parse([]string{"testdata/rules/*good.rules", "testdata/dir/*good.*"}, true, true); err != nil {
+	if _, err := Parse([]string{"testdata/rules/*good.rules", "testdata/dir/*good.*"}, notifier.ValidateTemplates, true); err != nil {
 		t.Errorf("error parsing files %s", err)
 	}
 }

@@ -66,7 +66,7 @@ func TestParseBad(t *testing.T) {
 		},
 	}
 	for _, tc := range testCases {
-		_, err := Parse(tc.path, true, true)
+		_, err := Parse(tc.path, notifier.ValidateTemplates, true)
 		if err == nil {
 			t.Errorf("expected to get error")
 			return
@@ -224,7 +224,7 @@ func TestGroup_Validate(t *testing.T) {
 		},
 		{
 			group: &Group{Name: "test thanos",
-				Type: datasource.NewRawType("thanos"),
+				Type: NewRawType("thanos"),
 				Rules: []Rule{
 					{Alert: "alert", Expr: "up == 1", Labels: map[string]string{
 						"description": "{{ value|query }}",

@@ -236,7 +236,7 @@ func TestGroup_Validate(t *testing.T) {
 		},
 		{
 			group: &Group{Name: "test graphite",
-				Type: datasource.NewGraphiteType(),
+				Type: NewGraphiteType(),
 				Rules: []Rule{
 					{Alert: "alert", Expr: "up == 1", Labels: map[string]string{
 						"description": "some-description",

@@ -248,7 +248,7 @@ func TestGroup_Validate(t *testing.T) {
 		},
 		{
 			group: &Group{Name: "test prometheus",
-				Type: datasource.NewPrometheusType(),
+				Type: NewPrometheusType(),
 				Rules: []Rule{
 					{Alert: "alert", Expr: "up == 1", Labels: map[string]string{
 						"description": "{{ value|query }}",

@@ -261,7 +261,7 @@ func TestGroup_Validate(t *testing.T) {
 		{
 			group: &Group{
 				Name: "test graphite inherit",
-				Type: datasource.NewGraphiteType(),
+				Type: NewGraphiteType(),
 				Rules: []Rule{
 					{
 						Expr: "sumSeries(time('foo.bar',10))",

@@ -276,7 +276,7 @@ func TestGroup_Validate(t *testing.T) {
 		{
 			group: &Group{
 				Name: "test graphite prometheus bad expr",
-				Type: datasource.NewGraphiteType(),
+				Type: NewGraphiteType(),
 				Rules: []Rule{
 					{
 						Expr: "sum(up == 0 ) by (host)",

@@ -290,8 +290,13 @@ func TestGroup_Validate(t *testing.T) {
 			expErr: "invalid rule",
 		},
 	}

 	for _, tc := range testCases {
-		err := tc.group.Validate(tc.validateAnnotations, tc.validateExpressions)
+		var validateTplFn ValidateTplFn
+		if tc.validateAnnotations {
+			validateTplFn = notifier.ValidateTemplates
+		}
+		err := tc.group.Validate(validateTplFn, tc.validateExpressions)
 		if err == nil {
 			if tc.expErr != "" {
 				t.Errorf("expected to get err %q; got nil insted", tc.expErr)
@@ -1,4 +1,4 @@
-package datasource
+package config

 import (
 	"fmt"

@@ -10,45 +10,45 @@ import (

 // Type represents data source type
 type Type struct {
-	name string
+	Name string
 }

 // NewPrometheusType returns prometheus datasource type
 func NewPrometheusType() Type {
 	return Type{
-		name: "prometheus",
+		Name: "prometheus",
 	}
 }

 // NewGraphiteType returns graphite datasource type
 func NewGraphiteType() Type {
 	return Type{
-		name: "graphite",
+		Name: "graphite",
 	}
 }

 // NewRawType returns datasource type from raw string
 // without validation.
 func NewRawType(d string) Type {
-	return Type{name: d}
+	return Type{Name: d}
 }

 // Get returns datasource type
 func (t *Type) Get() string {
-	return t.name
+	return t.Name
 }

 // Set changes datasource type
 func (t *Type) Set(d Type) {
-	t.name = d.name
+	t.Name = d.Name
 }

 // String implements String interface with default value.
 func (t Type) String() string {
-	if t.name == "" {
+	if t.Name == "" {
 		return "prometheus"
 	}
-	return t.name
+	return t.Name
 }

 // ValidateExpr validates query expression with datasource ql.

@@ -63,7 +63,7 @@ func (t *Type) ValidateExpr(expr string) error {
 			return fmt.Errorf("bad prometheus expr: %q, err: %w", expr, err)
 		}
 	default:
-		return fmt.Errorf("unknown datasource type=%q", t.name)
+		return fmt.Errorf("unknown datasource type=%q", t.Name)
 	}
 	return nil
 }

@@ -82,13 +82,13 @@ func (t *Type) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	default:
 		return fmt.Errorf("unknown datasource type=%q, want %q or %q", s, "prometheus", "graphite")
 	}
-	t.name = s
+	t.Name = s
 	return nil
 }

 // MarshalYAML implements the yaml.Unmarshaler interface.
 func (t Type) MarshalYAML() (interface{}, error) {
-	return t.name, nil
+	return t.Name, nil
 }

 // Header is a Key - Value struct for holding an HTTP header.
@@ -19,10 +19,10 @@ type QuerierBuilder interface {

 // QuerierParams params for Querier.
 type QuerierParams struct {
-	DataSourceType     *Type
+	DataSourceType     string
 	EvaluationInterval time.Duration
 	QueryParams        url.Values
-	Headers            []Header
+	Headers            map[string]string
 }

 // Metric is the basic entity which should be return by datasource
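The shape of this change suggests a decoupling: `QuerierParams` now carries the datasource type as a plain string and the headers as a `map[string]string`, so the `datasource` package no longer needs the `Type` and `Header` structs, which this commit moves under the `config` package; callers convert via `group.Type.String()` as shown in the alerting and recording rule hunks.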
@@ -97,7 +97,7 @@ func Init(extraParams url.Values) (QuerierBuilder, error) {
 		appendTypePrefix: *appendTypePrefix,
 		lookBack:         *lookBack,
 		queryStep:        *queryStep,
-		dataSourceType:   NewPrometheusType(),
+		dataSourceType:   datasourcePrometheus,
 		extraParams:      extraParams,
 	}, nil
 }
@@ -12,6 +12,20 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
 )

+type datasourceType string
+
+const (
+	datasourcePrometheus datasourceType = "prometheus"
+	datasourceGraphite   datasourceType = "graphite"
+)
+
+func toDatasourceType(s string) datasourceType {
+	if s == string(datasourceGraphite) {
+		return datasourceGraphite
+	}
+	return datasourcePrometheus
+}
+
 // VMStorage represents vmstorage entity with ability to read and write metrics
 type VMStorage struct {
 	c *http.Client
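A small self-contained illustration of the new string-backed enum's fallback behavior (a sketch that re-declares the unexported types above for demonstration): the zero value of `datasourceType` is the empty string, which is why the `Query` switch below matches `case "", datasourcePrometheus` to treat an unset type as Prometheus.

```go
package main

import "fmt"

type datasourceType string

const (
	datasourcePrometheus datasourceType = "prometheus"
	datasourceGraphite   datasourceType = "graphite"
)

// toDatasourceType mirrors the helper above:
// anything that is not "graphite" maps to Prometheus.
func toDatasourceType(s string) datasourceType {
	if s == string(datasourceGraphite) {
		return datasourceGraphite
	}
	return datasourcePrometheus
}

func main() {
	var unset datasourceType                  // zero value is ""
	fmt.Println(unset == "")                  // true
	fmt.Println(toDatasourceType(""))         // prometheus
	fmt.Println(toDatasourceType("graphite")) // graphite
	fmt.Println(toDatasourceType("thanos"))   // prometheus (fallback)
}
```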
@@ -21,10 +35,15 @@ type VMStorage struct {
 	lookBack  time.Duration
 	queryStep time.Duration

-	dataSourceType     Type
+	dataSourceType     datasourceType
 	evaluationInterval time.Duration
 	extraParams        url.Values
-	extraHeaders       []Header
+	extraHeaders       []keyValue
+}
+
+type keyValue struct {
+	key   string
+	value string
 }

 // Clone makes clone of VMStorage, shares http client.

@@ -42,12 +61,15 @@ func (s *VMStorage) Clone() *VMStorage {

 // ApplyParams - changes given querier params.
 func (s *VMStorage) ApplyParams(params QuerierParams) *VMStorage {
-	if params.DataSourceType != nil {
-		s.dataSourceType = *params.DataSourceType
-	}
+	s.dataSourceType = toDatasourceType(params.DataSourceType)
 	s.evaluationInterval = params.EvaluationInterval
 	s.extraParams = params.QueryParams
-	s.extraHeaders = params.Headers
+	if params.Headers != nil {
+		for key, value := range params.Headers {
+			kv := keyValue{key: key, value: value}
+			s.extraHeaders = append(s.extraHeaders, kv)
+		}
+	}
 	return s
 }

@@ -65,7 +87,7 @@ func NewVMStorage(baseURL string, authCfg *promauth.Config, lookBack time.Durati
 		appendTypePrefix: appendTypePrefix,
 		lookBack:         lookBack,
 		queryStep:        queryStep,
-		dataSourceType:   NewPrometheusType(),
+		dataSourceType:   datasourcePrometheus,
 	}
 }

@@ -76,13 +98,13 @@ func (s *VMStorage) Query(ctx context.Context, query string, ts time.Time) ([]Me
 		return nil, err
 	}

-	switch s.dataSourceType.String() {
-	case "prometheus":
+	switch s.dataSourceType {
+	case "", datasourcePrometheus:
 		s.setPrometheusInstantReqParams(req, query, ts)
-	case "graphite":
+	case datasourceGraphite:
 		s.setGraphiteReqParams(req, query, ts)
 	default:
-		return nil, fmt.Errorf("engine not found: %q", s.dataSourceType.name)
+		return nil, fmt.Errorf("engine not found: %q", s.dataSourceType)
 	}

 	resp, err := s.do(ctx, req)

@@ -94,7 +116,7 @@ func (s *VMStorage) Query(ctx context.Context, query string, ts time.Time) ([]Me
 	}()

 	parseFn := parsePrometheusResponse
-	if s.dataSourceType.name != "prometheus" {
+	if s.dataSourceType != datasourcePrometheus {
 		parseFn = parseGraphiteResponse
 	}
 	return parseFn(req, resp)

@@ -104,8 +126,8 @@ func (s *VMStorage) Query(ctx context.Context, query string, ts time.Time) ([]Me
 // For Prometheus type see https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries
 // Graphite type isn't supported.
 func (s *VMStorage) QueryRange(ctx context.Context, query string, start, end time.Time) ([]Metric, error) {
-	if s.dataSourceType.name != "prometheus" {
-		return nil, fmt.Errorf("%q is not supported for QueryRange", s.dataSourceType.name)
+	if s.dataSourceType != datasourcePrometheus {
+		return nil, fmt.Errorf("%q is not supported for QueryRange", s.dataSourceType)
 	}
 	req, err := s.newRequestPOST()
 	if err != nil {

@@ -151,7 +173,7 @@ func (s *VMStorage) newRequestPOST() (*http.Request, error) {
 		s.authCfg.SetHeaders(req, true)
 	}
 	for _, h := range s.extraHeaders {
-		req.Header.Set(h.Key, h.Value)
+		req.Header.Set(h.key, h.value)
 	}
 	return req, nil
 }
@@ -89,8 +89,8 @@ func TestVMInstantQuery(t *testing.T) {
 	}
 	s := NewVMStorage(srv.URL, authCfg, time.Minute, 0, false, srv.Client())

-	p := NewPrometheusType()
-	pq := s.BuildWithParams(QuerierParams{DataSourceType: &p, EvaluationInterval: 15 * time.Second})
+	p := datasourcePrometheus
+	pq := s.BuildWithParams(QuerierParams{DataSourceType: string(p), EvaluationInterval: 15 * time.Second})
 	ts := time.Now()

 	expErr := func(err string) {

@@ -146,8 +146,7 @@ func TestVMInstantQuery(t *testing.T) {
 		t.Fatalf("unexpected metric %+v want %+v", m, expected)
 	}

-	g := NewGraphiteType()
-	gq := s.BuildWithParams(QuerierParams{DataSourceType: &g})
+	gq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourceGraphite)})

 	m, err = gq.Query(ctx, queryRender, ts) // 8 - graphite
 	if err != nil {

@@ -212,8 +211,7 @@ func TestVMRangeQuery(t *testing.T) {
 	}
 	s := NewVMStorage(srv.URL, authCfg, time.Minute, 0, false, srv.Client())

-	p := NewPrometheusType()
-	pq := s.BuildWithParams(QuerierParams{DataSourceType: &p, EvaluationInterval: 15 * time.Second})
+	pq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourcePrometheus), EvaluationInterval: 15 * time.Second})

 	_, err = pq.QueryRange(ctx, query, time.Now(), time.Time{})
 	expectError(t, err, "is missing")

@@ -239,8 +237,7 @@ func TestVMRangeQuery(t *testing.T) {
 		t.Fatalf("unexpected metric %+v want %+v", m[0], expected)
 	}

-	g := NewGraphiteType()
-	gq := s.BuildWithParams(QuerierParams{DataSourceType: &g})
+	gq := s.BuildWithParams(QuerierParams{DataSourceType: string(datasourceGraphite)})

 	_, err = gq.QueryRange(ctx, queryRender, start, end)
 	expectError(t, err, "is not supported")
@@ -263,7 +260,7 @@ func TestRequestParams(t *testing.T) {
 			"prometheus path",
 			false,
 			&VMStorage{
-				dataSourceType: NewPrometheusType(),
+				dataSourceType: datasourcePrometheus,
 			},
 			func(t *testing.T, r *http.Request) {
 				checkEqualString(t, "/api/v1/query", r.URL.Path)

@@ -273,7 +270,7 @@ func TestRequestParams(t *testing.T) {
 			"prometheus prefix",
 			false,
 			&VMStorage{
-				dataSourceType: NewPrometheusType(),
+				dataSourceType:   datasourcePrometheus,
 				appendTypePrefix: true,
 			},
 			func(t *testing.T, r *http.Request) {

@@ -284,7 +281,7 @@ func TestRequestParams(t *testing.T) {
 			"prometheus range path",
 			true,
 			&VMStorage{
-				dataSourceType: NewPrometheusType(),
+				dataSourceType: datasourcePrometheus,
 			},
 			func(t *testing.T, r *http.Request) {
 				checkEqualString(t, "/api/v1/query_range", r.URL.Path)

@@ -294,7 +291,7 @@ func TestRequestParams(t *testing.T) {
 			"prometheus range prefix",
 			true,
 			&VMStorage{
-				dataSourceType: NewPrometheusType(),
+				dataSourceType:   datasourcePrometheus,
 				appendTypePrefix: true,
 			},
 			func(t *testing.T, r *http.Request) {

@@ -305,7 +302,7 @@ func TestRequestParams(t *testing.T) {
 			"graphite path",
 			false,
 			&VMStorage{
-				dataSourceType: NewGraphiteType(),
+				dataSourceType: datasourceGraphite,
 			},
 			func(t *testing.T, r *http.Request) {
 				checkEqualString(t, graphitePath, r.URL.Path)

@@ -315,7 +312,7 @@ func TestRequestParams(t *testing.T) {
 			"graphite prefix",
 			false,
 			&VMStorage{
-				dataSourceType: NewGraphiteType(),
+				dataSourceType:   datasourceGraphite,
 				appendTypePrefix: true,
 			},
 			func(t *testing.T, r *http.Request) {

@@ -453,7 +450,7 @@ func TestRequestParams(t *testing.T) {
 			"graphite extra params",
 			false,
 			&VMStorage{
-				dataSourceType: NewGraphiteType(),
+				dataSourceType: datasourceGraphite,
 				extraParams: url.Values{
 					"nocache":      {"1"},
 					"max_lookback": {"1h"},

@@ -472,14 +469,14 @@ func TestRequestParams(t *testing.T) {
 			if err != nil {
 				t.Fatalf("unexpected error: %s", err)
 			}
-			switch tc.vm.dataSourceType.String() {
-			case "prometheus":
+			switch tc.vm.dataSourceType {
+			case "", datasourcePrometheus:
 				if tc.queryRange {
 					tc.vm.setPrometheusRangeReqParams(req, query, timestamp, timestamp)
 				} else {
 					tc.vm.setPrometheusInstantReqParams(req, query, timestamp)
 				}
-			case "graphite":
+			case datasourceGraphite:
 				tc.vm.setGraphiteReqParams(req, query, timestamp)
 			}
 			tc.checkFn(t, req)
@@ -530,9 +527,9 @@ func TestHeaders(t *testing.T) {
 		{
 			name: "custom extraHeaders",
 			vmFn: func() *VMStorage {
-				return &VMStorage{extraHeaders: []Header{
-					{Key: "Foo", Value: "bar"},
-					{Key: "Baz", Value: "qux"},
+				return &VMStorage{extraHeaders: []keyValue{
+					{key: "Foo", value: "bar"},
+					{key: "Baz", value: "qux"},
 				}}
 			},
 			checkFn: func(t *testing.T, r *http.Request) {

@@ -551,8 +548,8 @@ func TestHeaders(t *testing.T) {
 				}
 				return &VMStorage{
 					authCfg: cfg,
-					extraHeaders: []Header{
-						{Key: "Authorization", Value: "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="},
+					extraHeaders: []keyValue{
+						{key: "Authorization", value: "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ=="},
 					}}
 			},
 			checkFn: func(t *testing.T, r *http.Request) {
@@ -28,7 +28,7 @@ type Group struct {
 	Name        string
 	File        string
 	Rules       []Rule
-	Type        datasource.Type
+	Type        config.Type
 	Interval    time.Duration
 	Limit       int
 	Concurrency int

@@ -37,7 +37,7 @@ type Group struct {

 	Labels  map[string]string
 	Params  url.Values
-	Headers []datasource.Header
+	Headers map[string]string

 	doneCh     chan struct{}
 	finishedCh chan struct{}

@@ -97,7 +97,7 @@ func newGroup(cfg config.Group, qb datasource.QuerierBuilder, defaultInterval ti
 		Concurrency: cfg.Concurrency,
 		Checksum:    cfg.Checksum,
 		Params:      cfg.Params,
-		Headers:     cfg.Headers,
+		Headers:     make(map[string]string),
 		Labels:      cfg.Labels,

 		doneCh: make(chan struct{}),

@@ -110,6 +110,9 @@ func newGroup(cfg config.Group, qb datasource.QuerierBuilder, defaultInterval ti
 	if g.Concurrency < 1 {
 		g.Concurrency = 1
 	}
+	for _, h := range cfg.Headers {
+		g.Headers[h.Key] = h.Value
+	}
 	g.metrics = newGroupMetrics(g)
 	rules := make([]Rule, len(cfg.Rules))
 	for i, r := range cfg.Rules {
@@ -157,7 +157,7 @@ func TestUpdateWith(t *testing.T) {

 func TestGroupStart(t *testing.T) {
 	// TODO: make parsing from string instead of file
-	groups, err := config.Parse([]string{"config/testdata/rules/rules1-good.rules"}, true, true)
+	groups, err := config.Parse([]string{"config/testdata/rules/rules1-good.rules"}, notifier.ValidateTemplates, true)
 	if err != nil {
 		t.Fatalf("failed to parse rules: %s", err)
 	}
@@ -80,16 +80,17 @@ func main() {
 	flag.CommandLine.SetOutput(os.Stdout)
 	flag.Usage = usage
 	envflag.Parse()
-	pushmetrics.Init()
 	buildinfo.Init()
 	logger.Init()
+	pushmetrics.Init()
+
 	err := templates.Load(*ruleTemplatesPath, true)
 	if err != nil {
 		logger.Fatalf("failed to parse %q: %s", *ruleTemplatesPath, err)
 	}

 	if *dryRun {
-		groups, err := config.Parse(*rulePath, true, true)
+		groups, err := config.Parse(*rulePath, notifier.ValidateTemplates, true)
 		if err != nil {
 			logger.Fatalf("failed to parse %q: %s", *rulePath, err)
 		}

@@ -109,6 +110,11 @@ func main() {
 		logger.Fatalf("failed to init `external.alert.source`: %s", err)
 	}

+	var validateTplFn config.ValidateTplFn
+	if *validateTemplates {
+		validateTplFn = notifier.ValidateTemplates
+	}
+
 	if *replayFrom != "" || *replayTo != "" {
 		rw, err := remotewrite.Init(context.Background())
 		if err != nil {

@@ -117,7 +123,7 @@ func main() {
 		if rw == nil {
 			logger.Fatalf("remoteWrite.url can't be empty in replay mode")
 		}
-		groupsCfg, err := config.Parse(*rulePath, *validateTemplates, *validateExpressions)
+		groupsCfg, err := config.Parse(*rulePath, validateTplFn, *validateExpressions)
 		if err != nil {
 			logger.Fatalf("cannot parse configuration file: %s", err)
 		}

@@ -139,7 +145,7 @@ func main() {
 		logger.Fatalf("failed to init: %s", err)
 	}
 	logger.Infof("reading rules configuration file from %q", strings.Join(*rulePath, ";"))
-	groupsCfg, err := config.Parse(*rulePath, *validateTemplates, *validateExpressions)
+	groupsCfg, err := config.Parse(*rulePath, validateTplFn, *validateExpressions)
 	if err != nil {
 		logger.Fatalf("cannot parse configuration file: %s", err)
 	}

@@ -284,6 +290,11 @@ func configReload(ctx context.Context, m *manager, groupsCfg []config.Group, sig
 		defer ticker.Stop()
 	}

+	var validateTplFn config.ValidateTplFn
+	if *validateTemplates {
+		validateTplFn = notifier.ValidateTemplates
+	}
+
 	// init reload metrics with positive values to improve alerting conditions
 	configSuccess.Set(1)
 	configTimestamp.Set(fasttime.UnixTimestamp())

@@ -313,7 +324,7 @@ func configReload(ctx context.Context, m *manager, groupsCfg []config.Group, sig
 			logger.Errorf("failed to load new templates: %s", err)
 			continue
 		}
-		newGroupsCfg, err := config.Parse(*rulePath, *validateTemplates, *validateExpressions)
+		newGroupsCfg, err := config.Parse(*rulePath, validateTplFn, *validateExpressions)
 		if err != nil {
 			configReloadErrors.Inc()
 			configSuccess.Set(0)
@@ -200,13 +200,22 @@ func urlValuesToStrings(values url.Values) []string {
 	return res
 }

-func headersToStrings(headers []datasource.Header) []string {
+func headersToStrings(headers map[string]string) []string {
 	if len(headers) < 1 {
 		return nil
 	}
-	var res []string
-	for _, h := range headers {
-		res = append(res, fmt.Sprintf("%s: %s", h.Key, h.Value))
+
+	keys := make([]string, 0, len(headers))
+	for k := range headers {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	var res []string
+	for _, k := range keys {
+		v := headers[k]
+		res = append(res, fmt.Sprintf("%s: %s", k, v))
 	}

 	return res
 }
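Go intentionally randomizes map iteration order, so the added key sort is what keeps `headersToStrings` output deterministic now that headers arrive as a `map[string]string` instead of an ordered slice.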
@@ -10,7 +10,6 @@ import (
 	"time"

 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
-	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/datasource"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/remotewrite"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/templates"

@@ -69,7 +68,7 @@ func TestManagerUpdateConcurrent(t *testing.T) {
 			defer wg.Done()
 			for i := 0; i < iterations; i++ {
 				rnd := rand.Intn(len(paths))
-				cfg, err := config.Parse([]string{paths[rnd]}, true, true)
+				cfg, err := config.Parse([]string{paths[rnd]}, notifier.ValidateTemplates, true)
 				if err != nil { // update can fail and this is expected
 					continue
 				}

@@ -132,7 +131,7 @@ func TestManagerUpdate(t *testing.T) {
 			{
 				File:     "config/testdata/dir/rules1-good.rules",
 				Name:     "duplicatedGroupDiffFiles",
-				Type:     datasource.NewPrometheusType(),
+				Type:     config.NewPrometheusType(),
 				Interval: defaultEvalInterval,
 				Rules: []Rule{
 					&AlertingRule{

@@ -157,14 +156,14 @@ func TestManagerUpdate(t *testing.T) {
 			{
 				File:     "config/testdata/rules/rules0-good.rules",
 				Name:     "groupGorSingleAlert",
-				Type:     datasource.NewPrometheusType(),
+				Type:     config.NewPrometheusType(),
 				Rules:    []Rule{VMRows},
 				Interval: defaultEvalInterval,
 			},
 			{
 				File:     "config/testdata/rules/rules0-good.rules",
 				Interval: defaultEvalInterval,
-				Type:     datasource.NewPrometheusType(),
+				Type:     config.NewPrometheusType(),
 				Name:     "TestGroup", Rules: []Rule{
 					Conns,
 					ExampleAlertAlwaysFiring,

@@ -179,7 +178,7 @@ func TestManagerUpdate(t *testing.T) {
 			{
 				File:     "config/testdata/rules/rules0-good.rules",
 				Name:     "groupGorSingleAlert",
-				Type:     datasource.NewPrometheusType(),
+				Type:     config.NewPrometheusType(),
 				Interval: defaultEvalInterval,
 				Rules:    []Rule{VMRows},
 			},

@@ -187,7 +186,7 @@ func TestManagerUpdate(t *testing.T) {
 				File:     "config/testdata/rules/rules0-good.rules",
 				Interval: defaultEvalInterval,
 				Name:     "TestGroup",
-				Type:     datasource.NewPrometheusType(),
+				Type:     config.NewPrometheusType(),
 				Rules: []Rule{
 					Conns,
 					ExampleAlertAlwaysFiring,
@@ -202,14 +201,14 @@ func TestManagerUpdate(t *testing.T) {
 			{
 				File:     "config/testdata/rules/rules0-good.rules",
 				Name:     "groupGorSingleAlert",
-				Type:     datasource.NewPrometheusType(),
+				Type:     config.NewPrometheusType(),
 				Interval: defaultEvalInterval,
 				Rules:    []Rule{VMRows},
 			},
 			{
 				File:     "config/testdata/rules/rules0-good.rules",
 				Interval: defaultEvalInterval,
-				Type:     datasource.NewPrometheusType(),
+				Type:     config.NewPrometheusType(),
 				Name:     "TestGroup", Rules: []Rule{
 					Conns,
 					ExampleAlertAlwaysFiring,

@@ -232,7 +231,7 @@ func TestManagerUpdate(t *testing.T) {
 				t.Fatalf("failed to complete initial rules update: %s", err)
 			}

-			cfgUpdate, err := config.Parse([]string{tc.updatePath}, true, true)
+			cfgUpdate, err := config.Parse([]string{tc.updatePath}, notifier.ValidateTemplates, true)
 			if err == nil { // update can fail and that's expected
 				_ = m.update(ctx, cfgUpdate, false)
 			}

@@ -330,7 +329,11 @@ func TestManagerUpdateNegative(t *testing.T) {

 func loadCfg(t *testing.T, path []string, validateAnnotations, validateExpressions bool) []config.Group {
 	t.Helper()
-	cfg, err := config.Parse(path, validateAnnotations, validateExpressions)
+	var validateTplFn config.ValidateTplFn
+	if validateAnnotations {
+		validateTplFn = notifier.ValidateTemplates
+	}
+	cfg, err := config.Parse(path, validateTplFn, validateExpressions)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -18,7 +18,7 @@ import (
 // to evaluate configured Expression and
 // return TimeSeries as result.
 type RecordingRule struct {
-	Type   datasource.Type
+	Type   config.Type
 	RuleID uint64
 	Name   string
 	Expr   string

@@ -70,7 +70,7 @@ func newRecordingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rul
 		GroupID: group.ID(),
 		metrics: &recordingRuleMetrics{},
 		q: qb.BuildWithParams(datasource.QuerierParams{
-			DataSourceType:     &group.Type,
+			DataSourceType:     group.Type.String(),
 			EvaluationInterval: group.Interval,
 			QueryParams:        group.Params,
 			Headers:            group.Headers,
@@ -160,7 +160,7 @@ func (rh *requestHandler) handler(w http.ResponseWriter, r *http.Request) bool {
 		if strings.HasPrefix(r.URL.Path, "/api/v1/") {
 			redirectURL = alert.APILink()
 		}
-		http.Redirect(w, r, "/"+redirectURL, http.StatusPermanentRedirect)
+		httpserver.RedirectPermanent(w, "/"+redirectURL)
 		return true
 	}
 }
@@ -288,8 +288,8 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
 	Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
   -pprofAuthKey string
 	Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
-  -pushmetrics.extraLabels array
-	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
+  -pushmetrics.extraLabel array
+	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
 	Supports an array of values separated by comma or specified via multiple flags.
   -pushmetrics.interval duration
 	Interval for pushing metrics to -pushmetrics.url (default 10s)
@@ -34,9 +34,10 @@ func main() {
 	flag.CommandLine.SetOutput(os.Stdout)
 	flag.Usage = usage
 	envflag.Parse()
-	pushmetrics.Init()
 	buildinfo.Init()
 	logger.Init()
+	pushmetrics.Init()
+
 	logger.Infof("starting vmauth at %q...", *httpListenAddr)
 	startTime := time.Now()
 	initAuthConfig()
@@ -239,8 +239,8 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
 	Optional origin directory on the remote storage with old backup for server-side copying when performing full backup. This speeds up full backups
   -pprofAuthKey string
 	Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
-  -pushmetrics.extraLabels array
-	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
+  -pushmetrics.extraLabel array
+	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
 	Supports an array of values separated by comma or specified via multiple flags.
   -pushmetrics.interval duration
 	Interval for pushing metrics to -pushmetrics.url (default 10s)
@@ -42,9 +42,9 @@ func main() {
 	flag.CommandLine.SetOutput(os.Stdout)
 	flag.Usage = usage
 	envflag.Parse()
-	pushmetrics.Init()
 	buildinfo.Init()
 	logger.Init()
+	pushmetrics.Init()

 	if len(*snapshotCreateURL) > 0 {
 		if len(*snapshotName) > 0 {
@@ -238,8 +238,8 @@ vmbackupmanager performs regular backups according to the provided configs.
 	Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
   -pprofAuthKey string
 	Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
-  -pushmetrics.extraLabels array
-	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
+  -pushmetrics.extraLabel array
+	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
 	Supports an array of values separated by comma or specified via multiple flags.
   -pushmetrics.interval duration
 	Interval for pushing metrics to -pushmetrics.url (default 10s)
@@ -35,7 +35,7 @@ jwt token must be in following format:
       "team": "dev",
       "project": "mobile"
     },
-    "extra_filters": ["{env~=\"prod|dev\",team!=\"test\"}"],
+    "extra_filters": ["{env=~\"prod|dev\",team!=\"test\"}"],
     "mode": 1
   }
 }

@@ -281,8 +281,8 @@ The shortlist of configuration flags include the following:
 	Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
   -pprofAuthKey string
 	Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
-  -pushmetrics.extraLabels array
-	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
+  -pushmetrics.extraLabel array
+	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
 	Supports an array of values separated by comma or specified via multiple flags.
   -pushmetrics.interval duration
 	Interval for pushing metrics to -pushmetrics.url (default 10s)
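The `extra_filters` fix above corrects the label-matching operator: `=~` is PromQL's regex-match operator, while the previous `~=` is not valid selector syntax.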
@@ -141,8 +141,8 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
 	Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
   -pprofAuthKey string
 	Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
-  -pushmetrics.extraLabels array
-	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
+  -pushmetrics.extraLabel array
+	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
 	Supports an array of values separated by comma or specified via multiple flags.
   -pushmetrics.interval duration
 	Interval for pushing metrics to -pushmetrics.url (default 10s)
@@ -34,9 +34,9 @@ func main() {
 	flag.CommandLine.SetOutput(os.Stdout)
 	flag.Usage = usage
 	envflag.Parse()
-	pushmetrics.Init()
 	buildinfo.Init()
 	logger.Init()
+	pushmetrics.Init()

 	go httpserver.Serve(*httpListenAddr, nil)

@@ -168,7 +168,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 		_ = r.ParseForm()
 		path = strings.TrimPrefix(path, "/")
 		newURL := path + "/?" + r.Form.Encode()
-		http.Redirect(w, r, newURL, http.StatusMovedPermanently)
+		httpserver.RedirectPermanent(w, newURL)
 		return true
 	}
 	if strings.HasPrefix(path, "/vmui/") {

@@ -217,7 +217,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 		// vmalert access via incomplete url without `/` in the end. Redirect to complete url.
 		// Use relative redirect, since the hostname and path prefix may be incorrect if VictoriaMetrics
 		// is hidden behind vmauth or similar proxy.
-		http.Redirect(w, r, "vmalert/", http.StatusMovedPermanently)
+		httpserver.RedirectPermanent(w, "vmalert/")
 		return true
 	}
 	if strings.HasPrefix(path, "/vmalert/") {
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"flag"
 	"fmt"
+	"math/rand"
 	"regexp"
 	"sort"
 	"sync"

@@ -81,7 +82,7 @@ type timeseriesWork struct {
 	rss    *Results
 	pts    *packedTimeseries
 	f      func(rs *Result, workerID uint) error
-	doneCh chan error
+	err    error

 	rowsProcessed int
 }
@@ -91,18 +92,14 @@ func (tsw *timeseriesWork) reset() {
 	tsw.rss = nil
 	tsw.pts = nil
 	tsw.f = nil
-	if n := len(tsw.doneCh); n > 0 {
-		logger.Panicf("BUG: tsw.doneCh must be empty during reset; it contains %d items instead", n)
-	}
+	tsw.err = nil
 	tsw.rowsProcessed = 0
 }

 func getTimeseriesWork() *timeseriesWork {
 	v := tswPool.Get()
 	if v == nil {
-		v = &timeseriesWork{
-			doneCh: make(chan error, 1),
-		}
+		v = &timeseriesWork{}
 	}
 	return v.(*timeseriesWork)
 }
@@ -114,28 +111,6 @@ func putTimeseriesWork(tsw *timeseriesWork) {

 var tswPool sync.Pool

-func scheduleTimeseriesWork(workChs []chan *timeseriesWork, tsw *timeseriesWork) {
-	if len(workChs) == 1 {
-		// Fast path for a single worker
-		workChs[0] <- tsw
-		return
-	}
-	attempts := 0
-	for {
-		idx := fastrand.Uint32n(uint32(len(workChs)))
-		select {
-		case workChs[idx] <- tsw:
-			return
-		default:
-			attempts++
-			if attempts >= len(workChs) {
-				workChs[idx] <- tsw
-				return
-			}
-		}
-	}
-}
-
 func (tsw *timeseriesWork) do(r *Result, workerID uint) error {
 	if atomic.LoadUint32(tsw.mustStop) != 0 {
 		return nil
@@ -149,25 +124,25 @@ func (tsw *timeseriesWork) do(r *Result, workerID uint) error {
 		atomic.StoreUint32(tsw.mustStop, 1)
 		return fmt.Errorf("error during time series unpacking: %w", err)
 	}
-	tsw.rowsProcessed = len(r.Timestamps)
 	if len(r.Timestamps) > 0 {
 		if err := tsw.f(r, workerID); err != nil {
 			atomic.StoreUint32(tsw.mustStop, 1)
 			return err
 		}
 	}
+	tsw.rowsProcessed = len(r.Values)
 	return nil
 }

-func timeseriesWorker(ch <-chan *timeseriesWork, workerID uint) {
+func timeseriesWorker(tsws []*timeseriesWork, workerID uint) {
 	v := resultPool.Get()
 	if v == nil {
 		v = &result{}
 	}
 	r := v.(*result)
-	for tsw := range ch {
+	for _, tsw := range tsws {
 		err := tsw.do(&r.rs, workerID)
-		tsw.doneCh <- err
+		tsw.err = err
 	}
 	currentTime := fasttime.UnixTimestamp()
 	if cap(r.rs.Values) > 1024*1024 && 4*len(r.rs.Values) < cap(r.rs.Values) && currentTime-r.lastResetTime > 10 {
@@ -196,31 +171,7 @@ func (rss *Results) RunParallel(qt *querytracer.Tracer, f func(rs *Result, worke
 	qt = qt.NewChild("parallel process of fetched data")
 	defer rss.mustClose()

-	// Spin up local workers.
-	//
-	// Do not use a global workChs with a global pool of workers, since it may lead to a deadlock in the following case:
-	// - RunParallel is called with f, which blocks without forward progress.
-	// - All the workers in the global pool became blocked in f.
-	// - workChs is filled up, so it cannot accept new work items from other RunParallel calls.
-	workers := len(rss.packedTimeseries)
-	if workers > gomaxprocs {
-		workers = gomaxprocs
-	}
-	if workers < 1 {
-		workers = 1
-	}
-	workChs := make([]chan *timeseriesWork, workers)
-	var workChsWG sync.WaitGroup
-	for i := 0; i < workers; i++ {
-		workChs[i] = make(chan *timeseriesWork, 16)
-		workChsWG.Add(1)
-		go func(workerID int) {
-			defer workChsWG.Done()
-			timeseriesWorker(workChs[workerID], uint(workerID))
-		}(i)
-	}
-
-	// Feed workers with work.
+	// Prepare work for workers.
 	tsws := make([]*timeseriesWork, len(rss.packedTimeseries))
 	var mustStop uint32
 	for i := range rss.packedTimeseries {
@@ -229,17 +180,50 @@ func (rss *Results) RunParallel(qt *querytracer.Tracer, f func(rs *Result, worke
 		tsw.pts = &rss.packedTimeseries[i]
 		tsw.f = f
 		tsw.mustStop = &mustStop
-		scheduleTimeseriesWork(workChs, tsw)
 		tsws[i] = tsw
 	}
-	seriesProcessedTotal := len(rss.packedTimeseries)
-	rss.packedTimeseries = rss.packedTimeseries[:0]
+	// Shuffle tsws for providing the equal amount of work among workers.
+	r := getRand()
+	r.Shuffle(len(tsws), func(i, j int) {
+		tsws[i], tsws[j] = tsws[j], tsws[i]
+	})
+	putRand(r)
+
+	// Spin up up to gomaxprocs local workers and split work equally among them.
+	// This guarantees linear scalability with the increase of gomaxprocs
+	// (e.g. the number of available CPU cores).
+	workers := len(rss.packedTimeseries)
+	itemsPerWorker := 1
+	if workers > gomaxprocs {
+		itemsPerWorker = 1 + workers/gomaxprocs
+		workers = gomaxprocs
+	}
+	var start int
+	var i uint
+	var wg sync.WaitGroup
+	for start < len(tsws) {
+		end := start + itemsPerWorker
+		if end > len(tsws) {
+			end = len(tsws)
+		}
+		chunk := tsws[start:end]
+		wg.Add(1)
+		go func(tswsChunk []*timeseriesWork, workerID uint) {
+			defer wg.Done()
+			timeseriesWorker(tswsChunk, workerID)
+		}(chunk, i)
+		start = end
+		i++
+	}
+
+	// Wait until work is complete.
+	wg.Wait()
+
+	// Collect results.
 	var firstErr error
 	rowsProcessedTotal := 0
 	for _, tsw := range tsws {
-		if err := <-tsw.doneCh; err != nil && firstErr == nil {
+		if err := tsw.err; err != nil && firstErr == nil {
 			// Return just the first error, since other errors are likely duplicate the first error.
 			firstErr = err
 		}
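A standalone sketch of the chunking pattern introduced above, splitting a work slice into at most `GOMAXPROCS` contiguous chunks with one goroutine per chunk (all names here are illustrative, not from the source):

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

// processAll splits items into at most GOMAXPROCS contiguous chunks
// and processes each chunk in its own worker goroutine.
func processAll(items []int, process func(chunk []int, workerID uint)) {
	gmp := runtime.GOMAXPROCS(0) // query without changing the setting
	itemsPerWorker := 1
	if len(items) > gmp {
		itemsPerWorker = 1 + len(items)/gmp
	}
	var wg sync.WaitGroup
	var workerID uint
	for start := 0; start < len(items); start += itemsPerWorker {
		end := start + itemsPerWorker
		if end > len(items) {
			end = len(items)
		}
		wg.Add(1)
		go func(chunk []int, id uint) {
			defer wg.Done()
			process(chunk, id)
		}(items[start:end], workerID)
		workerID++
	}
	wg.Wait()
}

func main() {
	items := make([]int, 10)
	for i := range items {
		items[i] = i
	}
	processAll(items, func(chunk []int, id uint) {
		fmt.Printf("worker %d got %v\n", id, chunk)
	})
}
```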
@@ -248,19 +232,30 @@ func (rss *Results) RunParallel(qt *querytracer.Tracer, f func(rs *Result, worke
 		putTimeseriesWork(tsw)
 	}

+	seriesProcessedTotal := len(rss.packedTimeseries)
+	rss.packedTimeseries = rss.packedTimeseries[:0]
 	rowsReadPerQuery.Update(float64(rowsProcessedTotal))
 	seriesReadPerQuery.Update(float64(seriesProcessedTotal))

-	// Shut down local workers
-	for _, workCh := range workChs {
-		close(workCh)
-	}
-	workChsWG.Wait()
 	qt.Donef("series=%d, samples=%d", seriesProcessedTotal, rowsProcessedTotal)

 	return firstErr
 }

+var randPool sync.Pool
+
+func getRand() *rand.Rand {
+	v := randPool.Get()
+	if v == nil {
+		v = rand.New(rand.NewSource(int64(fasttime.UnixTimestamp())))
+	}
+	return v.(*rand.Rand)
+}
+
+func putRand(r *rand.Rand) {
+	randPool.Put(r)
+}
+
 var (
 	rowsReadPerSeries = metrics.NewHistogram(`vm_rows_read_per_series`)
 	rowsReadPerQuery  = metrics.NewHistogram(`vm_rows_read_per_query`)
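Design note on `getRand`/`putRand`: a `*rand.Rand` is not safe for concurrent use, and the package-level `math/rand` functions serialize behind an internal mutex, so pooling per-call generators in a `sync.Pool` lets concurrent `RunParallel` invocations shuffle their work without lock contention.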
@@ -4287,18 +4287,7 @@ func TestExecSuccess(t *testing.T) {
 	t.Run(`prometheus_buckets(zero-vmrange-value)`, func(t *testing.T) {
 		t.Parallel()
 		q := `sort(prometheus_buckets(label_set(0, "vmrange", "0...0")))`
-		r1 := netstorage.Result{
-			MetricName: metricNameExpected,
-			Values:     []float64{0, 0, 0, 0, 0, 0},
-			Timestamps: timestampsExpected,
-		}
-		r1.MetricName.Tags = []storage.Tag{
-			{
-				Key:   []byte("le"),
-				Value: []byte("+Inf"),
-			},
-		}
-		resultsExpected := []netstorage.Result{r1}
+		resultsExpected := []netstorage.Result{}
 		f(q, resultsExpected)
 	})
 	t.Run(`prometheus_buckets(valid)`, func(t *testing.T) {
@@ -557,14 +557,14 @@ func vmrangeBucketsToLE(tss []*timeseries) []*timeseries {
 			prevTs := uniqTs[xs.endStr]
 			if prevTs != nil {
 				// the end of the current bucket is not unique, need to merge it with the existing bucket.
-				mergeNonOverlappingTimeseries(prevTs, xs.ts)
+				_ = mergeNonOverlappingTimeseries(prevTs, xs.ts)
 			} else {
 				xssNew = append(xssNew, xs)
 				uniqTs[xs.endStr] = xs.ts
 			}
 			xsPrev = xs
 		}
-		if !math.IsInf(xsPrev.end, 1) {
+		if !math.IsInf(xsPrev.end, 1) && !isZeroTS(xsPrev.ts) {
 			xssNew = append(xssNew, x{
 				endStr: "+Inf",
 				end:    math.Inf(1),

@@ -572,6 +572,9 @@ func vmrangeBucketsToLE(tss []*timeseries) []*timeseries {
 			})
 		}
 		xss = xssNew
+		if len(xss) == 0 {
+			continue
+		}
 		for i := range xss[0].ts.Values {
 			count := float64(0)
 			for _, xs := range xss {
@@ -1,8 +1,13 @@
package promql

import (
    "fmt"
    "reflect"
    "strings"
    "testing"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

func TestFixBrokenBuckets(t *testing.T) {

@@ -32,3 +37,186 @@ func TestFixBrokenBuckets(t *testing.T) {
    f([]float64{1, 5, 2, nan, 6, 3}, []float64{1, 2, 2, 3, 3, 3})
    f([]float64{5, 10, 4, 3}, []float64{3, 3, 3, 3})
}

func TestVmrangeBucketsToLE(t *testing.T) {
    f := func(buckets, bucketsExpected string) {
        t.Helper()
        tss := promMetricsToTimeseries(buckets)
        result := vmrangeBucketsToLE(tss)
        resultBuckets := timeseriesToPromMetrics(result)
        if !reflect.DeepEqual(resultBuckets, bucketsExpected) {
            t.Errorf("unexpected vmrangeBucketsToLE(); got\n%v\nwant\n%v", resultBuckets, bucketsExpected)
        }
    }

    // A single non-empty vmrange bucket
    f(
        `foo{vmrange="4.084e+02...4.642e+02"} 2 123`,
        `foo{le="4.084e+02"} 0 123
foo{le="4.642e+02"} 2 123
foo{le="+Inf"} 2 123`,
    )
    f(
        `foo{vmrange="0...+Inf"} 5 123`,
        `foo{le="+Inf"} 5 123`,
    )
    f(
        `foo{vmrange="-Inf...0"} 4 123`,
        `foo{le="-Inf"} 0 123
foo{le="0"} 4 123
foo{le="+Inf"} 4 123`,
    )
    f(
        `foo{vmrange="-Inf...+Inf"} 1.23 456`,
        `foo{le="-Inf"} 0 456
foo{le="+Inf"} 1.23 456`,
    )
    f(
        `foo{vmrange="0...0"} 5.3 0`,
        `foo{le="0"} 5.3 0
foo{le="+Inf"} 5.3 0`,
    )

    // Multiple non-empty vmrange buckets
    f(
        `foo{vmrange="4.084e+02...4.642e+02"} 2 123
foo{vmrange="1.234e+02...4.084e+02"} 3 123
`,
        `foo{le="1.234e+02"} 0 123
foo{le="4.084e+02"} 3 123
foo{le="4.642e+02"} 5 123
foo{le="+Inf"} 5 123`,
    )

    // Multiple disjoint vmrange buckets
    f(
        `foo{vmrange="1...2"} 2 123
foo{vmrange="4...6"} 3 123
`,
        `foo{le="1"} 0 123
foo{le="2"} 2 123
foo{le="4"} 2 123
foo{le="6"} 5 123
foo{le="+Inf"} 5 123`,
    )

    // Multiple intersected vmrange buckets
    f(
        `foo{vmrange="1...5"} 2 123
foo{vmrange="4...6"} 3 123
`,
        `foo{le="1"} 0 123
foo{le="5"} 2 123
foo{le="4"} 2 123
foo{le="6"} 5 123
foo{le="+Inf"} 5 123`,
    )

    // Multiple vmrange buckets with the same end range
    f(
        `foo{vmrange="1...5"} 2 123
foo{vmrange="0...5"} 3 123
`,
        `foo{le="1"} 0 123
foo{le="5"} 2 123
foo{le="0"} 2 123
foo{le="+Inf"} 2 123`,
    )

    // A single empty vmrange bucket
    f(
        `foo{vmrange="0...1"} 0 123`,
        ``,
    )
    f(
        `foo{vmrange="0...+Inf"} 0 123`,
        ``,
    )
    f(
        `foo{vmrange="-Inf...0"} 0 123`,
        ``,
    )
    f(
        `foo{vmrange="0...0"} 0 0`,
        ``,
    )
    f(
        `foo{vmrange="-Inf...+Inf"} 0 456`,
        ``,
    )

    // Multiple empty vmrange buckets
    f(
        `foo{vmrange="2...3"} 0 123
foo{vmrange="1...2"} 0 123`,
        ``,
    )

    // The bucket with negative value
    f(
        `foo{vmrange="4.084e+02...4.642e+02"} -5 1`,
        ``,
    )

    // Missing vmrange in the original metric
    f(
        `foo 3 6`,
        ``,
    )

    // Missing le label in the original metric
    f(
        `foo{le="456"} 3 6`,
        `foo{le="456"} 3 6`,
    )

    // Invalid vmrange label value
    f(
        `foo{vmrange="foo...bar"} 1 1`,
        ``,
    )
    f(
        `foo{vmrange="4.084e+02"} 1 1`,
        ``,
    )
    f(
        `foo{vmrange="4.084e+02...foo"} 1 1`,
        ``,
    )
}

func promMetricsToTimeseries(s string) []*timeseries {
    var rows prometheus.Rows
    rows.UnmarshalWithErrLogger(s, func(errStr string) {
        panic(fmt.Errorf("cannot parse %q: %s", s, errStr))
    })
    var tss []*timeseries
    for _, row := range rows.Rows {
        var tags []storage.Tag
        for _, tag := range row.Tags {
            tags = append(tags, storage.Tag{
                Key:   []byte(tag.Key),
                Value: []byte(tag.Value),
            })
        }
        var ts timeseries
        ts.MetricName.MetricGroup = []byte(row.Metric)
        ts.MetricName.Tags = tags
        ts.Timestamps = append(ts.Timestamps, row.Timestamp/1000)
        ts.Values = append(ts.Values, row.Value)
        tss = append(tss, &ts)
    }
    return tss
}

func timeseriesToPromMetrics(tss []*timeseries) string {
    var a []string
    for _, ts := range tss {
        metricName := ts.MetricName.String()
        for i := range ts.Timestamps {
            line := fmt.Sprintf("%s %v %d", metricName, ts.Values[i], ts.Timestamps[i])
            a = append(a, line)
        }
    }
    return strings.Join(a, "\n")
}
@@ -4,7 +4,7 @@ DOCKER_NAMESPACE := victoriametrics

ROOT_IMAGE ?= alpine:3.16.1
CERTS_IMAGE := alpine:3.16.1
GO_BUILDER_IMAGE := golang:1.18.4-alpine
GO_BUILDER_IMAGE := golang:1.18.5-alpine
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)

@@ -16,6 +16,9 @@ package-base:
		--tag $(BASE_IMAGE) \
		deployment/docker/base

docker-scan: package-base
	docker scan --accept-license $(BASE_IMAGE) || (echo "❌ The build has been terminated because critical vulnerabilities were found in $(BASE_IMAGE)"; exit 1)

package-builder:
	(docker image ls --format '{{.Repository}}:{{.Tag}}' | grep -q '$(BUILDER_IMAGE)$$') \
		|| docker build \
@@ -18,7 +18,7 @@ The recommended filesystem for VictoriaMetrics is [ext4](https://en.wikipedia.or
mkfs.ext4 ... -O 64bit,huge_file,extent -T huge
```

VictoriaMetrics should work OK with other filesystems, including network filesystems such as [NFS](https://en.wikipedia.org/wiki/Network_File_System), [Amazon EFS](https://aws.amazon.com/efs/) and [Google Filestore](https://cloud.google.com/filestore).
VictoriaMetrics should work OK with other filesystems too.

## Operating System

@@ -17,13 +17,24 @@ The following tip changes can be tested by building VictoriaMetrics components f

* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): allow configuring additional HTTP request headers for `-datasource.url`, `-remoteWrite.url` and `-remoteRead.url` via `-datasource.headers`, `-remoteWrite.headers` and `-remoteRead.headers` command-line flags. Additional HTTP request headers also can be set on group level via `headers` param - see [these docs](https://docs.victoriametrics.com/vmalert.html#groups) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2860).
* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): execute left and right sides of certain operations in parallel. For example, `q1 or q2`, `aggr_func(q1) <op> q2`, `q1 <op> aggr_func(q1)`. This may improve query performance if VictoriaMetrics has enough free resources for parallel processing of both sides of the operation. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2886).
* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): allow duplicate username records with different passwords at configuration file. It should allow password rotation without username change.
* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): allow multiple sections with duplicate `username` but with different `password` values at `-auth.config` file. A minimal config sketch follows this list.
* FEATURE: add ability to push internal metrics (e.g. metrics exposed at `/metrics` page) to the configured remote storage from all the VictoriaMetrics components. See [these docs](https://docs.victoriametrics.com/#push-metrics).
* FEATURE: improve performance for heavy queries over big number of time series on systems with big number of CPU cores. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2896). Thanks to @zqyzyq for [the idea](https://github.com/VictoriaMetrics/VictoriaMetrics/commit/b596ac3745314fcc170a14e3ded062971cf7ced2).
* FEATURE: improve performance for registering new time series in `indexdb` by up to 50%. Thanks to @ahfuzhang for [the issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2249).

* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): set `up` metric to `0` for partial scrapes in [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode). Previously the `up` metric was set to `1` when at least a single metric has been scraped before the error. This aligns the behaviour of `vmselect` with Prometheus.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): restart all the scrape jobs during [config reload](https://docs.victoriametrics.com/vmagent.html#configuration-update) after `global` section is changed inside `-promscrape.config`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2884).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly assume role with AWS ECS credentials. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2875). Thanks to @transacid for [the fix](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2876).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): return series from `q1` if `q2` doesn't return matching time series in the query `q1 ifnot q2`. Previously series from `q1` weren't returned in this case.
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): properly show date picker at `Table` tab. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2874).
* BUGFIX: properly generate http redirects if `-http.pathPrefix` command-line flag is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2918).
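
Editor's note: to make the vmauth feature above concrete, here is an assumed `-auth.config` fragment (illustrative names and URLs, not taken from the commit). Both records share the `username`, so clients can migrate from the old password to the new one without downtime:

```yaml
users:
  # Old credentials, kept while clients are being migrated.
  - username: "reader"
    password: "old-secret"
    url_prefix: "http://victoria-metrics:8428"
  # New credentials with the same username; vmauth matches whichever password fits.
  - username: "reader"
    password: "new-secret"
    url_prefix: "http://victoria-metrics:8428"
```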

## [v1.79.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.1)

Released at 02-08-2022

* SECURITY FIX: upgrade base docker image (alpine) from 3.16.0 to 3.16.1. See [alpine 3.16.1 release notes](https://alpinelinux.org/posts/Alpine-3.16.1-released.html).

## [v1.79.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.0)

@@ -648,8 +648,8 @@ Below is the output for `/path/to/vminsert -help`:
	Trim timestamps for OpenTSDB HTTP data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
  -pprofAuthKey string
	Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
  -pushmetrics.extraLabels array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
  -pushmetrics.extraLabel array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
	Supports an array of values separated by comma or specified via multiple flags.
  -pushmetrics.interval duration
	Interval for pushing metrics to -pushmetrics.url (default 10s)

@@ -786,8 +786,8 @@ Below is the output for `/path/to/vmselect -help`:
	Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
  -pprofAuthKey string
	Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
  -pushmetrics.extraLabels array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
  -pushmetrics.extraLabel array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
	Supports an array of values separated by comma or specified via multiple flags.
  -pushmetrics.interval duration
	Interval for pushing metrics to -pushmetrics.url (default 10s)

@@ -978,8 +978,8 @@ Below is the output for `/path/to/vmstorage -help`:
	Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
  -precisionBits int
	The number of precision bits to store per each value. Lower precision bits improves data compression at the cost of precision loss (default 64)
  -pushmetrics.extraLabels array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
  -pushmetrics.extraLabel array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
	Supports an array of values separated by comma or specified via multiple flags.
  -pushmetrics.interval duration
	Interval for pushing metrics to -pushmetrics.url (default 10s)

@@ -66,6 +66,7 @@ VictoriaMetrics has the following prominent features:
* It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues via [series limiter](#cardinality-limiter).
* It ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://victoriametrics.com/products/enterprise/).
* It has open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
* It can store data on [NFS-based storages](https://en.wikipedia.org/wiki/Network_File_System) such as [Amazon EFS](https://aws.amazon.com/efs/) and [Google Filestore](https://cloud.google.com/filestore).

See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).

@@ -1634,16 +1635,32 @@ See also [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting

## Push metrics

All the VictoriaMetrics apps support pushing their metrics exposed at `/metrics` page to remote storage in Prometheus text exposition format. This can be done by specifying the following command-line flags:
All the VictoriaMetrics components support pushing their metrics exposed at `/metrics` page to remote storage in Prometheus text exposition format.
This functionality may be used instead of [classic Prometheus-like metrics scraping](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter)
if VictoriaMetrics components are located in isolated networks, so they cannot be scraped by local [vmagent](https://docs.victoriametrics.com/vmagent.html).

* `-pushmetrics.url` - the url to push metrics to. For example, `-pushmetrics.url=http://victoria-metrics:8428/api/v1/import/prometheus` instructs to push internal metrics to `/api/v1/import/prometheus` endpoint according to [these docs](#how-to-import-data-in-prometheus-exposition-format). The `-pushmetrics.url` can be specified multiple times. In this case metrics are pushed to all the specified urls. The url can contain basic auth params in the form http://user:pass@hostname/api/v1/import/prometheus .
The following command-line flags are related to pushing metrics from VictoriaMetrics components:

* `-pushmetrics.url` - the url to push metrics to. For example, `-pushmetrics.url=http://victoria-metrics:8428/api/v1/import/prometheus` instructs
  to push internal metrics to `/api/v1/import/prometheus` endpoint according to [these docs](#how-to-import-data-in-prometheus-exposition-format).
  The `-pushmetrics.url` can be specified multiple times. In this case metrics are pushed to all the specified urls.
  The url can contain basic auth params in the form `http://user:pass@hostname/api/v1/import/prometheus`.
  Metrics are pushed to the provided `-pushmetrics.url` in a compressed form with `Content-Encoding: gzip` request header.
  This allows reducing the required network bandwidth for metrics push.
* `-pushmetrics.extraLabel` - labels to add to all the metrics before sending them to `-pushmetrics.url`. Each label must be specified in the format `label="value"`.
  It is OK to specify multiple `-pushmetrics.extraLabel` command-line flags. In this case all the specified labels
  are added to all the metrics before sending them to all the configured `-pushmetrics.url` addresses.
* `-pushmetrics.interval` - the interval between pushes. By default it is set to 10 seconds.
* `-pushmetrics.extraLabel` - label to add to all the metrics before sending them to `-pushmetrics.url`. The label must be specified in the format `label="value"`. It is OK to specify multiple `-pushmetrics.extraLabel` command-line flags. In this case all the specified labels are added to all the metrics sending them to `-pushmetrics.url`.

For example, the following command instructs VictoriaMetrics to push metrics from `/metrics` page to `https://maas.victoriametrics.com/api/v1/import/prometheus` with `user:pass` [Basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication). The `instance="foobar"` and `job="vm"` labels are added to all the metrics before sending them to the remote storage:
For example, the following command instructs VictoriaMetrics to push metrics from `/metrics` page to `https://maas.victoriametrics.com/api/v1/import/prometheus`
with `user:pass` [Basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication). The `instance="foobar"` and `job="vm"` labels
are added to all the metrics before sending them to the remote storage:

```console
/path/to/victoria-metrics -pushmetrics.url=https://user:pass@maas.victoriametrics.com/api/v1/import/prometheus -pushmetrics.extraLabel='instance="foobar",job="vm"'
/path/to/victoria-metrics \
  -pushmetrics.url=https://user:pass@maas.victoriametrics.com/api/v1/import/prometheus \
  -pushmetrics.extraLabel='instance="foobar"' \
  -pushmetrics.extraLabel='job="vm"'
```
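
Editor's note: for illustration only (metric names and values below are hypothetical), each push request body is plain Prometheus text exposition, with the configured extra labels merged into every exported series:

```
vm_app_uptime_seconds{instance="foobar",job="vm"} 16457
vm_rows_inserted_total{type="prometheus",instance="foobar",job="vm"} 120432
```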

## Cache removal

@@ -2100,8 +2117,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
	Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed. See also -promscrape.suppressScrapeErrorsDelay
  -promscrape.suppressScrapeErrorsDelay duration
	The delay for suppressing repeated scrape errors logging per each scrape targets. This may be used for reducing the number of log lines related to scrape errors. See also -promscrape.suppressScrapeErrors
  -pushmetrics.extraLabels array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
  -pushmetrics.extraLabel array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
	Supports an array of values separated by comma or specified via multiple flags.
  -pushmetrics.interval duration
	Interval for pushing metrics to -pushmetrics.url (default 10s)

@@ -7,7 +7,8 @@ sort: 18

## Release version and Docker images

0. Make sure that the release commits have no security issues.
1. Document all the changes for new release in [CHANGELOG.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md).
1a. Document all the changes for new release in [CHANGELOG.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md).
1b. Add `(available in v1.xx.y)` line to feature docs introduced in the upcoming release.
2. Create the following release tags:
   * `git tag -s v1.xx.y` in `master` branch
   * `git tag -s v1.xx.y-cluster` in `cluster` branch

@@ -70,6 +70,7 @@ VictoriaMetrics has the following prominent features:
* It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and [high churn rate](https://docs.victoriametrics.com/FAQ.html#what-is-high-churn-rate) issues via [series limiter](#cardinality-limiter).
* It ideally works with big amounts of time series data from APM, Kubernetes, IoT sensors, connected cars, industrial telemetry, financial data and various [Enterprise workloads](https://victoriametrics.com/products/enterprise/).
* It has open source [cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster).
* It can store data on [NFS-based storages](https://en.wikipedia.org/wiki/Network_File_System) such as [Amazon EFS](https://aws.amazon.com/efs/) and [Google Filestore](https://cloud.google.com/filestore).

See also [various Articles about VictoriaMetrics](https://docs.victoriametrics.com/Articles.html).

@@ -1638,16 +1639,32 @@ See also [troubleshooting docs](https://docs.victoriametrics.com/Troubleshooting

## Push metrics

All the VictoriaMetrics apps support pushing their metrics exposed at `/metrics` page to remote storage in Prometheus text exposition format. This can be done by specifying the following command-line flags:
All the VictoriaMetrics components support pushing their metrics exposed at `/metrics` page to remote storage in Prometheus text exposition format.
This functionality may be used instead of [classic Prometheus-like metrics scraping](https://docs.victoriametrics.com/#how-to-scrape-prometheus-exporters-such-as-node-exporter)
if VictoriaMetrics components are located in isolated networks, so they cannot be scraped by local [vmagent](https://docs.victoriametrics.com/vmagent.html).

* `-pushmetrics.url` - the url to push metrics to. For example, `-pushmetrics.url=http://victoria-metrics:8428/api/v1/import/prometheus` instructs to push internal metrics to `/api/v1/import/prometheus` endpoint according to [these docs](#how-to-import-data-in-prometheus-exposition-format). The `-pushmetrics.url` can be specified multiple times. In this case metrics are pushed to all the specified urls. The url can contain basic auth params in the form http://user:pass@hostname/api/v1/import/prometheus .
The following command-line flags are related to pushing metrics from VictoriaMetrics components:

* `-pushmetrics.url` - the url to push metrics to. For example, `-pushmetrics.url=http://victoria-metrics:8428/api/v1/import/prometheus` instructs
  to push internal metrics to `/api/v1/import/prometheus` endpoint according to [these docs](#how-to-import-data-in-prometheus-exposition-format).
  The `-pushmetrics.url` can be specified multiple times. In this case metrics are pushed to all the specified urls.
  The url can contain basic auth params in the form `http://user:pass@hostname/api/v1/import/prometheus`.
  Metrics are pushed to the provided `-pushmetrics.url` in a compressed form with `Content-Encoding: gzip` request header.
  This allows reducing the required network bandwidth for metrics push.
* `-pushmetrics.extraLabel` - labels to add to all the metrics before sending them to `-pushmetrics.url`. Each label must be specified in the format `label="value"`.
  It is OK to specify multiple `-pushmetrics.extraLabel` command-line flags. In this case all the specified labels
  are added to all the metrics before sending them to all the configured `-pushmetrics.url` addresses.
* `-pushmetrics.interval` - the interval between pushes. By default it is set to 10 seconds.
* `-pushmetrics.extraLabel` - label to add to all the metrics before sending them to `-pushmetrics.url`. The label must be specified in the format `label="value"`. It is OK to specify multiple `-pushmetrics.extraLabel` command-line flags. In this case all the specified labels are added to all the metrics sending them to `-pushmetrics.url`.

For example, the following command instructs VictoriaMetrics to push metrics from `/metrics` page to `https://maas.victoriametrics.com/api/v1/import/prometheus` with `user:pass` [Basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication). The `instance="foobar"` and `job="vm"` labels are added to all the metrics before sending them to the remote storage:
For example, the following command instructs VictoriaMetrics to push metrics from `/metrics` page to `https://maas.victoriametrics.com/api/v1/import/prometheus`
with `user:pass` [Basic auth](https://en.wikipedia.org/wiki/Basic_access_authentication). The `instance="foobar"` and `job="vm"` labels
are added to all the metrics before sending them to the remote storage:

```console
/path/to/victoria-metrics -pushmetrics.url=https://user:pass@maas.victoriametrics.com/api/v1/import/prometheus -pushmetrics.extraLabel='instance="foobar",job="vm"'
/path/to/victoria-metrics \
  -pushmetrics.url=https://user:pass@maas.victoriametrics.com/api/v1/import/prometheus \
  -pushmetrics.extraLabel='instance="foobar"' \
  -pushmetrics.extraLabel='job="vm"'
```

## Cache removal

@@ -2104,8 +2121,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
	Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed. See also -promscrape.suppressScrapeErrorsDelay
  -promscrape.suppressScrapeErrorsDelay duration
	The delay for suppressing repeated scrape errors logging per each scrape targets. This may be used for reducing the number of log lines related to scrape errors. See also -promscrape.suppressScrapeErrors
  -pushmetrics.extraLabels array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
  -pushmetrics.extraLabel array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
	Supports an array of values separated by comma or specified via multiple flags.
  -pushmetrics.interval duration
	Interval for pushing metrics to -pushmetrics.url (default 10s)

@@ -1083,8 +1083,8 @@ See the docs at https://docs.victoriametrics.com/vmagent.html .
	Whether to suppress scrape errors logging. The last error for each target is always available at '/targets' page even if scrape errors logging is suppressed. See also -promscrape.suppressScrapeErrorsDelay
  -promscrape.suppressScrapeErrorsDelay duration
	The delay for suppressing repeated scrape errors logging per each scrape targets. This may be used for reducing the number of log lines related to scrape errors. See also -promscrape.suppressScrapeErrors
  -pushmetrics.extraLabels array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
  -pushmetrics.extraLabel array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
	Supports an array of values separated by comma or specified via multiple flags.
  -pushmetrics.interval duration
	Interval for pushing metrics to -pushmetrics.url (default 10s)

@@ -803,8 +803,8 @@ The shortlist of configuration flags is the following:
	The maximum duration for waiting to perform API requests if more than -promscrape.discovery.concurrency requests are simultaneously performed (default 1m0s)
  -promscrape.dnsSDCheckInterval duration
	Interval for checking for changes in dns. This works only if dns_sd_configs is configured in '-promscrape.config' file. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config for details (default 30s)
  -pushmetrics.extraLabels array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
  -pushmetrics.extraLabel array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
	Supports an array of values separated by comma or specified via multiple flags.
  -pushmetrics.interval duration
	Interval for pushing metrics to -pushmetrics.url (default 10s)

@@ -292,8 +292,8 @@ See the docs at https://docs.victoriametrics.com/vmauth.html .
	Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
  -pprofAuthKey string
	Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
  -pushmetrics.extraLabels array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
  -pushmetrics.extraLabel array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
	Supports an array of values separated by comma or specified via multiple flags.
  -pushmetrics.interval duration
	Interval for pushing metrics to -pushmetrics.url (default 10s)

@@ -243,8 +243,8 @@ See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-
	Optional origin directory on the remote storage with old backup for server-side copying when performing full backup. This speeds up full backups
  -pprofAuthKey string
	Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
  -pushmetrics.extraLabels array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
  -pushmetrics.extraLabel array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
	Supports an array of values separated by comma or specified via multiple flags.
  -pushmetrics.interval duration
	Interval for pushing metrics to -pushmetrics.url (default 10s)

@@ -242,8 +242,8 @@ vmbackupmanager performs regular backups according to the provided configs.
	Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
  -pprofAuthKey string
	Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
  -pushmetrics.extraLabels array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
  -pushmetrics.extraLabel array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
	Supports an array of values separated by comma or specified via multiple flags.
  -pushmetrics.interval duration
	Interval for pushing metrics to -pushmetrics.url (default 10s)

@@ -39,7 +39,7 @@ jwt token must be in following format:
      "team": "dev",
      "project": "mobile"
    },
    "extra_filters": ["{env~=\"prod|dev\",team!=\"test\"}"],
    "extra_filters": ["{env=~\"prod|dev\",team!=\"test\"}"],
    "mode": 1
  }
}

@@ -285,8 +285,8 @@ The shortlist of configuration flags include the following:
	Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
  -pprofAuthKey string
	Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
  -pushmetrics.extraLabels array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
  -pushmetrics.extraLabel array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
	Supports an array of values separated by comma or specified via multiple flags.
  -pushmetrics.interval duration
	Interval for pushing metrics to -pushmetrics.url (default 10s)

@@ -145,8 +145,8 @@ i.e. the end result would be similar to [rsync --delete](https://askubuntu.com/q
	Auth key for /metrics endpoint. It must be passed via authKey query arg. It overrides httpAuth.* settings
  -pprofAuthKey string
	Auth key for /debug/pprof/* endpoints. It must be passed via authKey query arg. It overrides httpAuth.* settings
  -pushmetrics.extraLabels array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
  -pushmetrics.extraLabel array
	Optional labels to add to metrics pushed to -pushmetrics.url . For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url
	Supports an array of values separated by comma or specified via multiple flags.
  -pushmetrics.interval duration
	Interval for pushing metrics to -pushmetrics.url (default 10s)
22
go.mod

@@ -9,9 +9,9 @@ require (
	// Do not use the original github.com/valyala/fasthttp because of issues
	// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
	github.com/VictoriaMetrics/fasthttp v1.1.0
	github.com/VictoriaMetrics/metrics v1.19.3
	github.com/VictoriaMetrics/metrics v1.20.1
	github.com/VictoriaMetrics/metricsql v0.44.1
	github.com/aws/aws-sdk-go v1.44.59
	github.com/aws/aws-sdk-go v1.44.67
	github.com/cespare/xxhash/v2 v2.1.2
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect

@@ -35,10 +35,10 @@ require (
	github.com/valyala/fasttemplate v1.2.1
	github.com/valyala/gozstd v1.17.0
	github.com/valyala/quicktemplate v1.7.0
	golang.org/x/net v0.0.0-20220708220712-1185a9018129
	golang.org/x/oauth2 v0.0.0-20220718184931-c8730f7fcb92
	golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8
	google.golang.org/api v0.88.0
	golang.org/x/net v0.0.0-20220728211354-c7608f3a8462
	golang.org/x/oauth2 v0.0.0-20220722155238-128564f6959c
	golang.org/x/sys v0.0.0-20220731174439-a90be440212d
	google.golang.org/api v0.90.0
	gopkg.in/yaml.v2 v2.4.0
)

@@ -62,8 +62,8 @@ require (
	github.com/pkg/errors v0.9.1 // indirect
	github.com/prometheus/client_golang v1.12.2 // indirect
	github.com/prometheus/client_model v0.2.0 // indirect
	github.com/prometheus/procfs v0.7.3 // indirect
	github.com/rivo/uniseg v0.2.0 // indirect
	github.com/prometheus/procfs v0.8.0 // indirect
	github.com/rivo/uniseg v0.3.1 // indirect
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/valyala/bytebufferpool v1.0.0 // indirect
	github.com/valyala/histogram v1.2.0 // indirect

@@ -71,11 +71,11 @@ require (
	go.opencensus.io v0.23.0 // indirect
	go.uber.org/atomic v1.9.0 // indirect
	go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 // indirect
	golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f // indirect
	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 // indirect
	golang.org/x/text v0.3.7 // indirect
	golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
	google.golang.org/appengine v1.6.7 // indirect
	google.golang.org/genproto v0.0.0-20220720214146-176da50484ac // indirect
	google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f // indirect
	google.golang.org/grpc v1.48.0 // indirect
	google.golang.org/protobuf v1.28.0 // indirect
	google.golang.org/protobuf v1.28.1 // indirect
)
40
go.sum

@@ -109,8 +109,8 @@ github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJ
github.com/VictoriaMetrics/fasthttp v1.1.0 h1:3crd4YWHsMwu60GUXRH6OstowiFvqrwS4a/ueoLdLL0=
github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR2uydjiWvoLp5ZTqQ=
github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
github.com/VictoriaMetrics/metrics v1.19.3 h1:cr7yyS6fHSzjvwCAYsJbvh8qaRfFzilkcqgHgO97e6Y=
github.com/VictoriaMetrics/metrics v1.19.3/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
github.com/VictoriaMetrics/metrics v1.20.1 h1:XqQbRKYzwkmo0DKKDbvp6V7upUqErlqd0vXPoeBsEbU=
github.com/VictoriaMetrics/metrics v1.20.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
github.com/VictoriaMetrics/metricsql v0.44.1 h1:qGoRt0g84uMUscVjS7P3uDZKmjJubWKaIx9v0iHKgck=
github.com/VictoriaMetrics/metricsql v0.44.1/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=

@@ -148,8 +148,8 @@ github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQ
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.59 h1:bkdnNsMvMhFmNLqKDAJ6rKR+S0hjOt/3AIJp2mxOK9o=
github.com/aws/aws-sdk-go v1.44.59/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.67 h1:+nxfXbMe8QUB6svLsuLYsp+WhZBKM26w62Zidir739A=
github.com/aws/aws-sdk-go v1.44.67/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=

@@ -754,14 +754,16 @@ github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9 h1:F2A86PGVYqn3P7oWbrSmSlJHae9y6wwpAdoWb/pZi6Q=
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.3.1 h1:SDPP7SHNl1L7KrEFCSJslJ/DM9DT02Nq2C61XrfHMmk=
github.com/rivo/uniseg v0.3.1/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=

@@ -1003,8 +1005,8 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220708220712-1185a9018129 h1:vucSRfWwTsoXro7P+3Cjlr6flUMtzCwzlvkxEQtHHB0=
golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220728211354-c7608f3a8462 h1:UreQrH7DbFXSi9ZFox6FNT3WBooWmdANpU+IfkT1T4I=
golang.org/x/net v0.0.0-20220728211354-c7608f3a8462/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@@ -1026,8 +1028,8 @@ golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220718184931-c8730f7fcb92 h1:oVlhw3Oe+1reYsE2Nqu19PDJfLzwdU3QUUrG86rLK68=
golang.org/x/oauth2 v0.0.0-20220718184931-c8730f7fcb92/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20220722155238-128564f6959c h1:q3gFqPqH7NVofKo3c3yETAP//pPI+G5mvB7qqj1Y5kY=
golang.org/x/oauth2 v0.0.0-20220722155238-128564f6959c/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

@@ -1040,8 +1042,9 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=

@@ -1138,8 +1141,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220731174439-a90be440212d h1:Sv5ogFZatcgIMMtBSTTAgMYsicp25MXBubjXNDKwm80=
golang.org/x/sys v0.0.0-20220731174439-a90be440212d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@@ -1287,8 +1290,8 @@ google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3
google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g=
google.golang.org/api v0.86.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/api v0.88.0 h1:MPwxQRqpyskYhr2iNyfsQ8R06eeyhe7UEuR30p136ZQ=
google.golang.org/api v0.88.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/api v0.90.0 h1:WMnUWAvihIClUYFNeFA69VTuR3duKS3IalMGDQcLvq8=
google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

@@ -1382,8 +1385,8 @@ google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljW
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220720214146-176da50484ac h1:EOa+Yrhx1C0O+4pHeXeWrCwdI0tWI6IfUU56Vebs9wQ=
google.golang.org/genproto v0.0.0-20220720214146-176da50484ac/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE=
google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f h1:XVHpVMvPs4MtH3h6cThzKs2snNexcfd35vQx2T3IuIY=
google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=

@@ -1436,8 +1439,9 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -236,13 +236,27 @@ func handlerWrapper(s *server, w http.ResponseWriter, r *http.Request, rh Reques
        connTimeoutClosedConns.Inc()
        w.Header().Set("Connection", "close")
    }
    path, err := getCanonicalPath(r.URL.Path)
    if err != nil {
        Errorf(w, r, "cannot get canonical path: %s", err)
        unsupportedRequestErrors.Inc()
        return
    path := r.URL.Path
    prefix := GetPathPrefix()
    if prefix != "" {
        // Trim -http.pathPrefix from path
        prefixNoTrailingSlash := strings.TrimSuffix(prefix, "/")
        if path == prefixNoTrailingSlash {
            // Redirect to url with / at the end.
            // This is needed for proper handling of relative urls in web browsers.
            // Intentionally ignore query args, since it is expected that the requested url
            // is composed by a human, so it doesn't contain query args.
            RedirectPermanent(w, prefix)
            return
        }
        if !strings.HasPrefix(path, prefix) {
            Errorf(w, r, "missing -http.pathPrefix=%q in the requested path %q", *pathPrefix, path)
            unsupportedRequestErrors.Inc()
            return
        }
        path = path[len(prefix)-1:]
        r.URL.Path = path
    }
    r.URL.Path = path
    switch r.URL.Path {
    case "/health":
        w.Header().Set("Content-Type", "text/plain; charset=utf-8")

@@ -328,24 +342,6 @@ func handlerWrapper(s *server, w http.ResponseWriter, r *http.Request, rh Reques
    }
}

func getCanonicalPath(path string) (string, error) {
    if len(*pathPrefix) == 0 || path == "/" {
        return path, nil
    }
    if *pathPrefix == path {
        return "/", nil
    }
    prefix := *pathPrefix
    if !strings.HasSuffix(prefix, "/") {
        prefix = prefix + "/"
    }
    if !strings.HasPrefix(path, prefix) {
        return "", fmt.Errorf("missing `-pathPrefix=%q` in the requested path: %q", *pathPrefix, path)
    }
    path = path[len(prefix)-1:]
    return path, nil
}

func checkBasicAuth(w http.ResponseWriter, r *http.Request) bool {
    if len(*httpAuthUsername) == 0 {
        // HTTP Basic Auth is disabled.

@@ -644,7 +640,17 @@ func IsTLS() bool {

// GetPathPrefix - returns http server path prefix.
func GetPathPrefix() string {
    return *pathPrefix
    prefix := *pathPrefix
    if prefix == "" {
        return ""
    }
    if !strings.HasPrefix(prefix, "/") {
        prefix = "/" + prefix
    }
    if !strings.HasSuffix(prefix, "/") {
        prefix += "/"
    }
    return prefix
}
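Editor's note: a quick illustration of the normalization performed by the new `GetPathPrefix` above, with hypothetical flag values:

```go
// -http.pathPrefix=foo   -> GetPathPrefix() returns "/foo/"
// -http.pathPrefix=/foo  -> GetPathPrefix() returns "/foo/"
// -http.pathPrefix=/foo/ -> GetPathPrefix() returns "/foo/"
```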

// WriteAPIHelp writes pathList to w in HTML format.

@@ -672,3 +678,12 @@ func GetRequestURI(r *http.Request) string {
    }
    return requestURI + delimiter + queryArgs
}

// RedirectPermanent redirects to the given url using 301 status code.
func RedirectPermanent(w http.ResponseWriter, url string) {
    // Do not use http.Redirect, since it breaks relative redirects
    // if the http.Request.URL contains unexpected url.
    // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2918
    w.Header().Set("Location", url)
    w.WriteHeader(http.StatusMovedPermanently)
}
@@ -17,7 +17,11 @@ type blockStreamReader struct {
 	// Block contains the current block if Next returned true.
 	Block inmemoryBlock
 
-	blockItemIdx int
+	// isInmemoryBlock is set to true if bsr was initialized with InitFromInmemoryBlock().
+	isInmemoryBlock bool
+
+	// The index of the current item in the Block, which is returned from CurrItem()
+	currItemIdx int
 
 	path string
 

@@ -66,7 +70,8 @@ type blockStreamReader struct {
 
 func (bsr *blockStreamReader) reset() {
 	bsr.Block.Reset()
-	bsr.blockItemIdx = 0
+	bsr.isInmemoryBlock = false
+	bsr.currItemIdx = 0
 	bsr.path = ""
 	bsr.ph.Reset()
 	bsr.mrs = nil

@@ -98,6 +103,14 @@ func (bsr *blockStreamReader) String() string {
 	return bsr.ph.String()
 }
 
+// InitFromInmemoryBlock initializes bsr from the given ib.
+func (bsr *blockStreamReader) InitFromInmemoryBlock(ib *inmemoryBlock) {
+	bsr.reset()
+	bsr.Block.CopyFrom(ib)
+	bsr.Block.SortItems()
+	bsr.isInmemoryBlock = true
+}
+
 // InitFromInmemoryPart initializes bsr from the given mp.
 func (bsr *blockStreamReader) InitFromInmemoryPart(mp *inmemoryPart) {
 	bsr.reset()

@@ -178,17 +191,26 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error {
 //
 // It closes *Reader files passed to Init.
 func (bsr *blockStreamReader) MustClose() {
-	bsr.indexReader.MustClose()
-	bsr.itemsReader.MustClose()
-	bsr.lensReader.MustClose()
-
+	if !bsr.isInmemoryBlock {
+		bsr.indexReader.MustClose()
+		bsr.itemsReader.MustClose()
+		bsr.lensReader.MustClose()
+	}
 	bsr.reset()
 }
 
+func (bsr *blockStreamReader) CurrItem() string {
+	return bsr.Block.items[bsr.currItemIdx].String(bsr.Block.data)
+}
+
 func (bsr *blockStreamReader) Next() bool {
 	if bsr.err != nil {
 		return false
 	}
+	if bsr.isInmemoryBlock {
+		bsr.err = io.EOF
+		return true
+	}
 
 	if bsr.bhIdx >= len(bsr.bhs) {
 		// The current index block is over. Try reading the next index block.

@@ -233,7 +255,7 @@ func (bsr *blockStreamReader) Next() bool {
 	bsr.err = fmt.Errorf("too many blocks read: %d; must be smaller than partHeader.blocksCount %d", bsr.blocksRead, bsr.ph.blocksCount)
 	return false
 	}
-	bsr.blockItemIdx = 0
+	bsr.currItemIdx = 0
 	bsr.itemsRead += uint64(len(bsr.Block.items))
 	if bsr.itemsRead > bsr.ph.itemsCount {
 		bsr.err = fmt.Errorf("too many items read: %d; must be smaller than partHeader.itemsCount %d", bsr.itemsRead, bsr.ph.itemsCount)
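For orientation: after this change a `blockStreamReader` backed by a single in-memory block is a one-shot stream — `Next()` returns `true` exactly once and records `io.EOF` so the following call stops the loop. A toy analog of that contract, with simplified, made-up names (the real unexported type lives in `lib/mergeset`):

```go
package main

import (
	"fmt"
	"io"
)

// singleBlockReader illustrates the inmemory-block mode added above:
// the whole "stream" is one pre-sorted block.
type singleBlockReader struct {
	items []string
	err   error
}

func (r *singleBlockReader) Next() bool {
	if r.err != nil {
		return false
	}
	// Mark the single block as consumed, but still return true so the
	// caller processes the block it now holds.
	r.err = io.EOF
	return true
}

func (r *singleBlockReader) Error() error {
	if r.err == io.EOF {
		return nil // EOF is the normal end of stream, not a failure
	}
	return r.err
}

func main() {
	r := &singleBlockReader{items: []string{"bar", "baz", "foo"}}
	for r.Next() {
		fmt.Println(r.items) // the block is processed exactly once
	}
	if err := r.Error(); err != nil {
		fmt.Println("error:", err)
	}
}
```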
@@ -1,6 +1,7 @@
 package mergeset
 
 import (
+	"bytes"
 	"fmt"
 	"os"
 	"reflect"

@@ -36,7 +37,7 @@ func (it Item) Bytes(data []byte) []byte {
 	return data
 }
 
-// String returns string represetnation of it obtained from data.
+// String returns string representation of it obtained from data.
 //
 // The returned string representation belongs to data.
 func (it Item) String(data []byte) string {

@@ -56,7 +57,7 @@ func (ib *inmemoryBlock) Less(i, j int) bool {
 	a.Start += cpLen
 	b.Start += cpLen
 	data := ib.data
-	return string(a.Bytes(data)) < string(b.Bytes(data))
+	return a.String(data) < b.String(data)
 }
 
 func (ib *inmemoryBlock) Swap(i, j int) {

@@ -76,6 +77,21 @@ type inmemoryBlock struct {
 	items []Item
 }
 
+func (ib *inmemoryBlock) CopyFrom(src *inmemoryBlock) {
+	ib.commonPrefix = append(ib.commonPrefix[:0], src.commonPrefix...)
+	ib.data = append(ib.data[:0], src.data...)
+	ib.items = append(ib.items[:0], src.items...)
+}
+
+func (ib *inmemoryBlock) SortItems() {
+	if !ib.isSorted() {
+		ib.updateCommonPrefixUnsorted()
+		sort.Sort(ib)
+	} else {
+		ib.updateCommonPrefixSorted()
+	}
+}
+
 func (ib *inmemoryBlock) SizeBytes() int {
 	return int(unsafe.Sizeof(*ib)) + cap(ib.commonPrefix) + cap(ib.data) + cap(ib.items)*int(unsafe.Sizeof(Item{}))
 }

@@ -110,7 +126,11 @@ func (ib *inmemoryBlock) updateCommonPrefixUnsorted() {
 	data := ib.data
 	cp := items[0].Bytes(data)
 	for _, it := range items[1:] {
-		cpLen := commonPrefixLen(cp, it.Bytes(data))
+		item := it.Bytes(data)
+		if bytes.HasPrefix(item, cp) {
+			continue
+		}
+		cpLen := commonPrefixLen(cp, item)
 		if cpLen == 0 {
 			return
 		}

@@ -199,12 +219,7 @@ func (ib *inmemoryBlock) isSorted() bool {
 // - returns the number of items encoded including the first item.
 // - returns the marshal type used for the encoding.
 func (ib *inmemoryBlock) MarshalUnsortedData(sb *storageBlock, firstItemDst, commonPrefixDst []byte, compressLevel int) ([]byte, []byte, uint32, marshalType) {
-	if !ib.isSorted() {
-		ib.updateCommonPrefixUnsorted()
-		sort.Sort(ib)
-	} else {
-		ib.updateCommonPrefixSorted()
-	}
+	ib.SortItems()
 	return ib.marshalData(sb, firstItemDst, commonPrefixDst, compressLevel)
 }
 
@@ -11,6 +11,27 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 )
 
+func TestCommonPrefixLen(t *testing.T) {
+	f := func(a, b string, expectedPrefixLen int) {
+		t.Helper()
+		prefixLen := commonPrefixLen([]byte(a), []byte(b))
+		if prefixLen != expectedPrefixLen {
+			t.Fatalf("unexpected prefix len; got %d; want %d", prefixLen, expectedPrefixLen)
+		}
+	}
+	f("", "", 0)
+	f("a", "", 0)
+	f("", "a", 0)
+	f("a", "a", 1)
+	f("abc", "xy", 0)
+	f("abc", "abd", 2)
+	f("01234567", "01234567", 8)
+	f("01234567", "012345678", 8)
+	f("012345679", "012345678", 8)
+	f("01234569", "012345678", 7)
+	f("01234569", "01234568", 7)
+}
+
 func TestInmemoryBlockAdd(t *testing.T) {
 	var ib inmemoryBlock
 
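The new test pins down the semantics of `commonPrefixLen`. A minimal sketch that satisfies the cases above; the actual VictoriaMetrics implementation may be optimized differently:

```go
package main

import "fmt"

// commonPrefixLen returns the length of the longest common prefix of a and b,
// consistent with the TestCommonPrefixLen cases above.
func commonPrefixLen(a, b []byte) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

func main() {
	fmt.Println(commonPrefixLen([]byte("abc"), []byte("abd")))           // 2
	fmt.Println(commonPrefixLen([]byte("01234569"), []byte("012345678"))) // 7
}
```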
@@ -8,11 +8,51 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 )
 
+var benchPrefixes = []string{
+	"", "x", "xy", "xyz", "xyz1", "xyz12",
+	"xyz123", "xyz1234", "01234567", "xyz123456", "xyz123456789012345678901234567890",
+	"aljkljfdpjopoewpoirerop934093094poipdfidpfdsfkjljdfpjoejkdjfljpfdkl",
+	"aljkljfdpjopoewpoirerop934093094poipdfidpfdsfkjljdfpjoejkdjfljpfdkllkj321oiiou321oijlkfdfjjlfdsjdslkfjdslfjldskafjldsflkfdsjlkj",
+}
+
+func BenchmarkCommonPrefixLen(b *testing.B) {
+	for _, prefix := range benchPrefixes {
+		b.Run(fmt.Sprintf("prefix-len-%d", len(prefix)), func(b *testing.B) {
+			benchmarkCommonPrefixLen(b, prefix)
+		})
+	}
+}
+
+func benchmarkCommonPrefixLen(b *testing.B, prefix string) {
+	b.ReportAllocs()
+	b.SetBytes(int64(len(prefix)))
+	b.RunParallel(func(pb *testing.PB) {
+		a := append([]byte{}, prefix...)
+		a = append(a, 'a')
+		b := append([]byte{}, prefix...)
+		b = append(b, 'b')
+		for pb.Next() {
+			n := commonPrefixLen(a, b)
+			if n != len(prefix) {
+				panic(fmt.Errorf("unexpected prefix len; got %d; want %d", n, len(prefix)))
+			}
+		}
+	})
+}
+
 func BenchmarkInmemoryBlockMarshal(b *testing.B) {
-	const itemsCount = 1000
+	for _, prefix := range benchPrefixes {
+		b.Run(fmt.Sprintf("prefix-len-%d", len(prefix)), func(b *testing.B) {
+			benchmarkInmemoryBlockMarshal(b, prefix)
+		})
+	}
+}
+
+func benchmarkInmemoryBlockMarshal(b *testing.B, prefix string) {
+	const itemsCount = 500
 	var ibSrc inmemoryBlock
 	for i := 0; i < itemsCount; i++ {
-		item := []byte(fmt.Sprintf("key %d", i))
+		item := []byte(fmt.Sprintf("%s%d", prefix, i))
 		if !ibSrc.Add(item) {
 			b.Fatalf("cannot add more than %d items", i)
 		}

@@ -20,7 +60,7 @@ func BenchmarkInmemoryBlockMarshal(b *testing.B) {
 	sort.Sort(&ibSrc)
 
 	b.ResetTimer()
-	b.SetBytes(itemsCount)
+	b.SetBytes(int64(itemsCount * len(prefix)))
 	b.ReportAllocs()
 	b.RunParallel(func(pb *testing.PB) {
 		var sb storageBlock

@@ -36,9 +76,17 @@ func BenchmarkInmemoryBlockMarshal(b *testing.B) {
 }
 
 func BenchmarkInmemoryBlockUnmarshal(b *testing.B) {
+	for _, prefix := range benchPrefixes {
+		b.Run(fmt.Sprintf("prefix-len-%d", len(prefix)), func(b *testing.B) {
+			benchmarkInmemoryBlockUnmarshal(b, prefix)
+		})
+	}
+}
+
+func benchmarkInmemoryBlockUnmarshal(b *testing.B, prefix string) {
 	var ibSrc inmemoryBlock
-	for i := 0; i < 1000; i++ {
-		item := []byte(fmt.Sprintf("key %d", i))
+	for i := 0; i < 500; i++ {
+		item := []byte(fmt.Sprintf("%s%d", prefix, i))
 		if !ibSrc.Add(item) {
 			b.Fatalf("cannot add more than %d items", i)
 		}

@@ -47,7 +95,7 @@ func BenchmarkInmemoryBlockUnmarshal(b *testing.B) {
 	firstItem, commonPrefix, itemsCount, mt := ibSrc.MarshalUnsortedData(&sbSrc, nil, nil, 0)
 
 	b.ResetTimer()
-	b.SetBytes(int64(itemsCount))
+	b.SetBytes(int64(itemsCount) * int64(len(prefix)))
 	b.ReportAllocs()
 	b.RunParallel(func(pb *testing.PB) {
 		var ib inmemoryBlock
@@ -116,18 +116,18 @@ again:
 
 	bsr := bsm.bsrHeap[0]
 
-	var nextItem []byte
+	var nextItem string
 	hasNextItem := false
 	if len(bsm.bsrHeap) > 1 {
 		bsr := bsm.bsrHeap.getNextReader()
-		nextItem = bsr.bh.firstItem
+		nextItem = bsr.CurrItem()
 		hasNextItem = true
 	}
 	items := bsr.Block.items
 	data := bsr.Block.data
-	for bsr.blockItemIdx < len(bsr.Block.items) {
-		item := items[bsr.blockItemIdx].Bytes(data)
-		if hasNextItem && string(item) > string(nextItem) {
+	for bsr.currItemIdx < len(bsr.Block.items) {
+		item := items[bsr.currItemIdx].Bytes(data)
+		if hasNextItem && string(item) > nextItem {
 			break
 		}
 		if !bsm.ib.Add(item) {

@@ -135,9 +135,9 @@ again:
 			bsm.flushIB(bsw, ph, itemsMerged)
 			continue
 		}
-		bsr.blockItemIdx++
+		bsr.currItemIdx++
 	}
-	if bsr.blockItemIdx == len(bsr.Block.items) {
+	if bsr.currItemIdx == len(bsr.Block.items) {
 		// bsr.Block is fully read. Proceed to the next block.
 		if bsr.Next() {
 			heap.Fix(&bsm.bsrHeap, 0)

@@ -151,8 +151,7 @@ again:
 	}
 
 	// The next item in the bsr.Block exceeds nextItem.
-	// Adjust bsr.bh.firstItem and return bsr to heap.
-	bsr.bh.firstItem = append(bsr.bh.firstItem[:0], bsr.Block.items[bsr.blockItemIdx].String(bsr.Block.data)...)
+	// Return bsr to heap.
 	heap.Fix(&bsm.bsrHeap, 0)
 	goto again
 }

@@ -212,7 +211,7 @@ func (bh bsrHeap) getNextReader() *blockStreamReader {
 	}
 	a := bh[1]
 	b := bh[2]
-	if string(a.bh.firstItem) <= string(b.bh.firstItem) {
+	if a.CurrItem() <= b.CurrItem() {
 		return a
 	}
 	return b

@@ -229,7 +228,7 @@ func (bh *bsrHeap) Swap(i, j int) {
 
 func (bh *bsrHeap) Less(i, j int) bool {
 	x := *bh
-	return string(x[i].bh.firstItem) < string(x[j].bh.firstItem)
+	return x[i].CurrItem() < x[j].CurrItem()
 }
 
 func (bh *bsrHeap) Pop() interface{} {
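The merge loop above now keys `bsrHeap` on each reader's current item (`CurrItem()`) instead of the block header's first item. A self-contained sketch of that k-way merge shape using `container/heap`, with toy types standing in for `blockStreamReader`:

```go
package main

import (
	"container/heap"
	"fmt"
)

// reader is a toy stand-in for blockStreamReader: it exposes the current
// item of a sorted slice. Names are illustrative, not the mergeset types.
type reader struct {
	items []string
	idx   int
}

func (r *reader) CurrItem() string { return r.items[r.idx] }
func (r *reader) Next() bool       { r.idx++; return r.idx < len(r.items) }

// readerHeap orders readers by their *current* item, mirroring the change
// in this diff from ordering by bh.firstItem to ordering by CurrItem().
type readerHeap []*reader

func (h readerHeap) Len() int            { return len(h) }
func (h readerHeap) Less(i, j int) bool  { return h[i].CurrItem() < h[j].CurrItem() }
func (h readerHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *readerHeap) Push(x interface{}) { *h = append(*h, x.(*reader)) }
func (h *readerHeap) Pop() interface{} {
	old := *h
	x := old[len(old)-1]
	*h = old[:len(old)-1]
	return x
}

func main() {
	h := readerHeap{
		{items: []string{"a", "d", "e"}},
		{items: []string{"b", "c", "f"}},
	}
	heap.Init(&h)
	for h.Len() > 0 {
		r := h[0]
		fmt.Print(r.CurrItem(), " ")
		if r.Next() {
			heap.Fix(&h, 0) // current item advanced; restore heap order
		} else {
			heap.Pop(&h) // reader exhausted
		}
	}
	fmt.Println() // prints: a b c d e f
}
```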
@@ -718,23 +718,28 @@ func (tb *Table) mergeRawItemsBlocks(ibs []*inmemoryBlock, isFinal bool) {
 }
 
 func (tb *Table) mergeInmemoryBlocks(ibs []*inmemoryBlock) *partWrapper {
-	// Convert ibs into inmemoryPart's
-	mps := make([]*inmemoryPart, 0, len(ibs))
+	atomic.AddUint64(&tb.mergesCount, 1)
+	atomic.AddUint64(&tb.activeMerges, 1)
+	defer atomic.AddUint64(&tb.activeMerges, ^uint64(0))
+
+	// Prepare blockStreamReaders for source blocks.
+	bsrs := make([]*blockStreamReader, 0, len(ibs))
 	for _, ib := range ibs {
 		if len(ib.items) == 0 {
 			continue
 		}
-		mp := getInmemoryPart()
-		mp.Init(ib)
+		bsr := getBlockStreamReader()
+		bsr.InitFromInmemoryBlock(ib)
 		putInmemoryBlock(ib)
-		mps = append(mps, mp)
+		bsrs = append(bsrs, bsr)
 	}
-	if len(mps) == 0 {
+	if len(bsrs) == 0 {
 		return nil
 	}
-	if len(mps) == 1 {
+	if len(bsrs) == 1 {
 		// Nothing to merge. Just return a single inmemory part.
-		mp := mps[0]
+		mp := getInmemoryPart()
+		mp.Init(&bsrs[0].Block)
 		p := mp.NewPart()
 		return &partWrapper{
 			p: p,

@@ -742,24 +747,6 @@ func (tb *Table) mergeInmemoryBlocks(ibs []*inmemoryBlock) *partWrapper {
 			refCount: 1,
 		}
 	}
-	defer func() {
-		// Return source inmemoryParts to pool.
-		for _, mp := range mps {
-			putInmemoryPart(mp)
-		}
-	}()
-
-	atomic.AddUint64(&tb.mergesCount, 1)
-	atomic.AddUint64(&tb.activeMerges, 1)
-	defer atomic.AddUint64(&tb.activeMerges, ^uint64(0))
-
-	// Prepare blockStreamReaders for source parts.
-	bsrs := make([]*blockStreamReader, 0, len(mps))
-	for _, mp := range mps {
-		bsr := getBlockStreamReader()
-		bsr.InitFromInmemoryPart(mp)
-		bsrs = append(bsrs, bsr)
-	}
 
 	// Prepare blockStreamWriter for destination part.
 	bsw := getBlockStreamWriter()
@@ -585,9 +585,9 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
 	scrapeResponseSize.Update(float64(sbr.bodyLen))
 	up := 1
 	if err != nil {
-		if samplesScraped == 0 {
-			up = 0
-		}
+		// Mark the scrape as failed even if it already read and pushed some samples
+		// to remote storage. This makes the logic compatible with Prometheus.
+		up = 0
 		scrapesFailed.Inc()
 	}
 	seriesAdded := 0
@@ -7,15 +7,16 @@ import (
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/appmetrics"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/metrics"
 )
 
 var (
 	pushURL = flagutil.NewArray("pushmetrics.url", "Optional URL to push metrics exposed at /metrics page. See https://docs.victoriametrics.com/#push-metrics . "+
 		"By default metrics exposed at /metrics page aren't pushed to any remote storage")
-	pushInterval = flag.Duration("pushmetrics.interval", 10*time.Second, "Interval for pushing metrics to -pushmetrics.url")
-	pushExtraLabels = flagutil.NewArray("pushmetrics.extraLabels", "Optional labels to add to metrics pushed to -pushmetrics.url . "+
-		`For example, -pushmetrics.extraLabels='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url`)
+	pushInterval   = flag.Duration("pushmetrics.interval", 10*time.Second, "Interval for pushing metrics to -pushmetrics.url")
+	pushExtraLabel = flagutil.NewArray("pushmetrics.extraLabel", "Optional labels to add to metrics pushed to -pushmetrics.url . "+
+		`For example, -pushmetrics.extraLabel='instance="foo"' adds instance="foo" label to all the metrics pushed to -pushmetrics.url`)
 )
 
 func init() {

@@ -23,10 +24,12 @@ func init() {
 	flagutil.RegisterSecretFlag("pushmetrics.url")
 }
 
-// Init must be called after flag.Parse.
+// Init must be called after logger.Init
 func Init() {
-	extraLabels := strings.Join(*pushExtraLabels, ",")
+	extraLabels := strings.Join(*pushExtraLabel, ",")
 	for _, pu := range *pushURL {
-		metrics.InitPushExt(pu, *pushInterval, extraLabels, appmetrics.WritePrometheusMetrics)
+		if err := metrics.InitPushExt(pu, *pushInterval, extraLabels, appmetrics.WritePrometheusMetrics); err != nil {
+			logger.Fatalf("cannot initialize pushmetrics: %s", err)
+		}
 	}
 }
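For reference, a minimal program wiring up the push the way `pushmetrics.Init` now does, using the `metrics.InitPushExt` signature shown in this diff. The URL is an example value, and `metrics.WritePrometheus` stands in for the internal `appmetrics.WritePrometheusMetrics` callback:

```go
package main

import (
	"io"
	"log"
	"time"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	// Push all metrics from the default set every 10 seconds, attaching
	// an extra instance label to every pushed metric.
	err := metrics.InitPushExt(
		"http://victoria-metrics:8428/api/v1/import/prometheus",
		10*time.Second,
		`instance="foo"`,
		func(w io.Writer) {
			metrics.WritePrometheus(w, true)
		},
	)
	if err != nil {
		log.Fatalf("cannot initialize metrics push: %s", err)
	}
	select {} // keep the process alive so the background pusher can run
}
```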
@@ -322,11 +322,22 @@ func (db *indexDB) getFromTagFiltersCache(qt *querytracer.Tracer, key []byte) ([
 		qt.Printf("cache miss")
 		return nil, false
 	}
+	if compressedBuf.B[0] == 0 {
+		// Fast path - tsids are stored in uncompressed form.
+		qt.Printf("found tsids with size: %d bytes", len(compressedBuf.B))
+		tsids, err := unmarshalTSIDs(nil, compressedBuf.B[1:])
+		if err != nil {
+			logger.Panicf("FATAL: cannot unmarshal tsids from tagFiltersCache: %s", err)
+		}
+		qt.Printf("unmarshaled %d tsids", len(tsids))
+		return tsids, true
+	}
+	// Slow path - tsids are stored in compressed form.
 	qt.Printf("found tsids with compressed size: %d bytes", len(compressedBuf.B))
 	buf := tagBufPool.Get()
 	defer tagBufPool.Put(buf)
 	var err error
-	buf.B, err = encoding.DecompressZSTD(buf.B[:0], compressedBuf.B)
+	buf.B, err = encoding.DecompressZSTD(buf.B[:0], compressedBuf.B[1:])
 	if err != nil {
 		logger.Panicf("FATAL: cannot decompress tsids from tagFiltersCache: %s", err)
 	}

@@ -344,15 +355,30 @@ var tagBufPool bytesutil.ByteBufferPool
 func (db *indexDB) putToTagFiltersCache(qt *querytracer.Tracer, tsids []TSID, key []byte) {
 	qt = qt.NewChild("put %d tsids in cache", len(tsids))
 	defer qt.Done()
+	if len(tsids) <= 2 {
+		// Fast path - store small number of tsids in uncompressed form.
+		// This saves CPU time on compress / decompress.
+		buf := tagBufPool.Get()
+		buf.B = append(buf.B[:0], 0)
+		buf.B = marshalTSIDs(buf.B, tsids)
+		qt.Printf("marshaled %d tsids into %d bytes", len(tsids), len(buf.B))
+		db.tagFiltersCache.SetBig(key, buf.B)
+		qt.Printf("store %d tsids into cache", len(tsids))
+		tagBufPool.Put(buf)
+		return
+	}
+	// Slower path - store big number of tsids in compressed form.
+	// This increases cache capacity.
 	buf := tagBufPool.Get()
 	buf.B = marshalTSIDs(buf.B[:0], tsids)
 	qt.Printf("marshaled %d tsids into %d bytes", len(tsids), len(buf.B))
 	compressedBuf := tagBufPool.Get()
-	compressedBuf.B = encoding.CompressZSTDLevel(compressedBuf.B[:0], buf.B, 1)
+	compressedBuf.B = append(compressedBuf.B[:0], 1)
+	compressedBuf.B = encoding.CompressZSTDLevel(compressedBuf.B, buf.B, 1)
 	qt.Printf("compressed %d tsids into %d bytes", len(tsids), len(compressedBuf.B))
 	tagBufPool.Put(buf)
 	db.tagFiltersCache.SetBig(key, compressedBuf.B)
-	qt.Printf("store %d compressed tsids into cache", len(tsids))
+	qt.Printf("stored %d compressed tsids into cache", len(tsids))
 	tagBufPool.Put(compressedBuf)
 }

@@ -528,11 +554,19 @@ func (is *indexSearch) GetOrCreateTSIDByName(dst *TSID, metricName, metricNameRa
 	if is.tsidByNameMisses < 100 {
 		err := is.getTSIDByMetricName(dst, metricName)
 		if err == nil {
+			// Fast path - the TSID for the given metricName has been found in the index.
 			is.tsidByNameMisses = 0
-			return is.db.s.registerSeriesCardinality(dst.MetricID, metricNameRaw)
+			if err = is.db.s.registerSeriesCardinality(dst.MetricID, metricNameRaw); err != nil {
+				return err
+			}
+			// There is no need in checking whether the TSID is present in the per-day index for the given date,
+			// since this check must be performed by the caller in an optimized way.
+			// See storage.updatePerDateData() function.
+			return nil
 		}
 		if err != io.EOF {
-			return fmt.Errorf("cannot search TSID by MetricName %q: %w", metricName, err)
+			userReadableMetricName := getUserReadableMetricName(metricNameRaw)
+			return fmt.Errorf("cannot search TSID by MetricName %s: %w", userReadableMetricName, err)
 		}
 		is.tsidByNameMisses++
 	} else {

@@ -547,7 +581,8 @@ func (is *indexSearch) GetOrCreateTSIDByName(dst *TSID, metricName, metricNameRa
 	// It is OK if duplicate TSID for mn is created by concurrent goroutines.
 	// Metric results will be merged by mn after TableSearch.
 	if err := is.createTSIDByName(dst, metricName, metricNameRaw, date); err != nil {
-		return fmt.Errorf("cannot create TSID by MetricName %q: %w", metricName, err)
+		userReadableMetricName := getUserReadableMetricName(metricNameRaw)
+		return fmt.Errorf("cannot create TSID by MetricName %s: %w", userReadableMetricName, err)
 	}
 	return nil
 }
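The cache change above prefixes each stored value with a one-byte marker: `0` for raw tsids, `1` for zstd-compressed ones, so the reader can branch on the first byte. A sketch of the same marker-byte pattern; it uses `compress/flate` purely for illustration, since the `lib/encoding` zstd helpers are not part of this diff:

```go
package main

import (
	"bytes"
	"compress/flate"
	"fmt"
	"io"
)

// encodeValue prefixes the payload with a marker byte: 0 = stored as-is,
// 1 = compressed. Small payloads skip compression to save CPU.
func encodeValue(payload []byte, compress bool) ([]byte, error) {
	if !compress {
		return append([]byte{0}, payload...), nil
	}
	var bb bytes.Buffer
	bb.WriteByte(1)
	zw, err := flate.NewWriter(&bb, flate.BestSpeed)
	if err != nil {
		return nil, err
	}
	if _, err := zw.Write(payload); err != nil {
		return nil, err
	}
	if err := zw.Close(); err != nil {
		return nil, err
	}
	return bb.Bytes(), nil
}

func decodeValue(b []byte) ([]byte, error) {
	if len(b) == 0 {
		return nil, fmt.Errorf("empty cache value")
	}
	if b[0] == 0 {
		// Fast path - the payload is stored in uncompressed form.
		return b[1:], nil
	}
	// Slow path - decompress everything after the marker byte.
	zr := flate.NewReader(bytes.NewReader(b[1:]))
	defer zr.Close()
	return io.ReadAll(zr)
}

func main() {
	enc, _ := encodeValue([]byte("tsids..."), true)
	dec, _ := decodeValue(enc)
	fmt.Printf("%s\n", dec) // tsids...
}
```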
27 vendor/github.com/VictoriaMetrics/metrics/push.go (generated, vendored)

@@ -9,6 +9,8 @@ import (
 	"net/http"
 	"net/url"
 	"time"
+
+	"compress/gzip"
 )
 
 // InitPushProcessMetrics sets up periodic push for 'process_*' metrics to the given pushURL with the given interval.

@@ -86,6 +88,9 @@ func (s *Set) InitPush(pushURL string, interval time.Duration, extraLabels strin
 //
 // It is OK calling InitPushExt multiple times with different pushURL -
 // in this case metrics are pushed to all the provided pushURL urls.
+//
+// It is OK calling InitPushExt multiple times with different writeMetrics -
+// in this case all the metrics generated by writeMetrics callbacks are writte to pushURL.
 func InitPushExt(pushURL string, interval time.Duration, extraLabels string, writeMetrics func(w io.Writer)) error {
 	if interval <= 0 {
 		return fmt.Errorf("interval must be positive; got %s", interval)

@@ -111,15 +116,33 @@ func InitPushExt(pushURL string, interval time.Duration, extraLabels string, wri
 	ticker := time.NewTicker(interval)
 	var bb bytes.Buffer
 	var tmpBuf []byte
+	zw := gzip.NewWriter(&bb)
 	for range ticker.C {
 		bb.Reset()
 		writeMetrics(&bb)
 		if len(extraLabels) > 0 {
 			tmpBuf = addExtraLabels(tmpBuf[:0], bb.Bytes(), extraLabels)
 			bb.Reset()
-			bb.Write(tmpBuf)
+			if _, err := bb.Write(tmpBuf); err != nil {
+				panic(fmt.Errorf("BUG: cannot write %d bytes to bytes.Buffer: %s", len(tmpBuf), err))
+			}
 		}
-		resp, err := c.Post(pushURL, "text/plain", &bb)
+		tmpBuf = append(tmpBuf[:0], bb.Bytes()...)
+		bb.Reset()
+		zw.Reset(&bb)
+		if _, err := zw.Write(tmpBuf); err != nil {
+			panic(fmt.Errorf("BUG: cannot write %d bytes to gzip writer: %s", len(tmpBuf), err))
+		}
+		if err := zw.Close(); err != nil {
+			panic(fmt.Errorf("BUG: cannot flush metrics to gzip writer: %s", err))
+		}
+		req, err := http.NewRequest("GET", pushURL, &bb)
+		if err != nil {
+			log.Printf("ERROR: metrics.push: cannot initialize request for metrics push to %q: %s", pushURLRedacted, err)
+		}
+		req.Header.Set("Content-Type", "text/plain")
+		req.Header.Set("Content-Encoding", "gzip")
+		resp, err := c.Do(req)
 		if err != nil {
 			log.Printf("ERROR: metrics.push: cannot push metrics to %q: %s", pushURLRedacted, err)
 			continue
30 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (generated, vendored)

@@ -10820,6 +10820,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-northeast-2",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "ap-southeast-2",
 			}: endpoint{},

@@ -15703,6 +15706,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-2",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-3",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},

@@ -15882,6 +15888,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-2",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-3",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},

@@ -19897,6 +19906,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-northeast-2",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-northeast-3",
+			}: endpoint{},
 			endpointKey{
 				Region: "ap-south-1",
 			}: endpoint{},

@@ -19906,6 +19918,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-2",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-3",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},

@@ -25082,9 +25097,21 @@ var awscnPartition = partition{
 			endpointKey{
 				Region: "cn-north-1",
 			}: endpoint{},
+			endpointKey{
+				Region:  "cn-north-1",
+				Variant: dualStackVariant,
+			}: endpoint{
+				Hostname: "firehose.cn-north-1.api.amazonwebservices.com.cn",
+			},
 			endpointKey{
 				Region: "cn-northwest-1",
 			}: endpoint{},
+			endpointKey{
+				Region:  "cn-northwest-1",
+				Variant: dualStackVariant,
+			}: endpoint{
+				Hostname: "firehose.cn-northwest-1.api.amazonwebservices.com.cn",
+			},
 		},
 	},
 	"fms": service{

@@ -31753,6 +31780,9 @@ var awsisoPartition = partition{
 			endpointKey{
 				Region: "us-iso-east-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "us-iso-west-1",
+			}: endpoint{},
 		},
 	},
 	"sts": service{
2 vendor/github.com/aws/aws-sdk-go/aws/version.go (generated, vendored)

@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.44.59"
+const SDKVersion = "1.44.67"
3 vendor/github.com/prometheus/procfs/.gitignore (generated, vendored)

@@ -1 +1,2 @@
-/fixtures/
+/testdata/fixtures/
+/fixtures
10 vendor/github.com/prometheus/procfs/.golangci.yml (generated, vendored)

@@ -1,4 +1,12 @@
 ---
 linters:
   enable:
-  - golint
+  - godot
+  - revive
+
+linter-settings:
+  godot:
+    capital: true
+    exclude:
+    # Ignore "See: URL"
+    - 'See:'
4 vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md (generated, vendored)

@@ -1,3 +1,3 @@
-## Prometheus Community Code of Conduct
+# Prometheus Community Code of Conduct
 
-Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
+Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
4 vendor/github.com/prometheus/procfs/CONTRIBUTING.md (generated, vendored)

@@ -97,7 +97,7 @@ Many of the files are changing continuously and the data being read can in some
 reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls
 to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the
 full file in a single operation using an internal utility function called `util.ReadFileNoStat`.
-This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of
+This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of
 the file.
 
 Note that parsing the file's contents can still be performed one line at a time. This is done by first reading

@@ -113,7 +113,7 @@ the full file, and then using a scanner on the `[]byte` or `string` containing t
 ```
 
 The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files
-can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does
+can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does
 not bother to check the size of the file before reading.
 ```
 data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity")
10 vendor/github.com/prometheus/procfs/Makefile (generated, vendored)

@@ -14,18 +14,18 @@
 include Makefile.common
 
 %/.unpacked: %.ttar
-	@echo ">> extracting fixtures"
+	@echo ">> extracting fixtures $*"
 	./ttar -C $(dir $*) -x -f $*.ttar
 	touch $@
 
-fixtures: fixtures/.unpacked
+fixtures: testdata/fixtures/.unpacked
 
 update_fixtures:
-	rm -vf fixtures/.unpacked
-	./ttar -c -f fixtures.ttar fixtures/
+	rm -vf testdata/fixtures/.unpacked
+	./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/
 
 .PHONY: build
 build:
 
 .PHONY: test
-test: fixtures/.unpacked common-test
+test: testdata/fixtures/.unpacked common-test
89 vendor/github.com/prometheus/procfs/Makefile.common (generated, vendored)

@@ -36,29 +36,6 @@ GO_VERSION ?= $(shell $(GO) version)
 GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
 PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
 
-GOVENDOR :=
-GO111MODULE :=
-ifeq (, $(PRE_GO_111))
-	ifneq (,$(wildcard go.mod))
-		# Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
-		GO111MODULE := on
-
-		ifneq (,$(wildcard vendor))
-			# Always use the local vendor/ directory to satisfy the dependencies.
-			GOOPTS := $(GOOPTS) -mod=vendor
-		endif
-	endif
-else
-	ifneq (,$(wildcard go.mod))
-		ifneq (,$(wildcard vendor))
-			$(warning This repository requires Go >= 1.11 because of Go modules)
-			$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)')
-		endif
-	else
-		# This repository isn't using Go modules (yet).
-		GOVENDOR := $(FIRST_GOPATH)/bin/govendor
-	endif
-endif
 PROMU := $(FIRST_GOPATH)/bin/promu
 pkgs = ./...
 

@@ -78,17 +55,23 @@ ifneq ($(shell which gotestsum),)
 	endif
 endif
 
-PROMU_VERSION ?= 0.12.0
+PROMU_VERSION ?= 0.13.0
 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
 
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.39.0
+GOLANGCI_LINT_VERSION ?= v1.45.2
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
 	ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
-		GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		# If we're in CI and there is an Actions file, that means the linter
+		# is being run in Actions, so we don't need to run it here.
+		ifeq (,$(CIRCLE_JOB))
+			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
+			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		endif
 	endif
 endif
 

@@ -144,32 +127,25 @@ common-check_license:
 .PHONY: common-deps
 common-deps:
	@echo ">> getting dependencies"
-ifdef GO111MODULE
-	GO111MODULE=$(GO111MODULE) $(GO) mod download
-else
-	$(GO) get $(GOOPTS) -t ./...
-endif
+	$(GO) mod download
 
 .PHONY: update-go-deps
 update-go-deps:
	@echo ">> updating Go dependencies"
	@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
-		$(GO) get $$m; \
+		$(GO) get -d $$m; \
	done
-	GO111MODULE=$(GO111MODULE) $(GO) mod tidy
-ifneq (,$(wildcard vendor))
-	GO111MODULE=$(GO111MODULE) $(GO) mod vendor
-endif
+	$(GO) mod tidy
 
 .PHONY: common-test-short
 common-test-short: $(GOTEST_DIR)
	@echo ">> running short tests"
-	GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs)
+	$(GOTEST) -short $(GOOPTS) $(pkgs)
 
 .PHONY: common-test
 common-test: $(GOTEST_DIR)
	@echo ">> running all tests"
-	GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
+	$(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
 
 $(GOTEST_DIR):
	@mkdir -p $@

@@ -177,25 +153,21 @@ $(GOTEST_DIR):
 .PHONY: common-format
 common-format:
	@echo ">> formatting code"
-	GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs)
+	$(GO) fmt $(pkgs)
 
 .PHONY: common-vet
 common-vet:
	@echo ">> vetting code"
-	GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
+	$(GO) vet $(GOOPTS) $(pkgs)
 
 .PHONY: common-lint
 common-lint: $(GOLANGCI_LINT)
 ifdef GOLANGCI_LINT
	@echo ">> running golangci-lint"
-ifdef GO111MODULE
 # 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
 # Otherwise staticcheck might fail randomly for some reason not yet explained.
-	GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
-	GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
-else
-	$(GOLANGCI_LINT) run $(pkgs)
-endif
+	$(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
+	$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
 endif
 
 .PHONY: common-yamllint

@@ -212,28 +184,15 @@ endif
 common-staticcheck: lint
 
 .PHONY: common-unused
-common-unused: $(GOVENDOR)
-ifdef GOVENDOR
-	@echo ">> running check for unused packages"
-	@$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
-else
-ifdef GO111MODULE
+common-unused:
	@echo ">> running check for unused/missing packages in go.mod"
-	GO111MODULE=$(GO111MODULE) $(GO) mod tidy
-ifeq (,$(wildcard vendor))
+	$(GO) mod tidy
	@git diff --exit-code -- go.sum go.mod
-else
-	@echo ">> running check for unused packages in vendor/"
-	GO111MODULE=$(GO111MODULE) $(GO) mod vendor
-	@git diff --exit-code -- go.sum go.mod vendor/
-endif
-endif
-endif
 
 .PHONY: common-build
 common-build: promu
	@echo ">> building binaries"
-	GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
+	$(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
 
 .PHONY: common-tarball
 common-tarball: promu

@@ -289,12 +248,6 @@ $(GOLANGCI_LINT):
		| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
 endif
 
-ifdef GOVENDOR
-.PHONY: $(GOVENDOR)
-$(GOVENDOR):
-	GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
-endif
-
 .PHONY: precheck
 precheck::
 
2 vendor/github.com/prometheus/procfs/SECURITY.md (generated, vendored)

@@ -3,4 +3,4 @@
 The Prometheus security policy, including how to report vulnerabilities, can be
 found here:
 
-https://prometheus.io/docs/operating/security/
+<https://prometheus.io/docs/operating/security/>
45 vendor/github.com/prometheus/procfs/arp.go (generated, vendored)

@@ -15,11 +15,28 @@ package procfs
 
 import (
 	"fmt"
-	"io/ioutil"
 	"net"
+	"os"
+	"strconv"
 	"strings"
 )
 
+// Learned from include/uapi/linux/if_arp.h.
+const (
+	// completed entry (ha valid).
+	ATFComplete = 0x02
+	// permanent entry.
+	ATFPermanent = 0x04
+	// Publish entry.
+	ATFPublish = 0x08
+	// Has requested trailers.
+	ATFUseTrailers = 0x10
+	// Obsoleted: Want to use a netmask (only for proxy entries).
+	ATFNetmask = 0x20
+	// Don't answer this addresses.
+	ATFDontPublish = 0x40
+)
+
 // ARPEntry contains a single row of the columnar data represented in
 // /proc/net/arp.
 type ARPEntry struct {

@@ -29,12 +46,14 @@ type ARPEntry struct {
 	HWAddr net.HardwareAddr
 	// Name of the device
 	Device string
+	// Flags
+	Flags byte
 }
 
 // GatherARPEntries retrieves all the ARP entries, parse the relevant columns,
 // and then return a slice of ARPEntry's.
 func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
-	data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
+	data, err := os.ReadFile(fs.proc.Path("net/arp"))
 	if err != nil {
 		return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
 	}

@@ -72,14 +91,26 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
 }
 
 func parseARPEntry(columns []string) (ARPEntry, error) {
+	entry := ARPEntry{Device: columns[5]}
 	ip := net.ParseIP(columns[0])
-	mac := net.HardwareAddr(columns[3])
+	entry.IPAddr = ip
 
-	entry := ARPEntry{
-		IPAddr: ip,
-		HWAddr: mac,
-		Device: columns[5],
+	if mac, err := net.ParseMAC(columns[3]); err == nil {
+		entry.HWAddr = mac
+	} else {
+		return ARPEntry{}, err
 	}
 
+	if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil {
+		entry.Flags = byte(flags)
+	} else {
+		return ARPEntry{}, err
+	}
+
 	return entry, nil
 }
 
+// IsComplete returns true if ARP entry is marked with complete flag.
+func (entry *ARPEntry) IsComplete() bool {
+	return entry.Flags&ATFComplete != 0
+}
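The new `ATF*` constants and `IsComplete` make the flags column of `/proc/net/arp` usable. A tiny sketch of testing those bits; the `0x6` value is an example (complete plus permanent):

```go
package main

import "fmt"

// Mirror of the if_arp.h flag constants added above.
const (
	ATFComplete  = 0x02
	ATFPermanent = 0x04
)

// isComplete mimics ARPEntry.IsComplete from the diff: the entry's flag
// byte is tested against the ATF* bit masks.
func isComplete(flags byte) bool { return flags&ATFComplete != 0 }

func main() {
	// /proc/net/arp reports flags as a hex string such as "0x6",
	// i.e. ATFComplete|ATFPermanent.
	flags := byte(0x6)
	fmt.Println(isComplete(flags))       // true
	fmt.Println(flags&ATFPermanent != 0) // true
}
```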
5 vendor/github.com/prometheus/procfs/cpuinfo.go (generated, vendored)

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build linux
 // +build linux
 
 package procfs

@@ -27,7 +28,7 @@ import (
 	"github.com/prometheus/procfs/internal/util"
 )
 
-// CPUInfo contains general information about a system CPU found in /proc/cpuinfo
+// CPUInfo contains general information about a system CPU found in /proc/cpuinfo.
 type CPUInfo struct {
 	Processor uint
 	VendorID  string

@@ -469,7 +470,7 @@ func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode
 }
 
 // firstNonEmptyLine advances the scanner to the first non-empty line
-// and returns the contents of that line
+// and returns the contents of that line.
 func firstNonEmptyLine(scanner *bufio.Scanner) string {
 	for scanner.Scan() {
 		line := scanner.Text()
1 vendor/github.com/prometheus/procfs/cpuinfo_armx.go (generated, vendored)

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build linux && (arm || arm64)
 // +build linux
 // +build arm arm64
 

1 vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go (generated, vendored)

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build linux && (mips || mipsle || mips64 || mips64le)
 // +build linux
 // +build mips mipsle mips64 mips64le
 

4 vendor/github.com/prometheus/procfs/cpuinfo_others.go (generated, vendored)

@@ -11,8 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build linux
-// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
+//go:build linux && !386 && !amd64 && !arm && !arm64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
+// +build linux,!386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
 
 package procfs
 

1 vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go (generated, vendored)

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build linux && (ppc64 || ppc64le)
 // +build linux
 // +build ppc64 ppc64le
 

1 vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go (generated, vendored)

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build linux && (riscv || riscv64)
 // +build linux
 // +build riscv riscv64
 

1 vendor/github.com/prometheus/procfs/cpuinfo_s390x.go (generated, vendored)

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build linux
 // +build linux
 
 package procfs

1 vendor/github.com/prometheus/procfs/cpuinfo_x86.go (generated, vendored)

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build linux && (386 || amd64)
 // +build linux
 // +build 386 amd64
 
7673 vendor/github.com/prometheus/procfs/fixtures.ttar (generated, vendored)
(File diff suppressed because it is too large)
2 vendor/github.com/prometheus/procfs/internal/fs/fs.go (generated, vendored)

@@ -26,7 +26,7 @@ const (
 	// DefaultSysMountPoint is the common mount point of the sys filesystem.
 	DefaultSysMountPoint = "/sys"
 
-	// DefaultConfigfsMountPoint is the common mount point of the configfs
+	// DefaultConfigfsMountPoint is the common mount point of the configfs.
 	DefaultConfigfsMountPoint = "/sys/kernel/config"
 )
 
6 vendor/github.com/prometheus/procfs/internal/util/parse.go (generated, vendored)

@@ -14,7 +14,7 @@
 package util
 
 import (
-	"io/ioutil"
+	"os"
 	"strconv"
 	"strings"
 )

@@ -66,7 +66,7 @@ func ParsePInt64s(ss []string) ([]*int64, error) {
 
 // ReadUintFromFile reads a file and attempts to parse a uint64 from it.
 func ReadUintFromFile(path string) (uint64, error) {
-	data, err := ioutil.ReadFile(path)
+	data, err := os.ReadFile(path)
 	if err != nil {
 		return 0, err
 	}

@@ -75,7 +75,7 @@ func ReadUintFromFile(path string) (uint64, error) {
 
 // ReadIntFromFile reads a file and attempts to parse a int64 from it.
 func ReadIntFromFile(path string) (int64, error) {
-	data, err := ioutil.ReadFile(path)
+	data, err := os.ReadFile(path)
 	if err != nil {
 		return 0, err
 	}
11 vendor/github.com/prometheus/procfs/internal/util/readfile.go (generated, vendored)

@@ -15,17 +15,16 @@ package util
 
 import (
 	"io"
-	"io/ioutil"
 	"os"
 )
 
-// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file.
-// This is similar to ioutil.ReadFile but without the call to os.Stat, because
+// ReadFileNoStat uses io.ReadAll to read contents of entire file.
+// This is similar to os.ReadFile but without the call to os.Stat, because
 // many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
-// Reads a max file size of 512kB. For files larger than this, a scanner
+// Reads a max file size of 1024kB. For files larger than this, a scanner
 // should be used.
 func ReadFileNoStat(filename string) ([]byte, error) {
-	const maxBufferSize = 1024 * 512
+	const maxBufferSize = 1024 * 1024
 
 	f, err := os.Open(filename)
 	if err != nil {

@@ -34,5 +33,5 @@ func ReadFileNoStat(filename string) ([]byte, error) {
 	defer f.Close()
 
 	reader := io.LimitReader(f, maxBufferSize)
-	return ioutil.ReadAll(reader)
+	return io.ReadAll(reader)
 }
8 vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go (generated, vendored)

@@ -11,7 +11,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build linux,!appengine
+//go:build (linux || darwin) && !appengine
+// +build linux darwin
+// +build !appengine
 
 package util
 

@@ -21,7 +23,7 @@ import (
 	"syscall"
 )
 
-// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
+// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly.
 // https://github.com/prometheus/node_exporter/pull/728/files
 //
 // Note that this function will not read files larger than 128 bytes.

@@ -33,7 +35,7 @@ func SysReadFile(file string) (string, error) {
 	defer f.Close()
 
 	// On some machines, hwmon drivers are broken and return EAGAIN. This causes
-	// Go's ioutil.ReadFile implementation to poll forever.
+	// Go's os.ReadFile implementation to poll forever.
 	//
 	// Since we either want to read data or bail immediately, do the simplest
 	// possible read using syscall directly.

3 vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go (generated, vendored)

@@ -11,7 +11,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-// +build linux,appengine !linux
+//go:build (linux && appengine) || (!linux && !darwin)
+// +build linux,appengine !linux,!darwin
 
 package util
 
3 vendor/github.com/prometheus/procfs/ipvs.go (generated, vendored)

@@ -20,7 +20,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net"
 	"os"
 	"strconv"

@@ -84,7 +83,7 @@ func parseIPVSStats(r io.Reader) (IPVSStats, error) {
 	stats IPVSStats
 	)
 
-	statContent, err := ioutil.ReadAll(r)
+	statContent, err := io.ReadAll(r)
 	if err != nil {
 		return IPVSStats{}, err
 	}
1 vendor/github.com/prometheus/procfs/kernel_random.go (generated, vendored)

@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build !windows
 // +build !windows
 
 package procfs
2 vendor/github.com/prometheus/procfs/loadavg.go (generated, vendored)

@@ -21,7 +21,7 @@ import (
 	"github.com/prometheus/procfs/internal/util"
 )
 
-// LoadAvg represents an entry in /proc/loadavg
+// LoadAvg represents an entry in /proc/loadavg.
 type LoadAvg struct {
 	Load1 float64
 	Load5 float64
10 vendor/github.com/prometheus/procfs/mdstat.go (generated, vendored)

@@ -15,7 +15,7 @@ package procfs
 
 import (
 	"fmt"
-	"io/ioutil"
+	"os"
 	"regexp"
 	"strconv"
 	"strings"

@@ -64,7 +64,7 @@ type MDStat struct {
 // structs containing the relevant info. More information available here:
 // https://raid.wiki.kernel.org/index.php/Mdstat
 func (fs FS) MDStat() ([]MDStat, error) {
-	data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
+	data, err := os.ReadFile(fs.proc.Path("mdstat"))
 	if err != nil {
 		return nil, err
 	}

@@ -166,8 +166,12 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
 }
 
 func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
+	statusFields := strings.Fields(statusLine)
+	if len(statusFields) < 1 {
+		return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine)
+	}
 
-	sizeStr := strings.Fields(statusLine)[0]
+	sizeStr := statusFields[0]
 	size, err = strconv.ParseInt(sizeStr, 10, 64)
 	if err != nil {
 		return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
12 vendor/github.com/prometheus/procfs/net_conntrackstat.go (generated, vendored)

@@ -25,7 +25,7 @@ import (
 )
 
 // A ConntrackStatEntry represents one line from net/stat/nf_conntrack
-// and contains netfilter conntrack statistics at one CPU core
+// and contains netfilter conntrack statistics at one CPU core.
 type ConntrackStatEntry struct {
 	Entries uint64
 	Found   uint64

@@ -38,12 +38,12 @@ type ConntrackStatEntry struct {
 	SearchRestart uint64
 }
 
-// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores
+// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores.
 func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
 	return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
 }
 
-// Parses a slice of ConntrackStatEntries from the given filepath
+// Parses a slice of ConntrackStatEntries from the given filepath.
 func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
 	// This file is small and can be read with one syscall.
 	b, err := util.ReadFileNoStat(path)

@@ -61,7 +61,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
 	return stat, nil
 }
 
-// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
+// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries.
 func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
 	var entries []ConntrackStatEntry
 

@@ -79,7 +79,7 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
 	return entries, nil
 }
 
-// Parses a ConntrackStatEntry from given array of fields
+// Parses a ConntrackStatEntry from given array of fields.
 func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
 	if len(fields) != 17 {
 		return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")

@@ -143,7 +143,7 @@ func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
 	return entry, nil
 }
 
-// Parses a uint64 from given hex in string
+// Parses a uint64 from given hex in string.
 func parseConntrackStatField(field string) (uint64, error) {
 	val, err := strconv.ParseUint(field, 16, 64)
 	if err != nil {
8 vendor/github.com/prometheus/procfs/net_dev.go (generated, vendored)

@@ -87,17 +87,17 @@ func newNetDev(file string) (NetDev, error) {
 // parseLine parses a single line from the /proc/net/dev file. Header lines
 // must be filtered prior to calling this method.
 func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
-	parts := strings.SplitN(rawLine, ":", 2)
-	if len(parts) != 2 {
+	idx := strings.LastIndex(rawLine, ":")
+	if idx == -1 {
 		return nil, errors.New("invalid net/dev line, missing colon")
 	}
-	fields := strings.Fields(strings.TrimSpace(parts[1]))
+	fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:]))
 
 	var err error
 	line := &NetDevLine{}
 
 	// Interface Name
-	line.Name = strings.TrimSpace(parts[0])
+	line.Name = strings.TrimSpace(rawLine[:idx])
 	if line.Name == "" {
 		return nil, errors.New("invalid net/dev line, empty interface name")
 	}
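The switch from `strings.SplitN` to `strings.LastIndex` matters when the interface name itself contains a colon (for example alias names like `eth0:1`): the statistics after the separator are purely numeric, so the last colon is the reliable boundary. A standalone sketch of the same split; the sample line is made up:

```go
package main

import (
	"fmt"
	"strings"
)

// splitNetDevLine splits a /proc/net/dev-style line at the *last* colon,
// mirroring the parseLine change above.
func splitNetDevLine(rawLine string) (name, stats string, ok bool) {
	idx := strings.LastIndex(rawLine, ":")
	if idx == -1 {
		return "", "", false
	}
	return strings.TrimSpace(rawLine[:idx]), strings.TrimSpace(rawLine[idx+1:]), true
}

func main() {
	name, stats, _ := splitNetDevLine("eth0:1: 4556 30 0 0")
	fmt.Printf("%q %q\n", name, stats) // "eth0:1" "4556 30 0 0"
}
```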
2 vendor/github.com/prometheus/procfs/net_ip_socket.go (generated, vendored)

@@ -34,7 +34,7 @@ const (
 	readLimit = 4294967296 // Byte -> 4 GiB
 )
 
-// this contains generic data structures for both udp and tcp sockets
+// This contains generic data structures for both udp and tcp sockets.
 type (
 	// NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
 	NetIPSocket []*netIPSocketLine
4 vendor/github.com/prometheus/procfs/net_protocols.go (generated, vendored)

@@ -23,7 +23,7 @@ import (
 	"github.com/prometheus/procfs/internal/util"
 )
 
-// NetProtocolStats stores the contents from /proc/net/protocols
+// NetProtocolStats stores the contents from /proc/net/protocols.
 type NetProtocolStats map[string]NetProtocolStatLine
 
 // NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We

@@ -41,7 +41,7 @@ type NetProtocolStatLine struct {
 	Capabilities NetProtocolCapabilities
 }
 
-// NetProtocolCapabilities contains a list of capabilities for each protocol
+// NetProtocolCapabilities contains a list of capabilities for each protocol.
 type NetProtocolCapabilities struct {
 	Close   bool // 8
 	Connect bool // 9
8 vendor/github.com/prometheus/procfs/net_softnet.go (generated, vendored)

@@ -30,13 +30,13 @@ import (
 // * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
 // and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
 
-// SoftnetStat contains a single row of data from /proc/net/softnet_stat
+// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
 type SoftnetStat struct {
-	// Number of processed packets
+	// Number of processed packets.
 	Processed uint32
-	// Number of dropped packets
+	// Number of dropped packets.
 	Dropped uint32
-	// Number of times processing packets ran out of quota
+	// Number of times processing packets ran out of quota.
 	TimeSqueezed uint32
 }
 

@@ -79,10 +79,13 @@ type XfrmStat struct {
 	// Policy is dead
 	XfrmOutPolDead int
 	// Policy Error
-	XfrmOutPolError     int
-	XfrmFwdHdrError     int
+	XfrmOutPolError int
+	// Forward routing of a packet is not allowed
+	XfrmFwdHdrError int
+	// State is invalid, perhaps expired
 	XfrmOutStateInvalid int
-	XfrmAcquireError    int
+	// State hasn’t been fully acquired before use
+	XfrmAcquireError int
 }
 
 // NewXfrmStat reads the xfrm_stat statistics.
8
vendor/github.com/prometheus/procfs/netstat.go
generated
vendored
8
vendor/github.com/prometheus/procfs/netstat.go
generated
vendored
|
@@ -21,13 +21,13 @@ import (
 	"strings"
 )
 
-// NetStat contains statistics for all the counters from one file
+// NetStat contains statistics for all the counters from one file.
 type NetStat struct {
-	Filename string
-	Stats    map[string][]uint64
+	Stats    map[string][]uint64
+	Filename string
 }
 
-// NetStat retrieves stats from /proc/net/stat/
+// NetStat retrieves stats from `/proc/net/stat/`.
 func (fs FS) NetStat() ([]NetStat, error) {
 	statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
 	if err != nil {
@@ -55,7 +55,7 @@ func (fs FS) NetStat() ([]NetStat, error) {
 	// Other strings represent per-CPU counters
 	for scanner.Scan() {
 		for num, counter := range strings.Fields(scanner.Text()) {
-			value, err := strconv.ParseUint(counter, 16, 32)
+			value, err := strconv.ParseUint(counter, 16, 64)
 			if err != nil {
 				return nil, err
 			}
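The bitSize fix above is load-bearing: the per-CPU counters in /proc/net/stat/* are printed in hex and can exceed 32 bits on long-lived machines, at which point strconv.ParseUint with bitSize 32 returns a range error. A self-contained stdlib illustration (the hex value is made up for the example):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	counter := "12a05f200" // 5,000,000,000 in hex; does not fit in uint32
	if _, err := strconv.ParseUint(counter, 16, 32); err != nil {
		fmt.Println("bitSize 32:", err) // value out of range
	}
	value, err := strconv.ParseUint(counter, 16, 64)
	if err != nil {
		panic(err)
	}
	fmt.Println("bitSize 64:", value) // 5000000000
}
```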
10  vendor/github.com/prometheus/procfs/proc.go  (generated, vendored)
@@ -16,7 +16,7 @@ package procfs
 import (
 	"bytes"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"os"
 	"strconv"
 	"strings"
@@ -82,7 +82,7 @@ func (fs FS) Self() (Proc, error) {
 
 // NewProc returns a process for the given pid.
 //
-// Deprecated: use fs.Proc() instead
+// Deprecated: Use fs.Proc() instead.
 func (fs FS) NewProc(pid int) (Proc, error) {
 	return fs.Proc(pid)
 }
@@ -142,7 +142,7 @@ func (p Proc) Wchan() (string, error) {
 	}
 	defer f.Close()
 
-	data, err := ioutil.ReadAll(f)
+	data, err := io.ReadAll(f)
 	if err != nil {
 		return "", err
 	}
@@ -185,7 +185,7 @@ func (p Proc) Cwd() (string, error) {
 	return wd, err
 }
 
-// RootDir returns the absolute path to the process's root directory (as set by chroot)
+// RootDir returns the absolute path to the process's root directory (as set by chroot).
 func (p Proc) RootDir() (string, error) {
 	rdir, err := os.Readlink(p.path("root"))
 	if os.IsNotExist(err) {
@@ -311,7 +311,7 @@ func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
 
 // Schedstat returns task scheduling information for the process.
 func (p Proc) Schedstat() (ProcSchedstat, error) {
-	contents, err := ioutil.ReadFile(p.path("schedstat"))
+	contents, err := os.ReadFile(p.path("schedstat"))
 	if err != nil {
 		return ProcSchedstat{}, err
 	}
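The recurring change in this file is the Go 1.16 io/ioutil deprecation: ioutil.ReadAll and ioutil.ReadFile are now thin wrappers, so the vendored code calls io.ReadAll and os.ReadFile directly. A minimal stdlib-only sketch of the two replacements; the /proc paths are just examples:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// os.ReadFile replaces ioutil.ReadFile one-for-one.
	contents, err := os.ReadFile("/proc/self/schedstat")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(contents))

	// io.ReadAll replaces ioutil.ReadAll for an already-open reader.
	f, err := os.Open("/proc/self/wchan")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	data, err := io.ReadAll(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(data))
}
```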
6  vendor/github.com/prometheus/procfs/proc_cgroup.go  (generated, vendored)
@@ -45,7 +45,7 @@ type Cgroup struct {
 }
 
 // parseCgroupString parses each line of the /proc/[pid]/cgroup file
-// Line format is hierarchyID:[controller1,controller2]:path
+// Line format is hierarchyID:[controller1,controller2]:path.
 func parseCgroupString(cgroupStr string) (*Cgroup, error) {
 	var err error
 
@@ -69,7 +69,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) {
 	return cgroup, nil
 }
 
-// parseCgroups reads each line of the /proc/[pid]/cgroup file
+// parseCgroups reads each line of the /proc/[pid]/cgroup file.
 func parseCgroups(data []byte) ([]Cgroup, error) {
 	var cgroups []Cgroup
 	scanner := bufio.NewScanner(bytes.NewReader(data))
@@ -88,7 +88,7 @@ func parseCgroups(data []byte) ([]Cgroup, error) {
 
 // Cgroups reads from /proc/<pid>/cgroups and returns a []*Cgroup struct locating this PID in each process
 // control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
-// so the len of the returned struct is equal to the number of active hierarchies on this system
+// so the len of the returned struct is equal to the number of active hierarchies on this system.
 func (p Proc) Cgroups() ([]Cgroup, error) {
 	data, err := util.ReadFileNoStat(p.path("cgroup"))
 	if err != nil {
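A short usage sketch for the Cgroups accessor documented above; it assumes the package-level procfs.Self() helper and the HierarchyID/Path field names of the Cgroup struct:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // the /proc entry of this process
	if err != nil {
		log.Fatal(err)
	}
	cgroups, err := p.Cgroups() // one entry per active hierarchy
	if err != nil {
		log.Fatal(err)
	}
	for _, cg := range cgroups {
		fmt.Printf("hierarchy %d: %s\n", cg.HierarchyID, cg.Path)
	}
}
```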
98  vendor/github.com/prometheus/procfs/proc_cgroups.go  (generated, vendored, new file)
@@ -0,0 +1,98 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// CgroupSummary models one line from /proc/cgroups.
+// This file contains information about the controllers that are compiled into the kernel.
+//
+// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
+type CgroupSummary struct {
+	// The name of the controller. controller is also known as subsystem.
+	SubsysName string
+	// The unique ID of the cgroup hierarchy on which this controller is mounted.
+	Hierarchy int
+	// The number of control groups in this hierarchy using this controller.
+	Cgroups int
+	// This field contains the value 1 if this controller is enabled, or 0 if it has been disabled
+	Enabled int
+}
+
+// parseCgroupSummary parses each line of the /proc/cgroup file
+// Line format is `subsys_name hierarchy num_cgroups enabled`.
+func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
+	var err error
+
+	fields := strings.Fields(CgroupSummaryStr)
+	// require at least 4 fields
+	if len(fields) < 4 {
+		return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr)
+	}
+
+	CgroupSummary := &CgroupSummary{
+		SubsysName: fields[0],
+	}
+	CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse hierarchy ID")
+	}
+	CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse Cgroup Num")
+	}
+	CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse Enabled")
+	}
+	return CgroupSummary, nil
+}
+
+// parseCgroupSummary reads each line of the /proc/cgroup file.
+func parseCgroupSummary(data []byte) ([]CgroupSummary, error) {
+	var CgroupSummarys []CgroupSummary
+	scanner := bufio.NewScanner(bytes.NewReader(data))
+	for scanner.Scan() {
+		CgroupSummaryString := scanner.Text()
+		// ignore comment lines
+		if strings.HasPrefix(CgroupSummaryString, "#") {
+			continue
+		}
+		CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString)
+		if err != nil {
+			return nil, err
+		}
+		CgroupSummarys = append(CgroupSummarys, *CgroupSummary)
+	}
+
+	err := scanner.Err()
+	return CgroupSummarys, err
+}
+
+// CgroupSummarys returns information about current /proc/cgroups.
+func (fs FS) CgroupSummarys() ([]CgroupSummary, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("cgroups"))
+	if err != nil {
+		return nil, err
+	}
+	return parseCgroupSummary(data)
+}
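A small consumer sketch for the new CgroupSummarys accessor added in this file; it simply echoes the columns of /proc/cgroups, with procfs.NewFS assumed to point at the default /proc mount:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatal(err)
	}
	summaries, err := fs.CgroupSummarys()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range summaries {
		fmt.Printf("%-12s hierarchy=%d cgroups=%d enabled=%d\n",
			s.SubsysName, s.Hierarchy, s.Cgroups, s.Enabled)
	}
}
```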
2  vendor/github.com/prometheus/procfs/proc_environ.go  (generated, vendored)
@@ -19,7 +19,7 @@ import (
 	"github.com/prometheus/procfs/internal/util"
 )
 
-// Environ reads process environments from /proc/<pid>/environ
+// Environ reads process environments from `/proc/<pid>/environ`.
 func (p Proc) Environ() ([]string, error) {
 	environments := make([]string, 0)
 
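A closing sketch for the Environ accessor, which returns the KEY=VALUE strings stored NUL-separated in `/proc/<pid>/environ`; procfs.Self() is assumed for brevity:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	env, err := p.Environ() // one KEY=VALUE string per variable
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range env {
		fmt.Println(kv)
	}
}
```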
Some files were not shown because too many files have changed in this diff.