Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2024-11-21 14:44:00 +00:00)

Commit 9fff48c3e3 (parent 438b2e11bd)
app,lib: fix typos in comments (#3804)

41 changed files with 54 additions and 54 deletions
@@ -41,7 +41,7 @@ type Alert struct {
 	LastSent time.Time
 	// Value stores the value returned from evaluating expression from Expr field
 	Value float64
-	// ID is the unique identifer for the Alert
+	// ID is the unique identifier for the Alert
 	ID uint64
 	// Restored is true if Alert was restored after restart
 	Restored bool
@@ -29,7 +29,7 @@ type Config struct {
 	// ConsulSDConfigs contains list of settings for service discovery via Consul
 	// see https://prometheus.io/docs/prometheus/latest/configuration/configuration/#consul_sd_config
 	ConsulSDConfigs []consul.SDConfig `yaml:"consul_sd_configs,omitempty"`
-	// DNSSDConfigs ontains list of settings for service discovery via DNS.
+	// DNSSDConfigs contains list of settings for service discovery via DNS.
 	// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dns_sd_config
 	DNSSDConfigs []dns.SDConfig `yaml:"dns_sd_configs,omitempty"`

@@ -225,7 +225,7 @@ func templateFuncs() textTpl.FuncMap {
 		"toLower": strings.ToLower,

 		// crlfEscape replaces '\n' and '\r' chars with `\\n` and `\\r`.
-		// This funcion is deprectated.
+		// This function is deprecated.
 		//
 		// It is better to use quotesEscape, jsonEscape, queryEscape or pathEscape instead -
 		// these functions properly escape `\n` and `\r` chars according to their purpose.
@@ -110,7 +110,7 @@ type SrcPath struct {
 	re *regexp.Regexp
 }

-// URLPrefix represents pased `url_prefix`
+// URLPrefix represents passed `url_prefix`
 type URLPrefix struct {
 	n uint32
 	bus []*backendURL
@@ -50,7 +50,7 @@ func normalizeURL(uOrig *url.URL) *url.URL {
 	// Prevent from attacks with using `..` in r.URL.Path
 	u.Path = path.Clean(u.Path)
 	if !strings.HasSuffix(u.Path, "/") && strings.HasSuffix(uOrig.Path, "/") {
-		// The path.Clean() removes traling slash.
+		// The path.Clean() removes trailing slash.
 		// Return it back if needed.
 		// This should fix https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1752
 		u.Path += "/"
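The trailing-slash behavior this hunk documents is standard-library behavior and can be seen in a few lines of standalone Go (a sketch, not the actual proxy code):

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	orig := "/foo/bar/"
	p := path.Clean(orig) // "/foo/bar" - Clean drops the trailing slash
	if !strings.HasSuffix(p, "/") && strings.HasSuffix(orig, "/") {
		// Restore it: backends may treat /foo/bar and /foo/bar/ differently.
		p += "/"
	}
	fmt.Println(p) // "/foo/bar/"
}
```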
@@ -21,7 +21,7 @@ func Push(wr *prompbmarshal.WriteRequest) {

 	tss := wr.Timeseries
 	for len(tss) > 0 {
-		// Process big tss in smaller blocks in order to reduce maxmimum memory usage
+		// Process big tss in smaller blocks in order to reduce maximum memory usage
 		samplesCount := 0
 		i := 0
 		for i < len(tss) {
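The idea of bounding peak memory by handling a big slice in fixed-size blocks looks roughly like this (a generic sketch with a made-up chunkSize parameter; the real Push also counts samples per block):

```go
package example

// processInChunks walks items in chunks of at most chunkSize, so the memory
// needed by process() is bounded by the chunk size rather than by len(items).
func processInChunks(items []string, chunkSize int, process func([]string)) {
	for len(items) > 0 {
		n := chunkSize
		if n > len(items) {
			n = len(items)
		}
		process(items[:n])
		items = items[n:]
	}
}
```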
@@ -164,7 +164,7 @@ func (p *parser) parseString() (*StringExpr, error) {
 	return se, nil
 }

-// StringExpr represents string contant.
+// StringExpr represents string constant.
 type StringExpr struct {
 	// S contains unquoted string contents.
 	S string
@@ -193,7 +193,7 @@ func getTmpResult() *result {
 func putTmpResult(r *result) {
 	currentTime := fasttime.UnixTimestamp()
 	if cap(r.rs.Values) > 1024*1024 && 4*len(r.rs.Values) < cap(r.rs.Values) && currentTime-r.lastResetTime > 10 {
-		// Reset r.rs in order to preseve memory usage after processing big time series with millions of rows.
+		// Reset r.rs in order to preserve memory usage after processing big time series with millions of rows.
 		r.rs = Result{}
 		r.lastResetTime = currentTime
 	}
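The capacity check in putTmpResult is a common pooling pattern: drop an object's backing array before returning it to the pool when the array is huge but mostly unused, so one outlier query does not pin megabytes of memory forever. A self-contained sketch with made-up names (the real code also rate-limits resets via lastResetTime):

```go
package example

import "sync"

type result struct{ values []float64 }

var resultPool sync.Pool

func putResult(r *result) {
	// Over 1 MiB of capacity with less than a quarter in use: let the
	// garbage collector reclaim the backing array.
	if cap(r.values) > 1024*1024 && 4*len(r.values) < cap(r.values) {
		r.values = nil
	}
	r.values = r.values[:0]
	resultPool.Put(r)
}
```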
@@ -936,7 +936,7 @@ func TagValueSuffixes(qt *querytracer.Tracer, tr storage.TimeRange, tagKey, tagV

 // TSDBStatus returns tsdb status according to https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats
 //
-// It accepts aribtrary filters on time series in sq.
+// It accepts arbitrary filters on time series in sq.
 func TSDBStatus(qt *querytracer.Tracer, sq *storage.SearchQuery, focusLabel string, topN int, deadline searchutils.Deadline) (*storage.TSDBStatus, error) {
 	qt = qt.NewChild("get tsdb stats: %s, focusLabel=%q, topN=%d", sq, focusLabel, topN)
 	defer qt.Done()
@@ -901,7 +901,7 @@ func quantileSorted(phi float64, values []float64) float64 {
 func aggrFuncMAD(tss []*timeseries) []*timeseries {
 	// Calculate medians for each point across tss.
 	medians := getPerPointMedians(tss)
-	// Calculate MAD values multipled by tolerance for each point across tss.
+	// Calculate MAD values multiplied by tolerance for each point across tss.
 	// See https://en.wikipedia.org/wiki/Median_absolute_deviation
 	mads := getPerPointMADs(tss, medians)
 	tss[0].Values = append(tss[0].Values[:0], mads...)
@@ -920,7 +920,7 @@ func aggrFuncOutliersMAD(afa *aggrFuncArg) ([]*timeseries, error) {
 	afe := func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
 		// Calculate medians for each point across tss.
 		medians := getPerPointMedians(tss)
-		// Calculate MAD values multipled by tolerance for each point across tss.
+		// Calculate MAD values multiplied by tolerance for each point across tss.
 		// See https://en.wikipedia.org/wiki/Median_absolute_deviation
 		mads := getPerPointMADs(tss, medians)
 		for n := range mads {
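The MAD referenced in both hunks is median(|x - median(values)|), a spread estimate that is robust to outliers. A standalone sketch of the math (not the per-point library code; assumes non-empty input):

```go
package example

import "sort"

// mad returns the median absolute deviation of values.
func mad(values []float64) float64 {
	m := median(values)
	devs := make([]float64, len(values))
	for i, v := range values {
		d := v - m
		if d < 0 {
			d = -d
		}
		devs[i] = d
	}
	return median(devs)
}

func median(values []float64) float64 {
	vs := append([]float64(nil), values...)
	sort.Float64s(vs)
	n := len(vs)
	if n%2 == 1 {
		return vs[n/2]
	}
	return (vs[n/2-1] + vs[n/2]) / 2
}
```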
@@ -466,7 +466,7 @@ func execBinaryOpArgs(qt *querytracer.Tracer, ec *EvalConfig, exprFirst, exprSec
 	// 1) execute the exprFirst
 	// 2) get common label filters for series returned at step 1
 	// 3) push down the found common label filters to exprSecond. This filters out unneeded series
-	// during exprSecond exection instead of spending compute resources on extracting and processing these series
+	// during exprSecond execution instead of spending compute resources on extracting and processing these series
 	// before they are dropped later when matching time series according to https://prometheus.io/docs/prometheus/latest/querying/operators/#vector-matching
 	// 4) execute the exprSecond with possible additional filters found at step 3
 	//
@@ -385,7 +385,7 @@ func getRollupFunc(funcName string) newRollupFunc {
 }

 type rollupFuncArg struct {
-	// The value preceeding values if it fits staleness interval.
+	// The value preceding values if it fits staleness interval.
 	prevValue float64

 	// The timestamp for prevValue.
@@ -397,7 +397,7 @@ type rollupFuncArg struct {
 	// Timestamps for values.
 	timestamps []int64

-	// Real value preceeding values without restrictions on staleness interval.
+	// Real value preceding values without restrictions on staleness interval.
 	realPrevValue float64

 	// Real value which goes after values.
@@ -587,7 +587,7 @@ func (rc *rollupConfig) doInternal(dstValues []float64, tsm *timeseriesMap, valu
 	if window <= 0 {
 		window = rc.Step
 		if rc.MayAdjustWindow && window < maxPrevInterval {
-			// Adjust lookbehind window only if it isn't set explicilty, e.g. rate(foo).
+			// Adjust lookbehind window only if it isn't set explicitly, e.g. rate(foo).
 			// In the case of missing lookbehind window it should be adjusted in order to return non-empty graph
 			// when the window doesn't cover at least two raw samples (this is what most users expect).
 			//
@@ -239,7 +239,7 @@ func (cfg *Config) getAPICredentials() (*credentials, error) {
 }

 // getECSRoleCredentialsByPath makes request to ecs metadata service
-// and retrieves instances credentails
+// and retrieves instances credentials
 // https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html
 func getECSRoleCredentialsByPath(client *http.Client, path string) (*credentials, error) {
 	resp, err := client.Get(path)
@@ -329,7 +329,7 @@ func getMetadataByPath(client *http.Client, apiPath string) ([]byte, error) {
 	return readResponseBody(resp, apiURL)
 }

-// getRoleWebIdentityCredentials obtains credentials fo the given roleARN with webToken.
+// getRoleWebIdentityCredentials obtains credentials for the given roleARN with webToken.
 // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html
 // aws IRSA for kubernetes.
 // https://aws.amazon.com/blogs/opensource/introducing-fine-grained-iam-roles-service-accounts/
@@ -365,7 +365,7 @@ func (cfg *Config) getSTSAPIResponse(action string, reqBuilder func(apiURL strin
 	return readResponseBody(resp, apiURL)
 }

-// getRoleARNCredentials obtains credentials fo the given roleARN.
+// getRoleARNCredentials obtains credentials for the given roleARN.
 func (cfg *Config) getRoleARNCredentials(creds *credentials) (*credentials, error) {
 	data, err := cfg.getSTSAPIResponse("AssumeRole", func(apiURL string) (*http.Request, error) {
 		return newSignedGetRequest(apiURL, "sts", cfg.region, creds)
@@ -309,7 +309,7 @@ func (fs *FS) CreateFile(filePath string, data []byte) error {
 	return nil
 }

-// HasFile returns ture if filePath exists at fs.
+// HasFile returns true if filePath exists at fs.
 func (fs *FS) HasFile(filePath string) (bool, error) {
 	path := fs.Dir + filePath

@@ -250,7 +250,7 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
 		return false, nil
 	}
 	// Use os.RemoveAll() instead of os.Remove(), since the dir may contain special files such as flock.lock and restore-in-progress,
-	// which must be ingored.
+	// which must be ignored.
 	if err := os.RemoveAll(dir); err != nil {
 		return false, fmt.Errorf("cannot remove %q: %w", dir, err)
 	}
@@ -33,7 +33,7 @@ type FS struct {
 	// Directory in the bucket to write to.
 	Dir string

-	// Set for using S3-compatible enpoint such as MinIO etc.
+	// Set for using S3-compatible endpoint such as MinIO etc.
 	CustomEndpoint string

 	// Force to use path style for s3, true by default.
@@ -40,7 +40,7 @@ func WriteFileAndSync(path string, data []byte) error {
 	}
 	if _, err := f.Write(data); err != nil {
 		f.MustClose()
-		// Do not call MustRemoveAll(path), so the user could inpsect
+		// Do not call MustRemoveAll(path), so the user could inspect
 		// the file contents during investigation of the issue.
 		return fmt.Errorf("cannot write %d bytes to %q: %w", len(data), path, err)
 	}
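The surrounding function follows the write-then-fsync pattern; a minimal sketch of that pattern with plain os calls (not the library's file wrapper), including the deliberate choice to leave a partially written file behind for debugging:

```go
package example

import (
	"fmt"
	"os"
)

// writeFileAndSync writes data, then fsyncs before close so the bytes
// survive a crash or power failure.
func writeFileAndSync(path string, data []byte) error {
	f, err := os.Create(path)
	if err != nil {
		return fmt.Errorf("cannot create %q: %w", path, err)
	}
	if _, err := f.Write(data); err != nil {
		_ = f.Close()
		// No os.Remove(path) here - keep the file for inspection.
		return fmt.Errorf("cannot write %d bytes to %q: %w", len(data), path, err)
	}
	if err := f.Sync(); err != nil {
		_ = f.Close()
		return fmt.Errorf("cannot sync %q: %w", path, err)
	}
	return f.Close()
}
```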
@@ -20,7 +20,7 @@ func sysTotalMemory() int {
 	}
 	mem := cgroup.GetMemoryLimit()
 	if mem <= 0 || int64(int(mem)) != mem || int(mem) > totalMem {
-		// Try reading hierachical memory limit.
+		// Try reading hierarchical memory limit.
 		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/699
 		mem = cgroup.GetHierarchicalMemoryLimit()
 		if mem <= 0 || int64(int(mem)) != mem || int(mem) > totalMem {
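The `int64(int(mem)) != mem` guard that appears twice here is a truncation check: converting an int64 limit to a platform int on a 32-bit system can silently lose bits, and round-tripping the conversion exposes that. In isolation:

```go
package example

// fitsInt reports whether v survives an int64 -> int -> int64 round trip,
// i.e. whether it fits into a platform-sized int without truncation.
// On 32-bit systems a multi-GiB cgroup limit would fail this check.
func fitsInt(v int64) bool {
	return int64(int(v)) == v
}
```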
@@ -157,7 +157,7 @@ func commonPrefixLen(a, b []byte) int {

 // Add adds x to the end of ib.
 //
-// false is returned if x isn't added to ib due to block size contraints.
+// false is returned if x isn't added to ib due to block size constraints.
 func (ib *inmemoryBlock) Add(x []byte) bool {
 	data := ib.data
 	if len(x)+len(data) > maxInmemoryBlockSize {
@@ -1665,7 +1665,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix, txnPath string) error {
 			srcPath, dstPath)
 	}

-	// Flush pathPrefix directory metadata to the underying storage.
+	// Flush pathPrefix directory metadata to the underlying storage.
 	fs.MustSyncPath(pathPrefix)

 	pendingTxnDeletionsWG.Add(1)
@@ -89,7 +89,7 @@ func TestParseProxyProtocolFail(t *testing.T) {
 	// unsupported command
 	f([]byte{0x0D, 0x0A, 0x0D, 0x0A, 0x00, 0x0D, 0x0A, 0x51, 0x55, 0x49, 0x54, 0x0A, 0x22, 0x11, 0x00, 0x0C,
 		0x7F, 0x00, 0x00, 0x01, 0, 0, 0, 0, 0, 80, 0, 0})
-	// mimatch ipv6 and ipv4
+	// mismatch ipv6 and ipv4
 	f([]byte{0x0D, 0x0A, 0x0D, 0x0A, 0x00, 0x0D, 0x0A, 0x51, 0x55, 0x49, 0x54, 0x0A, 0x21, 0x21, 0x00, 0x0C,
 		// ip data srcid,dstip,srcport
 		0x7F, 0x00, 0x00, 0x01, 0, 0, 0, 0, 0, 80, 0, 0})
@@ -76,7 +76,7 @@ func appendGraphiteMatchTemplateParts(dst []string, s string) []string {
 // Match matches s against gmt.
 //
 // On success it adds matched captures to dst and returns it with true.
-// Of failre it returns false.
+// On failure it returns false.
 func (gmt *graphiteMatchTemplate) Match(dst []string, s string) ([]string, bool) {
 	dst = append(dst, s)
 	parts := gmt.parts
@@ -119,7 +119,7 @@ func getToken(token *promauth.Secret) (string, error) {
 		return string(data), nil
 	}
 	t := os.Getenv("CONSUL_HTTP_TOKEN")
-	// Allow empty token - it shouls work if authorization is disabled in Consul
+	// Allow empty token - it should work if authorization is disabled in Consul
 	return t, nil
 }

@@ -145,7 +145,7 @@ func maxWaitTime() time.Duration {
 	// Consul adds random delay up to wait/16, so reduce the timeout in order to keep it below BlockingClientReadTimeout.
 	// See https://www.consul.io/api-docs/features/blocking
 	d -= d / 8
-	// The timeout cannot exceed 10 minuntes. See https://www.consul.io/api-docs/features/blocking
+	// The timeout cannot exceed 10 minutes. See https://www.consul.io/api-docs/features/blocking
 	if d > 10*time.Minute {
 		d = 10 * time.Minute
 	}
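The arithmetic in this hunk trims a blocking-query timeout so Consul's server-side jitter (up to wait/16) still fits under the client read deadline, then applies the documented 10-minute cap. A sketch with readTimeout as a stand-in parameter:

```go
package example

import "time"

// blockingWaitTime derives the wait parameter for a Consul blocking query
// from the client's read timeout.
func blockingWaitTime(readTimeout time.Duration) time.Duration {
	d := readTimeout
	d -= d / 8 // leave headroom for the server's added jitter
	if d > 10*time.Minute {
		d = 10 * time.Minute // documented maximum
	}
	return d
}
```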
@@ -155,7 +155,7 @@ func maxWaitTime() time.Duration {
 	return d
 }

-// getBlockingAPIResponse perfoms blocking request to Consul via client and returns response.
+// getBlockingAPIResponse performs blocking request to Consul via client and returns response.
 //
 // See https://www.consul.io/api-docs/features/blocking .
 func getBlockingAPIResponse(ctx context.Context, client *discoveryutils.Client, path string, index int64) ([]byte, int64, error) {
@@ -150,7 +150,7 @@ func addTasksLabels(tasks []task, nodesLabels, servicesLabels []*promutils.Label
 	return ms
 }

-// addLabels adds lables from src to dst if they contain the given `key: value` pair.
+// addLabels adds labels from src to dst if they contain the given `key: value` pair.
 func addLabels(dst *promutils.Labels, src []*promutils.Labels, key, value string) {
 	for _, m := range src {
 		if m.Get(key) != value {
@@ -22,7 +22,7 @@ type apiConfig struct {
 	parseErrors *metrics.Counter
 }

-// httpGroupTarget respresent prometheus GroupTarget
+// httpGroupTarget represent prometheus GroupTarget
 // https://prometheus.io/docs/prometheus/latest/http_sd/
 type httpGroupTarget struct {
 	Targets []string `json:"targets"`
@@ -408,7 +408,7 @@ func (gw *groupWatcher) doRequest(requestURL string) (*http.Response, error) {
 		requestURL = strings.Replace(requestURL, "/apis/networking.k8s.io/v1/", "/apis/networking.k8s.io/v1beta1/", 1)
 	}
 	if strings.Contains(requestURL, "/apis/discovery.k8s.io/v1/") && atomic.LoadUint32(&gw.useDiscoveryV1Beta1) == 1 {
-		// Update discovery URL for old Kuberentes API, which supports only v1beta1 path.
+		// Update discovery URL for old Kubernetes API, which supports only v1beta1 path.
 		requestURL = strings.Replace(requestURL, "/apis/discovery.k8s.io/v1/", "/apis/discovery.k8s.io/v1beta1/", 1)
 	}
 	req, err := http.NewRequest("GET", requestURL, nil)
@@ -82,7 +82,7 @@ type NodeDaemonEndpoints struct {
 	KubeletEndpoint DaemonEndpoint
 }

-// getTargetLabels returs labels for the given n.
+// getTargetLabels returns labels for the given n.
 //
 // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#node
 func (n *Node) getTargetLabels(gw *groupWatcher) []*promutils.Labels {
@@ -104,7 +104,7 @@ func maxWaitTime() time.Duration {
 	// Nomad adds random delay up to wait/16, so reduce the timeout in order to keep it below BlockingClientReadTimeout.
 	// See https://developer.hashicorp.com/nomad/api-docs#blocking-queries
 	d -= d / 16
-	// The timeout cannot exceed 10 minuntes. See https://developer.hashicorp.com/nomad/api-docs#blocking-queries
+	// The timeout cannot exceed 10 minutes. See https://developer.hashicorp.com/nomad/api-docs#blocking-queries

 	if d > 10*time.Minute {
 		d = 10 * time.Minute
@@ -115,7 +115,7 @@ func maxWaitTime() time.Duration {
 	return d
 }

-// getBlockingAPIResponse perfoms blocking request to Nomad via client and returns response.
+// getBlockingAPIResponse performs blocking request to Nomad via client and returns response.
 // See https://developer.hashicorp.com/nomad/api-docs#blocking-queries .
 func getBlockingAPIResponse(ctx context.Context, client *discoveryutils.Client, path string, index int64) ([]byte, int64, error) {
 	path += "&index=" + strconv.FormatInt(index, 10)
@@ -49,7 +49,7 @@ func (x *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	return nil
 }

-// MarshalJSON returns JSON respresentation for x.
+// MarshalJSON returns JSON representation for x.
 func (x *Labels) MarshalJSON() ([]byte, error) {
 	m := x.ToMap()
 	return json.Marshal(m)
@@ -235,7 +235,7 @@ func (x *Labels) RemoveDuplicates() {

 // RemoveMetaLabels removes all the `__meta_` labels from x.
 //
-// See https://www.robustperception.io/life-of-a-label fo details.
+// See https://www.robustperception.io/life-of-a-label for details.
 func (x *Labels) RemoveMetaLabels() {
 	src := x.Labels
 	dst := x.Labels[:0]
@@ -40,7 +40,7 @@ func StartUnmarshalWorkers() {

 // StopUnmarshalWorkers stops unmarshal workers.
 //
-// No more calles to ScheduleUnmarshalWork are allowed after calling stopUnmarshalWorkers
+// No more calls to ScheduleUnmarshalWork are allowed after calling stopUnmarshalWorkers
 func StopUnmarshalWorkers() {
 	close(unmarshalWorkCh)
 	unmarshalWorkersWG.Wait()
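The close-then-wait shutdown visible in this hunk is the standard Go worker-pool teardown; a generic sketch with made-up names showing why no work may be scheduled afterwards:

```go
package example

import "sync"

var (
	workCh    = make(chan func(), 16)
	workersWG sync.WaitGroup
)

// startWorkers launches n goroutines that drain workCh.
func startWorkers(n int) {
	for i := 0; i < n; i++ {
		workersWG.Add(1)
		go func() {
			defer workersWG.Done()
			for job := range workCh {
				job()
			}
		}()
	}
}

// stopWorkers closes the channel so each worker's range loop exits, then
// waits for all workers to finish. Sending more work after this point
// would panic on the closed channel - hence the "no more calls" rule.
func stopWorkers() {
	close(workCh)
	workersWG.Wait()
}
```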
@@ -157,7 +157,7 @@ func putRequest(req *Request) {

 var requestPool sync.Pool

-// sanitizeName performs DataDog-compatible santizing for metric names
+// sanitizeName performs DataDog-compatible sanitizing for metric names
 //
 // See https://docs.datadoghq.com/metrics/custom_metrics/#naming-custom-metrics
 func sanitizeName(name string) string {
@@ -65,7 +65,7 @@ func NewPromRegex(expr string) (*PromRegex, error) {
 	return pr, nil
 }

-// MatchString retruns true if s matches pr.
+// MatchString returns true if s matches pr.
 //
 // The pr is automatically anchored to the beginning and to the end
 // of the matching string with '^' and '$'.
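The anchoring described in this comment can be demonstrated with the standard regexp package; Prometheus-style matchers behave as if the expression were wrapped in `^(?:...)$`:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The whole string must match, not just a substring.
	re := regexp.MustCompile("^(?:foo.*)$")
	fmt.Println(re.MatchString("foobar"))  // true
	fmt.Println(re.MatchString("xfoobar")) // false - anchored at '^'
}
```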
@@ -79,7 +79,7 @@ func (bsw *blockStreamWriter) reset() {
 	bsw.prevTimestampsBlockOffset = 0
 }

-// InitFromInmemoryPart initialzes bsw from inmemory part.
+// InitFromInmemoryPart initializes bsw from inmemory part.
 func (bsw *blockStreamWriter) InitFromInmemoryPart(mp *inmemoryPart, compressLevel int) {
 	bsw.reset()

@@ -24,7 +24,7 @@ func isDedupEnabled() bool {
 	return globalDedupInterval > 0
 }

-// DeduplicateSamples removes samples from src* if they are closer to each other than dedupInterval in millseconds.
+// DeduplicateSamples removes samples from src* if they are closer to each other than dedupInterval in milliseconds.
 func DeduplicateSamples(srcTimestamps []int64, srcValues []float64, dedupInterval int64) ([]int64, []float64) {
 	if !needsDedup(srcTimestamps, dedupInterval) {
 		// Fast path - nothing to deduplicate
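A simplified sketch of interval-based deduplication, assuming timestamps sorted in ascending order; the real DeduplicateSamples aligns samples to interval boundaries rather than measuring distance from the previously kept sample:

```go
package example

// dedup keeps at most one sample per dedupInterval (milliseconds).
func dedup(timestamps []int64, values []float64, dedupInterval int64) ([]int64, []float64) {
	if len(timestamps) < 2 {
		return timestamps, values
	}
	dstT := timestamps[:1]
	dstV := values[:1]
	for i := 1; i < len(timestamps); i++ {
		// Keep the sample only if it is far enough from the last kept one.
		if timestamps[i]-dstT[len(dstT)-1] >= dedupInterval {
			dstT = append(dstT, timestamps[i])
			dstV = append(dstV, values[i])
		}
	}
	return dstT, dstV
}
```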
@@ -650,7 +650,7 @@ func generateTSID(dst *TSID, mn *MetricName) {
 	// This assumption is true because mn.Tags must be sorted with mn.sortTags() before calling generateTSID() function.
 	// This allows grouping data blocks for the same (job, instance) close to each other on disk.
 	// This reduces disk seeks and disk read IO when data blocks are read from disk for the same job and/or instance.
-	// For example, data blocks for time series matching `process_resident_memory_bytes{job="vmstorage"}` are physically adjancent on disk.
+	// For example, data blocks for time series matching `process_resident_memory_bytes{job="vmstorage"}` are physically adjacent on disk.
 	if len(mn.Tags) > 0 {
 		dst.JobID = uint32(xxhash.Sum64(mn.Tags[0].Value))
 	}
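A loose illustration of the locality idea in this hunk: when series sort by a key whose leading fields are hashes of the job and instance tag values, all series of one job land next to each other on disk. This is not the actual TSID layout, just the shape of the trick (xxhash.Sum64 is the same hash used above):

```go
package example

import "github.com/cespare/xxhash/v2"

// seriesKey is a trimmed-down TSID-like sort key: series sort by
// (JobID, InstanceID, MetricID).
type seriesKey struct {
	JobID      uint32
	InstanceID uint32
	MetricID   uint64
}

func makeKey(job, instance []byte, metricID uint64) seriesKey {
	return seriesKey{
		JobID:      uint32(xxhash.Sum64(job)),
		InstanceID: uint32(xxhash.Sum64(instance)),
		MetricID:   metricID,
	}
}
```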
@@ -2754,7 +2754,7 @@ func (is *indexSearch) getMetricIDsForDateAndFilters(qt *querytracer.Tracer, dat
 	// Intersect metricIDs with the rest of filters.
 	//
 	// Do not run these tag filters in parallel, since this may result in CPU and RAM waste
-	// when the intial tag filters significantly reduce the number of found metricIDs,
+	// when the initial tag filters significantly reduce the number of found metricIDs,
 	// so the remaining filters could be performed via much faster metricName matching instead
 	// of slow selecting of matching metricIDs.
 	qtChild = qt.NewChild("intersect the remaining %d filters with the found %d metric ids", len(tfws), metricIDs.Len())
@@ -57,7 +57,7 @@ const finalPartsToMerge = 3
 // Higher number of shards reduces CPU contention and increases the max bandwidth on multi-core systems.
 var rawRowsShardsPerPartition = (cgroup.AvailableCPUs() + 1) / 2

-// The interval for flushing bufferred rows into parts, so they become visible to search.
+// The interval for flushing buffered rows into parts, so they become visible to search.
 const pendingRowsFlushInterval = time.Second

 // The interval for guaranteed flush of recently ingested data from memory to on-disk parts,
@@ -2144,7 +2144,7 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix1, pathPrefix2, txnPath str
 		}
 	}

-	// Flush pathPrefix* directory metadata to the underying storage,
+	// Flush pathPrefix* directory metadata to the underlying storage,
 	// so the moved files become visible there.
 	fs.MustSyncPath(pathPrefix1)
 	fs.MustSyncPath(pathPrefix2)
@@ -8,7 +8,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 )

-// rawRow reperesents raw timeseries row.
+// rawRow represents raw timeseries row.
 type rawRow struct {
 	// TSID is time series id.
 	TSID TSID
@@ -100,7 +100,7 @@ type Search struct {

 	ts tableSearch

-	// tr contains time range used in the serach.
+	// tr contains time range used in the search.
 	tr TimeRange

 	// tfss contains tag filters used in the search.
@@ -165,7 +165,7 @@ func (s *Search) Init(qt *querytracer.Tracer, storage *Storage, tfss []*TagFilte
 	}
 	// It is ok to call Init on non-nil err.
 	// Init must be called before returning because it will fail
-	// on Seach.MustClose otherwise.
+	// on Search.MustClose otherwise.
 	s.ts.Init(storage.tb, tsids, tr)
 	qt.Printf("search for parts with data for %d series", len(tsids))
 	if err != nil {
@@ -1972,7 +1972,7 @@ func (s *Storage) updatePerDateData(rows []rawRow, mrs []*MetricRow) error {
 		s.pendingHourEntriesLock.Unlock()
 	}
 	if len(pendingDateMetricIDs) == 0 {
-		// Fast path - there are no new (date, metricID) entires in rows.
+		// Fast path - there are no new (date, metricID) entries in rows.
 		return nil
 	}

@@ -425,7 +425,7 @@ func (tb *table) retentionWatcher() {
 			continue
 		}

-		// There are paritions to drop. Drop them.
+		// There are partitions to drop. Drop them.

 		// Remove table references from partitions, so they will be eventually
 		// closed and dropped after all the pending searches are done.
@@ -51,7 +51,7 @@ func timestampToPartitionName(timestamp int64) string {
 	return t.Format("2006_01")
 }

-// fromPartitionName initializes tr from the given parition name.
+// fromPartitionName initializes tr from the given partition name.
 func (tr *TimeRange) fromPartitionName(name string) error {
 	t, err := time.Parse("2006_01", name)
 	if err != nil {
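The "2006_01" layout in this hunk uses Go's reference time ("Mon Jan 2 15:04:05 2006"), so the same string both formats and parses year_month partition names:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2023, time.February, 15, 0, 0, 0, 0, time.UTC)
	name := t.Format("2006_01")
	fmt.Println(name) // 2023_02

	// The same layout parses a partition name back into a time.
	parsed, err := time.Parse("2006_01", name)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Year(), parsed.Month()) // 2023 February
}
```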
@@ -199,7 +199,7 @@ type aggregator struct {

 	// suffix contains a suffix, which should be added to aggregate metric names
 	//
-	// It contains the interval, lables in (by, without), plus output name.
+	// It contains the interval, labels in (by, without), plus output name.
 	// For example, foo_bar metric name is transformed to foo_bar:1m_by_job
 	// for `interval: 1m`, `by: [job]`
 	suffix string
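A hypothetical sketch of how such a suffix could be assembled from the interval and `by` labels, matching the foo_bar -> foo_bar:1m_by_job example in the comment (illustrative only; the real field is built elsewhere and also carries the output name):

```go
package main

import (
	"fmt"
	"strings"
)

// buildSuffix joins the interval with the sorted "by" labels.
func buildSuffix(interval string, by []string) string {
	s := ":" + interval
	if len(by) > 0 {
		s += "_by_" + strings.Join(by, "_")
	}
	return s
}

func main() {
	fmt.Println("foo_bar" + buildSuffix("1m", []string{"job"})) // foo_bar:1m_by_job
}
```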