Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Commit 08219faf8d: 24 changed files with 251 additions and 108 deletions
README.md (16 changed lines)
@@ -837,7 +837,7 @@ unix timestamp in seconds or [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) val

 The exported CSV data can be imported to VictoriaMetrics via [/api/v1/import/csv](#how-to-import-csv-data).

-The [deduplication](#deduplication) isn't applied for the data exported in CSV. It is expected that the de-duplication is performed during data import.
+The [deduplication](#deduplication) is applied to the data exported in CSV by default. It is possible to export raw data without de-duplication by passing the `reduce_mem_usage=1` query arg to `/api/v1/export/csv`.

 ### How to export data in native format
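Below is a quick way to exercise the new default from Go. This sketch is not part of the commit; the address, `match` filter and `format` columns are illustrative, and `reduce_mem_usage=1` requests raw, non-deduplicated data as described above:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Hypothetical single-node instance on localhost. The format arg lists
	// the CSV columns; reduce_mem_usage=1 skips the default de-duplication.
	url := "http://localhost:8428/api/v1/export/csv" +
		"?format=__name__,__value__,__timestamp__:unix_s" +
		"&match=up&reduce_mem_usage=1"
	resp, err := http.Get(url)
	if err != nil {
		fmt.Println("export failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Print(string(body)) // one CSV line per exported data point
}
```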
@@ -1083,7 +1083,7 @@ It is recommended leaving the following amounts of spare resources:

 * 50% of free RAM for reducing the probability of OOM (out of memory) crashes and slowdowns during temporary spikes in workload.
 * 50% of spare CPU for reducing the probability of slowdowns during temporary spikes in workload.
-* At least 30% of free storage space at the directory pointed by `-storageDataPath` command-line flag.
+* At least 30% of free storage space at the directory pointed to by the `-storageDataPath` command-line flag. See also the `-storage.minFreeDiskSpaceBytes` command-line flag description [here](#list-of-command-line-flags).

 ## High availability
@@ -1441,6 +1441,18 @@ We also provide [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanag

 Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).

+## Benchmarks
+
+Note that vendors (including VictoriaMetrics) are often biased when conducting such tests, since they tend to highlight
+the best parts of their own product while highlighting the worst parts of competing products.
+So we encourage users and all independent third parties to conduct their own benchmarks for the products
+they are evaluating in production and to publish the results.
+
+As a reference, please see the [benchmarks](https://docs.victoriametrics.com/Articles.html#benchmarks) conducted by
+the VictoriaMetrics team. Please also see the [helm chart](https://github.com/VictoriaMetrics/benchmark)
+for running ingestion benchmarks based on node_exporter metrics.
+
 ## Profiling

 VictoriaMetrics provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):
@@ -159,13 +159,15 @@ func (s *VMStorage) setPrometheusReqParams(r *http.Request, query string) {
 		}
 	}
 	q.Set("query", query)
-	if s.evaluationInterval > 0 {
-		// set step as evaluationInterval by default
-		q.Set("step", s.evaluationInterval.String())
+	if s.evaluationInterval > 0 { // set step as evaluationInterval by default
+		// always convert to seconds to keep compatibility with older
+		// Prometheus versions. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1943
+		q.Set("step", fmt.Sprintf("%ds", int(s.evaluationInterval.Seconds())))
 	}
-	if s.queryStep > 0 {
-		// override step with user-specified value
-		q.Set("step", s.queryStep.String())
+	if s.queryStep > 0 { // override step with user-specified value
+		// always convert to seconds to keep compatibility with older
+		// Prometheus versions. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1943
+		q.Set("step", fmt.Sprintf("%ds", int(s.queryStep.Seconds())))
 	}
 	r.URL.RawQuery = q.Encode()
 }
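As a standalone illustration of the conversion both branches now share (this snippet is not from the commit; values are arbitrary), `int(d.Seconds())` renders a `time.Duration` as whole seconds, the one `step` notation that the older Prometheus versions referenced in the comment above reliably accept:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Go's default rendering ("3h0m0s") is replaced by plain seconds
	// ("10800s"); sub-second remainders are truncated by the int cast.
	for _, d := range []time.Duration{30 * time.Second, time.Minute, 3 * time.Hour} {
		fmt.Printf("%v -> %ds\n", d, int(d.Seconds()))
	}
	// Output:
	// 30s -> 30s
	// 1m0s -> 60s
	// 3h0m0s -> 10800s
}
```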
@@ -436,7 +436,20 @@ func TestRequestParams(t *testing.T) {
 			queryStep: time.Minute,
 		},
 		func(t *testing.T, r *http.Request) {
-			exp := fmt.Sprintf("query=%s&step=%v&time=%d", query, time.Minute, timestamp.Unix())
+			exp := fmt.Sprintf("query=%s&step=%ds&time=%d", query, int(time.Minute.Seconds()), timestamp.Unix())
 			checkEqualString(t, exp, r.URL.RawQuery)
 		},
 	},
+	{
+		"step to seconds",
+		false,
+		&VMStorage{
+			evaluationInterval: 3 * time.Hour,
+		},
+		func(t *testing.T, r *http.Request) {
+			evalInterval := 3 * time.Hour
+			tt := timestamp.Truncate(evalInterval)
+			exp := fmt.Sprintf("query=%s&step=%ds&time=%d", query, int(evalInterval.Seconds()), tt.Unix())
+			checkEqualString(t, exp, r.URL.RawQuery)
+		},
+	},
@@ -129,6 +129,7 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
 	if err != nil {
 		return err
 	}
+	reduceMemUsage := searchutils.GetBool(r, "reduce_mem_usage")
 	deadline := searchutils.GetDeadlineForExport(r, startTime)
 	tagFilterss, err := getTagFilterssFromRequest(r)
 	if err != nil {

@@ -140,7 +141,38 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
 	defer bufferedwriter.Put(bw)

 	resultsCh := make(chan *quicktemplate.ByteBuffer, cgroup.AvailableCPUs())
-	doneCh := make(chan error)
+	writeCSVLine := func(xb *exportBlock) {
+		if len(xb.timestamps) == 0 {
+			return
+		}
+		bb := quicktemplate.AcquireByteBuffer()
+		WriteExportCSVLine(bb, xb, fieldNames)
+		resultsCh <- bb
+	}
+	doneCh := make(chan error, 1)
+	if !reduceMemUsage {
+		rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
+		if err != nil {
+			return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
+		}
+		go func() {
+			err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
+				if err := bw.Error(); err != nil {
+					return err
+				}
+				xb := exportBlockPool.Get().(*exportBlock)
+				xb.mn = &rs.MetricName
+				xb.timestamps = rs.Timestamps
+				xb.values = rs.Values
+				writeCSVLine(xb)
+				xb.reset()
+				exportBlockPool.Put(xb)
+				return nil
+			})
+			close(resultsCh)
+			doneCh <- err
+		}()
+	} else {
+		go func() {
+			err := netstorage.ExportBlocks(sq, deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
+				if err := bw.Error(); err != nil {

@@ -152,11 +184,7 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
 				xb := exportBlockPool.Get().(*exportBlock)
 				xb.mn = mn
 				xb.timestamps, xb.values = b.AppendRowsWithTimeRangeFilter(xb.timestamps[:0], xb.values[:0], tr)
-				if len(xb.timestamps) > 0 {
-					bb := quicktemplate.AcquireByteBuffer()
-					WriteExportCSVLine(bb, xb, fieldNames)
-					resultsCh <- bb
-				}
+				writeCSVLine(xb)
 				xb.reset()
 				exportBlockPool.Put(xb)
 				return nil

@@ -164,6 +192,7 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
 			close(resultsCh)
 			doneCh <- err
 		}()
+	}
 	// Consume all the data from resultsCh.
 	for bb := range resultsCh {
 		// Do not check for error in bw.Write, since this error is checked inside netstorage.ExportBlocks above.

@@ -360,7 +389,7 @@ func exportHandler(w http.ResponseWriter, matches []string, etfs [][]storage.Tag
 	defer bufferedwriter.Put(bw)

 	resultsCh := make(chan *quicktemplate.ByteBuffer, cgroup.AvailableCPUs())
-	doneCh := make(chan error)
+	doneCh := make(chan error, 1)
 	if !reduceMemUsage {
 		rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
 		if err != nil {
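One detail worth calling out in the hunks above: `doneCh` changes from an unbuffered channel to `make(chan error, 1)` in both handlers. A plausible reading (the commit itself doesn't comment on it) is that the one-slot buffer lets the producer goroutine hand off its final error and exit even if the consumer stops reading early. A minimal standalone sketch:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// With capacity 1 this send never blocks, so the goroutine cannot leak
	// if the receiver bails out before collecting the result; an unbuffered
	// channel would park the goroutine until someone receives.
	doneCh := make(chan error, 1)
	go func() {
		doneCh <- errors.New("hypothetical export error")
	}()
	fmt.Println(<-doneCh)
}
```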
@@ -593,6 +593,29 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
+	t.Run("timestamp(alias(time()>=1600))", func(t *testing.T) {
+		t.Parallel()
+		q := `timestamp(alias(time()>=1600,"foo"))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{nan, nan, nan, 1600, 1800, 2000},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
+	t.Run("timestamp_with_name(alias(time()>=1600))", func(t *testing.T) {
+		t.Parallel()
+		q := `timestamp_with_name(alias(time()>=1600,"foo"))`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{nan, nan, nan, 1600, 1800, 2000},
+			Timestamps: timestampsExpected,
+		}
+		r.MetricName.MetricGroup = []byte("foo")
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
 	t.Run("time()/100", func(t *testing.T) {
 		t.Parallel()
 		q := `time()/100`

@@ -7382,6 +7405,7 @@ func TestExecError(t *testing.T) {
 	f(`sort_by_label()`)
 	f(`sort_by_label_desc()`)
 	f(`timestamp()`)
+	f(`timestamp_with_name()`)
 	f(`vector()`)
 	f(`histogram_quantile()`)
 	f(`histogram_quantiles()`)
@@ -83,6 +83,7 @@ var rollupFuncs = map[string]newRollupFunc{
 	// in order to properly handle offset and timestamps unaligned to the current step.
 	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/415 for details.
 	"timestamp":           newRollupFuncOneArg(rollupTlast),
+	"timestamp_with_name": newRollupFuncOneArg(rollupTlast), // + rollupFuncsKeepMetricGroup
 	"tlast_over_time":     newRollupFuncOneArg(rollupTlast),
 	"tmax_over_time":      newRollupFuncOneArg(rollupTmax),
 	"tmin_over_time":      newRollupFuncOneArg(rollupTmin),

@@ -130,6 +131,7 @@ var rollupAggrFuncs = map[string]rollupFunc{
 	"sum2_over_time":      rollupSum2,
 	"tfirst_over_time":    rollupTfirst,
 	"timestamp":           rollupTlast,
+	"timestamp_with_name": rollupTlast,
 	"tlast_over_time":     rollupTlast,
 	"tmax_over_time":      rollupTmax,
 	"tmin_over_time":      rollupTmin,

@@ -186,6 +188,7 @@ var rollupFuncsKeepMetricGroup = map[string]bool{
 	"quantiles_over_time": true,
 	"rollup":              true,
 	"rollup_candlestick":  true,
+	"timestamp_with_name": true,
 }

 func getRollupAggrFuncNames(expr metricsql.Expr) ([]string, error) {

@@ -524,6 +524,7 @@ func TestRollupNewRollupFuncSuccess(t *testing.T) {
 	f("descent_over_time", 231)
 	f("zscore_over_time", -0.4254336383156416)
 	f("timestamp", 0.13)
+	f("timestamp_with_name", 0.13)
 	f("mode_over_time", 34)
 	f("rate_over_sum", 4520)
 }
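The `rollupFuncsKeepMetricGroup` entry is what separates `timestamp_with_name` from plain `timestamp`. A toy sketch of that lookup (simplified names; not the engine's actual plumbing):

```go
package main

import "fmt"

// Simplified stand-in for the rollupFuncsKeepMetricGroup table above.
var keepMetricGroup = map[string]bool{
	"rollup":              true,
	"rollup_candlestick":  true,
	"timestamp_with_name": true,
}

// resultName mimics the post-rollup step: functions listed in the table
// keep the metric name on their results; all others have it stripped.
func resultName(funcName, metricName string) string {
	if keepMetricGroup[funcName] {
		return metricName
	}
	return ""
}

func main() {
	fmt.Printf("timestamp: %q\n", resultName("timestamp", "foo"))                     // ""
	fmt.Printf("timestamp_with_name: %q\n", resultName("timestamp_with_name", "foo")) // "foo"
}
```

This matches the behavior exercised by the `timestamp_with_name(alias(time()>=1600))` test above, where `r.MetricName.MetricGroup` stays `foo`.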
@@ -1,7 +1,7 @@
 {
   "files": {
     "main.css": "./static/css/main.83d9ae2d.chunk.css",
-    "main.js": "./static/js/main.6651c49c.chunk.js",
+    "main.js": "./static/js/main.fa15a895.chunk.js",
     "runtime-main.js": "./static/js/runtime-main.c4b656b8.js",
     "static/css/2.77671664.chunk.css": "./static/css/2.77671664.chunk.css",
     "static/js/2.ef1db8c8.chunk.js": "./static/js/2.ef1db8c8.chunk.js",

@@ -14,6 +14,6 @@
     "static/css/2.77671664.chunk.css",
     "static/js/2.ef1db8c8.chunk.js",
     "static/css/main.83d9ae2d.chunk.css",
-    "static/js/main.6651c49c.chunk.js"
+    "static/js/main.fa15a895.chunk.js"
   ]
 }
@@ -1 +1 @@
(generated vmui index.html, minified to a single line: the old and new revisions are identical except for the main.js bundle reference, ./static/js/main.6651c49c.chunk.js -> ./static/js/main.fa15a895.chunk.js)

File diff suppressed because one or more lines are too long
@@ -1,9 +1,10 @@
-import React, {FC, useState} from "react";
+import React, {FC, useEffect, useState} from "react";
 import {Box, TextField, Tooltip, IconButton} from "@mui/material";
 import SecurityIcon from "@mui/icons-material/Security";
 import {useAppDispatch, useAppState} from "../../../../state/common/StateContext";
 import {AuthDialog} from "../Auth/AuthDialog";
 import {ErrorTypes} from "../../../../types";
+import {getAppModeEnable, getAppModeParams} from "../../../../utils/app-mode";

 export interface ServerConfiguratorProps {
   error?: ErrorTypes | string;

@@ -11,6 +12,9 @@ export interface ServerConfiguratorProps {

 const ServerConfigurator: FC<ServerConfiguratorProps> = ({error}) => {

+  const appModeEnable = getAppModeEnable();
+  const {serverURL: appServerUrl} = getAppModeParams();
+
   const {serverUrl} = useAppState();
   const dispatch = useAppDispatch();

@@ -19,9 +23,13 @@ const ServerConfigurator: FC<ServerConfiguratorProps> = ({error}) => {
   };
   const [dialogOpen, setDialogOpen] = useState(false);

+  useEffect(() => {
+    if (appModeEnable) dispatch({type: "SET_SERVER", payload: appServerUrl});
+  }, [appServerUrl]);
+
   return <>
     <Box display="grid" gridTemplateColumns="1fr auto" gap="4px" alignItems="center" width="100%" mb={2} minHeight={50}>
-      <TextField variant="outlined" fullWidth label="Server URL" value={serverUrl}
+      <TextField variant="outlined" fullWidth label="Server URL" value={serverUrl || ""} disabled={appModeEnable}
                  error={error === ErrorTypes.validServer || error === ErrorTypes.emptyServer}
                  inputProps={{style: {fontFamily: "Monospace"}}}
                  onChange={onSetServer}/>
@@ -6,6 +6,10 @@ import {isValidHttpUrl} from "../../../../utils/url";
 import {useAuthState} from "../../../../state/auth/AuthStateContext";
 import {ErrorTypes, TimeParams} from "../../../../types";
 import {useGraphState} from "../../../../state/graph/GraphStateContext";
+import {getAppModeEnable, getAppModeParams} from "../../../../utils/app-mode";
+
+const appModeEnable = getAppModeEnable();
+const {serverURL: appServerUrl} = getAppModeParams();

 export const useFetchQuery = (): {
   fetchUrl?: string[],

@@ -80,18 +84,19 @@ export const useFetchQuery = (): {
   };

   const fetchUrl = useMemo(() => {
+    const server = appModeEnable ? appServerUrl : serverUrl;
     if (!period) return;
-    if (!serverUrl) {
+    if (!server) {
       setError(ErrorTypes.emptyServer);
     } else if (query.every(q => !q.trim())) {
       setError(ErrorTypes.validQuery);
-    } else if (isValidHttpUrl(serverUrl)) {
+    } else if (isValidHttpUrl(server)) {
       const duration = (period.end - period.start) / 2;
       const bufferPeriod = {...period, start: period.start - duration, end: period.end + duration};
       if (customStep.enable) bufferPeriod.step = customStep.value;
       return query.filter(q => q.trim()).map(q => displayType === "chart"
-        ? getQueryRangeUrl(serverUrl, q, bufferPeriod, nocache)
-        : getQueryUrl(serverUrl, q, period));
+        ? getQueryRangeUrl(server, q, bufferPeriod, nocache)
+        : getQueryUrl(server, q, period));
     } else {
       setError(ErrorTypes.validServer);
     }
app/vmui/packages/vmui/src/utils/app-mode.ts (new file, 12 lines)

@@ -0,0 +1,12 @@
+export interface AppParams {
+  serverURL: string
+}
+
+const getAppModeParams = (): AppParams => {
+  const dataParams = document.getElementById("root")?.dataset.params || "{}";
+  return JSON.parse(dataParams);
+};
+
+const getAppModeEnable = (): boolean => !!Object.keys(getAppModeParams()).length;
+
+export {getAppModeEnable, getAppModeParams};
@@ -16,12 +16,16 @@ sort: 15
 * FEATURE: vminsert: add `-maxLabelValueLen` command-line flag for the ability to configure the maximum length of label value. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1908).
 * FEATURE: preserve the order of time series passed to [limit_offset](https://docs.victoriametrics.com/MetricsQL.html#limit_offset) function. This allows implementing series paging via `limit_offset(limit, offset, sort_by_label(...))`. See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1920) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/951) issues.
 * FEATURE: automatically convert `(value1|...|valueN)` into `{value1,...,valueN}` inside `__graphite__` pseudo-label. This allows using [Grafana multi-value template variables](https://grafana.com/docs/grafana/latest/variables/formatting-multi-value-variables/) inside `__graphite__` pseudo-label. For example, `{__graphite__=~"foo.($bar)"}` is expanded to `{__graphite__=~"foo.{x,y}"}` if both `x` and `y` are selected for `$bar` template variable. See [these docs](https://docs.victoriametrics.com/#selecting-graphite-metrics) for details.
+* FEATURE: add [timestamp_with_name](https://docs.victoriametrics.com/MetricsQL.html#timestamp_with_name) function. It works the same as [timestamp](https://docs.victoriametrics.com/MetricsQL.html#timestamp), but leaves the original time series names, so it can be used in queries which match multiple time series names: `timestamp_with_name({foo="bar"}[1h])`. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/949#issuecomment-995222388) for more context.

 * BUGFIX: fix `unaligned 64-bit atomic operation` panic on 32-bit architectures, which has been introduced in v1.70.0. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1944).
 * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): restore the ability to use `$labels.alertname` in labels templating. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1921).
 * BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): add missing `query` caption to the input field for the query. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1900).
 * BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix navigation over query history with `Ctrl+up/down` and fix zoom relative to the cursor position. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1936).
 * BUGFIX: deduplicate samples more thoroughly if [deduplication](https://docs.victoriametrics.com/#deduplication) is enabled. Previously some duplicate samples could be left on disk for time series with high churn rate. This could result in bigger storage space requirements.
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): follow up to 5 redirects when `follow_redirects: true` is set for a particular scrape config. Previously only a single redirect was performed in this case. It is expected these redirects are performed to the original hostname. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1945).
+* BUGFIX: de-duplicate data exported via [/api/v1/export/csv](https://docs.victoriametrics.com/#how-to-export-csv-data) by default if [deduplication](https://docs.victoriametrics.com/#deduplication) is enabled. The de-duplication can be disabled by passing `reduce_mem_usage=1` query arg to `/api/v1/export/csv`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1837).
+* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): properly store [historical data](https://docs.victoriametrics.com/vmalert.html#rules-backfilling) to old Prometheus versions. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1943).

 ## [v1.70.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.70.0)
@@ -315,7 +315,8 @@ It is recommended leaving the following amounts of spare resources:

 * 50% of free RAM across all the node types for reducing the probability of OOM (out of memory) crashes and slowdowns during temporary spikes in workload.
 * 50% of spare CPU across all the node types for reducing the probability of slowdowns during temporary spikes in workload.
-* At least 30% of free storage space at the directory pointed by `-storageDataPath` command-line flag at `vmstorage` nodes.
+* At least 30% of free storage space at the directory pointed to by the `-storageDataPath` command-line flag at `vmstorage` nodes. See also the `-storage.minFreeDiskSpaceBytes` command-line flag [description for vmstorage](#list-of-command-line-flags-for-vmstorage).

 Some capacity planning tips for VictoriaMetrics cluster:
@@ -316,7 +316,11 @@ See also [implicit query conversions](#implicit-query-conversions).

 #### timestamp

-`timestamp(series_selector[d])` returns the timestamp in seconds for the last raw sample on the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). Metric names are stripped from the resulting rollups. This function is supported by PromQL.
+`timestamp(series_selector[d])` returns the timestamp in seconds for the last raw sample on the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). Metric names are stripped from the resulting rollups. This function is supported by PromQL. See also [timestamp_with_name](#timestamp_with_name).
+
+#### timestamp_with_name
+
+`timestamp_with_name(series_selector[d])` returns the timestamp in seconds for the last raw sample on the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). Metric names are preserved in the resulting rollups. See also [timestamp](#timestamp).

 #### tfirst_over_time

@@ -914,11 +918,11 @@ See also [implicit query conversions](#implicit-query-conversions).

 ## Subqueries

-MetricsQL supports and extends PromQL subqueries. See [this article](https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3) for details. Any nested [rollup functions](#rollup-functions) form a subquery. Nested rollup functions can be implicit thanks to the [implicit query conversions](#implicit-query-conversions). For example, `delta(sum(m))` is implicitly converted to `delta(sum(default_rollup(m[1i]))[1i:1i])`, so it becomes a subquery, since it contains [default_rollup](#default_rollup) nested into [delta](#delta).
+MetricsQL supports and extends PromQL subqueries. See [this article](https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3) for details. Any [rollup function](#rollup-functions) applied to something other than a [series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) forms a subquery. Nested rollup functions can be implicit thanks to the [implicit query conversions](#implicit-query-conversions). For example, `delta(sum(m))` is implicitly converted to `delta(sum(default_rollup(m[1i]))[1i:1i])`, so it becomes a subquery, since it contains [default_rollup](#default_rollup) nested into [delta](#delta).

 VictoriaMetrics performs subqueries in the following way:

-* It calculates the inner rollup function using the `step` value from the outer rollup function. For example, if `max_over_time(rate(http_requests_total[5m])[1h:30s])` is executed, then the `rate(http_requests_total[5m])` is calculated with the `step` equal to `30s`. The resulting data points are algined by the `step`.
+* It calculates the inner rollup function using the `step` value from the outer rollup function. For example, for the expression `max_over_time(rate(http_requests_total[5m])[1h:30s])` the inner function `rate(http_requests_total[5m])` is calculated with `step=30s`. The resulting data points are aligned by the `step`.
 * It calculates the outer rollup function over the results of the inner rollup function using the `step` value passed by Grafana to [/api/v1/query_range](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries).
@@ -837,7 +837,7 @@ unix timestamp in seconds or [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) val

 The exported CSV data can be imported to VictoriaMetrics via [/api/v1/import/csv](#how-to-import-csv-data).

-The [deduplication](#deduplication) isn't applied for the data exported in CSV. It is expected that the de-duplication is performed during data import.
+The [deduplication](#deduplication) is applied to the data exported in CSV by default. It is possible to export raw data without de-duplication by passing the `reduce_mem_usage=1` query arg to `/api/v1/export/csv`.

 ### How to export data in native format

@@ -1083,7 +1083,7 @@ It is recommended leaving the following amounts of spare resources:

 * 50% of free RAM for reducing the probability of OOM (out of memory) crashes and slowdowns during temporary spikes in workload.
 * 50% of spare CPU for reducing the probability of slowdowns during temporary spikes in workload.
-* At least 30% of free storage space at the directory pointed by `-storageDataPath` command-line flag.
+* At least 30% of free storage space at the directory pointed to by the `-storageDataPath` command-line flag. See also the `-storage.minFreeDiskSpaceBytes` command-line flag description [here](#list-of-command-line-flags).

 ## High availability

@@ -1441,6 +1441,18 @@ We also provide [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanag

 Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).

+## Benchmarks
+
+Note that vendors (including VictoriaMetrics) are often biased when conducting such tests, since they tend to highlight
+the best parts of their own product while highlighting the worst parts of competing products.
+So we encourage users and all independent third parties to conduct their own benchmarks for the products
+they are evaluating in production and to publish the results.
+
+As a reference, please see the [benchmarks](https://docs.victoriametrics.com/Articles.html#benchmarks) conducted by
+the VictoriaMetrics team. Please also see the [helm chart](https://github.com/VictoriaMetrics/benchmark)
+for running ingestion benchmarks based on node_exporter metrics.
+
 ## Profiling

 VictoriaMetrics provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):
@@ -841,7 +841,7 @@ unix timestamp in seconds or [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) val

 The exported CSV data can be imported to VictoriaMetrics via [/api/v1/import/csv](#how-to-import-csv-data).

-The [deduplication](#deduplication) isn't applied for the data exported in CSV. It is expected that the de-duplication is performed during data import.
+The [deduplication](#deduplication) is applied to the data exported in CSV by default. It is possible to export raw data without de-duplication by passing the `reduce_mem_usage=1` query arg to `/api/v1/export/csv`.

 ### How to export data in native format

@@ -1087,7 +1087,7 @@ It is recommended leaving the following amounts of spare resources:

 * 50% of free RAM for reducing the probability of OOM (out of memory) crashes and slowdowns during temporary spikes in workload.
 * 50% of spare CPU for reducing the probability of slowdowns during temporary spikes in workload.
-* At least 30% of free storage space at the directory pointed by `-storageDataPath` command-line flag.
+* At least 30% of free storage space at the directory pointed to by the `-storageDataPath` command-line flag. See also the `-storage.minFreeDiskSpaceBytes` command-line flag description [here](#list-of-command-line-flags).

 ## High availability

@@ -1445,6 +1445,18 @@ We also provide [vmbackupmanager](https://docs.victoriametrics.com/vmbackupmanag

 Enterprise binaries can be downloaded and evaluated for free from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).

+## Benchmarks
+
+Note that vendors (including VictoriaMetrics) are often biased when conducting such tests, since they tend to highlight
+the best parts of their own product while highlighting the worst parts of competing products.
+So we encourage users and all independent third parties to conduct their own benchmarks for the products
+they are evaluating in production and to publish the results.
+
+As a reference, please see the [benchmarks](https://docs.victoriametrics.com/Articles.html#benchmarks) conducted by
+the VictoriaMetrics team. Please also see the [helm chart](https://github.com/VictoriaMetrics/benchmark)
+for running ingestion benchmarks based on node_exporter metrics.
+
 ## Profiling

 VictoriaMetrics provides handlers for collecting the following [Go profiles](https://blog.golang.org/profiling-go-programs):
go.mod (2 changed lines)

@@ -9,7 +9,7 @@ require (
 	// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
 	github.com/VictoriaMetrics/fasthttp v1.1.0
 	github.com/VictoriaMetrics/metrics v1.18.1
-	github.com/VictoriaMetrics/metricsql v0.32.0
+	github.com/VictoriaMetrics/metricsql v0.33.0
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/aws/aws-sdk-go v1.42.22
 	github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
go.sum (4 changed lines)

@@ -110,8 +110,8 @@ github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR
 github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
 github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0=
 github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
-github.com/VictoriaMetrics/metricsql v0.32.0 h1:yTZFB1FvbOsD5ahl6mxKYprHpZ248nVk3s8Kl7UBg5c=
-github.com/VictoriaMetrics/metricsql v0.32.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
+github.com/VictoriaMetrics/metricsql v0.33.0 h1:UBj7+Tf4dhD47tIxMYfAiy/4GXJN6xPYTweCZ+sRqw0=
+github.com/VictoriaMetrics/metricsql v0.33.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
 github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
@@ -236,19 +236,27 @@ func (c *client) ReadData(dst []byte) ([]byte, error) {
 	}
 	err := doRequestWithPossibleRetry(c.hc, req, resp, deadline)
 	statusCode := resp.StatusCode()
-	if err == nil && (statusCode == fasthttp.StatusMovedPermanently || statusCode == fasthttp.StatusFound) {
+	redirectsCount := 0
+	for err == nil && (statusCode == fasthttp.StatusMovedPermanently || statusCode == fasthttp.StatusFound) {
+		if redirectsCount > 5 {
+			err = fmt.Errorf("too many redirects")
+			break
+		}
 		if c.denyRedirects {
 			err = fmt.Errorf("cannot follow redirects if `follow_redirects: false` is set")
-		} else {
-			// Allow a single redirect.
-			// It is expected that the redirect is made on the same host.
-			// Otherwise it won't work.
-			if location := resp.Header.Peek("Location"); len(location) > 0 {
-				req.URI().UpdateBytes(location)
-				err = c.hc.DoDeadline(req, resp, deadline)
-				statusCode = resp.StatusCode()
-			}
+			break
 		}
+		// It is expected that the redirect is made on the same host.
+		// Otherwise it won't work.
+		location := resp.Header.Peek("Location")
+		if len(location) == 0 {
+			err = fmt.Errorf("missing Location header")
+			break
+		}
+		req.URI().UpdateBytes(location)
+		err = doRequestWithPossibleRetry(c.hc, req, resp, deadline)
+		statusCode = resp.StatusCode()
+		redirectsCount++
 	}
 	if swapResponseBodies {
 		dst = resp.SwapBody(dst)
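For comparison, the bounded-redirect policy above (at most 5 hops before giving up) mirrors what Go's standard library expresses via `CheckRedirect`. A minimal stdlib sketch, with a hypothetical target URL (not part of the commit, which stays on fasthttp):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

func main() {
	client := &http.Client{
		// Give up after 5 hops, like the redirectsCount check above.
		CheckRedirect: func(req *http.Request, via []*http.Request) error {
			if len(via) > 5 {
				return errors.New("too many redirects")
			}
			return nil
		},
	}
	resp, err := client.Get("https://example.com/") // hypothetical target
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```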
@@ -1067,35 +1067,41 @@ func atomicSetBool(p *uint64, b bool) {
 }

 func (pt *partition) runFinalDedup() error {
-	if !isDedupNeeded(pt) {
+	requiredDedupInterval, actualDedupInterval := pt.getRequiredDedupInterval()
+	if requiredDedupInterval <= actualDedupInterval {
+		// Deduplication isn't needed.
 		return nil
 	}
 	t := time.Now()
-	logger.Infof("starting final dedup for partition %s", pt.name)
+	logger.Infof("starting final dedup for partition %s using requiredDedupInterval=%d ms, since the partition has smaller actualDedupInterval=%d ms",
+		pt.bigPartsPath, requiredDedupInterval, actualDedupInterval)
 	if err := pt.ForceMergeAllParts(); err != nil {
-		return fmt.Errorf("cannot perform final dedup for partition %s: %w", pt.name, err)
+		return fmt.Errorf("cannot perform final dedup for partition %s: %w", pt.bigPartsPath, err)
 	}
-	logger.Infof("final dedup for partition %s finished in %.3f seconds", pt.name, time.Since(t).Seconds())
+	logger.Infof("final dedup for partition %s has been finished in %.3f seconds", pt.bigPartsPath, time.Since(t).Seconds())
 	return nil
 }

-func isDedupNeeded(pt *partition) bool {
+func (pt *partition) getRequiredDedupInterval() (int64, int64) {
 	pws := pt.GetParts(nil)
 	defer pt.PutParts(pws)
-	// Get dedup interval, which covers all the parts in the partition.
-	var maxTimestamp int64
-	for _, pw := range pws {
-		if maxTimestamp < pw.p.ph.MaxTimestamp {
-			maxTimestamp = pw.p.ph.MaxTimestamp
-		}
-	}
-	dedupInterval := GetDedupInterval(maxTimestamp)
-	if dedupInterval <= 0 {
-		// The deduplication isn't needed.
-		return false
-	}
+	dedupInterval := GetDedupInterval(pt.tr.MaxTimestamp)
 	minDedupInterval := getMinDedupInterval(pws)
-	return minDedupInterval < dedupInterval
+	return dedupInterval, minDedupInterval
+}
+
+func getMinDedupInterval(pws []*partWrapper) int64 {
+	if len(pws) == 0 {
+		return 0
+	}
+	dMin := pws[0].p.ph.MinDedupInterval
+	for _, pw := range pws[1:] {
+		d := pw.p.ph.MinDedupInterval
+		if d < dMin {
+			dMin = d
+		}
+	}
+	return dMin
 }

 // mergeParts merges pws.

@@ -1188,7 +1194,7 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro
 	}
 	bsrs = nil

-	ph.MinDedupInterval = getMinDedupInterval(pws)
+	ph.MinDedupInterval = GetDedupInterval(ph.MaxTimestamp)
 	if err := ph.writeMinDedupInterval(tmpPartPath); err != nil {
 		return fmt.Errorf("cannot store min dedup interval for part %q: %w", tmpPartPath, err)
 	}

@@ -1272,20 +1278,6 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro
 	return nil
 }

-func getMinDedupInterval(pws []*partWrapper) int64 {
-	if len(pws) == 0 {
-		return 0
-	}
-	dMin := pws[0].p.ph.MinDedupInterval
-	for _, pw := range pws[1:] {
-		d := pw.p.ph.MinDedupInterval
-		if d < dMin {
-			dMin = d
-		}
-	}
-	return dMin
-}
-
 func getCompressLevelForRowsCount(rowsCount, blocksCount uint64) int {
 	avgRowsPerBlock := rowsCount / blocksCount
 	if avgRowsPerBlock <= 200 {
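To make the new control flow concrete, here is a self-contained toy version of the decision `runFinalDedup` now makes (simplified types and hypothetical millisecond values; not the storage package itself): the expensive forced merge runs only when the interval required for the partition exceeds the smallest dedup interval already applied to its parts.

```go
package main

import "fmt"

// part mirrors just the field consulted above: the dedup interval (in ms)
// already applied to a part, as recorded in its header.
type part struct{ minDedupInterval int64 }

// minDedupIntervalOf is the same scan as getMinDedupInterval above.
func minDedupIntervalOf(parts []part) int64 {
	if len(parts) == 0 {
		return 0
	}
	dMin := parts[0].minDedupInterval
	for _, p := range parts[1:] {
		if p.minDedupInterval < dMin {
			dMin = p.minDedupInterval
		}
	}
	return dMin
}

func main() {
	parts := []part{{0}, {30000}, {60000}} // hypothetical parts
	required := int64(60000)               // e.g. -dedup.minScrapeInterval=60s
	actual := minDedupIntervalOf(parts)
	fmt.Println("final dedup needed:", required > actual) // true: one part is still raw
}
```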
vendor/github.com/VictoriaMetrics/metricsql/rollup.go (generated, vendored; 1 changed line)

@@ -69,6 +69,7 @@ var rollupFuncs = map[string]bool{
 	// in order to properly handle offset and timestamps unaligned to the current step.
 	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/415 for details.
 	"timestamp":           true,
+	"timestamp_with_name": true,
 	"tlast_over_time":     true,
 	"tmax_over_time":      true,
 	"tmin_over_time":      true,
vendor/modules.txt (vendored; 2 changed lines)

@@ -22,7 +22,7 @@ github.com/VictoriaMetrics/fasthttp/stackless
 # github.com/VictoriaMetrics/metrics v1.18.1
 ## explicit
 github.com/VictoriaMetrics/metrics
-# github.com/VictoriaMetrics/metricsql v0.32.0
+# github.com/VictoriaMetrics/metricsql v0.33.0
 ## explicit
 github.com/VictoriaMetrics/metricsql
 github.com/VictoriaMetrics/metricsql/binaryop