= ({ data, displayColumns }) => {
{rowMeta}
))}
-
- {row.value}
+ |
+ {!row.values.length ? row.value : row.values.map(val => {val} )}
|
{hasCopyValue && (
diff --git a/app/vmui/packages/vmui/src/pages/ExploreMetrics/hooks/useSetQueryParams.ts b/app/vmui/packages/vmui/src/pages/ExploreMetrics/hooks/useSetQueryParams.ts
index a4ff11d228..c0fe6d70bc 100644
--- a/app/vmui/packages/vmui/src/pages/ExploreMetrics/hooks/useSetQueryParams.ts
+++ b/app/vmui/packages/vmui/src/pages/ExploreMetrics/hooks/useSetQueryParams.ts
@@ -2,6 +2,7 @@ import { useEffect } from "react";
import { compactObject } from "../../../utils/object";
import { useTimeState } from "../../../state/time/TimeStateContext";
import { setQueryStringWithoutPageReload } from "../../../utils/query-string";
+import { useGraphState } from "../../../state/graph/GraphStateContext";
interface queryProps {
job: string
@@ -11,13 +12,14 @@ interface queryProps {
}
export const useSetQueryParams = ({ job, instance, metrics, size }: queryProps) => {
- const { duration, relativeTime, period: { date, step } } = useTimeState();
+ const { duration, relativeTime, period: { date } } = useTimeState();
+ const { customStep } = useGraphState();
const setSearchParamsFromState = () => {
const params = compactObject({
["g0.range_input"]: duration,
["g0.end_input"]: date,
- ["g0.step_input"]: step,
+ ["g0.step_input"]: customStep,
["g0.relative_time"]: relativeTime,
size,
job,
@@ -28,6 +30,6 @@ export const useSetQueryParams = ({ job, instance, metrics, size }: queryProps)
setQueryStringWithoutPageReload(params);
};
- useEffect(setSearchParamsFromState, [duration, relativeTime, date, step, job, instance, metrics, size]);
+ useEffect(setSearchParamsFromState, [duration, relativeTime, date, customStep, job, instance, metrics, size]);
useEffect(setSearchParamsFromState, []);
};
diff --git a/app/vmui/packages/vmui/src/pages/PredefinedPanels/PredefinedPanel/PredefinedPanel.tsx b/app/vmui/packages/vmui/src/pages/PredefinedPanels/PredefinedPanel/PredefinedPanel.tsx
index 0caf115917..ae214b02a6 100644
--- a/app/vmui/packages/vmui/src/pages/PredefinedPanels/PredefinedPanel/PredefinedPanel.tsx
+++ b/app/vmui/packages/vmui/src/pages/PredefinedPanels/PredefinedPanel/PredefinedPanel.tsx
@@ -4,7 +4,6 @@ import { AxisRange, YaxisState } from "../../../state/graph/reducer";
import GraphView from "../../../components/Views/GraphView/GraphView";
import { useFetchQuery } from "../../../hooks/useFetchQuery";
import Spinner from "../../../components/Main/Spinner/Spinner";
-import StepConfigurator from "../../../components/Configurators/StepConfigurator/StepConfigurator";
import GraphSettings from "../../../components/Configurators/GraphSettings/GraphSettings";
import { marked } from "marked";
import { useTimeDispatch, useTimeState } from "../../../state/time/TimeStateContext";
@@ -12,7 +11,7 @@ import { InfoIcon } from "../../../components/Main/Icons";
import "./style.scss";
import Alert from "../../../components/Main/Alert/Alert";
import Tooltip from "../../../components/Main/Tooltip/Tooltip";
-import usePrevious from "../../../hooks/usePrevious";
+import { useGraphState } from "../../../state/graph/GraphStateContext";
export interface PredefinedPanelsProps extends PanelSettings {
filename: string;
@@ -28,13 +27,12 @@ const PredefinedPanel: FC = ({
alias
}) => {
- const { period, duration } = useTimeState();
+ const { period } = useTimeState();
+ const { customStep } = useGraphState();
const dispatch = useTimeDispatch();
- const prevDuration = usePrevious(duration);
const containerRef = useRef(null);
const [visible, setVisible] = useState(true);
- const [customStep, setCustomStep] = useState(period.step || "1s");
const [yaxis, setYaxis] = useState({
limits: {
enable: false,
@@ -77,11 +75,6 @@ const PredefinedPanel: FC = ({
};
}, []);
- useEffect(() => {
- if (duration === prevDuration || !prevDuration) return;
- if (customStep) setCustomStep(period.step || "1s");
- }, [duration, prevDuration]);
-
if (!validExpr) return (
"expr" not found. Check the configuration file {filename}.
@@ -123,13 +116,6 @@ const PredefinedPanel: FC = ({
{title || ""}
-
-
-
{
- const { duration, relativeTime, period: { date, step } } = useTimeState();
+ const { duration, relativeTime, period: { date } } = useTimeState();
+ const { customStep } = useGraphState();
const setSearchParamsFromState = () => {
const params = compactObject({
["g0.range_input"]: duration,
["g0.end_input"]: date,
- ["g0.step_input"]: step,
+ ["g0.step_input"]: customStep,
["g0.relative_time"]: relativeTime
});
setQueryStringWithoutPageReload(params);
};
- useEffect(setSearchParamsFromState, [duration, relativeTime, date, step]);
+ useEffect(setSearchParamsFromState, [duration, relativeTime, date, customStep]);
useEffect(setSearchParamsFromState, []);
};
diff --git a/app/vmui/packages/vmui/src/pages/TracePage/index.tsx b/app/vmui/packages/vmui/src/pages/TracePage/index.tsx
index a3b99cd183..6f0cb3e446 100644
--- a/app/vmui/packages/vmui/src/pages/TracePage/index.tsx
+++ b/app/vmui/packages/vmui/src/pages/TracePage/index.tsx
@@ -148,7 +148,7 @@ const TracePage: FC = () => {
{"\n"}
In order to use tracing please refer to the doc:
{
- let hash = 0;
- for (let i = 0; i < str.length; i++) {
- hash = str.charCodeAt(i) + ((hash << 5) - hash);
+export const baseContrastColors = [
+ "#e54040",
+ "#32a9dc",
+ "#2ee329",
+ "#7126a1",
+ "#e38f0f",
+ "#3d811a",
+ "#ffea00",
+ "#2d2d2d",
+ "#da42a6",
+ "#a44e0c",
+];
+
+export const getColorFromString = (text: string): string => {
+ const SEED = 16777215;
+ const FACTOR = 49979693;
+
+ let b = 1;
+ let d = 0;
+ let f = 1;
+
+ if (text.length > 0) {
+ for (let i = 0; i < text.length; i++) {
+ text[i].charCodeAt(0) > d && (d = text[i].charCodeAt(0));
+ f = parseInt(String(SEED / d));
+ b = (b + text[i].charCodeAt(0) * f * FACTOR) % SEED;
+ }
}
- let colour = "#";
- for (let i = 0; i < 3; i++) {
- const value = (hash >> (i * 8)) & 0xFF;
- colour += ("00" + value.toString(16)).substr(-2);
- }
- return colour;
+
+ let hex = ((b * text.length) % SEED).toString(16);
+ hex = hex.padEnd(6, hex);
+ return `#${hex}`;
};
export const hexToRGB = (hex: string): string => {
diff --git a/app/vmui/packages/vmui/src/utils/metric.ts b/app/vmui/packages/vmui/src/utils/metric.ts
index d23348ad5a..3ca26780cd 100644
--- a/app/vmui/packages/vmui/src/utils/metric.ts
+++ b/app/vmui/packages/vmui/src/utils/metric.ts
@@ -1,9 +1,12 @@
import { MetricBase } from "../api/types";
-export const getNameForMetric = (result: MetricBase, alias?: string): string => {
+export const getNameForMetric = (result: MetricBase, alias?: string, showQueryNum = true): string => {
const { __name__, ...freeFormFields } = result.metric;
- const name = alias || `[Query ${result.group}] ${__name__ || ""}`;
+ const name = alias || `${showQueryNum ? `[Query ${result.group}] ` : ""}${__name__ || ""}`;
if (Object.keys(freeFormFields).length == 0) {
+ if (!name) {
+ return "value";
+ }
return name;
}
return `${name}{${Object.entries(freeFormFields).map(e =>
diff --git a/app/vmui/packages/vmui/src/utils/uplot/helpers.ts b/app/vmui/packages/vmui/src/utils/uplot/helpers.ts
index d55fa54cb2..b095c86f76 100644
--- a/app/vmui/packages/vmui/src/utils/uplot/helpers.ts
+++ b/app/vmui/packages/vmui/src/utils/uplot/helpers.ts
@@ -1,5 +1,4 @@
import uPlot, { Axis } from "uplot";
-import { getColorFromString } from "../color";
export const defaultOptions = {
legend: {
@@ -37,7 +36,18 @@ export const formatPrettyNumber = (n: number | null | undefined, min = 0, max =
if (n === undefined || n === null) {
return "";
}
- let digits = 3 + Math.floor(1 + Math.log10(Math.max(Math.abs(min), Math.abs(max))) - Math.log10(Math.abs(min - max)));
+ const range = Math.abs(max - min);
+ if (isNaN(range) || range == 0) {
+ // Return the constant number as is if the range isn't set or it is too small.
+ if (Math.abs(n) >= 1000) {
+ return n.toLocaleString("en-US");
+ }
+ return n.toString();
+ }
+ // Make sure n has 3 significant digits on the given range.
+ // This precision should be enough for most UX cases,
+ // since the remaining digits are usually white noise.
+ let digits = 3 + Math.floor(1 + Math.log10(Math.max(Math.abs(min), Math.abs(max))) - Math.log10(range));
if (isNaN(digits) || digits > 20) {
digits = 20;
}
@@ -74,6 +84,4 @@ export const sizeAxis = (u: uPlot, values: string[], axisIdx: number, cycleNum:
return Math.ceil(axisSize);
};
-export const getColorLine = (label: string): string => getColorFromString(label);
-
export const getDashLine = (group: number): number[] => group <= 1 ? [] : [group*4, group*1.2];
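For context on the precision rule added above: the new branch keeps roughly three significant digits relative to the plotted [min, max] range and prints constant series as-is. A minimal Go sketch of the same arithmetic, with illustrative names that are not part of the patch:

```go
package main

import (
	"fmt"
	"math"
)

// significantDigits mirrors the formula from formatPrettyNumber:
// keep roughly 3 significant digits relative to the plotted [min, max] range.
func significantDigits(min, max float64) int {
	rng := math.Abs(max - min)
	if rng == 0 || math.IsNaN(rng) {
		// Constant series: the caller prints the number as is.
		return 0
	}
	digits := 3 + int(math.Floor(1+math.Log10(math.Max(math.Abs(min), math.Abs(max)))-math.Log10(rng)))
	if digits > 20 {
		digits = 20
	}
	return digits
}

func main() {
	fmt.Println(significantDigits(0, 1000))   // 4: a wide range needs few digits
	fmt.Println(significantDigits(999, 1001)) // 6: a narrow range needs more digits
}
```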
diff --git a/app/vmui/packages/vmui/src/utils/uplot/series.ts b/app/vmui/packages/vmui/src/utils/uplot/series.ts
index 31e8c149fc..2314a4f1c4 100644
--- a/app/vmui/packages/vmui/src/utils/uplot/series.ts
+++ b/app/vmui/packages/vmui/src/utils/uplot/series.ts
@@ -2,26 +2,34 @@ import { MetricResult } from "../../api/types";
import { Series } from "uplot";
import { getNameForMetric } from "../metric";
import { BarSeriesItem, Disp, Fill, LegendItemType, Stroke } from "./types";
-import { getColorLine } from "./helpers";
import { HideSeriesArgs } from "./types";
+import { baseContrastColors, getColorFromString } from "../color";
interface SeriesItem extends Series {
freeFormFields: {[key: string]: string};
}
-export const getSeriesItem = (d: MetricResult, hideSeries: string[], alias: string[]): SeriesItem => {
- const label = getNameForMetric(d, alias[d.group - 1]);
- return {
- label,
- freeFormFields: d.metric,
- width: 1.4,
- stroke: getColorLine(label),
- show: !includesHideSeries(label, hideSeries),
- scale: "1",
- points: {
- size: 4.2,
- width: 1.4
- }
+export const getSeriesItemContext = () => {
+ const colorState: {[key: string]: string} = {};
+
+ return (d: MetricResult, hideSeries: string[], alias: string[]): SeriesItem => {
+ const label = getNameForMetric(d, alias[d.group - 1]);
+ const countSavedColors = Object.keys(colorState).length;
+ const hasBasicColors = countSavedColors < baseContrastColors.length;
+ if (hasBasicColors) colorState[label] = colorState[label] || baseContrastColors[countSavedColors];
+
+ return {
+ label,
+ freeFormFields: d.metric,
+ width: 1.4,
+ stroke: colorState[label] || getColorFromString(label),
+ show: !includesHideSeries(label, hideSeries),
+ scale: "1",
+ points: {
+ size: 4.2,
+ width: 1.4
+ }
+ };
};
};
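The closure above hands the first series the fixed high-contrast palette and only falls back to hash-based colors once the palette is exhausted, so the first ten lines on a chart stay visually distinct. A rough Go sketch of that assignment strategy (the FNV hash fallback is an illustrative stand-in for getColorFromString, not the patch's algorithm):

```go
package main

import (
	"fmt"
	"hash/fnv"
)

var baseContrastColors = []string{
	"#e54040", "#32a9dc", "#2ee329", "#7126a1", "#e38f0f",
	"#3d811a", "#ffea00", "#2d2d2d", "#da42a6", "#a44e0c",
}

// newColorPicker keeps per-label assignments: the first labels receive the
// fixed palette, later labels get a deterministic hash-based fallback color.
func newColorPicker() func(label string) string {
	assigned := map[string]string{}
	return func(label string) string {
		if c, ok := assigned[label]; ok {
			return c
		}
		if len(assigned) < len(baseContrastColors) {
			assigned[label] = baseContrastColors[len(assigned)]
			return assigned[label]
		}
		h := fnv.New32a()
		h.Write([]byte(label))
		return fmt.Sprintf("#%06x", h.Sum32()&0xffffff)
	}
}

func main() {
	pick := newColorPicker()
	fmt.Println(pick("up"), pick("up"), pick("process_cpu_seconds_total"))
}
```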
diff --git a/dashboards/operator.json b/dashboards/operator.json
index 17966fd325..4f9cc6c7ab 100644
--- a/dashboards/operator.json
+++ b/dashboards/operator.json
@@ -1,4 +1,38 @@
{
+ "__inputs": [],
+ "__elements": {},
+ "__requires": [
+ {
+ "type": "grafana",
+ "id": "grafana",
+ "name": "Grafana",
+ "version": "9.2.2"
+ },
+ {
+ "type": "panel",
+ "id": "graph",
+ "name": "Graph (old)",
+ "version": ""
+ },
+ {
+ "type": "datasource",
+ "id": "prometheus",
+ "name": "Prometheus",
+ "version": "1.0.0"
+ },
+ {
+ "type": "panel",
+ "id": "stat",
+ "name": "Stat",
+ "version": ""
+ },
+ {
+ "type": "panel",
+ "id": "text",
+ "name": "Text",
+ "version": ""
+ }
+ ],
"annotations": {
"list": [
{
@@ -25,13 +59,16 @@
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
- "id": 38,
- "iteration": 1653261405647,
+ "id": null,
"links": [],
"liveNow": false,
"panels": [
{
"collapsed": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$ds"
+ },
"gridPos": {
"h": 1,
"w": 24,
@@ -40,6 +77,15 @@
},
"id": 8,
"panels": [],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$ds"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Overview",
"type": "row"
},
@@ -56,10 +102,24 @@
},
"id": 24,
"options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
"content": "$version ",
"mode": "markdown"
},
- "pluginVersion": "8.3.2",
+ "pluginVersion": "9.2.2",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$ds"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Version",
"type": "text"
},
@@ -113,7 +173,7 @@
"text": {},
"textMode": "auto"
},
- "pluginVersion": "8.3.2",
+ "pluginVersion": "9.2.2",
"targets": [
{
"datasource": {
@@ -179,7 +239,7 @@
},
"textMode": "auto"
},
- "pluginVersion": "8.3.2",
+ "pluginVersion": "9.2.2",
"targets": [
{
"datasource": {
@@ -236,7 +296,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "8.3.2",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -324,7 +384,7 @@
"alertThreshold": true
},
"percentage": false,
- "pluginVersion": "8.3.2",
+ "pluginVersion": "9.2.2",
"pointradius": 2,
"points": false,
"renderer": "flot",
@@ -377,6 +437,10 @@
},
{
"collapsed": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$ds"
+ },
"gridPos": {
"h": 1,
"w": 24,
@@ -385,6 +449,15 @@
},
"id": 6,
"panels": [],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$ds"
+ },
+ "refId": "A"
+ }
+ ],
"title": "Troubleshooting",
"type": "row"
},
@@ -762,6 +835,10 @@
},
{
"collapsed": false,
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$ds"
+ },
"gridPos": {
"h": 1,
"w": 24,
@@ -770,6 +847,15 @@
},
"id": 4,
"panels": [],
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$ds"
+ },
+ "refId": "A"
+ }
+ ],
"title": "resources",
"type": "row"
},
@@ -1169,7 +1255,7 @@
}
],
"refresh": "",
- "schemaVersion": 33,
+ "schemaVersion": 37,
"style": "dark",
"tags": [
"operator",
@@ -1179,9 +1265,9 @@
"list": [
{
"current": {
- "selected": true,
- "text": "VictoriaMetrics",
- "value": "VictoriaMetrics"
+ "selected": false,
+ "text": "cloud-c15",
+ "value": "cloud-c15"
},
"hide": 0,
"includeAll": false,
@@ -1196,11 +1282,7 @@
"type": "datasource"
},
{
- "current": {
- "selected": false,
- "text": "vm-operator-victoria-metrics-operator",
- "value": "vm-operator-victoria-metrics-operator"
- },
+ "current": {},
"datasource": {
"type": "prometheus",
"uid": "$ds"
@@ -1222,11 +1304,7 @@
"type": "query"
},
{
- "current": {
- "selected": false,
- "text": "All",
- "value": "$__all"
- },
+ "current": {},
"datasource": {
"type": "prometheus",
"uid": "$ds"
@@ -1248,12 +1326,7 @@
"type": "query"
},
{
- "current": {
- "isNone": true,
- "selected": false,
- "text": "None",
- "value": ""
- },
+ "current": {},
"datasource": {
"type": "prometheus",
"uid": "$ds"
@@ -1286,4 +1359,4 @@
"uid": "1H179hunk",
"version": 1,
"weekStart": ""
-}
+}
\ No newline at end of file
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 662dfee087..f6919a69da 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -16,9 +16,21 @@ The following tip changes can be tested by building VictoriaMetrics components f
## tip
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add ability to show custom dashboards at vmui by specifying a path to a directory with dashboard config files via `-vmui.customDashboardsPath` command-line flag. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3322) and [these docs](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/app/vmui/packages/vmui/public/dashboards).
+* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): apply the `step` globally to all the displayed graphs. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3574).
+* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): improve the appearance of graph lines by using more visually distinct colors. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3571).
+* BUGFIX: do not slow down concurrently executed queries during assisted merges, since assisted merges already prioritize data ingestion over queries. The probability of assisted merges has increased since [v1.85.0](https://docs.victoriametrics.com/CHANGELOG.html#v1850) because of internal refactoring, which could slow down queries even when plenty of free CPU resources were available. See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3647) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3641) issues.
+* BUGFIX: reduce the increased CPU usage at `vmselect` back to the v1.85.3 level when processing heavy queries. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3641).
+* BUGFIX: [retention filters](https://docs.victoriametrics.com/#retention-filters): fix `FATAL: cannot locate metric name for metricID=...: EOF` panic, which could occur when retention filters are enabled.
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly cancel in-flight service discovery requests for [consul_sd_configs](https://docs.victoriametrics.com/sd_configs.html#consul_sd_configs) and [nomad_sd_configs](https://docs.victoriametrics.com/sd_configs.html#nomad_sd_configs) when the service list changes. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3468).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): [dockerswarm_sd_configs](https://docs.victoriametrics.com/sd_configs.html#dockerswarm_sd_configs): apply `filters` only to objects of the specified `role`. Previously filters were applied to all the objects, which could cause errors when different types of objects were used with filters that were not compatible with them. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3579).
-
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): suppress all scrape errors when `-promscrape.suppressScrapeErrors` is enabled. Previously some scrape errors were logged even if the `-promscrape.suppressScrapeErrors` flag was set.
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): consistently include the scrape URL and scrape target labels in all error logs for failed scrapes. Previously some failed scrapes were logged without this information.
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): do not send stale markers to remote storage for series exceeding the configured [series limit](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3660).
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly apply [series limit](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter) when [staleness tracking](https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers) is disabled.
+* BUGFIX: [Pushgateway import](https://docs.victoriametrics.com/#how-to-import-data-in-prometheus-exposition-format): properly return `200 OK` HTTP response code. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3636).
+* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly parse `M` and `Mi` suffixes as `1e6` multipliers in `1M` and `1Mi` numeric constants. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3664). The issue has been introduced in [v1.86.0](https://docs.victoriametrics.com/CHANGELOG.html#v1860).
+* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): properly display range query results at `Table` view. For example, `up[5m]` query now shows all the raw samples for the last 5 minutes for the `up` metric at the `Table` view. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3516).
## [v1.86.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.86.1)
diff --git a/docs/guides/guide-delete-or-replace-metrics.md b/docs/guides/guide-delete-or-replace-metrics.md
index fd1d5e692b..4b35265607 100644
--- a/docs/guides/guide-delete-or-replace-metrics.md
+++ b/docs/guides/guide-delete-or-replace-metrics.md
@@ -75,7 +75,7 @@ When you're sure [time series selector](https://prometheus.io/docs/prometheus/la
```console
-curl -s 'http://vmselect:8481/select/0/prometheus/api/v1/series?match[]=process_cpu_cores_available'
+curl -s 'http://vmselect:8481/delete/0/prometheus/api/v1/admin/tsdb/delete_series?match[]=process_cpu_cores_available'
```
diff --git a/docs/guides/migrate-from-influx.md b/docs/guides/migrate-from-influx.md
index 1ea0691e49..fb2d981780 100644
--- a/docs/guides/migrate-from-influx.md
+++ b/docs/guides/migrate-from-influx.md
@@ -123,7 +123,7 @@ In addition to InfluxDB line protocol, VictoriaMetrics supports many other ways
## Query data
-VictoriaMetrics does not have a com\mand-line interface (CLI). Instead, it provides
+VictoriaMetrics does not have a command-line interface (CLI). Instead, it provides
an [HTTP API](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#prometheus-querying-api-usage)
for serving read queries. This API is used in various integrations such as
[Grafana](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#grafana-setup). The same API is also used
diff --git a/go.mod b/go.mod
index c40f12bcf4..000bf35639 100644
--- a/go.mod
+++ b/go.mod
@@ -11,8 +11,8 @@ require (
// Do not use the original github.com/valyala/fasthttp because of issues
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
github.com/VictoriaMetrics/fasthttp v1.1.0
- github.com/VictoriaMetrics/metrics v1.23.0
- github.com/VictoriaMetrics/metricsql v0.51.1
+ github.com/VictoriaMetrics/metrics v1.23.1
+ github.com/VictoriaMetrics/metricsql v0.51.2
github.com/aws/aws-sdk-go-v2 v1.17.3
github.com/aws/aws-sdk-go-v2/config v1.18.8
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.47
@@ -42,7 +42,7 @@ require (
golang.org/x/net v0.5.0
golang.org/x/oauth2 v0.4.0
golang.org/x/sys v0.4.0
- google.golang.org/api v0.106.0
+ google.golang.org/api v0.107.0
gopkg.in/yaml.v2 v2.4.0
)
@@ -54,7 +54,7 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
- github.com/aws/aws-sdk-go v1.44.177 // indirect
+ github.com/aws/aws-sdk-go v1.44.180 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.8 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 // indirect
@@ -107,13 +107,13 @@ require (
go.opentelemetry.io/otel/trace v1.11.2 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/goleak v1.2.0 // indirect
- golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a // indirect
+ golang.org/x/exp v0.0.0-20230113213754-f9f960f08ad4 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/text v0.6.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
+ google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5 // indirect
google.golang.org/grpc v1.52.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index cf4334c91b..cf36523e19 100644
--- a/go.sum
+++ b/go.sum
@@ -67,10 +67,10 @@ github.com/VictoriaMetrics/fastcache v1.12.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJ
github.com/VictoriaMetrics/fasthttp v1.1.0 h1:3crd4YWHsMwu60GUXRH6OstowiFvqrwS4a/ueoLdLL0=
github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR2uydjiWvoLp5ZTqQ=
github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
-github.com/VictoriaMetrics/metrics v1.23.0 h1:WzfqyzCaxUZip+OBbg1+lV33WChDSu4ssYII3nxtpeA=
-github.com/VictoriaMetrics/metrics v1.23.0/go.mod h1:rAr/llLpEnAdTehiNlUxKgnjcOuROSzpw0GvjpEbvFc=
-github.com/VictoriaMetrics/metricsql v0.51.1 h1:gmh3ZGCDrqUTdhUrr87eJOXMOputDYs1PtLwTfySTsI=
-github.com/VictoriaMetrics/metricsql v0.51.1/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
+github.com/VictoriaMetrics/metrics v1.23.1 h1:/j8DzeJBxSpL2qSIdqnRFLvQQhbJyJbbEi22yMm7oL0=
+github.com/VictoriaMetrics/metrics v1.23.1/go.mod h1:rAr/llLpEnAdTehiNlUxKgnjcOuROSzpw0GvjpEbvFc=
+github.com/VictoriaMetrics/metricsql v0.51.2 h1:GCbxti0I46x3Ld/WhcUyawvLr6J0x90IaMftkjosHJI=
+github.com/VictoriaMetrics/metricsql v0.51.2/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
@@ -87,8 +87,8 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.44.177 h1:ckMJhU5Gj+4Rta+bJIUiUd7jvHom84aim3zkGPblq0s=
-github.com/aws/aws-sdk-go v1.44.177/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.180 h1:VLZuAHI9fa/3WME5JjpVjcPCNfpGHVMiHx8sLHWhMgI=
+github.com/aws/aws-sdk-go v1.44.180/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v1.17.3 h1:shN7NlnVzvDUgPQ+1rLMSxY8OWRNDRYtiqe0p/PgrhY=
github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
@@ -487,8 +487,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a h1:tlXy25amD5A7gOfbXdqCGN5k8ESEed/Ee1E5RcrYnqU=
-golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230113213754-f9f960f08ad4 h1:CNkDRtCj8otM5CFz5jYvbr8ioXX8flVsLfDWEj0M5kk=
+golang.org/x/exp v0.0.0-20230113213754-f9f960f08ad4/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -703,8 +703,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
-google.golang.org/api v0.106.0 h1:ffmW0faWCwKkpbbtvlY/K/8fUl+JKvNS5CVzRoyfCv8=
-google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.107.0 h1:I2SlFjD8ZWabaIFOfeEDg3pf0BHJDh6iYQ1ic3Yu/UU=
+google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -742,8 +742,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f h1:BWUVssLB0HVOSY78gIdvk1dTVYtT1y8SBWtPYuTJ/6w=
-google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5 h1:wJT65XLOzhpSPCdAmmKfz94SlmnQzDzjm3Cj9k3fsXY=
+google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
diff --git a/lib/fs/fs_solaris.go b/lib/fs/fs_solaris.go
index ac94ea406c..385903896f 100644
--- a/lib/fs/fs_solaris.go
+++ b/lib/fs/fs_solaris.go
@@ -49,15 +49,8 @@ func createFlockFile(flockFile string) (*os.File, error) {
}
func mustGetFreeSpace(path string) uint64 {
- d, err := os.Open(path)
- if err != nil {
- logger.Panicf("FATAL: cannot open dir for determining free disk space: %s", err)
- }
- defer MustClose(d)
-
- fd := d.Fd()
var stat unix.Statvfs_t
- if err := unix.Fstatvfs(int(fd), &stat); err != nil {
+ if err := unix.Statvfs(path, &stat); err != nil {
logger.Panicf("FATAL: cannot determine free disk space on %q: %s", path, err)
}
return freeSpace(stat)
diff --git a/lib/fs/fs_unix.go b/lib/fs/fs_unix.go
index bcb789c94a..daa746e6af 100644
--- a/lib/fs/fs_unix.go
+++ b/lib/fs/fs_unix.go
@@ -45,15 +45,8 @@ func createFlockFile(flockFile string) (*os.File, error) {
}
func mustGetFreeSpace(path string) uint64 {
- d, err := os.Open(path)
- if err != nil {
- logger.Panicf("FATAL: cannot open dir for determining free disk space: %s", err)
- }
- defer MustClose(d)
-
- fd := d.Fd()
var stat unix.Statfs_t
- if err := unix.Fstatfs(int(fd), &stat); err != nil {
+ if err := unix.Statfs(path, &stat); err != nil {
logger.Panicf("FATAL: cannot determine free disk space on %q: %s", path, err)
}
return freeSpace(stat)
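Both the Solaris and Unix variants now call statfs/statvfs directly on the path instead of opening the directory and passing its file descriptor. A minimal sketch of the new Unix code path; the Bavail*Bsize computation stands in for the repo's freeSpace helper and is an assumption, not copied from it:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

// mustGetFreeSpace reports the free disk space of the filesystem containing path
// by calling statfs(2) on the path directly, without opening the directory first.
func mustGetFreeSpace(path string) uint64 {
	var stat unix.Statfs_t
	if err := unix.Statfs(path, &stat); err != nil {
		log.Fatalf("FATAL: cannot determine free disk space on %q: %s", path, err)
	}
	// Space available to unprivileged users, in bytes.
	return uint64(stat.Bavail) * uint64(stat.Bsize)
}

func main() {
	fmt.Println(mustGetFreeSpace("/tmp"))
}
```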
diff --git a/lib/mergeset/table.go b/lib/mergeset/table.go
index cb875c0269..158b96cd4c 100644
--- a/lib/mergeset/table.go
+++ b/lib/mergeset/table.go
@@ -18,7 +18,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
- "github.com/VictoriaMetrics/VictoriaMetrics/lib/storagepacelimiter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/syncwg"
)
@@ -782,11 +781,8 @@ func (tb *Table) assistedMergeForInmemoryParts() {
return
}
- // Prioritize assisted merges over searches.
- storagepacelimiter.Search.Inc()
atomic.AddUint64(&tb.inmemoryAssistedMerges, 1)
err := tb.mergeInmemoryParts()
- storagepacelimiter.Search.Dec()
if err == nil {
continue
}
@@ -806,11 +802,8 @@ func (tb *Table) assistedMergeForFileParts() {
return
}
- // Prioritize assisted merges over searches.
- storagepacelimiter.Search.Inc()
atomic.AddUint64(&tb.fileAssistedMerges, 1)
err := tb.mergeExistingParts(false)
- storagepacelimiter.Search.Dec()
if err == nil {
continue
}
diff --git a/lib/pacelimiter/pacelimiter.go b/lib/pacelimiter/pacelimiter.go
deleted file mode 100644
index 462a0ef6cb..0000000000
--- a/lib/pacelimiter/pacelimiter.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package pacelimiter
-
-import (
- "sync"
- "sync/atomic"
-)
-
-// PaceLimiter throttles WaitIfNeeded callers while the number of Inc calls is bigger than the number of Dec calls.
-//
-// It is expected that Inc is called before performing high-priority work,
-// while Dec is called when the work is done.
-// WaitIfNeeded must be called inside the work which must be throttled (i.e. lower-priority work).
-// It may be called in the loop before performing a part of low-priority work.
-type PaceLimiter struct {
- mu sync.Mutex
- cond *sync.Cond
- delaysTotal uint64
- n int32
-}
-
-// New returns pace limiter that throttles WaitIfNeeded callers while the number of Inc calls is bigger than the number of Dec calls.
-func New() *PaceLimiter {
- var pl PaceLimiter
- pl.cond = sync.NewCond(&pl.mu)
- return &pl
-}
-
-// Inc increments pl.
-func (pl *PaceLimiter) Inc() {
- atomic.AddInt32(&pl.n, 1)
-}
-
-// Dec decrements pl.
-func (pl *PaceLimiter) Dec() {
- if atomic.AddInt32(&pl.n, -1) == 0 {
- // Wake up all the goroutines blocked in WaitIfNeeded,
- // since the number of Dec calls equals the number of Inc calls.
- pl.cond.Broadcast()
- }
-}
-
-// WaitIfNeeded blocks while the number of Inc calls is bigger than the number of Dec calls.
-func (pl *PaceLimiter) WaitIfNeeded() {
- if atomic.LoadInt32(&pl.n) <= 0 {
- // Fast path - there is no need in lock.
- return
- }
- // Slow path - wait until Dec is called.
- pl.mu.Lock()
- for atomic.LoadInt32(&pl.n) > 0 {
- pl.delaysTotal++
- pl.cond.Wait()
- }
- pl.mu.Unlock()
-}
-
-// DelaysTotal returns the number of delays inside WaitIfNeeded.
-func (pl *PaceLimiter) DelaysTotal() uint64 {
- pl.mu.Lock()
- n := pl.delaysTotal
- pl.mu.Unlock()
- return n
-}
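For reference, the deleted package implemented a simple priority gate: high-priority work brackets itself with Inc/Dec, while low-priority work calls WaitIfNeeded and blocks as long as any Inc is outstanding. A minimal usage sketch of the pattern being removed (it only builds against the tree before this change):

```go
package main

import (
	"fmt"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/pacelimiter"
)

func main() {
	pl := pacelimiter.New()
	pl.Inc() // high-priority work (e.g. an assisted merge) starts

	go func() {
		pl.WaitIfNeeded() // blocks while Inc calls outnumber Dec calls
		fmt.Println("low-priority search resumed")
	}()

	time.Sleep(100 * time.Millisecond)
	pl.Dec() // high-priority work done; blocked callers are released

	time.Sleep(100 * time.Millisecond)
	fmt.Println("delays:", pl.DelaysTotal())
}
```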
diff --git a/lib/pacelimiter/pacelimiter_test.go b/lib/pacelimiter/pacelimiter_test.go
deleted file mode 100644
index 4c5bbf2520..0000000000
--- a/lib/pacelimiter/pacelimiter_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package pacelimiter
-
-import (
- "fmt"
- "runtime"
- "sync"
- "testing"
- "time"
-)
-
-func TestPacelimiter(t *testing.T) {
- t.Run("nonblocking", func(t *testing.T) {
- pl := New()
- ch := make(chan struct{}, 10)
- for i := 0; i < cap(ch); i++ {
- go func() {
- for j := 0; j < 10; j++ {
- pl.WaitIfNeeded()
- runtime.Gosched()
- }
- ch <- struct{}{}
- }()
- }
-
- // Check that all the goroutines are finished.
- timeoutCh := time.After(5 * time.Second)
- for i := 0; i < cap(ch); i++ {
- select {
- case <-ch:
- case <-timeoutCh:
- t.Fatalf("timeout")
- }
- }
- if n := pl.DelaysTotal(); n > 0 {
- t.Fatalf("unexpected non-zero number of delays: %d", n)
- }
- })
- t.Run("blocking", func(t *testing.T) {
- pl := New()
- pl.Inc()
- ch := make(chan struct{}, 10)
- var wg sync.WaitGroup
- for i := 0; i < cap(ch); i++ {
- wg.Add(1)
- go func() {
- wg.Done()
- for j := 0; j < 10; j++ {
- pl.WaitIfNeeded()
- }
- ch <- struct{}{}
- }()
- }
-
- // Check that all the goroutines created above are started and blocked in WaitIfNeeded
- wg.Wait()
- select {
- case <-ch:
- t.Fatalf("the pl must be blocked")
- default:
- }
-
- // Unblock goroutines and check that they are unblocked.
- pl.Dec()
- timeoutCh := time.After(5 * time.Second)
- for i := 0; i < cap(ch); i++ {
- select {
- case <-ch:
- case <-timeoutCh:
- t.Fatalf("timeout")
- }
- }
- if n := pl.DelaysTotal(); n == 0 {
- t.Fatalf("expecting non-zero number of delays")
- }
- // Verify that the pl is unblocked now.
- pl.WaitIfNeeded()
-
- // Verify that negative count doesn't block pl.
- pl.Dec()
- pl.WaitIfNeeded()
- if n := pl.DelaysTotal(); n == 0 {
- t.Fatalf("expecting non-zero number of delays after subsequent pl.Dec()")
- }
- })
- t.Run("negative_count", func(t *testing.T) {
- n := 10
- pl := New()
- for i := 0; i < n; i++ {
- pl.Dec()
- }
-
- doneCh := make(chan error)
- go func() {
- defer close(doneCh)
- for i := 0; i < n; i++ {
- pl.Inc()
- pl.WaitIfNeeded()
- if n := pl.DelaysTotal(); n != 0 {
- doneCh <- fmt.Errorf("expecting zero number of delays")
- return
- }
- }
- doneCh <- nil
- }()
-
- select {
- case err := <-doneCh:
- if err != nil {
- t.Fatalf("unexpected error: %s", err)
- }
- case <-time.After(5 * time.Second):
- t.Fatalf("timeout")
- }
- })
- t.Run("concurrent_inc_dec", func(t *testing.T) {
- pl := New()
- ch := make(chan struct{}, 10)
- for i := 0; i < cap(ch); i++ {
- go func() {
- for j := 0; j < 10; j++ {
- pl.Inc()
- runtime.Gosched()
- pl.Dec()
- }
- ch <- struct{}{}
- }()
- }
-
- // Verify that all the goroutines are finished
- timeoutCh := time.After(5 * time.Second)
- for i := 0; i < cap(ch); i++ {
- select {
- case <-ch:
- case <-timeoutCh:
- t.Fatalf("timeout")
- }
- }
- // Verify that the pl is unblocked.
- pl.WaitIfNeeded()
- if n := pl.DelaysTotal(); n > 0 {
- t.Fatalf("expecting zer number of delays; got %d", n)
- }
- })
-}
diff --git a/lib/promscrape/config.go b/lib/promscrape/config.go
index b4d5317f7d..dd64733cc3 100644
--- a/lib/promscrape/config.go
+++ b/lib/promscrape/config.go
@@ -41,9 +41,10 @@ import (
)
var (
- noStaleMarkers = flag.Bool("promscrape.noStaleMarkers", false, "Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. This option also disables populating the scrape_series_added metric. See https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series")
- strictParse = flag.Bool("promscrape.config.strictParse", true, "Whether to deny unsupported fields in -promscrape.config . Set to false in order to silently skip unsupported fields")
- dryRun = flag.Bool("promscrape.config.dryRun", false, "Checks -promscrape.config file for errors and unsupported fields and then exits. "+
+ noStaleMarkers = flag.Bool("promscrape.noStaleMarkers", false, "Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. This option also disables populating the scrape_series_added metric. See https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series")
+ seriesLimitPerTarget = flag.Int("promscrape.seriesLimitPerTarget", 0, "Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter for more info")
+ strictParse = flag.Bool("promscrape.config.strictParse", true, "Whether to deny unsupported fields in -promscrape.config . Set to false in order to silently skip unsupported fields")
+ dryRun = flag.Bool("promscrape.config.dryRun", false, "Checks -promscrape.config file for errors and unsupported fields and then exits. "+
"Returns non-zero exit code on parsing errors and emits these errors to stderr. "+
"See also -promscrape.config.strictParse command-line flag. "+
"Pass -loggerLevel=ERROR if you don't need to see info messages in the output.")
@@ -971,6 +972,10 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
if sc.NoStaleMarkers != nil {
noStaleTracking = *sc.NoStaleMarkers
}
+ seriesLimit := *seriesLimitPerTarget
+ if sc.SeriesLimit > 0 {
+ seriesLimit = sc.SeriesLimit
+ }
swc := &scrapeWorkConfig{
scrapeInterval: scrapeInterval,
scrapeIntervalString: scrapeInterval.String(),
@@ -995,7 +1000,7 @@ func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConf
streamParse: sc.StreamParse,
scrapeAlignInterval: sc.ScrapeAlignInterval.Duration(),
scrapeOffset: sc.ScrapeOffset.Duration(),
- seriesLimit: sc.SeriesLimit,
+ seriesLimit: seriesLimit,
noStaleMarkers: noStaleTracking,
}
return swc, nil
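With this move, the global `-promscrape.seriesLimitPerTarget` flag becomes a default that each scrape_config can override through its `series_limit` option, and the resolved value is fixed at config-load time. A tiny sketch of the precedence, with illustrative names:

```go
package main

import "fmt"

// resolveSeriesLimit mirrors the precedence above: a positive per-scrape_config
// series_limit wins over the global -promscrape.seriesLimitPerTarget value.
func resolveSeriesLimit(globalLimit, perConfigLimit int) int {
	if perConfigLimit > 0 {
		return perConfigLimit
	}
	return globalLimit
}

func main() {
	fmt.Println(resolveSeriesLimit(1000, 0))   // 1000: the flag value applies
	fmt.Println(resolveSeriesLimit(1000, 500)) // 500: the scrape_config override wins
}
```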
diff --git a/lib/promscrape/discovery/consul/api.go b/lib/promscrape/discovery/consul/api.go
index 92c5deb4ef..5f9d7e2246 100644
--- a/lib/promscrape/discovery/consul/api.go
+++ b/lib/promscrape/discovery/consul/api.go
@@ -1,6 +1,7 @@
package consul
import (
+ "context"
"flag"
"fmt"
"net/http"
@@ -157,7 +158,7 @@ func maxWaitTime() time.Duration {
// getBlockingAPIResponse performs a blocking request to Consul via client and returns the response.
//
// See https://www.consul.io/api-docs/features/blocking .
-func getBlockingAPIResponse(client *discoveryutils.Client, path string, index int64) ([]byte, int64, error) {
+func getBlockingAPIResponse(ctx context.Context, client *discoveryutils.Client, path string, index int64) ([]byte, int64, error) {
path += "&index=" + strconv.FormatInt(index, 10)
path += "&wait=" + fmt.Sprintf("%ds", int(maxWaitTime().Seconds()))
getMeta := func(resp *http.Response) {
@@ -182,7 +183,7 @@ func getBlockingAPIResponse(client *discoveryutils.Client, path string, index in
}
index = newIndex
}
- data, err := client.GetBlockingAPIResponse(path, getMeta)
+ data, err := client.GetBlockingAPIResponseCtx(ctx, path, getMeta)
if err != nil {
return nil, index, fmt.Errorf("cannot perform blocking Consul API request at %q: %w", path, err)
}
diff --git a/lib/promscrape/discovery/consul/watch.go b/lib/promscrape/discovery/consul/watch.go
index 18d287ab39..6b0ae267cc 100644
--- a/lib/promscrape/discovery/consul/watch.go
+++ b/lib/promscrape/discovery/consul/watch.go
@@ -34,15 +34,17 @@ type consulWatcher struct {
servicesLock sync.Mutex
services map[string]*serviceWatcher
- stopCh chan struct{}
stoppedCh chan struct{}
}
type serviceWatcher struct {
serviceName string
serviceNodes []ServiceNode
- stopCh chan struct{}
- stoppedCh chan struct{}
+
+ stoppedCh chan struct{}
+
+ requestCtx context.Context
+ requestCancel context.CancelFunc
}
// newConsulWatcher creates new watcher and starts background service discovery for Consul.
@@ -71,7 +73,6 @@ func newConsulWatcher(client *discoveryutils.Client, sdc *SDConfig, datacenter,
watchServices: sdc.Services,
watchTags: sdc.Tags,
services: make(map[string]*serviceWatcher),
- stopCh: make(chan struct{}),
stoppedCh: make(chan struct{}),
}
initCh := make(chan struct{})
@@ -85,7 +86,6 @@ func newConsulWatcher(client *discoveryutils.Client, sdc *SDConfig, datacenter,
}
func (cw *consulWatcher) mustStop() {
- close(cw.stopCh)
cw.client.Stop()
<-cw.stoppedCh
}
@@ -100,10 +100,12 @@ func (cw *consulWatcher) updateServices(serviceNames []string) {
// The watcher for serviceName already exists.
continue
}
+ ctx, cancel := context.WithCancel(cw.client.Context())
sw := &serviceWatcher{
- serviceName: serviceName,
- stopCh: make(chan struct{}),
- stoppedCh: make(chan struct{}),
+ serviceName: serviceName,
+ stoppedCh: make(chan struct{}),
+ requestCtx: ctx,
+ requestCancel: cancel,
}
cw.services[serviceName] = sw
serviceWatchersCreated.Inc()
@@ -126,7 +128,7 @@ func (cw *consulWatcher) updateServices(serviceNames []string) {
if _, ok := newServiceNamesMap[serviceName]; ok {
continue
}
- close(sw.stopCh)
+ sw.requestCancel()
delete(cw.services, serviceName)
swsStopped = append(swsStopped, sw)
}
@@ -173,24 +175,26 @@ func (cw *consulWatcher) watchForServicesUpdates(initCh chan struct{}) {
checkInterval := getCheckInterval()
ticker := time.NewTicker(checkInterval / 2)
defer ticker.Stop()
+ stopCh := cw.client.Context().Done()
for {
select {
case <-ticker.C:
f()
- case <-cw.stopCh:
+ case <-stopCh:
logger.Infof("stopping Consul service watchers for %q", apiServer)
startTime := time.Now()
var swsStopped []*serviceWatcher
cw.servicesLock.Lock()
for _, sw := range cw.services {
- close(sw.stopCh)
+ sw.requestCancel()
swsStopped = append(swsStopped, sw)
}
cw.servicesLock.Unlock()
for _, sw := range swsStopped {
<-sw.stoppedCh
+ serviceWatchersStopped.Inc()
}
logger.Infof("stopped Consul service watcher for %q in %.3f seconds", apiServer, time.Since(startTime).Seconds())
return
@@ -209,7 +213,7 @@ var (
// It returns an empty serviceNames list if response contains the same index.
func (cw *consulWatcher) getBlockingServiceNames(index int64) ([]string, int64, error) {
path := "/v1/catalog/services" + cw.serviceNamesQueryArgs
- data, newIndex, err := getBlockingAPIResponse(cw.client, path, index)
+ data, newIndex, err := getBlockingAPIResponse(cw.client.Context(), cw.client, path, index)
if err != nil {
return nil, index, err
}
@@ -242,7 +246,7 @@ func (sw *serviceWatcher) watchForServiceNodesUpdates(cw *consulWatcher, initWG
index := int64(0)
path := "/v1/health/service/" + sw.serviceName + cw.serviceNodesQueryArgs
f := func() {
- data, newIndex, err := getBlockingAPIResponse(cw.client, path, index)
+ data, newIndex, err := getBlockingAPIResponse(sw.requestCtx, cw.client, path, index)
if err != nil {
if !errors.Is(err, context.Canceled) {
logger.Errorf("cannot obtain Consul serviceNodes for serviceName=%q from %q: %s", sw.serviceName, apiServer, err)
@@ -273,11 +277,12 @@ func (sw *serviceWatcher) watchForServiceNodesUpdates(cw *consulWatcher, initWG
checkInterval := getCheckInterval()
ticker := time.NewTicker(checkInterval / 2)
defer ticker.Stop()
+ stopCh := sw.requestCtx.Done()
for {
select {
case <-ticker.C:
f()
- case <-sw.stopCh:
+ case <-stopCh:
return
}
}
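The watcher rewrite above replaces dedicated stop channels with per-service contexts derived from the discovery client's context, so stopping a watcher cancels its in-flight blocking query instead of waiting for it to time out. A minimal, self-contained sketch of that cancellation pattern; slowBlockingCall is an illustrative stand-in for getBlockingAPIResponse:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// slowBlockingCall stands in for a long-polling request such as
// getBlockingAPIResponse: it returns as soon as ctx is cancelled.
func slowBlockingCall(ctx context.Context) error {
	select {
	case <-time.After(10 * time.Second): // simulated blocking query
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	parent := context.Background() // in the patch: cw.client.Context()
	ctx, cancel := context.WithCancel(parent)

	done := make(chan error, 1)
	go func() { done <- slowBlockingCall(ctx) }()

	// Dropping a service watcher calls its requestCancel(), which
	// immediately unblocks the in-flight request.
	cancel()

	if err := <-done; errors.Is(err, context.Canceled) {
		fmt.Println("in-flight request cancelled")
	}
}
```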
diff --git a/lib/promscrape/discovery/nomad/api.go b/lib/promscrape/discovery/nomad/api.go
index d6cccb7ed1..6c5efcc731 100644
--- a/lib/promscrape/discovery/nomad/api.go
+++ b/lib/promscrape/discovery/nomad/api.go
@@ -1,6 +1,7 @@
package nomad
import (
+ "context"
"flag"
"fmt"
"net/http"
@@ -116,7 +117,7 @@ func maxWaitTime() time.Duration {
// getBlockingAPIResponse performs a blocking request to Nomad via client and returns the response.
// See https://developer.hashicorp.com/nomad/api-docs#blocking-queries .
-func getBlockingAPIResponse(client *discoveryutils.Client, path string, index int64) ([]byte, int64, error) {
+func getBlockingAPIResponse(ctx context.Context, client *discoveryutils.Client, path string, index int64) ([]byte, int64, error) {
path += "&index=" + strconv.FormatInt(index, 10)
path += "&wait=" + fmt.Sprintf("%ds", int(maxWaitTime().Seconds()))
getMeta := func(resp *http.Response) {
@@ -142,7 +143,7 @@ func getBlockingAPIResponse(client *discoveryutils.Client, path string, index in
}
index = newIndex
}
- data, err := client.GetBlockingAPIResponse(path, getMeta)
+ data, err := client.GetBlockingAPIResponseCtx(ctx, path, getMeta)
if err != nil {
return nil, index, fmt.Errorf("cannot perform blocking Nomad API request at %q: %w", path, err)
}
diff --git a/lib/promscrape/discovery/nomad/watch.go b/lib/promscrape/discovery/nomad/watch.go
index f3510e206b..2c8d42a9ae 100644
--- a/lib/promscrape/discovery/nomad/watch.go
+++ b/lib/promscrape/discovery/nomad/watch.go
@@ -30,15 +30,17 @@ type nomadWatcher struct {
servicesLock sync.Mutex
services map[string]*serviceWatcher
- stopCh chan struct{}
stoppedCh chan struct{}
}
type serviceWatcher struct {
serviceName string
services []Service
- stopCh chan struct{}
- stoppedCh chan struct{}
+
+ stoppedCh chan struct{}
+
+ requestCtx context.Context
+ requestCancel context.CancelFunc
}
// newNomadWatcher creates new watcher and starts background service discovery for Nomad.
@@ -62,7 +64,6 @@ func newNomadWatcher(client *discoveryutils.Client, sdc *SDConfig, namespace, re
client: client,
serviceNamesQueryArgs: queryArgs,
services: make(map[string]*serviceWatcher),
- stopCh: make(chan struct{}),
stoppedCh: make(chan struct{}),
}
initCh := make(chan struct{})
@@ -76,7 +77,6 @@ func newNomadWatcher(client *discoveryutils.Client, sdc *SDConfig, namespace, re
}
func (cw *nomadWatcher) mustStop() {
- close(cw.stopCh)
cw.client.Stop()
<-cw.stoppedCh
}
@@ -91,10 +91,12 @@ func (cw *nomadWatcher) updateServices(serviceNames []string) {
// The watcher for serviceName already exists.
continue
}
+ ctx, cancel := context.WithCancel(cw.client.Context())
sw := &serviceWatcher{
- serviceName: serviceName,
- stopCh: make(chan struct{}),
- stoppedCh: make(chan struct{}),
+ serviceName: serviceName,
+ stoppedCh: make(chan struct{}),
+ requestCtx: ctx,
+ requestCancel: cancel,
}
cw.services[serviceName] = sw
serviceWatchersCreated.Inc()
@@ -117,7 +119,7 @@ func (cw *nomadWatcher) updateServices(serviceNames []string) {
if _, ok := newServiceNamesMap[serviceName]; ok {
continue
}
- close(sw.stopCh)
+ sw.requestCancel()
delete(cw.services, serviceName)
swsStopped = append(swsStopped, sw)
}
@@ -164,24 +166,26 @@ func (cw *nomadWatcher) watchForServicesUpdates(initCh chan struct{}) {
checkInterval := getCheckInterval()
ticker := time.NewTicker(checkInterval / 2)
defer ticker.Stop()
+ stopCh := cw.client.Context().Done()
for {
select {
case <-ticker.C:
f()
- case <-cw.stopCh:
+ case <-stopCh:
logger.Infof("stopping Nomad service watchers for %q", apiServer)
startTime := time.Now()
var swsStopped []*serviceWatcher
cw.servicesLock.Lock()
for _, sw := range cw.services {
- close(sw.stopCh)
+ sw.requestCancel()
swsStopped = append(swsStopped, sw)
}
cw.servicesLock.Unlock()
for _, sw := range swsStopped {
<-sw.stoppedCh
+ serviceWatchersStopped.Inc()
}
logger.Infof("stopped Nomad service watcher for %q in %.3f seconds", apiServer, time.Since(startTime).Seconds())
return
@@ -200,7 +204,7 @@ var (
// It returns an empty serviceNames list if response contains the same index.
func (cw *nomadWatcher) getBlockingServiceNames(index int64) ([]string, int64, error) {
path := "/v1/services" + cw.serviceNamesQueryArgs
- data, newIndex, err := getBlockingAPIResponse(cw.client, path, index)
+ data, newIndex, err := getBlockingAPIResponse(cw.client.Context(), cw.client, path, index)
if err != nil {
return nil, index, err
}
@@ -244,7 +248,7 @@ func (sw *serviceWatcher) watchForServiceAddressUpdates(nw *nomadWatcher, initWG
// TODO: Maybe use a different query arg.
path := "/v1/service/" + sw.serviceName + nw.serviceNamesQueryArgs
f := func() {
- data, newIndex, err := getBlockingAPIResponse(nw.client, path, index)
+ data, newIndex, err := getBlockingAPIResponse(sw.requestCtx, nw.client, path, index)
if err != nil {
if !errors.Is(err, context.Canceled) {
logger.Errorf("cannot obtain Nomad services for serviceName=%q from %q: %s", sw.serviceName, apiServer, err)
@@ -275,11 +279,12 @@ func (sw *serviceWatcher) watchForServiceAddressUpdates(nw *nomadWatcher, initWG
checkInterval := getCheckInterval()
ticker := time.NewTicker(checkInterval / 2)
defer ticker.Stop()
+ stopCh := sw.requestCtx.Done()
for {
select {
case <-ticker.C:
f()
- case <-sw.stopCh:
+ case <-stopCh:
return
}
}
diff --git a/lib/promscrape/discoveryutils/client.go b/lib/promscrape/discoveryutils/client.go
index 301b73b0bd..319c1f2f90 100644
--- a/lib/promscrape/discoveryutils/client.go
+++ b/lib/promscrape/discoveryutils/client.go
@@ -159,6 +159,11 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL *proxy.URL, proxy
return c, nil
}
+// Context returns context for the client requests.
+func (c *Client) Context() context.Context {
+ return c.clientCtx
+}
+
// GetAPIResponseWithReqParams returns response for given absolute path with optional callback for request.
func (c *Client) GetAPIResponseWithReqParams(path string, modifyRequest func(request *http.Request)) ([]byte, error) {
return c.getAPIResponse(path, modifyRequest)
@@ -185,16 +190,21 @@ func (c *Client) getAPIResponse(path string, modifyRequest func(request *http.Re
defer func() {
<-concurrencyLimitCh
}()
- return c.getAPIResponseWithParamsAndClient(c.client, path, modifyRequest, nil)
+ return c.getAPIResponseWithParamsAndClientCtx(c.clientCtx, c.client, path, modifyRequest, nil)
}
// GetBlockingAPIResponse returns response for given absolute path with blocking client and optional callback for api response,
func (c *Client) GetBlockingAPIResponse(path string, inspectResponse func(resp *http.Response)) ([]byte, error) {
- return c.getAPIResponseWithParamsAndClient(c.blockingClient, path, nil, inspectResponse)
+ return c.getAPIResponseWithParamsAndClientCtx(c.clientCtx, c.blockingClient, path, nil, inspectResponse)
+}
+
+// GetBlockingAPIResponseCtx returns the response for the given absolute path using the blocking client and an optional callback for the API response.
+func (c *Client) GetBlockingAPIResponseCtx(ctx context.Context, path string, inspectResponse func(resp *http.Response)) ([]byte, error) {
+ return c.getAPIResponseWithParamsAndClientCtx(ctx, c.blockingClient, path, nil, inspectResponse)
}
// getAPIResponseWithParamsAndClient returns response for the given absolute path with optional callback for request and for response.
-func (c *Client) getAPIResponseWithParamsAndClient(client *HTTPClient, path string, modifyRequest func(req *http.Request), inspectResponse func(resp *http.Response)) ([]byte, error) {
+func (c *Client) getAPIResponseWithParamsAndClientCtx(ctx context.Context, client *HTTPClient, path string, modifyRequest func(req *http.Request), inspectResponse func(resp *http.Response)) ([]byte, error) {
requestURL := c.apiServer + path
u, err := url.Parse(requestURL)
if err != nil {
@@ -202,7 +212,7 @@ func (c *Client) getAPIResponseWithParamsAndClient(client *HTTPClient, path stri
}
deadline := time.Now().Add(client.ReadTimeout)
- ctx, cancel := context.WithDeadline(c.clientCtx, deadline)
+ ctx, cancel := context.WithDeadline(ctx, deadline)
defer cancel()
req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
if err != nil {
diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go
index 270a4250bc..2370869db6 100644
--- a/lib/promscrape/scrapework.go
+++ b/lib/promscrape/scrapework.go
@@ -37,7 +37,6 @@ var (
"See also -promscrape.suppressScrapeErrorsDelay")
suppressScrapeErrorsDelay = flag.Duration("promscrape.suppressScrapeErrorsDelay", 0, "The delay for suppressing repeated scrape errors logging per each scrape targets. "+
"This may be used for reducing the number of log lines related to scrape errors. See also -promscrape.suppressScrapeErrors")
- seriesLimitPerTarget = flag.Int("promscrape.seriesLimitPerTarget", 0, "Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter for more info")
minResponseSizeForStreamParse = flagutil.NewBytes("promscrape.minResponseSizeForStreamParse", 1e6, "The minimum target response size for automatic switching to stream parsing mode, which can reduce memory usage. See https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode")
)
@@ -451,7 +450,7 @@ func (sw *scrapeWork) processScrapedData(scrapeTimestamp, realTimestamp int64, b
wc := writeRequestCtxPool.Get(sw.prevLabelsLen)
lastScrape := sw.loadLastScrape()
bodyString := bytesutil.ToUnsafeString(body.B)
- areIdenticalSeries := sw.Config.NoStaleMarkers || parser.AreIdenticalSeriesFast(lastScrape, bodyString)
+ areIdenticalSeries := sw.areIdenticalSeries(lastScrape, bodyString)
if err != nil {
up = 0
scrapesFailed.Inc()
@@ -485,9 +484,6 @@ func (sw *scrapeWork) processScrapedData(scrapeTimestamp, realTimestamp int64, b
samplesDropped := 0
if sw.seriesLimitExceeded || !areIdenticalSeries {
samplesDropped = sw.applySeriesLimit(wc)
- if samplesDropped > 0 {
- sw.seriesLimitExceeded = true
- }
}
am := &autoMetrics{
up: up,
@@ -577,7 +573,7 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
err = sbr.Init(sr)
if err == nil {
bodyString = bytesutil.ToUnsafeString(sbr.body)
- areIdenticalSeries = sw.Config.NoStaleMarkers || parser.AreIdenticalSeriesFast(lastScrape, bodyString)
+ areIdenticalSeries = sw.areIdenticalSeries(lastScrape, bodyString)
err = parser.ParseStream(&sbr, scrapeTimestamp, false, func(rows []parser.Row) error {
mu.Lock()
defer mu.Unlock()
@@ -594,9 +590,6 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
}
if sw.seriesLimitExceeded || !areIdenticalSeries {
samplesDropped += sw.applySeriesLimit(wc)
- if samplesDropped > 0 && !sw.seriesLimitExceeded {
- sw.seriesLimitExceeded = true
- }
}
// Push the collected rows to sw before returning from the callback, since they cannot be held
// after returning from the callback - this will result in data race.
@@ -655,6 +648,15 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
return err
}
+func (sw *scrapeWork) areIdenticalSeries(prevData, currData string) bool {
+ if sw.Config.NoStaleMarkers && sw.Config.SeriesLimit <= 0 {
+ // Do not spend CPU time on tracking the changes in series if stale markers are disabled.
+ // The check for series_limit is needed for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3660
+ return true
+ }
+ return parser.AreIdenticalSeriesFast(prevData, currData)
+}
+
// leveledWriteRequestCtxPool allows reducing memory usage when writeRequesCtx
// structs contain mixed number of labels.
//
@@ -738,17 +740,13 @@ func (sw *scrapeWork) getSeriesAdded(lastScrape, currScrape string) int {
}
func (sw *scrapeWork) applySeriesLimit(wc *writeRequestCtx) int {
- seriesLimit := *seriesLimitPerTarget
- if sw.Config.SeriesLimit > 0 {
- seriesLimit = sw.Config.SeriesLimit
- }
- if sw.seriesLimiter == nil && seriesLimit > 0 {
- sw.seriesLimiter = bloomfilter.NewLimiter(seriesLimit, 24*time.Hour)
- }
- sl := sw.seriesLimiter
- if sl == nil {
+ if sw.Config.SeriesLimit <= 0 {
return 0
}
+ if sw.seriesLimiter == nil {
+ sw.seriesLimiter = bloomfilter.NewLimiter(sw.Config.SeriesLimit, 24*time.Hour)
+ }
+ sl := sw.seriesLimiter
dstSeries := wc.writeRequest.Timeseries[:0]
samplesDropped := 0
for _, ts := range wc.writeRequest.Timeseries {
@@ -761,6 +759,9 @@ func (sw *scrapeWork) applySeriesLimit(wc *writeRequestCtx) int {
}
prompbmarshal.ResetTimeSeries(wc.writeRequest.Timeseries[len(dstSeries):])
wc.writeRequest.Timeseries = dstSeries
+ if samplesDropped > 0 && !sw.seriesLimitExceeded {
+ sw.seriesLimitExceeded = true
+ }
return samplesDropped
}
@@ -774,7 +775,7 @@ func (sw *scrapeWork) sendStaleSeries(lastScrape, currScrape string, timestamp i
}
wc := &writeRequestCtx{}
if bodyString != "" {
- wc.rows.Unmarshal(bodyString)
+ wc.rows.UnmarshalWithErrLogger(bodyString, sw.logError)
srcRows := wc.rows.Rows
for i := range srcRows {
sw.addRowToTimeseries(wc, &srcRows[i], timestamp, true)
@@ -784,6 +785,13 @@ func (sw *scrapeWork) sendStaleSeries(lastScrape, currScrape string, timestamp i
am := &autoMetrics{}
sw.addAutoMetrics(am, wc, timestamp)
}
+
+ // Apply series limit to stale markers in order to prevent sending stale markers for newly created series.
+ // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3660
+ if sw.seriesLimitExceeded {
+ sw.applySeriesLimit(wc)
+ }
+
series := wc.writeRequest.Timeseries
if len(series) == 0 {
return
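
Note on the scrapework hunks above: applySeriesLimit now returns early when sw.Config.SeriesLimit <= 0, creates the limiter lazily from that config value only, and sets sw.seriesLimitExceeded itself; sendStaleSeries then runs the collected rows through the same limiter, so stale markers are never emitted for series that the limiter already dropped (issue 3660). Below is a minimal, self-contained sketch of that gating idea; it uses a hypothetical map-based limiter purely for illustration, not the lib/bloomfilter package used in the real code.

package main

import "fmt"

// seriesLimiter is a hypothetical stand-in for the real bloom-filter limiter:
// it admits at most maxSeries distinct series keys.
type seriesLimiter struct {
	maxSeries int
	seen      map[string]struct{}
}

func (sl *seriesLimiter) add(key string) bool {
	if _, ok := sl.seen[key]; ok {
		return true // already tracked series are always accepted
	}
	if len(sl.seen) >= sl.maxSeries {
		return false // over the limit - drop the series
	}
	sl.seen[key] = struct{}{}
	return true
}

func main() {
	sl := &seriesLimiter{maxSeries: 2, seen: map[string]struct{}{}}
	scraped := []string{"a", "b", "c"} // "c" exceeds the limit and is dropped

	var kept []string
	for _, s := range scraped {
		if sl.add(s) {
			kept = append(kept, s)
		}
	}
	// Stale markers pass through the same limiter, so "c" never gets one either.
	var staleFor []string
	for _, s := range scraped {
		if sl.add(s) {
			staleFor = append(staleFor, s)
		}
	}
	fmt.Println(kept, staleFor) // [a b] [a b]
}
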
diff --git a/lib/storage/partition.go b/lib/storage/partition.go
index 9db3e70a1f..80e65edeac 100644
--- a/lib/storage/partition.go
+++ b/lib/storage/partition.go
@@ -20,7 +20,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
- "github.com/VictoriaMetrics/VictoriaMetrics/lib/storagepacelimiter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/syncwg"
)
@@ -610,14 +609,8 @@ func (pt *partition) assistedMergeForInmemoryParts() {
return
}
- // There are too many unmerged inmemory parts.
- // This usually means that the app cannot keep up with the data ingestion rate.
- // Assist with mering inmemory parts.
- // Prioritize assisted merges over searches.
- storagepacelimiter.Search.Inc()
atomic.AddUint64(&pt.inmemoryAssistedMerges, 1)
err := pt.mergeInmemoryParts()
- storagepacelimiter.Search.Dec()
if err == nil {
continue
}
@@ -637,14 +630,8 @@ func (pt *partition) assistedMergeForSmallParts() {
return
}
- // There are too many unmerged small parts.
- // This usually means that the app cannot keep up with the data ingestion rate.
- // Assist with mering small parts.
- // Prioritize assisted merges over searches.
- storagepacelimiter.Search.Inc()
atomic.AddUint64(&pt.smallAssistedMerges, 1)
err := pt.mergeExistingParts(false)
- storagepacelimiter.Search.Dec()
if err == nil {
continue
}
diff --git a/lib/storage/search.go b/lib/storage/search.go
index 06252596d8..29a4b5b90e 100644
--- a/lib/storage/search.go
+++ b/lib/storage/search.go
@@ -10,7 +10,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
- "github.com/VictoriaMetrics/VictoriaMetrics/lib/storagepacelimiter"
)
// BlockRef references a Block.
@@ -448,7 +447,6 @@ func checkSearchDeadlineAndPace(deadline uint64) error {
if fasttime.UnixTimestamp() > deadline {
return ErrDeadlineExceeded
}
- storagepacelimiter.Search.WaitIfNeeded()
return nil
}
diff --git a/lib/storage/storage.go b/lib/storage/storage.go
index f3514080da..1a14f470dc 100644
--- a/lib/storage/storage.go
+++ b/lib/storage/storage.go
@@ -26,7 +26,6 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/snapshot"
- "github.com/VictoriaMetrics/VictoriaMetrics/lib/storagepacelimiter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache"
"github.com/VictoriaMetrics/fastcache"
@@ -446,8 +445,6 @@ type Metrics struct {
TooSmallTimestampRows uint64
TooBigTimestampRows uint64
- SearchDelays uint64
-
SlowRowInserts uint64
SlowPerDayIndexInserts uint64
SlowMetricNameLoads uint64
@@ -517,8 +514,6 @@ func (s *Storage) UpdateMetrics(m *Metrics) {
m.TooSmallTimestampRows += atomic.LoadUint64(&s.tooSmallTimestampRows)
m.TooBigTimestampRows += atomic.LoadUint64(&s.tooBigTimestampRows)
- m.SearchDelays = storagepacelimiter.Search.DelaysTotal()
-
m.SlowRowInserts += atomic.LoadUint64(&s.slowRowInserts)
m.SlowPerDayIndexInserts += atomic.LoadUint64(&s.slowPerDayIndexInserts)
m.SlowMetricNameLoads += atomic.LoadUint64(&s.slowMetricNameLoads)
diff --git a/lib/storagepacelimiter/storagepacelimiter.go b/lib/storagepacelimiter/storagepacelimiter.go
deleted file mode 100644
index e309e6c52e..0000000000
--- a/lib/storagepacelimiter/storagepacelimiter.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package storagepacelimiter
-
-import (
- "github.com/VictoriaMetrics/VictoriaMetrics/lib/pacelimiter"
-)
-
-// Search limits the pace of search calls when there is at least a single in-flight assisted merge.
-//
-// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/291
-var Search = pacelimiter.New()
diff --git a/vendor/github.com/VictoriaMetrics/metrics/process_metrics_linux.go b/vendor/github.com/VictoriaMetrics/metrics/process_metrics_linux.go
index 005af82f32..48def1cba7 100644
--- a/vendor/github.com/VictoriaMetrics/metrics/process_metrics_linux.go
+++ b/vendor/github.com/VictoriaMetrics/metrics/process_metrics_linux.go
@@ -9,6 +9,7 @@ import (
"os"
"strconv"
"strings"
+ "sync/atomic"
"time"
)
@@ -48,6 +49,7 @@ func writeProcessMetrics(w io.Writer) {
log.Printf("ERROR: metrics: cannot open %s: %s", statFilepath, err)
return
}
+
// Search for the end of command.
n := bytes.LastIndex(data, []byte(") "))
if n < 0 {
@@ -85,12 +87,20 @@ func writeProcessMetrics(w io.Writer) {
writeIOMetrics(w)
}
+var procSelfIOErrLogged uint32
+
func writeIOMetrics(w io.Writer) {
ioFilepath := "/proc/self/io"
data, err := ioutil.ReadFile(ioFilepath)
if err != nil {
- log.Printf("ERROR: metrics: cannot open %q: %s", ioFilepath, err)
+ // Do not spam the logs with errors - this error cannot be fixed without process restart.
+ // See https://github.com/VictoriaMetrics/metrics/issues/42
+ if atomic.CompareAndSwapUint32(&procSelfIOErrLogged, 0, 1) {
+ log.Printf("ERROR: metrics: cannot read process_io_* metrics from %q, so these metrics won't be updated until the error is fixed; "+
+ "see https://github.com/VictoriaMetrics/metrics/issues/42 ; The error: %s", ioFilepath, err)
+ }
}
+
getInt := func(s string) int64 {
n := strings.IndexByte(s, ' ')
if n < 0 {
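
Note on the metrics hunk above: the /proc/self/io read failure is now logged only once per process lifetime, guarded by an atomic compare-and-swap. A minimal sketch of that log-once pattern (names are illustrative; sync.Once would work equally well):

package main

import (
	"log"
	"sync/atomic"
)

var errLogged uint32

// logOnce logs msg the first time it is called and is a no-op afterwards,
// mirroring the CompareAndSwapUint32 guard used in the hunk above.
func logOnce(msg string) {
	if atomic.CompareAndSwapUint32(&errLogged, 0, 1) {
		log.Print(msg)
	}
}

func main() {
	for i := 0; i < 3; i++ {
		logOnce("cannot read /proc/self/io; io metrics won't be updated") // printed only once
	}
}
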
diff --git a/vendor/github.com/VictoriaMetrics/metricsql/lexer.go b/vendor/github.com/VictoriaMetrics/metricsql/lexer.go
index c3aebd80d3..f0dec0a821 100644
--- a/vendor/github.com/VictoriaMetrics/metricsql/lexer.go
+++ b/vendor/github.com/VictoriaMetrics/metricsql/lexer.go
@@ -289,6 +289,9 @@ func scanPositiveNumber(s string) (string, error) {
}
func scanNumMultiplier(s string) int {
+ if len(s) > 3 {
+ s = s[:3]
+ }
s = strings.ToLower(s)
switch true {
case strings.HasPrefix(s, "kib"):
@@ -616,7 +619,6 @@ func scanSingleDuration(s string, canBeNegative bool) int {
if len(s) == 0 {
return -1
}
- s = strings.ToLower(s)
i := 0
if s[0] == '-' && canBeNegative {
i++
@@ -637,14 +639,26 @@ func scanSingleDuration(s string, canBeNegative bool) int {
return -1
}
}
- switch s[i] {
+ switch unicode.ToLower(rune(s[i])) {
case 'm':
- if i+1 < len(s) && s[i+1] == 's' {
- // duration in ms
- return i + 2
+ if i+1 < len(s) {
+ switch unicode.ToLower(rune(s[i+1])) {
+ case 's':
+ // duration in ms
+ return i + 2
+ case 'i', 'b':
+ // This is not a duration, but Mi or MB suffix.
+ // See parsePositiveNumber() and https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3664
+ return -1
+ }
}
- // duration in minutes
- return i + 1
+ // Allow small m for duration in minutes.
+ // Big M means 1e6.
+ // See parsePositiveNumber() and https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3664
+ if s[i] == 'm' {
+ return i + 1
+ }
+ return -1
case 's', 'h', 'd', 'w', 'y', 'i':
return i + 1
default:
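
Note on the lexer hunks above: duration scanning becomes case-aware so that 1Mi and 1MB keep their number-multiplier meaning while a lowercase m still parses as minutes (issue 3664). The following is a simplified, hypothetical sketch of the suffix disambiguation, not the library code itself:

package main

import (
	"fmt"
	"unicode"
)

// classifySuffix illustrates the rule from the hunk above: lowercase m means
// minutes, ms means milliseconds, while Mi/MB are number multipliers and
// a bare uppercase M means the 1e6 multiplier.
func classifySuffix(s string) string {
	if s == "" || unicode.ToLower(rune(s[0])) != 'm' {
		return "not an m-suffix"
	}
	if len(s) > 1 {
		switch unicode.ToLower(rune(s[1])) {
		case 's':
			return "duration: milliseconds"
		case 'i', 'b':
			return "number multiplier (Mi / MB)"
		}
	}
	if s[0] == 'm' {
		return "duration: minutes"
	}
	return "number multiplier (M = 1e6)"
}

func main() {
	for _, s := range []string{"m", "ms", "Mi", "MB", "M"} {
		fmt.Printf("%-2s -> %s\n", s, classifySuffix(s))
	}
}
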
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 52f5906d0c..7635f10de8 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -4143,6 +4143,43 @@ var awsPartition = partition{
},
},
},
+ "cleanrooms": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"cloud9": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -22763,6 +22800,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
endpointKey{
Region: "me-south-1",
}: endpoint{},
@@ -27093,6 +27133,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-me-central-1",
+ }: endpoint{
+ Hostname: "waf-regional-fips.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-me-south-1",
}: endpoint{
@@ -27147,6 +27196,23 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "waf-regional.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "waf-regional-fips.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{
@@ -27669,6 +27735,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "fips-me-central-1",
+ }: endpoint{
+ Hostname: "wafv2-fips.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ Deprecated: boxedTrue,
+ },
endpointKey{
Region: "fips-me-south-1",
}: endpoint{
@@ -27723,6 +27798,23 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{
+ Hostname: "wafv2.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
+ endpointKey{
+ Region: "me-central-1",
+ Variant: fipsVariant,
+ }: endpoint{
+ Hostname: "wafv2-fips.me-central-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "me-central-1",
+ },
+ },
endpointKey{
Region: "me-south-1",
}: endpoint{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index 942d9d1572..5659396dda 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.44.177"
+const SDKVersion = "1.44.180"
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
index 35a5d8f06d..c5e4a6ac08 100644
--- a/vendor/golang.org/x/exp/slices/sort.go
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -62,15 +62,22 @@ func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool {
// sort order; it also returns a bool saying whether the target is really found
// in the slice. The slice must be sorted in increasing order.
func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
- // search returns the leftmost position where f returns true, or len(x) if f
- // returns false for all x. This is the insertion position for target in x,
- // and could point to an element that's either == target or not.
- pos := search(len(x), func(i int) bool { return x[i] >= target })
- if pos >= len(x) || x[pos] != target {
- return pos, false
- } else {
- return pos, true
+ // Inlining is faster than calling BinarySearchFunc with a lambda.
+ n := len(x)
+ // Define x[-1] < target and x[n] >= target.
+ // Invariant: x[i-1] < target, x[j] >= target.
+ i, j := 0, n
+ for i < j {
+ h := int(uint(i+j) >> 1) // avoid overflow when computing h
+ // i ≤ h < j
+ if x[h] < target {
+ i = h + 1 // preserves x[i-1] < target
+ } else {
+ j = h // preserves x[j] >= target
+ }
}
+ // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
+ return i, i < n && x[i] == target
}
// BinarySearchFunc works like BinarySearch, but uses a custom comparison
@@ -79,29 +86,21 @@ func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) {
// parameters: 0 if a == b, a negative number if a < b and a positive number if
// a > b.
func BinarySearchFunc[E any](x []E, target E, cmp func(E, E) int) (int, bool) {
- pos := search(len(x), func(i int) bool { return cmp(x[i], target) >= 0 })
- if pos >= len(x) || cmp(x[pos], target) != 0 {
- return pos, false
- } else {
- return pos, true
- }
-}
-
-func search(n int, f func(int) bool) int {
- // Define f(-1) == false and f(n) == true.
- // Invariant: f(i-1) == false, f(j) == true.
+ n := len(x)
+ // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
+ // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
i, j := 0, n
for i < j {
h := int(uint(i+j) >> 1) // avoid overflow when computing h
// i ≤ h < j
- if !f(h) {
- i = h + 1 // preserves f(i-1) == false
+ if cmp(x[h], target) < 0 {
+ i = h + 1 // preserves cmp(x[i - 1], target) < 0
} else {
- j = h // preserves f(j) == true
+ j = h // preserves cmp(x[j], target) >= 0
}
}
- // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i.
- return i
+ // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
+ return i, i < n && cmp(x[i], target) == 0
}
type sortedHint int // hint for pdqsort when choosing the pivot
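
Note on the x/exp/slices hunks above: the update inlines the search loop into BinarySearch and folds the former search helper into BinarySearchFunc; the exported behaviour is unchanged. A small usage example against the vendored signatures shown above:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	xs := []int{1, 3, 5, 7}

	// BinarySearch returns the insertion position and whether the target was found.
	i, ok := slices.BinarySearch(xs, 5)
	fmt.Println(i, ok) // 2 true

	i, ok = slices.BinarySearch(xs, 4)
	fmt.Println(i, ok) // 2 false (4 would be inserted at index 2)

	// BinarySearchFunc uses a three-way comparison function.
	type pair struct{ k, v int }
	ps := []pair{{1, 10}, {3, 30}, {5, 50}}
	j, found := slices.BinarySearchFunc(ps, pair{k: 3}, func(a, b pair) int { return a.k - b.k })
	fmt.Println(j, found) // 1 true
}
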
diff --git a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go
index 63e5ef64c0..29302a1e99 100644
--- a/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go
+++ b/vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go
@@ -1,4 +1,4 @@
-// Copyright 2022 Google LLC.
+// Copyright 2023 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/vendor/google.golang.org/api/internal/gensupport/resumable.go b/vendor/google.golang.org/api/internal/gensupport/resumable.go
index 0c659188dd..ee8e9f3247 100644
--- a/vendor/google.golang.org/api/internal/gensupport/resumable.go
+++ b/vendor/google.golang.org/api/internal/gensupport/resumable.go
@@ -193,22 +193,28 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err
// Each chunk gets its own initialized-at-zero backoff and invocation ID.
bo := rx.Retry.backoff()
- quitAfter := time.After(retryDeadline)
+ quitAfterTimer := time.NewTimer(retryDeadline)
rx.attempts = 1
rx.invocationID = uuid.New().String()
// Retry loop for a single chunk.
for {
+ pauseTimer := time.NewTimer(pause)
select {
case <-ctx.Done():
+ quitAfterTimer.Stop()
+ pauseTimer.Stop()
if err == nil {
err = ctx.Err()
}
return prepareReturn(resp, err)
- case <-time.After(pause):
- case <-quitAfter:
+ case <-pauseTimer.C:
+ quitAfterTimer.Stop()
+ case <-quitAfterTimer.C:
+ pauseTimer.Stop()
return prepareReturn(resp, err)
}
+ pauseTimer.Stop()
// Check for context cancellation or timeout once more. If more than one
// case in the select statement above was satisfied at the same time, Go
@@ -217,13 +223,15 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err
// canceled before or the timeout was reached.
select {
case <-ctx.Done():
+ quitAfterTimer.Stop()
if err == nil {
err = ctx.Err()
}
return prepareReturn(resp, err)
- case <-quitAfter:
+ case <-quitAfterTimer.C:
return prepareReturn(resp, err)
default:
+ quitAfterTimer.Stop()
}
resp, err = rx.transferChunk(ctx)
diff --git a/vendor/google.golang.org/api/internal/gensupport/send.go b/vendor/google.golang.org/api/internal/gensupport/send.go
index dd24139b36..85c7bcbfdf 100644
--- a/vendor/google.golang.org/api/internal/gensupport/send.go
+++ b/vendor/google.golang.org/api/internal/gensupport/send.go
@@ -115,15 +115,17 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, r
var errorFunc = retry.errorFunc()
for {
+ t := time.NewTimer(pause)
select {
case <-ctx.Done():
+ t.Stop()
// If we got an error and the context has been canceled, return an error acknowledging
// both the context cancelation and the service error.
if err != nil {
return resp, wrappedCallErr{ctx.Err(), err}
}
return resp, ctx.Err()
- case <-time.After(pause):
+ case <-t.C:
}
if ctx.Err() != nil {
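
Note on the gensupport hunks above: time.After is replaced with explicit time.NewTimer calls that are stopped on the competing select paths, so the timers do not linger until they fire inside long retry loops. A minimal sketch of the pattern (names are illustrative):

package main

import (
	"context"
	"fmt"
	"time"
)

// waitOrCancel waits for pause to elapse or for ctx to be canceled.
// Stopping the explicit timer on the cancellation path releases it early,
// which time.After cannot do.
func waitOrCancel(ctx context.Context, pause time.Duration) error {
	t := time.NewTimer(pause)
	select {
	case <-ctx.Done():
		t.Stop() // release the timer instead of letting it run out
		return ctx.Err()
	case <-t.C:
		return nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	fmt.Println(waitOrCancel(ctx, time.Second)) // context deadline exceeded
}
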
diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go
index db10d4bc9f..79fea0548c 100644
--- a/vendor/google.golang.org/api/internal/version.go
+++ b/vendor/google.golang.org/api/internal/version.go
@@ -5,4 +5,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "0.106.0"
+const Version = "0.107.0"
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
index 4613ebdfa7..8527d3fb1a 100644
--- a/vendor/google.golang.org/api/storage/v1/storage-gen.go
+++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go
@@ -1,4 +1,4 @@
-// Copyright 2022 Google LLC.
+// Copyright 2023 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index ea38a62fad..9960fcab46 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -67,10 +67,10 @@ github.com/VictoriaMetrics/fastcache
github.com/VictoriaMetrics/fasthttp
github.com/VictoriaMetrics/fasthttp/fasthttputil
github.com/VictoriaMetrics/fasthttp/stackless
-# github.com/VictoriaMetrics/metrics v1.23.0
+# github.com/VictoriaMetrics/metrics v1.23.1
## explicit; go 1.15
github.com/VictoriaMetrics/metrics
-# github.com/VictoriaMetrics/metricsql v0.51.1
+# github.com/VictoriaMetrics/metricsql v0.51.2
## explicit; go 1.13
github.com/VictoriaMetrics/metricsql
github.com/VictoriaMetrics/metricsql/binaryop
@@ -80,7 +80,7 @@ github.com/VividCortex/ewma
# github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
## explicit; go 1.15
github.com/alecthomas/units
-# github.com/aws/aws-sdk-go v1.44.177
+# github.com/aws/aws-sdk-go v1.44.180
## explicit; go 1.11
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/awserr
@@ -528,7 +528,7 @@ go.uber.org/atomic
## explicit; go 1.18
go.uber.org/goleak
go.uber.org/goleak/internal/stack
-# golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a
+# golang.org/x/exp v0.0.0-20230113213754-f9f960f08ad4
## explicit; go 1.18
golang.org/x/exp/constraints
golang.org/x/exp/slices
@@ -575,7 +575,7 @@ golang.org/x/time/rate
## explicit; go 1.17
golang.org/x/xerrors
golang.org/x/xerrors/internal
-# google.golang.org/api v0.106.0
+# google.golang.org/api v0.107.0
## explicit; go 1.19
google.golang.org/api/googleapi
google.golang.org/api/googleapi/transport
@@ -608,7 +608,7 @@ google.golang.org/appengine/internal/socket
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/socket
google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f
+# google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5
## explicit; go 1.19
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
|