(seriesLimits);
+ const [timezone, setTimezone] = useState(stateTimezone);
const [open, setOpen] = useState(false);
const handleOpen = () => setOpen(true);
@@ -32,6 +37,7 @@ const GlobalSettings: FC = () => {
const handlerApply = () => {
dispatch({ type: "SET_SERVER", payload: serverUrl });
+ timeDispatch({ type: "SET_TIMEZONE", payload: timezone });
customPanelDispatch({ type: "SET_SERIES_LIMITS", payload: limits });
handleClose();
};
@@ -70,6 +76,12 @@ const GlobalSettings: FC = () => {
onEnter={handlerApply}
/>
+
+
+
= ({ limits, onChange , on
return (
-
+
Series limits by tabs
= ({ serverUrl, onChange ,
};
return (
-
+
);
};
diff --git a/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/Timezones/Timezones.tsx b/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/Timezones/Timezones.tsx
new file mode 100644
index 0000000000..65fae3aca7
--- /dev/null
+++ b/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/Timezones/Timezones.tsx
@@ -0,0 +1,143 @@
+import React, { FC, useMemo, useRef, useState } from "preact/compat";
+import { getTimezoneList, getUTCByTimezone } from "../../../../utils/time";
+import { ArrowDropDownIcon } from "../../../Main/Icons";
+import classNames from "classnames";
+import Popper from "../../../Main/Popper/Popper";
+import Accordion from "../../../Main/Accordion/Accordion";
+import dayjs from "dayjs";
+import TextField from "../../../Main/TextField/TextField";
+import { Timezone } from "../../../../types";
+import "./style.scss";
+
+interface TimezonesProps {
+ timezoneState: string
+ onChange: (val: string) => void
+}
+
+const Timezones: FC<TimezonesProps> = ({ timezoneState, onChange }) => {
+
+ const timezones = getTimezoneList();
+
+ const [openList, setOpenList] = useState(false);
+ const [search, setSearch] = useState("");
+ const targetRef = useRef(null);
+
+ const searchTimezones = useMemo(() => {
+ if (!search) return timezones;
+ try {
+ return getTimezoneList(search);
+ } catch (e) {
+ return {};
+ }
+ }, [search, timezones]);
+
+ const timezonesGroups = useMemo(() => Object.keys(searchTimezones), [searchTimezones]);
+
+ const localTimezone = useMemo(() => ({
+ region: dayjs.tz.guess(),
+ utc: getUTCByTimezone(dayjs.tz.guess())
+ }), []);
+
+ const activeTimezone = useMemo(() => ({
+ region: timezoneState,
+ utc: getUTCByTimezone(timezoneState)
+ }), [timezoneState]);
+
+ const toggleOpenList = () => {
+ setOpenList(prev => !prev);
+ };
+
+ const handleCloseList = () => {
+ setOpenList(false);
+ };
+
+ const handleChangeSearch = (val: string) => {
+ setSearch(val);
+ };
+
+ const handleSetTimezone = (val: Timezone) => {
+ onChange(val.region);
+ setSearch("");
+ handleCloseList();
+ };
+
+ const createHandlerSetTimezone = (val: Timezone) => () => {
+ handleSetTimezone(val);
+ };
+
+ return (
+
+
+ Time zone
+
+
+
{activeTimezone.region}
+
{activeTimezone.utc}
+
+
+
+
+
+
+
+
+
+
Browser Time ({localTimezone.region})
+
{localTimezone.utc}
+
+
+ {timezonesGroups.map(t => (
+
}
+ >
+
+ {searchTimezones[t] && searchTimezones[t].map(item => (
+
+
{item.region}
+
{item.utc}
+
+ ))}
+
+
+
+ ))}
+
+
+
+ );
+};
+
+export default Timezones;
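For orientation, a minimal usage sketch of the new component; the wrapper component, its props and the import path are illustrative assumptions, not part of this patch:

import React, { FC, useState } from "preact/compat";
import Timezones from "./Timezones";

// Controlled usage: the parent keeps the selected region in state and
// commits it elsewhere (e.g. via a SET_TIMEZONE dispatch on "Apply").
const TimezoneSetting: FC<{ initial: string }> = ({ initial }) => {
  const [timezone, setTimezone] = useState(initial);
  // onChange receives the selected region string, e.g. "Europe/Berlin".
  return <Timezones timezoneState={timezone} onChange={setTimezone} />;
};

export default TimezoneSetting;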
diff --git a/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/Timezones/style.scss b/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/Timezones/style.scss
new file mode 100644
index 0000000000..185d7e845a
--- /dev/null
+++ b/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/Timezones/style.scss
@@ -0,0 +1,96 @@
+@use "src/styles/variables" as *;
+
+.vm-timezones {
+
+ &-item {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ gap: $padding-small;
+ cursor: pointer;
+
+ &_selected {
+ border: $border-divider;
+ padding: $padding-small $padding-global;
+ border-radius: $border-radius-small;
+ }
+
+ &__title {
+ text-transform: capitalize;
+ }
+
+ &__utc {
+ display: inline-flex;
+ align-items: center;
+ justify-content: center;
+ background-color: rgba($color-black, 0.06);
+ padding: calc($padding-small/2);
+ border-radius: $border-radius-small;
+ }
+
+ &__icon {
+ display: inline-flex;
+ align-items: center;
+ justify-content: flex-end;
+ margin: 0 0 0 auto;
+ transition: transform 200ms ease-in;
+
+ svg {
+ width: 14px;
+ }
+
+ &_open {
+ transform: rotate(180deg);
+ }
+ }
+ }
+
+ &-list {
+ min-width: 600px;
+ max-height: 300px;
+ background-color: $color-background-block;
+ border-radius: $border-radius-medium;
+ overflow: auto;
+
+ &-header {
+ position: sticky;
+ top: 0;
+ background-color: $color-background-block;
+ z-index: 2;
+ border-bottom: $border-divider;
+
+ &__search {
+ padding: $padding-small;
+ }
+ }
+
+ &-group {
+ padding: $padding-small 0;
+ border-bottom: $border-divider;
+
+ &:last-child {
+ border-bottom: none;
+ }
+
+ &__title {
+ font-weight: bold;
+ color: $color-text-secondary;
+ padding: $padding-small $padding-global;
+ }
+
+ &-options {
+ display: grid;
+ align-items: flex-start;
+
+ &__item {
+ padding: $padding-small $padding-global;
+ transition: background-color 200ms ease;
+
+ &:hover {
+ background-color: rgba($color-black, 0.1);
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/style.scss b/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/style.scss
index 5f40cf32d2..ec0d5c391d 100644
--- a/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/style.scss
+++ b/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/style.scss
@@ -10,6 +10,15 @@
}
+ &__title {
+ display: flex;
+ align-items: center;
+ justify-content: flex-start;
+ font-size: $font-size;
+ font-weight: bold;
+ margin-bottom: $padding-global;
+ }
+
&__footer {
display: inline-grid;
grid-template-columns: repeat(2, 1fr);
diff --git a/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeDurationSelector/style.scss b/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeDurationSelector/style.scss
index 79eb3be594..e00038492d 100644
--- a/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeDurationSelector/style.scss
+++ b/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeDurationSelector/style.scss
@@ -1,7 +1,7 @@
@use "src/styles/variables" as *;
.vm-time-duration {
- max-height: 168px;
+ max-height: 200px;
overflow: auto;
font-size: $font-size;
}
diff --git a/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeSelector/TimeSelector.tsx b/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeSelector/TimeSelector.tsx
index 75d5a82cfc..d362277fc2 100644
--- a/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeSelector/TimeSelector.tsx
+++ b/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeSelector/TimeSelector.tsx
@@ -1,5 +1,5 @@
import React, { FC, useEffect, useState, useMemo, useRef } from "preact/compat";
-import { dateFromSeconds, formatDateForNativeInput } from "../../../../utils/time";
+import { dateFromSeconds, formatDateForNativeInput, getRelativeTime, getUTCByTimezone } from "../../../../utils/time";
import TimeDurationSelector from "../TimeDurationSelector/TimeDurationSelector";
import dayjs from "dayjs";
import { getAppModeEnable } from "../../../../utils/app-mode";
@@ -22,20 +22,25 @@ export const TimeSelector: FC = () => {
  const [until, setUntil] = useState<string>();
  const [from, setFrom] = useState<string>();
- const formFormat = useMemo(() => dayjs(from).format(DATE_TIME_FORMAT), [from]);
- const untilFormat = useMemo(() => dayjs(until).format(DATE_TIME_FORMAT), [until]);
+ const formFormat = useMemo(() => dayjs.tz(from).format(DATE_TIME_FORMAT), [from]);
+ const untilFormat = useMemo(() => dayjs.tz(until).format(DATE_TIME_FORMAT), [until]);
- const { period: { end, start }, relativeTime } = useTimeState();
+ const { period: { end, start }, relativeTime, timezone, duration } = useTimeState();
const dispatch = useTimeDispatch();
const appModeEnable = getAppModeEnable();
+ const activeTimezone = useMemo(() => ({
+ region: timezone,
+ utc: getUTCByTimezone(timezone)
+ }), [timezone]);
+
useEffect(() => {
setUntil(formatDateForNativeInput(dateFromSeconds(end)));
- }, [end]);
+ }, [timezone, end]);
useEffect(() => {
setFrom(formatDateForNativeInput(dateFromSeconds(start)));
- }, [start]);
+ }, [timezone, start]);
const setDuration = ({ duration, until, id }: {duration: string, until: Date, id: string}) => {
dispatch({ type: "SET_RELATIVE_TIME", payload: { duration, until, id } });
@@ -43,13 +48,13 @@ export const TimeSelector: FC = () => {
};
const formatRange = useMemo(() => {
- const startFormat = dayjs(dateFromSeconds(start)).format(DATE_TIME_FORMAT);
- const endFormat = dayjs(dateFromSeconds(end)).format(DATE_TIME_FORMAT);
+ const startFormat = dayjs.tz(dateFromSeconds(start)).format(DATE_TIME_FORMAT);
+ const endFormat = dayjs.tz(dateFromSeconds(end)).format(DATE_TIME_FORMAT);
return {
start: startFormat,
end: endFormat
};
- }, [start, end]);
+ }, [start, end, timezone]);
const dateTitle = useMemo(() => {
const isRelativeTime = relativeTime && relativeTime !== "none";
@@ -65,7 +70,10 @@ export const TimeSelector: FC = () => {
const setTimeAndClosePicker = () => {
if (from && until) {
- dispatch({ type: "SET_PERIOD", payload: { from: new Date(from), to: new Date(until) } });
+ dispatch({ type: "SET_PERIOD", payload: {
+ from: dayjs(from).toDate(),
+ to: dayjs(until).toDate()
+ } });
}
setOpenOptions(false);
};
@@ -91,6 +99,15 @@ export const TimeSelector: FC = () => {
setOpenOptions(false);
};
+ useEffect(() => {
+ const value = getRelativeTime({
+ relativeTimeId: relativeTime,
+ defaultDuration: duration,
+ defaultEndInput: dateFromSeconds(end),
+ });
+ setDuration({ id: value.relativeTimeId, duration: value.duration, until: value.endInput });
+ }, [timezone]);
+
useClickOutside(wrapperRef, (e) => {
const target = e.target as HTMLElement;
const isFromButton = fromRef?.current && fromRef.current.contains(target);
@@ -159,6 +176,10 @@ export const TimeSelector: FC = () => {
/>
+
+
{activeTimezone.region}
+
{activeTimezone.utc}
+
}
diff --git a/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeSelector/style.scss b/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeSelector/style.scss
index 74424e0133..7452131e59 100644
--- a/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeSelector/style.scss
+++ b/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeSelector/style.scss
@@ -30,6 +30,10 @@
cursor: pointer;
transition: color 200ms ease-in-out, border-bottom-color 300ms ease;
+ &:last-child {
+ margin-bottom: 0;
+ }
+
&:hover {
border-bottom-color: $color-primary;
}
@@ -52,6 +56,26 @@
}
}
+ &-timezone {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ gap: $padding-small;
+ font-size: $font-size-small;
+ margin-bottom: $padding-small;
+
+ &__title {}
+
+ &__utc {
+ display: inline-flex;
+ align-items: center;
+ justify-content: center;
+ background-color: rgba($color-black, 0.06);
+ padding: calc($padding-small/2);
+ border-radius: $border-radius-small;
+ }
+ }
+
&__controls {
display: grid;
grid-template-columns: repeat(2, 1fr);
diff --git a/app/vmui/packages/vmui/src/components/Main/Autocomplete/Autocomplete.tsx b/app/vmui/packages/vmui/src/components/Main/Autocomplete/Autocomplete.tsx
index 021e05797c..3cb15557e8 100644
--- a/app/vmui/packages/vmui/src/components/Main/Autocomplete/Autocomplete.tsx
+++ b/app/vmui/packages/vmui/src/components/Main/Autocomplete/Autocomplete.tsx
@@ -56,13 +56,14 @@ const Autocomplete: FC = ({
const handleKeyDown = (e: KeyboardEvent) => {
const { key, ctrlKey, metaKey, shiftKey } = e;
const modifiers = ctrlKey || metaKey || shiftKey;
+ const hasOptions = foundOptions.length;
- if (key === "ArrowUp" && !modifiers) {
+ if (key === "ArrowUp" && !modifiers && hasOptions) {
e.preventDefault();
setFocusOption((prev) => prev <= 0 ? 0 : prev - 1);
}
- if (key === "ArrowDown" && !modifiers) {
+ if (key === "ArrowDown" && !modifiers && hasOptions) {
e.preventDefault();
const lastIndex = foundOptions.length - 1;
setFocusOption((prev) => prev >= lastIndex ? lastIndex : prev + 1);
diff --git a/app/vmui/packages/vmui/src/components/Main/DatePicker/Calendar/Calendar.tsx b/app/vmui/packages/vmui/src/components/Main/DatePicker/Calendar/Calendar.tsx
index 2387de5e47..f178d362b4 100644
--- a/app/vmui/packages/vmui/src/components/Main/DatePicker/Calendar/Calendar.tsx
+++ b/app/vmui/packages/vmui/src/components/Main/DatePicker/Calendar/Calendar.tsx
@@ -30,8 +30,8 @@ const Calendar: FC = ({
onClose
}) => {
const [displayYears, setDisplayYears] = useState(false);
- const [viewDate, setViewDate] = useState(dayjs(date));
- const [selectDate, setSelectDate] = useState(dayjs(date));
+ const [viewDate, setViewDate] = useState(dayjs.tz(date));
+ const [selectDate, setSelectDate] = useState(dayjs.tz(date));
const [tab, setTab] = useState(tabs[0].value);
const toggleDisplayYears = () => {
@@ -62,7 +62,7 @@ const Calendar: FC = ({
};
useEffect(() => {
- if (selectDate.format() === dayjs(date).format()) return;
+ if (selectDate.format() === dayjs.tz(date).format()) return;
onChange(selectDate.format(format));
}, [selectDate]);
diff --git a/app/vmui/packages/vmui/src/components/Main/DatePicker/Calendar/CalendarBody/CalendarBody.tsx b/app/vmui/packages/vmui/src/components/Main/DatePicker/Calendar/CalendarBody/CalendarBody.tsx
index 701bc7434a..2b24d3fb95 100644
--- a/app/vmui/packages/vmui/src/components/Main/DatePicker/Calendar/CalendarBody/CalendarBody.tsx
+++ b/app/vmui/packages/vmui/src/components/Main/DatePicker/Calendar/CalendarBody/CalendarBody.tsx
@@ -11,7 +11,7 @@ interface CalendarBodyProps {
const weekday = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"];
const CalendarBody: FC<CalendarBodyProps> = ({ viewDate, selectDate, onChangeSelectDate }) => {
- const today = dayjs().startOf("day");
+ const today = dayjs().tz().startOf("day");
const days: (Dayjs|null)[] = useMemo(() => {
const result = new Array(42).fill(null);
diff --git a/app/vmui/packages/vmui/src/components/Main/DatePicker/Calendar/style.scss b/app/vmui/packages/vmui/src/components/Main/DatePicker/Calendar/style.scss
index 2be976d37f..fdc188d0c8 100644
--- a/app/vmui/packages/vmui/src/components/Main/DatePicker/Calendar/style.scss
+++ b/app/vmui/packages/vmui/src/components/Main/DatePicker/Calendar/style.scss
@@ -135,6 +135,7 @@
display: grid;
grid-template-columns: repeat(3, 1fr);
gap: $padding-small;
+ max-height: 400px;
overflow: auto;
&__year {
diff --git a/app/vmui/packages/vmui/src/components/Main/DatePicker/DatePicker.tsx b/app/vmui/packages/vmui/src/components/Main/DatePicker/DatePicker.tsx
index 69b668c79f..9a5500879b 100644
--- a/app/vmui/packages/vmui/src/components/Main/DatePicker/DatePicker.tsx
+++ b/app/vmui/packages/vmui/src/components/Main/DatePicker/DatePicker.tsx
@@ -20,7 +20,7 @@ const DatePicker = forwardRef(({
onChange,
}, ref) => {
const [openCalendar, setOpenCalendar] = useState(false);
- const dateDayjs = useMemo(() => date ? dayjs(date) : dayjs(), [date]);
+ const dateDayjs = useMemo(() => date ? dayjs.tz(date) : dayjs().tz(), [date]);
const toggleOpenCalendar = () => {
setOpenCalendar(prev => !prev);
diff --git a/app/vmui/packages/vmui/src/components/Main/ShortcutKeys/ShortcutKeys.tsx b/app/vmui/packages/vmui/src/components/Main/ShortcutKeys/ShortcutKeys.tsx
index 2ae8a9aeb4..7dfd619e7b 100644
--- a/app/vmui/packages/vmui/src/components/Main/ShortcutKeys/ShortcutKeys.tsx
+++ b/app/vmui/packages/vmui/src/components/Main/ShortcutKeys/ShortcutKeys.tsx
@@ -28,6 +28,10 @@ const keyList = [
{
keys: [ctrlMeta, "Arrow Down"],
description: "Next command from the Query history"
+ },
+ {
+ keys: [ctrlMeta, "Click by 'Eye'"],
+ description: "Toggle multiple queries"
}
]
},
@@ -36,10 +40,12 @@ const keyList = [
list: [
{
keys: [ctrlMeta, "Scroll Up"],
+ alt: ["+"],
description: "Zoom in"
},
{
keys: [ctrlMeta, "Scroll Down"],
+ alt: ["-"],
description: "Zoom out"
},
{
@@ -118,6 +124,15 @@ const ShortcutKeys: FC = () => {
{i !== l.keys.length - 1 ? "+" : ""}
>
))}
+ {l.alt && l.alt.map((alt, i) => (
+ <>
+ or
+
+ {alt}
+
+ {i !== l.alt.length - 1 ? "+" : ""}
+ >
+ ))}
{l.description}
diff --git a/app/vmui/packages/vmui/src/components/Main/ShortcutKeys/style.scss b/app/vmui/packages/vmui/src/components/Main/ShortcutKeys/style.scss
index 7550b9fc40..c6668f4d52 100644
--- a/app/vmui/packages/vmui/src/components/Main/ShortcutKeys/style.scss
+++ b/app/vmui/packages/vmui/src/components/Main/ShortcutKeys/style.scss
@@ -19,7 +19,7 @@
&-item {
display: grid;
- grid-template-columns: 180px 1fr;
+ grid-template-columns: 210px 1fr;
align-items: center;
gap: $padding-small;
diff --git a/app/vmui/packages/vmui/src/components/Views/GraphView/GraphView.tsx b/app/vmui/packages/vmui/src/components/Views/GraphView/GraphView.tsx
index 8062325d98..31f505a100 100644
--- a/app/vmui/packages/vmui/src/components/Views/GraphView/GraphView.tsx
+++ b/app/vmui/packages/vmui/src/components/Views/GraphView/GraphView.tsx
@@ -10,6 +10,7 @@ import { TimeParams } from "../../../types";
import { AxisRange, YaxisState } from "../../../state/graph/reducer";
import { getAvgFromArray, getMaxFromArray, getMinFromArray } from "../../../utils/math";
import classNames from "classnames";
+import { useTimeState } from "../../../state/time/TimeStateContext";
import "./style.scss";
export interface GraphViewProps {
@@ -54,6 +55,7 @@ const GraphView: FC = ({
alias = [],
fullWidth = true
}) => {
+ const { timezone } = useTimeState();
const currentStep = useMemo(() => customStep || period.step || 1, [period.step, customStep]);
const [dataChart, setDataChart] = useState([[]]);
@@ -121,7 +123,7 @@ const GraphView: FC = ({
setDataChart(timeDataSeries as uPlotData);
setSeries(tempSeries);
setLegend(tempLegend);
- }, [data]);
+ }, [data, timezone]);
useEffect(() => {
const tempLegend: LegendItemType[] = [];
diff --git a/app/vmui/packages/vmui/src/constants/dayjsPlugins.ts b/app/vmui/packages/vmui/src/constants/dayjsPlugins.ts
new file mode 100644
index 0000000000..536d1b63a9
--- /dev/null
+++ b/app/vmui/packages/vmui/src/constants/dayjsPlugins.ts
@@ -0,0 +1,8 @@
+import dayjs from "dayjs";
+import timezone from "dayjs/plugin/timezone";
+import duration from "dayjs/plugin/duration";
+import utc from "dayjs/plugin/utc";
+
+dayjs.extend(timezone);
+dayjs.extend(duration);
+dayjs.extend(utc);
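For context, a hedged sketch of what registering these plugins enables elsewhere in this diff; the zone name and timestamps are illustrative:

import dayjs from "dayjs";
import timezone from "dayjs/plugin/timezone";
import utc from "dayjs/plugin/utc";

dayjs.extend(utc);
dayjs.extend(timezone);

// After dayjs.tz.setDefault(zone), dayjs.tz(value) and date.tz() format in that zone.
dayjs.tz.setDefault("America/New_York");
const d = dayjs("2023-01-01T12:00:00Z").tz();
console.log(d.format("YYYY-MM-DD HH:mm")); // "2023-01-01 07:00" (UTC-05:00 in January)
console.log(`UTC${d.format("Z")}`);        // "UTC-05:00"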
diff --git a/app/vmui/packages/vmui/src/contexts/Snackbar.tsx b/app/vmui/packages/vmui/src/contexts/Snackbar.tsx
index 8f9f4e2f3b..2172cd1b87 100644
--- a/app/vmui/packages/vmui/src/contexts/Snackbar.tsx
+++ b/app/vmui/packages/vmui/src/contexts/Snackbar.tsx
@@ -36,7 +36,7 @@ export const SnackbarProvider: FC = ({ children }) => {
setSnack({
message: infoMessage.text,
variant: infoMessage.type,
- key: new Date().getTime()
+ key: Date.now()
});
setOpen(true);
const timeout = setTimeout(handleClose, 4000);
diff --git a/app/vmui/packages/vmui/src/hooks/useClickOutside.ts b/app/vmui/packages/vmui/src/hooks/useClickOutside.ts
index 25f716b2a2..5f39e15261 100644
--- a/app/vmui/packages/vmui/src/hooks/useClickOutside.ts
+++ b/app/vmui/packages/vmui/src/hooks/useClickOutside.ts
@@ -8,9 +8,8 @@ const useClickOutside = (
preventRef?: RefObject
) => {
useEffect(() => {
- const el = ref?.current;
-
const listener = (event: Event) => {
+ const el = ref?.current;
const target = event.target as HTMLElement;
const isPreventRef = preventRef?.current && preventRef.current.contains(target);
if (!el || el.contains((event?.target as Node) || null) || isPreventRef) {
@@ -23,13 +22,10 @@ const useClickOutside = (
document.addEventListener("mousedown", listener);
document.addEventListener("touchstart", listener);
- const removeListeners = () => {
+ return () => {
document.removeEventListener("mousedown", listener);
document.removeEventListener("touchstart", listener);
};
-
- if (!el) removeListeners();
- return removeListeners;
}, [ref, handler]); // Reload only if ref or handler changes
};
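A hedged usage sketch of the hook after this change; the component and ref names are illustrative. The third argument is the optional preventRef, whose element is excluded from "outside" clicks:

import React, { FC, useRef, useState } from "preact/compat";
import useClickOutside from "./useClickOutside";

const Dropdown: FC = () => {
  const [open, setOpen] = useState(false);
  const menuRef = useRef<HTMLDivElement>(null);
  const toggleRef = useRef<HTMLButtonElement>(null);

  // Clicks inside menuRef or on toggleRef (preventRef) are ignored; anything else closes the menu.
  useClickOutside(menuRef, () => setOpen(false), toggleRef);

  return (
    <div>
      <button ref={toggleRef} onClick={() => setOpen(prev => !prev)}>toggle</button>
      {open && <div ref={menuRef}>menu</div>}
    </div>
  );
};

export default Dropdown;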
diff --git a/app/vmui/packages/vmui/src/index.tsx b/app/vmui/packages/vmui/src/index.tsx
index b989336c19..20e08435cc 100644
--- a/app/vmui/packages/vmui/src/index.tsx
+++ b/app/vmui/packages/vmui/src/index.tsx
@@ -1,4 +1,5 @@
import React, { render } from "preact/compat";
+import "./constants/dayjsPlugins";
import App from "./App";
import reportWebVitals from "./reportWebVitals";
import "./styles/style.scss";
diff --git a/app/vmui/packages/vmui/src/pages/CustomPanel/QueryConfigurator/QueryConfigurator.tsx b/app/vmui/packages/vmui/src/pages/CustomPanel/QueryConfigurator/QueryConfigurator.tsx
index 543e91c1b4..ffbe7c76f5 100644
--- a/app/vmui/packages/vmui/src/pages/CustomPanel/QueryConfigurator/QueryConfigurator.tsx
+++ b/app/vmui/packages/vmui/src/pages/CustomPanel/QueryConfigurator/QueryConfigurator.tsx
@@ -11,6 +11,8 @@ import Button from "../../../components/Main/Button/Button";
import "./style.scss";
import Tooltip from "../../../components/Main/Tooltip/Tooltip";
import classNames from "classnames";
+import { MouseEvent as ReactMouseEvent } from "react";
+import { arrayEquals } from "../../../utils/array";
export interface QueryConfiguratorProps {
error?: ErrorTypes | string;
@@ -55,8 +57,16 @@ const QueryConfigurator: FC = ({ error, queryOptions, on
setStateQuery(prev => prev.filter((q, i) => i !== index));
};
- const onToggleHideQuery = (index: number) => {
- setHideQuery(prev => prev.includes(index) ? prev.filter(n => n !== index) : [...prev, index]);
+ const onToggleHideQuery = (e: ReactMouseEvent, index: number) => {
+ const { ctrlKey, metaKey } = e;
+ const ctrlMetaKey = ctrlKey || metaKey;
+
+ if (ctrlMetaKey) {
+ const hideIndexes = stateQuery.map((q, i) => i).filter(n => n !== index);
+ setHideQuery(prev => arrayEquals(hideIndexes, prev) ? [] : hideIndexes);
+ } else {
+ setHideQuery(prev => prev.includes(index) ? prev.filter(n => n !== index) : [...prev, index]);
+ }
};
const handleChangeQuery = (value: string, index: number) => {
@@ -84,11 +94,11 @@ const QueryConfigurator: FC = ({ error, queryOptions, on
const createHandlerRemoveQuery = (i: number) => () => {
onRemoveQuery(i);
- setHideQuery(prev => prev.map(n => n > i ? n - 1: n));
+ setHideQuery(prev => prev.includes(i) ? prev.filter(n => n !== i) : prev.map(n => n > i ? n - 1: n));
};
- const createHandlerHideQuery = (i: number) => () => {
- onToggleHideQuery(i);
+ const createHandlerHideQuery = (i: number) => (e: ReactMouseEvent) => {
+ onToggleHideQuery(e, i);
};
useEffect(() => {
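The arrayEquals helper imported above is not shown in this diff; a typical shallow-equality implementation (an assumption, not the repository's actual code) would look like this. It is used here to detect whether every other query is already hidden, so a second ctrl/meta click restores them all:

// utils/array.ts (assumed shape): shallow element-by-element comparison.
export const arrayEquals = (a: unknown[], b: unknown[]): boolean =>
  a.length === b.length && a.every((value, index) => value === b[index]);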
diff --git a/app/vmui/packages/vmui/src/state/cardinality/reducer.ts b/app/vmui/packages/vmui/src/state/cardinality/reducer.ts
index ffb8ac25bb..86531cc9ef 100644
--- a/app/vmui/packages/vmui/src/state/cardinality/reducer.ts
+++ b/app/vmui/packages/vmui/src/state/cardinality/reducer.ts
@@ -23,7 +23,7 @@ export type Action =
export const initialState: CardinalityState = {
runQuery: 0,
topN: getQueryStringValue("topN", 10) as number,
- date: getQueryStringValue("date", dayjs(new Date()).format(DATE_FORMAT)) as string,
+ date: getQueryStringValue("date", dayjs().tz().format(DATE_FORMAT)) as string,
focusLabel: getQueryStringValue("focusLabel", "") as string,
match: getQueryStringValue("match", "") as string,
extraLabel: getQueryStringValue("extra_label", "") as string,
diff --git a/app/vmui/packages/vmui/src/state/time/reducer.ts b/app/vmui/packages/vmui/src/state/time/reducer.ts
index 38b2a5cb4c..6cc69ac40a 100644
--- a/app/vmui/packages/vmui/src/state/time/reducer.ts
+++ b/app/vmui/packages/vmui/src/state/time/reducer.ts
@@ -5,14 +5,18 @@ import {
getDateNowUTC,
getDurationFromPeriod,
getTimeperiodForDuration,
- getRelativeTime
+ getRelativeTime,
+ setTimezone
} from "../../utils/time";
import { getQueryStringValue } from "../../utils/query-string";
+import dayjs from "dayjs";
+import { getFromStorage, saveToStorage } from "../../utils/storage";
export interface TimeState {
duration: string;
period: TimeParams;
relativeTime?: string;
+ timezone: string;
}
export type TimeAction =
@@ -21,12 +25,16 @@ export type TimeAction =
| { type: "SET_PERIOD", payload: TimePeriod }
| { type: "RUN_QUERY"}
| { type: "RUN_QUERY_TO_NOW"}
+ | { type: "SET_TIMEZONE", payload: string }
+
+const timezone = getFromStorage("TIMEZONE") as string || dayjs.tz.guess();
+setTimezone(timezone);
const defaultDuration = getQueryStringValue("g0.range_input") as string;
const { duration, endInput, relativeTimeId } = getRelativeTime({
defaultDuration: defaultDuration || "1h",
- defaultEndInput: new Date(formatDateToLocal(getQueryStringValue("g0.end_input", getDateNowUTC()) as Date)),
+ defaultEndInput: formatDateToLocal(getQueryStringValue("g0.end_input", getDateNowUTC()) as string),
relativeTimeId: defaultDuration ? getQueryStringValue("g0.relative_time", "none") as string : undefined
});
@@ -34,8 +42,10 @@ export const initialTimeState: TimeState = {
duration,
period: getTimeperiodForDuration(duration, endInput),
relativeTime: relativeTimeId,
+ timezone,
};
+
export function reducer(state: TimeState, action: TimeAction): TimeState {
switch (action.type) {
case "SET_DURATION":
@@ -49,7 +59,7 @@ export function reducer(state: TimeState, action: TimeAction): TimeState {
return {
...state,
duration: action.payload.duration,
- period: getTimeperiodForDuration(action.payload.duration, new Date(action.payload.until)),
+ period: getTimeperiodForDuration(action.payload.duration, action.payload.until),
relativeTime: action.payload.id,
};
case "SET_PERIOD":
@@ -77,6 +87,13 @@ export function reducer(state: TimeState, action: TimeAction): TimeState {
...state,
period: getTimeperiodForDuration(state.duration)
};
+ case "SET_TIMEZONE":
+ setTimezone(action.payload);
+ saveToStorage("TIMEZONE", action.payload);
+ return {
+ ...state,
+ timezone: action.payload
+ };
default:
throw new Error();
}
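A hedged sketch of exercising the new action directly (import path assumed); besides returning the new state, the reducer sets the dayjs default zone and persists the choice:

import { reducer, initialTimeState } from "./reducer";

// Dispatching SET_TIMEZONE returns state with the new zone; as a side effect the
// reducer calls setTimezone(payload) and saveToStorage("TIMEZONE", payload).
const next = reducer(initialTimeState, { type: "SET_TIMEZONE", payload: "Europe/Berlin" });
console.log(next.timezone); // "Europe/Berlin"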
diff --git a/app/vmui/packages/vmui/src/types/index.ts b/app/vmui/packages/vmui/src/types/index.ts
index be5d2a56ca..c6085a3bcf 100644
--- a/app/vmui/packages/vmui/src/types/index.ts
+++ b/app/vmui/packages/vmui/src/types/index.ts
@@ -105,3 +105,9 @@ export interface SeriesLimits {
chart: number,
code: number,
}
+
+export interface Timezone {
+ region: string,
+ utc: string,
+ search?: string
+}
diff --git a/app/vmui/packages/vmui/src/utils/query-string.ts b/app/vmui/packages/vmui/src/utils/query-string.ts
index 170a163795..333bd93550 100644
--- a/app/vmui/packages/vmui/src/utils/query-string.ts
+++ b/app/vmui/packages/vmui/src/utils/query-string.ts
@@ -5,7 +5,7 @@ import { MAX_QUERY_FIELDS } from "../constants/graph";
export const setQueryStringWithoutPageReload = (params: Record): void => {
const w = window;
if (w) {
- const qsValue = Object.entries(params).map(([k, v]) => `${k}=${v}`).join("&");
+ const qsValue = Object.entries(params).map(([k, v]) => `${k}=${encodeURIComponent(String(v))}`).join("&");
const qs = qsValue ? `?${qsValue}` : "";
const newurl = `${w.location.protocol}//${w.location.host}${w.location.pathname}${qs}${w.location.hash}`;
w.history.pushState({ path: newurl }, "", newurl);
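A small illustration of why the added encodeURIComponent call matters: query expressions routinely contain characters such as `{`, `"`, `+` and spaces that would otherwise corrupt the query string. The parameter value below is only an example:

const params: Record<string, string> = {
  "g0.expr": "rate(http_requests_total{job=\"api\"}[5m]) + 1",
};

const qsValue = Object.entries(params)
  .map(([k, v]) => `${k}=${encodeURIComponent(String(v))}`)
  .join("&");

// Special characters are escaped instead of terminating or corrupting the parameter:
console.log(qsValue);
// g0.expr=rate(http_requests_total%7Bjob%3D%22api%22%7D%5B5m%5D)%20%2B%201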
diff --git a/app/vmui/packages/vmui/src/utils/storage.ts b/app/vmui/packages/vmui/src/utils/storage.ts
index f08833577a..86d238a8b1 100644
--- a/app/vmui/packages/vmui/src/utils/storage.ts
+++ b/app/vmui/packages/vmui/src/utils/storage.ts
@@ -6,6 +6,7 @@ export type StorageKeys = "BASIC_AUTH_DATA"
| "QUERY_TRACING"
| "SERIES_LIMITS"
| "TABLE_COMPACT"
+ | "TIMEZONE"
export const saveToStorage = (key: StorageKeys, value: string | boolean | Record): void => {
if (value) {
diff --git a/app/vmui/packages/vmui/src/utils/time.ts b/app/vmui/packages/vmui/src/utils/time.ts
index de1d445279..2b789c25f3 100644
--- a/app/vmui/packages/vmui/src/utils/time.ts
+++ b/app/vmui/packages/vmui/src/utils/time.ts
@@ -1,17 +1,16 @@
-import { RelativeTimeOption, TimeParams, TimePeriod } from "../types";
+import { RelativeTimeOption, TimeParams, TimePeriod, Timezone } from "../types";
import dayjs, { UnitTypeShort } from "dayjs";
-import duration from "dayjs/plugin/duration";
-import utc from "dayjs/plugin/utc";
import { getQueryStringValue } from "./query-string";
import { DATE_ISO_FORMAT } from "../constants/date";
-dayjs.extend(duration);
-dayjs.extend(utc);
-
const MAX_ITEMS_PER_CHART = window.innerWidth / 4;
export const limitsDurations = { min: 1, max: 1.578e+11 }; // min: 1 ms, max: 5 years
+// eslint-disable-next-line @typescript-eslint/ban-ts-comment
+// @ts-ignore
+export const supportedTimezones = Intl.supportedValuesOf("timeZone") as string[];
+
export const supportedDurations = [
{ long: "days", short: "d", possible: "day" },
{ long: "weeks", short: "w", possible: "week" },
@@ -38,7 +37,7 @@ export const isSupportedDuration = (str: string): Partial {
- const n = (date || new Date()).valueOf() / 1000;
+ const n = (date || dayjs().toDate()).valueOf() / 1000;
const durItems = dur.trim().split(" ");
@@ -64,24 +63,24 @@ export const getTimeperiodForDuration = (dur: string, date?: Date): TimeParams =
start: n - delta,
end: n,
step: step,
- date: formatDateToUTC(date || new Date())
+ date: formatDateToUTC(date || dayjs().toDate())
};
};
-export const formatDateToLocal = (date: Date): string => {
- return dayjs(date).utcOffset(0, true).local().format(DATE_ISO_FORMAT);
+export const formatDateToLocal = (date: string): Date => {
+ return dayjs(date).utcOffset(0, true).toDate();
};
export const formatDateToUTC = (date: Date): string => {
- return dayjs(date).utc().format(DATE_ISO_FORMAT);
+ return dayjs.tz(date).utc().format(DATE_ISO_FORMAT);
};
export const formatDateForNativeInput = (date: Date): string => {
- return dayjs(date).format(DATE_ISO_FORMAT);
+ return dayjs.tz(date).format(DATE_ISO_FORMAT);
};
-export const getDateNowUTC = (): Date => {
- return new Date(dayjs().utc().format(DATE_ISO_FORMAT));
+export const getDateNowUTC = (): string => {
+ return dayjs().utc().format(DATE_ISO_FORMAT);
};
export const getDurationFromMilliseconds = (ms: number): string => {
@@ -115,7 +114,10 @@ export const checkDurationLimit = (dur: string): string => {
return dur;
};
-export const dateFromSeconds = (epochTimeInSeconds: number): Date => new Date(epochTimeInSeconds * 1000);
+export const dateFromSeconds = (epochTimeInSeconds: number): Date => dayjs(epochTimeInSeconds * 1000).toDate();
+
+const getYesterday = () => dayjs().tz().subtract(1, "day").endOf("day").toDate();
+const getToday = () => dayjs().tz().endOf("day").toDate();
export const relativeTimeOptions: RelativeTimeOption[] = [
{ title: "Last 5 minutes", duration: "5m" },
@@ -132,11 +134,11 @@ export const relativeTimeOptions: RelativeTimeOption[] = [
{ title: "Last 90 days", duration: "90d" },
{ title: "Last 180 days", duration: "180d" },
{ title: "Last 1 year", duration: "1y" },
- { title: "Yesterday", duration: "1d", until: () => dayjs().subtract(1, "day").endOf("day").toDate() },
- { title: "Today", duration: "1d", until: () => dayjs().endOf("day").toDate() },
+ { title: "Yesterday", duration: "1d", until: getYesterday },
+ { title: "Today", duration: "1d", until: getToday },
].map(o => ({
id: o.title.replace(/\s/g, "_").toLocaleLowerCase(),
- until: o.until ? o.until : () => dayjs().toDate(),
+ until: o.until ? o.until : () => dayjs().tz().toDate(),
...o
}));
@@ -151,3 +153,35 @@ export const getRelativeTime = ({ relativeTimeId, defaultDuration, defaultEndInp
endInput: target ? target.until() : defaultEndInput
};
};
+
+export const getUTCByTimezone = (timezone: string) => {
+ const date = dayjs().tz(timezone);
+ return `UTC${date.format("Z")}`;
+};
+
+export const getTimezoneList = (search = "") => {
+ const regexp = new RegExp(search, "i");
+
+ return supportedTimezones.reduce((acc: {[key: string]: Timezone[]}, region) => {
+ const zone = (region.match(/^(.*?)\//) || [])[1] || "unknown";
+ const utc = getUTCByTimezone(region);
+ const item = {
+ region,
+ utc,
+ search: `${region} ${utc} ${region.replace(/[/_]/gmi, " ")}`
+ };
+ const includeZone = !search || (search && regexp.test(item.search));
+
+ if (includeZone && acc[zone]) {
+ acc[zone].push(item);
+ } else if (includeZone) {
+ acc[zone] = [item];
+ }
+
+ return acc;
+ }, {});
+};
+
+export const setTimezone = (timezone: string) => {
+ dayjs.tz.setDefault(timezone);
+};
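A hedged usage sketch for the new helpers above; the offsets shown are examples and depend on the current DST state:

import { getTimezoneList, getUTCByTimezone, setTimezone } from "./time";

// Zones are grouped by their region prefix ("America", "Europe", ...).
const groups = getTimezoneList();
console.log(Object.keys(groups)); // e.g. ["Africa", "America", "Asia", "Europe", ...]

// The optional search string matches case-insensitively against region and UTC offset.
const filtered = getTimezoneList("berlin");
console.log(filtered["Europe"]); // e.g. [{ region: "Europe/Berlin", utc: "UTC+01:00", search: "..." }]

// Offset label for a zone, and switching the dayjs default zone.
console.log(getUTCByTimezone("Asia/Tokyo")); // "UTC+09:00"
setTimezone("Asia/Tokyo");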
diff --git a/app/vmui/packages/vmui/src/utils/uplot/axes.ts b/app/vmui/packages/vmui/src/utils/uplot/axes.ts
index 80760aa693..c3c225323e 100644
--- a/app/vmui/packages/vmui/src/utils/uplot/axes.ts
+++ b/app/vmui/packages/vmui/src/utils/uplot/axes.ts
@@ -5,6 +5,18 @@ import { AxisRange } from "../../state/graph/reducer";
import { formatTicks, sizeAxis } from "./helpers";
import { TimeParams } from "../../types";
+// see https://github.com/leeoniya/uPlot/tree/master/docs#axis--grid-opts
+const timeValues = [
+ // tick incr default year month day hour min sec mode
+ [3600 * 24 * 365, "{YYYY}", null, null, null, null, null, null, 1],
+ [3600 * 24 * 28, "{MMM}", "\n{YYYY}", null, null, null, null, null, 1],
+ [3600 * 24, "{MM}-{DD}", "\n{YYYY}", null, null, null, null, null, 1],
+ [3600, "{HH}:{mm}", "\n{YYYY}-{MM}-{DD}", null, "\n{MM}-{DD}", null, null, null, 1],
+ [60, "{HH}:{mm}", "\n{YYYY}-{MM}-{DD}", null, "\n{MM}-{DD}", null, null, null, 1],
+ [1, "{HH}:{mm}:{ss}", "\n{YYYY}-{MM}-{DD}", null, "\n{MM}-{DD} {HH}:{mm}", null, null, null, 1],
+ [0.001, ":{ss}.{fff}", "\n{YYYY}-{MM}-{DD} {HH}:{mm}", null, "\n{MM}-{DD} {HH}:{mm}", null, "\n{HH}:{mm}", null, 1],
+];
+
export const getAxes = (series: Series[], unit?: string): Axis[] => Array.from(new Set(series.map(s => s.scale))).map(a => {
const axis = {
scale: a,
@@ -13,7 +25,7 @@ export const getAxes = (series: Series[], unit?: string): Axis[] => Array.from(n
font: "10px Arial",
values: (u: uPlot, ticks: number[]) => formatTicks(u, ticks, unit)
};
- if (!a) return { space: 80 };
+ if (!a) return { space: 80, values: timeValues };
if (!(Number(a) % 2)) return { ...axis, side: 1 };
return axis;
});
diff --git a/dashboards/victoriametrics-cluster.json b/dashboards/victoriametrics-cluster.json
index 75be1b925e..98d9f20acb 100644
--- a/dashboards/victoriametrics-cluster.json
+++ b/dashboards/victoriametrics-cluster.json
@@ -179,7 +179,7 @@
"uid": "$ds"
},
"exemplar": true,
- "expr": "sum(vm_rows{job=~\"$job_storage\", type!=\"indexdb\"})",
+ "expr": "sum(vm_rows{job=~\"$job_storage\", type!~\"indexdb.*\"})",
"format": "time_series",
"instant": true,
"interval": "",
@@ -599,7 +599,7 @@
"uid": "$ds"
},
"exemplar": true,
- "expr": "sum(vm_data_size_bytes{job=~\"$job_storage\", type!=\"indexdb\"}) / sum(vm_rows{job=~\"$job_storage\", type!=\"indexdb\"})",
+ "expr": "sum(vm_data_size_bytes{job=~\"$job_storage\", type!~\"indexdb.*\"}) / sum(vm_rows{job=~\"$job_storage\", type!~\"indexdb.*\"})",
"format": "time_series",
"instant": true,
"interval": "",
@@ -1612,8 +1612,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1629,7 +1628,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 14
+ "y": 30
},
"id": 66,
"links": [],
@@ -1724,8 +1723,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1741,7 +1739,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 14
+ "y": 30
},
"id": 138,
"links": [],
@@ -1835,8 +1833,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1852,7 +1849,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 22
+ "y": 38
},
"id": 64,
"links": [],
@@ -1942,8 +1939,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -1972,7 +1968,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 22
+ "y": 38
},
"id": 122,
"links": [],
@@ -2080,8 +2076,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2113,7 +2108,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 30
+ "y": 46
},
"id": 117,
"links": [],
@@ -2201,8 +2196,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2218,7 +2212,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 30
+ "y": 46
},
"id": 119,
"options": {
@@ -2306,8 +2300,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2323,7 +2316,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 38
+ "y": 54
},
"id": 68,
"links": [],
@@ -2411,8 +2404,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2428,7 +2420,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 38
+ "y": 54
},
"id": 120,
"options": {
@@ -2516,8 +2508,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -2533,7 +2524,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 46
+ "y": 62
},
"id": 70,
"links": [],
@@ -2675,7 +2666,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 15
+ "y": 31
},
"id": 102,
"options": {
@@ -2789,7 +2780,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 15
+ "y": 31
},
"id": 108,
"options": {
@@ -2890,7 +2881,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 23
+ "y": 39
},
"id": 142,
"links": [
@@ -3001,7 +2992,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 23
+ "y": 39
},
"id": 107,
"options": {
@@ -3100,7 +3091,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 31
+ "y": 47
},
"id": 170,
"links": [],
@@ -3206,7 +3197,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 31
+ "y": 47
},
"id": 116,
"links": [],
@@ -3308,7 +3299,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 39
+ "y": 55
},
"id": 144,
"options": {
@@ -3411,7 +3402,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 39
+ "y": 55
},
"id": 58,
"links": [],
@@ -3515,7 +3506,7 @@
"h": 7,
"w": 24,
"x": 0,
- "y": 48
+ "y": 64
},
"id": 183,
"options": {
@@ -3663,7 +3654,7 @@
"h": 9,
"w": 12,
"x": 0,
- "y": 5
+ "y": 21
},
"id": 76,
"links": [],
@@ -3779,7 +3770,7 @@
"h": 9,
"w": 12,
"x": 12,
- "y": 5
+ "y": 21
},
"id": 86,
"links": [],
@@ -3904,7 +3895,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 14
+ "y": 30
},
"id": 80,
"links": [],
@@ -4009,7 +4000,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 14
+ "y": 30
},
"id": 78,
"links": [],
@@ -4125,7 +4116,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 22
+ "y": 38
},
"id": 82,
"options": {
@@ -4232,7 +4223,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 22
+ "y": 38
},
"id": 74,
"options": {
@@ -4334,7 +4325,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4445,7 +4437,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4491,7 +4484,7 @@
"uid": "$ds"
},
"editorMode": "code",
- "expr": "min(vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n - \n ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job_storage\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!=\"indexdb\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!=\"indexdb\"})\n )\n))",
+ "expr": "min(vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n - \n ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job_storage\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -4557,7 +4550,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4702,7 +4696,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4839,7 +4834,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -4942,7 +4938,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -5084,7 +5081,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -5187,7 +5185,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -5246,7 +5245,7 @@
"type": "prometheus",
"uid": "$ds"
},
- "description": "Shows amount of on-disk space occupied by data points.",
+ "description": "Shows the percentage of used disk space. It is recommended to have at least 20% of free disk space for the best performance.",
"fieldConfig": {
"defaults": {
"color": {
@@ -5259,7 +5258,7 @@
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
- "fillOpacity": 10,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -5276,20 +5275,27 @@
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "normal"
+ "mode": "none"
},
"thresholdsStyle": {
- "mode": "off"
+ "mode": "line"
}
},
- "links": [],
+ "links": [
+ {
+ "targetBlank": true,
+ "title": "Drilldown",
+ "url": "/d/oS7Bi_0Wz?viewPanel=200&var-ds=$ds&var-instance=$instance&${__url_time_range}"
+ }
+ ],
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -5297,7 +5303,7 @@
}
]
},
- "unit": "bytes"
+ "unit": "percentunit"
},
"overrides": []
},
@@ -5307,7 +5313,7 @@
"x": 0,
"y": 37
},
- "id": 18,
+ "id": 20,
"links": [],
"options": {
"legend": {
@@ -5324,7 +5330,7 @@
},
"tooltip": {
"mode": "multi",
- "sort": "none"
+ "sort": "desc"
}
},
"pluginVersion": "9.1.0",
@@ -5335,15 +5341,43 @@
"uid": "$ds"
},
"editorMode": "code",
- "expr": "sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!=\"indexdb\"}) ",
+ "expr": "max(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) /\n (\n sum(vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) +\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n ) \n)",
"format": "time_series",
"intervalFactor": 1,
- "legendFormat": "disk usage",
+ "legendFormat": "max",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$ds"
+ },
+ "editorMode": "code",
+ "expr": "min(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) /\n (\n sum(vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) +\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n ) \n)",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 1,
+ "legendFormat": "min",
+ "range": true,
+ "refId": "B"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$ds"
+ },
+ "editorMode": "code",
+ "expr": "avg(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) /\n (\n sum(vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) +\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n ) \n)",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 1,
+ "legendFormat": "avg",
+ "range": true,
+ "refId": "C"
}
],
- "title": "Disk space usage (datapoints) ($instance)",
+ "title": "Disk space usage % ($instance)",
"type": "timeseries"
},
{
@@ -5394,7 +5428,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -5457,7 +5492,7 @@
"type": "prometheus",
"uid": "$ds"
},
- "description": "Shows amount of on-disk space occupied by inverted index.",
+ "description": "Shows the percentage of used disk space by type: datapoints or indexdb. Normally, indexdb takes much less space comparing to datapoints. But with high churn rate the size of the indexdb could grow significantly.\n\nThe sum of the % can be > 100% since panel shows max % per-job and per-instance. It means different instance can have different ratio between datapoints and indexdb size.",
"fieldConfig": {
"defaults": {
"color": {
@@ -5470,7 +5505,7 @@
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
- "fillOpacity": 10,
+ "fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
@@ -5487,28 +5522,31 @@
"spanNulls": false,
"stacking": {
"group": "A",
- "mode": "normal"
+ "mode": "none"
},
"thresholdsStyle": {
- "mode": "off"
+ "mode": "line"
}
},
- "links": [],
+ "links": [
+ {
+ "targetBlank": true,
+ "title": "Drilldown",
+ "url": "/d/oS7Bi_0Wz?viewPanel=201&var-ds=$ds&var-instance=$instance&${__url_time_range}"
+ }
+ ],
"mappings": [],
"min": 0,
"thresholds": {
"mode": "absolute",
"steps": [
{
- "color": "green"
- },
- {
- "color": "red",
- "value": 80
+ "color": "green",
+ "value": null
}
]
},
- "unit": "bytes"
+ "unit": "percentunit"
},
"overrides": []
},
@@ -5518,7 +5556,7 @@
"x": 0,
"y": 45
},
- "id": 20,
+ "id": 202,
"links": [],
"options": {
"legend": {
@@ -5535,7 +5573,7 @@
},
"tooltip": {
"mode": "multi",
- "sort": "none"
+ "sort": "desc"
}
},
"pluginVersion": "9.1.0",
@@ -5546,15 +5584,29 @@
"uid": "$ds"
},
"editorMode": "code",
- "expr": "sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type=\"indexdb\"})",
+ "expr": "max(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type=~\"indexdb.*\"}) by(job, instance)\n / \n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n)",
"format": "time_series",
"intervalFactor": 1,
- "legendFormat": "disk usage",
+ "legendFormat": "indexdb",
"range": true,
"refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$ds"
+ },
+ "editorMode": "code",
+ "expr": "max(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"}) by(job, instance)\n / \n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n)",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 1,
+ "legendFormat": "datapoints",
+ "range": true,
+ "refId": "B"
}
],
- "title": "Disk space usage (index) ($instance)",
+ "title": "Disk space usage % by type ($instance)",
"type": "timeseries"
},
{
@@ -5605,7 +5657,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -5740,7 +5793,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -5755,7 +5809,7 @@
"gridPos": {
"h": 8,
"w": 12,
- "x": 12,
+ "x": 0,
"y": 53
},
"id": 135,
@@ -5862,8 +5916,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -5879,7 +5932,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 82
+ "y": 98
},
"id": 92,
"links": [],
@@ -5969,8 +6022,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -6006,7 +6058,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 82
+ "y": 98
},
"id": 95,
"links": [],
@@ -6112,8 +6164,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -6129,7 +6180,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 90
+ "y": 106
},
"id": 163,
"links": [],
@@ -6257,8 +6308,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -6274,7 +6324,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 90
+ "y": 106
},
"id": 165,
"links": [],
@@ -6398,8 +6448,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -6415,7 +6464,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 98
+ "y": 114
},
"id": 178,
"links": [],
@@ -6506,8 +6555,7 @@
"mode": "absolute",
"steps": [
{
- "color": "green",
- "value": null
+ "color": "green"
},
{
"color": "red",
@@ -6523,7 +6571,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 98
+ "y": 114
},
"id": 180,
"links": [],
@@ -6630,7 +6678,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 106
+ "y": 122
},
"id": 179,
"links": [],
@@ -6737,7 +6785,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 106
+ "y": 122
},
"id": 181,
"links": [],
@@ -6855,7 +6903,7 @@
"h": 8,
"w": 24,
"x": 0,
- "y": 114
+ "y": 130
},
"id": 93,
"links": [],
@@ -6991,7 +7039,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 8
+ "y": 24
},
"id": 97,
"links": [],
@@ -7117,7 +7165,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 8
+ "y": 24
},
"id": 99,
"links": [],
@@ -7241,7 +7289,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 16
+ "y": 32
},
"id": 185,
"links": [],
@@ -7385,7 +7433,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 16
+ "y": 32
},
"id": 187,
"links": [],
@@ -7523,7 +7571,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 24
+ "y": 40
},
"id": 90,
"links": [],
@@ -7631,7 +7679,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 24
+ "y": 40
},
"id": 88,
"links": [],
@@ -7738,7 +7786,7 @@
"h": 8,
"w": 12,
"x": 0,
- "y": 32
+ "y": 48
},
"id": 139,
"links": [],
@@ -7845,7 +7893,7 @@
"h": 8,
"w": 12,
"x": 12,
- "y": 32
+ "y": 48
},
"id": 114,
"links": [],
@@ -7911,10 +7959,15 @@
},
"id": 198,
"options": {
+ "code": {
+ "language": "plaintext",
+ "showLineNumbers": false,
+ "showMiniMap": false
+ },
"content": "Drilldown row is used by other panels on the dashboard to show more detailed metrics per-instance.",
"mode": "markdown"
},
- "pluginVersion": "9.1.0",
+ "pluginVersion": "9.2.6",
"transparent": true,
"type": "text"
},
@@ -7966,7 +8019,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -8067,7 +8121,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -8168,7 +8223,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
}
]
},
@@ -8271,7 +8327,8 @@
"mode": "absolute",
"steps": [
{
- "color": "green"
+ "color": "green",
+ "value": null
},
{
"color": "red",
@@ -8317,7 +8374,7 @@
"uid": "$ds"
},
"editorMode": "code",
- "expr": "vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n - \n ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job_storage\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!=\"indexdb\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!=\"indexdb\"})\n )\n)",
+ "expr": "vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n - \n ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job_storage\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n)",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -8328,6 +8385,224 @@
],
"title": "Storage full ETA ($instance)",
"type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$ds"
+ },
+ "description": "Shows the percentage of used disk space. It is recommended to have at least 20% of free disk space for the best performance.",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "line"
+ }
+ },
+ "links": [],
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "decbytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 0,
+ "y": 26
+ },
+ "id": 200,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Last *",
+ "sortDesc": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "pluginVersion": "9.1.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$ds"
+ },
+ "editorMode": "code",
+ "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "",
+ "range": true,
+ "refId": "A"
+ }
+ ],
+ "title": "Disk space usage ($instance)",
+ "type": "timeseries"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$ds"
+ },
+ "description": "",
+ "fieldConfig": {
+ "defaults": {
+ "color": {
+ "mode": "palette-classic"
+ },
+ "custom": {
+ "axisCenteredZero": false,
+ "axisColorMode": "text",
+ "axisLabel": "",
+ "axisPlacement": "auto",
+ "barAlignment": 0,
+ "drawStyle": "line",
+ "fillOpacity": 0,
+ "gradientMode": "none",
+ "hideFrom": {
+ "legend": false,
+ "tooltip": false,
+ "viz": false
+ },
+ "lineInterpolation": "linear",
+ "lineWidth": 1,
+ "pointSize": 5,
+ "scaleDistribution": {
+ "type": "linear"
+ },
+ "showPoints": "never",
+ "spanNulls": false,
+ "stacking": {
+ "group": "A",
+ "mode": "none"
+ },
+ "thresholdsStyle": {
+ "mode": "line"
+ }
+ },
+ "links": [],
+ "mappings": [],
+ "min": 0,
+ "thresholds": {
+ "mode": "absolute",
+ "steps": [
+ {
+ "color": "green",
+ "value": null
+ }
+ ]
+ },
+ "unit": "decbytes"
+ },
+ "overrides": []
+ },
+ "gridPos": {
+ "h": 8,
+ "w": 12,
+ "x": 12,
+ "y": 26
+ },
+ "id": 201,
+ "links": [],
+ "options": {
+ "legend": {
+ "calcs": [
+ "mean",
+ "lastNotNull",
+ "max"
+ ],
+ "displayMode": "table",
+ "placement": "bottom",
+ "showLegend": true,
+ "sortBy": "Last *",
+ "sortDesc": true
+ },
+ "tooltip": {
+ "mode": "multi",
+ "sort": "desc"
+ }
+ },
+ "pluginVersion": "9.1.0",
+ "targets": [
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$ds"
+ },
+ "editorMode": "code",
+ "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type=~\"indexdb.*\"}) by(job, instance)",
+ "format": "time_series",
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}:{{instance}} (indexdb)",
+ "range": true,
+ "refId": "A"
+ },
+ {
+ "datasource": {
+ "type": "prometheus",
+ "uid": "$ds"
+ },
+ "editorMode": "code",
+ "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"}) by(job, instance)",
+ "format": "time_series",
+ "hide": false,
+ "intervalFactor": 1,
+ "legendFormat": "{{job}}:{{instance}} (datapoints)",
+ "range": true,
+ "refId": "B"
+ }
+ ],
+ "title": "Disk space usage by type ($instance)",
+ "type": "timeseries"
}
],
"title": "Drilldown",
@@ -8516,4 +8791,4 @@
"uid": "oS7Bi_0Wz",
"version": 1,
"weekStart": ""
-}
\ No newline at end of file
+}
diff --git a/dashboards/victoriametrics.json b/dashboards/victoriametrics.json
index f5bce5bd0a..9975145de5 100644
--- a/dashboards/victoriametrics.json
+++ b/dashboards/victoriametrics.json
@@ -225,7 +225,7 @@
"uid": "$ds"
},
"exemplar": false,
- "expr": "sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"})",
+ "expr": "sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"})",
"format": "time_series",
"instant": true,
"interval": "",
@@ -3767,7 +3767,7 @@
"uid": "$ds"
},
"editorMode": "code",
- "expr": "vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"} \n/ ignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job\", instance=~\"$instance\"}[1d]) \n - ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"}) \n / sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"})\n )\n )",
+ "expr": "vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"} \n/ ignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job\", instance=~\"$instance\"}[1d]) \n - ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"}) \n / sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n )",
"format": "time_series",
"hide": false,
"interval": "",
@@ -3874,7 +3874,7 @@
"uid": "$ds"
},
"editorMode": "code",
- "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"})",
+ "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"})",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -3900,7 +3900,7 @@
"uid": "$ds"
},
"editorMode": "code",
- "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type=\"indexdb\"})",
+ "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type=~\"indexdb.*\"})",
"format": "time_series",
"hide": false,
"interval": "",
@@ -4156,7 +4156,7 @@
"type": "prometheus",
"uid": "$ds"
},
- "expr": "sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type != \"indexdb\"})",
+ "expr": "sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"})",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -5306,4 +5306,4 @@
"uid": "wNf0q_kZk",
"version": 1,
"weekStart": ""
-}
\ No newline at end of file
+}
diff --git a/deployment/docker/README.md b/deployment/docker/README.md
index e1fa97d18c..342f592e98 100644
--- a/deployment/docker/README.md
+++ b/deployment/docker/README.md
@@ -5,14 +5,14 @@ Docker compose environment for VictoriaMetrics includes VictoriaMetrics componen
and [Grafana](https://grafana.com/).
For starting the docker-compose environment ensure you have docker installed and running and access to the Internet.
-All commands should be executed from the root directory of this repo.
+**All commands should be executed from the root directory of [the repo](https://github.com/VictoriaMetrics/VictoriaMetrics).**
-To spin-up environment for single server VictoriaMetrics run the following command :
+To spin up the environment for single server VictoriaMetrics run the following command:
```
make docker-single-up
```
-To shutdown the docker compose environment for single server run the following command:
+To shut down the docker-compose environment for single server run the following command:
```
make docker-single-down
```
@@ -22,7 +22,7 @@ For cluster version the command will be the following:
make docker-cluster-up
```
-To shutdown the docker compose environment for cluster version run the following command:
+To shut down the docker-compose environment for the cluster version run the following command:
```
make docker-cluster-down
```
@@ -36,51 +36,49 @@ VictoriaMetrics will be accessible on the following ports:
* `--httpListenAddr=:8428`
The communication scheme between components is the following:
-* [vmagent](#vmagent) sends scraped metrics to VictoriaMetrics;
-* [grafana](#grafana) is configured with datasource pointing to VictoriaMetrics;
-* [vmalert](#vmalert) is configured to query VictoriaMetrics and send alerts state
+* [vmagent](#vmagent) sends scraped metrics to `single server VictoriaMetrics`;
+* [grafana](#grafana) is configured with datasource pointing to `single server VictoriaMetrics`;
+* [vmalert](#vmalert) is configured to query `single server VictoriaMetrics` and send alerts state
and recording rules back to it;
-* [alertmanager](#alertmanager) is configured to receive notifications from vmalert.
+* [alertmanager](#alertmanager) is configured to receive notifications from `vmalert`.
-To access `vmalert` via `vmselect`
-use link [http://localhost:8428/vmalert](http://localhost:8428/vmalert/).
+To access `vmalert` use link [http://localhost:8428/vmalert](http://localhost:8428/vmalert/).
To access [vmui](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#vmui)
use link [http://localhost:8428/vmui](http://localhost:8428/vmui).
## VictoriaMetrics cluster
-VictoriaMetrics cluster environemnt consists of vminsert, vmstorage and vmselect components. vmselect
-has exposed port `:8481`, vminsert has exposed port `:8480` and the rest of components are available
-only inside of environment.
+VictoriaMetrics cluster environment consists of `vminsert`, `vmstorage` and `vmselect` components.
+`vmselect` has exposed port `:8481`, `vminsert` has exposed port `:8480` and the rest of components
+are available only inside the environment.
The communication scheme between components is the following:
-* [vmagent](#vmagent) sends scraped metrics to vminsert;
-* vminsert forwards data to vmstorage;
-* vmselect is connected to vmstorage for querying data;
-* [grafana](#grafana) is configured with datasource pointing to vmselect;
-* [vmalert](#vmalert) is configured to query vmselect and send alerts state
- and recording rules to vminsert;
-* [alertmanager](#alertmanager) is configured to receive notifications from vmalert.
+* [vmagent](#vmagent) sends scraped metrics to `vminsert`;
+* `vminsert` forwards data to `vmstorage`;
+* `vmselect` is connected to `vmstorage` for querying data;
+* [grafana](#grafana) is configured with datasource pointing to `vmselect`;
+* [vmalert](#vmalert) is configured to query `vmselect` and send alerts state
+ and recording rules to `vminsert`;
+* [alertmanager](#alertmanager) is configured to receive notifications from `vmalert`.
-To access `vmalert` via `vmselect`
-use link [http://localhost:8481/select/0/prometheus/vmalert](http://localhost:8481/select/0/prometheus/vmalert/).
+To access `vmalert` use link [http://localhost:8481/select/0/prometheus/vmalert](http://localhost:8481/select/0/prometheus/vmalert/).
To access [vmui](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#vmui)
use link [http://localhost:8481/select/0/prometheus/vmui](http://localhost:8481/select/0/prometheus/vmui).
## vmagent
-vmagent is used for scraping and pushing timeseries to
-VictoriaMetrics instance. It accepts Prometheus-compatible
-configuration `prometheus.yml` with listed targets for scraping.
+vmagent is used for scraping and pushing time series to a VictoriaMetrics instance.
+It accepts Prometheus-compatible configuration [prometheus.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/prometheus.yml)
+with listed targets for scraping.
[Web interface link](http://localhost:8429/).
## vmalert
-vmalert evaluates alerting rules (`alerts.yml`) to track VictoriaMetrics
-health state. It is connected with AlertManager for firing alerts,
+vmalert evaluates alerting rules [alerts.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/alerts.yml)
+to track VictoriaMetrics health state. It is connected with AlertManager for firing alerts,
and with VictoriaMetrics for executing queries and storing alert's state.
[Web interface link](http://localhost:8880/).
diff --git a/deployment/docker/alerts-cluster.yml b/deployment/docker/alerts-cluster.yml
index 1a99a08fb4..3e68bd6e36 100644
--- a/deployment/docker/alerts-cluster.yml
+++ b/deployment/docker/alerts-cluster.yml
@@ -18,8 +18,8 @@ groups:
ignoring(type) rate(vm_deduplicated_samples_total{type="merge"}[1d])
)
* scalar(
- sum(vm_data_size_bytes{type!="indexdb"}) /
- sum(vm_rows{type!="indexdb"})
+ sum(vm_data_size_bytes{type!~"indexdb.*"}) /
+ sum(vm_rows{type!~"indexdb.*"})
)
) < 3 * 24 * 3600 > 0
for: 30m
@@ -43,7 +43,7 @@ groups:
labels:
severity: critical
annotations:
- dashboard: http://localhost:3000/d/oS7Bi_0Wz?viewPanel=110&var-instance={{ $labels.instance }}"
+ dashboard: http://localhost:3000/d/oS7Bi_0Wz?viewPanel=200&var-instance={{ $labels.instance }}"
summary: "Instance {{ $labels.instance }} will run out of disk space soon"
description: "Disk utilisation on instance {{ $labels.instance }} is more than 80%.\n
Having less than 20% of free disk space could cripple merges processes and overall performance.
diff --git a/deployment/docker/alerts.yml b/deployment/docker/alerts.yml
index 5d478f0c76..efa3c5f7e7 100644
--- a/deployment/docker/alerts.yml
+++ b/deployment/docker/alerts.yml
@@ -18,8 +18,8 @@ groups:
ignoring(type) rate(vm_deduplicated_samples_total{type="merge"}[1d])
)
* scalar(
- sum(vm_data_size_bytes{type!="indexdb"}) /
- sum(vm_rows{type!="indexdb"})
+ sum(vm_data_size_bytes{type!~"indexdb.*"}) /
+ sum(vm_rows{type!~"indexdb.*"})
)
) < 3 * 24 * 3600 > 0
for: 30m
diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index eb4d4d9434..d73dc154a3 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -15,14 +15,51 @@ The following tip changes can be tested by building VictoriaMetrics components f
## tip
+**Update note 1:** this release drops support for direct upgrade from VictoriaMetrics versions prior to [v1.28.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.28.0). Please upgrade to `v1.84.0`, wait until the `finished round 2 of background conversion` line is emitted to the log by single-node VictoriaMetrics or by `vmstorage`, and then upgrade to newer releases.
+
+**Update note 2:** this release splits `type="indexdb"` metrics into `type="indexdb/inmemory"` and `type="indexdb/file"` metrics. This may break old dashboards and alerting rules, which contain a [label filter](https://docs.victoriametrics.com/keyConcepts.html#filtering) on `{type="indexdb"}`. Such a label filter must be substituted with `{type=~"indexdb.*"}`, so it matches `indexdb` from previous releases as well as `indexdb/inmemory` and `indexdb/file` from new releases. It is recommended to upgrade to the latest available dashboards and alerting rules mentioned in [these docs](https://docs.victoriametrics.com/#monitoring), since they already contain fixed label filters.
+
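+For example, a query filtering on the old `indexdb` type should be rewritten as shown below. `vm_data_size_bytes` is used here purely as an illustration (it is one of the metrics used by the dashboards in this repo); the same substitution applies to any query with a `{type="indexdb"}` filter:
+
+```
+# before: matches only the old type="indexdb" value
+sum(vm_data_size_bytes{type="indexdb"})
+
+# after: matches indexdb, indexdb/inmemory and indexdb/file
+sum(vm_data_size_bytes{type=~"indexdb.*"})
+```
+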
+* FEATURE: add `-inmemoryDataFlushInterval` command-line flag, which can be used for controlling the frequency of in-memory data flush to disk. The data flush frequency can be reduced when VictoriaMetrics stores data on a low-end flash device with a limited number of write cycles (for example, on a Raspberry Pi). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3337).
+* FEATURE: expose additional metrics for `indexdb` and `storage` parts stored in memory and for `indexdb` parts stored in files (see [storage docs](https://docs.victoriametrics.com/#storage) for technical details):
+ * `vm_active_merges{type="storage/inmemory"}` - active merges for in-memory `storage` parts
+ * `vm_active_merges{type="indexdb/inmemory"}` - active merges for in-memory `indexdb` parts
+ * `vm_active_merges{type="indexdb/file"}` - active merges for file-based `indexdb` parts
+ * `vm_merges_total{type="storage/inmemory"}` - the total merges for in-memory `storage` parts
+ * `vm_merges_total{type="indexdb/inmemory"}` - the total merges for in-memory `indexdb` parts
+ * `vm_merges_total{type="indexdb/file"}` - the total merges for file-based `indexdb` parts
+ * `vm_rows_merged_total{type="storage/inmemory"}` - the total rows merged for in-memory `storage` parts
+ * `vm_rows_merged_total{type="indexdb/inmemory"}` - the total rows merged for in-memory `indexdb` parts
+ * `vm_rows_merged_total{type="indexdb/file"}` - the total rows merged for file-based `indexdb` parts
+ * `vm_rows_deleted_total{type="storage/inmemory"}` - the total rows deleted for in-memory `storage` parts
+ * `vm_assisted_merges_total{type="storage/inmemory"}` - the total number of assisted merges for in-memory `storage` parts
+ * `vm_assisted_merges_total{type="indexdb/inmemory"}` - the total number of assisted merges for in-memory `indexdb` parts
+ * `vm_parts{type="storage/inmemory"}` - the total number of in-memory `storage` parts
+ * `vm_parts{type="indexdb/inmemory"}` - the total number of in-memory `indexdb` parts
+ * `vm_parts{type="indexdb/file"}` - the total number of file-based `indexdb` parts
+ * `vm_blocks{type="storage/inmemory"}` - the total number of in-memory `storage` blocks
+ * `vm_blocks{type="indexdb/inmemory"}` - the total number of in-memory `indexdb` blocks
+ * `vm_blocks{type="indexdb/file"}` - the total number of file-based `indexdb` blocks
+ * `vm_data_size_bytes{type="storage/inmemory"}` - the total size of in-memory `storage` blocks
+ * `vm_data_size_bytes{type="indexdb/inmemory"}` - the total size of in-memory `indexdb` blocks
+ * `vm_data_size_bytes{type="indexdb/file"}` - the total size of file-based `indexdb` blocks
+ * `vm_rows{type="storage/inmemory"}` - the total number of in-memory `storage` rows
+ * `vm_rows{type="indexdb/inmemory"}` - the total number of in-memory `indexdb` rows
+ * `vm_rows{type="indexdb/file"}` - the total number of file-based `indexdb` rows
+* FEATURE: [DataDog parser](https://docs.victoriametrics.com/#how-to-send-data-from-datadog-agent): add `device` tag when the `device` field is present in the `series` object of the input request. Thanks to @PerGon for the provided [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3431).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): improve [service discovery](https://docs.victoriametrics.com/sd_configs.html) performance when discovering big number of targets (10K and more).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add `exported_` prefix to metric names exported by scrape targets if these metric names clash with [automatically generated metrics](https://docs.victoriametrics.com/vmagent.html#automatically-generated-metrics) such as `up`, `scrape_samples_scraped`, etc. This prevents from corruption of automatically generated metrics. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3406).
* FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): improve error message when the requested path cannot be properly parsed, so users could identify the issue and properly fix the path. Now the error message links to [url format docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3402).
-* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to copy data from sources via Prometheus `remote_read` protocol. See [these docs](https://docs.victoriametrics.com/vmctl.html#migrating-data-by-remote-read-protocol). The related issues: [one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3132) and [two](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1101).
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `-remoteWrite.sendTimeout` command-line flag, which allows configuring timeout for sending data to `-remoteWrite.url`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3408).
+* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to migrate data between VictoriaMetrics clusters with automatic tenants discovery. See [these docs](https://docs.victoriametrics.com/vmctl.html#cluster-to-cluster-migration-mode) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2930).
+* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to copy data from sources via Prometheus `remote_read` protocol. See [these docs](https://docs.victoriametrics.com/vmctl.html#migrating-data-by-remote-read-protocol). The related issues: [one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3132) and [two](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1101).
+* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): allow changing timezones for the requested data. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3075).
+* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): provide a fast path for hiding results for all the queries except the given one by clicking the `eye` icon with the `ctrl` key pressed. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3446).
+* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): add `range_trim_spikes(phi, q)` function for trimming `phi` percent of the largest spikes per each time series returned by `q`. See [these docs](https://docs.victoriametrics.com/MetricsQL.html#range_trim_spikes).
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): properly pass HTTP headers during the alert state restore procedure. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3418).
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): properly specify rule evaluation step during the [replay mode](https://docs.victoriametrics.com/vmalert.html#rules-backfilling). The `step` value was previously overriden by `-datasource.queryStep` command-line flag.
+* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): properly put multi-line queries in the url, so they can be copy-n-pasted and opened without issues in a new browser tab. Previously the url for a multi-line query couldn't be opened. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3444).
+* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): correctly handle `up` and `down` keypresses when editing multi-line queries. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3445).
## [v1.84.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.84.0)
diff --git a/docs/MetricsQL.md b/docs/MetricsQL.md
index 67bd9aa610..9b0a8c447f 100644
--- a/docs/MetricsQL.md
+++ b/docs/MetricsQL.md
@@ -1247,6 +1247,11 @@ per each time series returned by `q` on the selected time range.
`range_sum(q)` is a [transform function](#transform-functions), which calculates the sum of points per each time series returned by `q`.
+#### range_trim_spikes
+
+`range_trim_spikes(phi, q)` is a [transform function](#transform-functions), which drops `phi` percent of the biggest spikes from each time series returned by `q`.
+The `phi` value must be in the range `[0..1]`, where `0` means `0%` and `1` means `100%`.
+
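+For example, the following query drops the top 10% of the largest spikes per each returned time series (`process_resident_memory_bytes` is used here only as an illustration):
+
+```
+range_trim_spikes(0.1, process_resident_memory_bytes)
+```
+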
#### remove_resets
`remove_resets(q)` is a [transform function](#transform-functions), which removes counter resets from time series returned by `q`.
diff --git a/docs/README.md b/docs/README.md
index dce732e898..c9d5f9ea95 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -276,7 +276,7 @@ It also provides the following features:
- [query tracer](#query-tracing)
- [top queries explorer](#top-queries)
-Graphs in vmui support scrolling and zooming:
+Graphs in `vmui` support scrolling and zooming:
* Select the needed time range on the graph in order to zoom in into the selected time range. Hold `ctrl` (or `cmd` on MacOS) and scroll down in order to zoom out.
* Hold `ctrl` (or `cmd` on MacOS) and scroll up in order to zoom in the area under cursor.
@@ -294,6 +294,8 @@ VMUI allows investigating correlations between multiple queries on the same grap
enter an additional query in the newly appeared input field and press `Enter`.
Results for all the queries are displayed simultaneously on the same graph.
Graphs for a particular query can be temporarily hidden by clicking the `eye` icon on the right side of the input field.
+When the `eye` icon is clicked while holding the `ctrl` key, the results for all the queries
+except the current one become hidden.
See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
@@ -1364,18 +1366,50 @@ It is recommended passing different `-promscrape.cluster.name` values to HA pair
## Storage
-VictoriaMetrics stores time series data in [MergeTree](https://en.wikipedia.org/wiki/Log-structured_merge-tree)-like
-data structures. On insert, VictoriaMetrics accumulates up to 1s of data and dumps it on disk to
-`<-storageDataPath>/data/small/YYYY_MM/` subdirectory forming a `part` with the following
-name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`. Each part consists of two "columns":
-values and timestamps. These are sorted and compressed raw time series values. Additionally, part contains
-index files for searching for specific series in the values and timestamps files.
+VictoriaMetrics buffers the ingested data in memory for up to a second. Then the buffered data is written to in-memory `parts`,
+which can be searched during queries. The in-memory `parts` are periodically persisted to disk, so they can survive an unclean shutdown
+such as an out-of-memory crash, hardware power loss or a `SIGKILL` signal. The interval for flushing the in-memory data to disk
+can be configured with the `-inmemoryDataFlushInterval` command-line flag (note that a too short flush interval may significantly increase disk IO).
-`Parts` are periodically merged into the bigger parts. The resulting `part` is constructed
-under `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` subdirectory.
-When the resulting `part` is complete, it is atomically moved from the `tmp`
-to its own subdirectory, while the source parts are atomically removed. The end result is that the source
-parts are substituted by a single resulting bigger `part` in the `<-storageDataPath>/data/{small,big}/YYYY_MM/` directory.
+In-memory parts are persisted to disk into `part` directories under the `<-storageDataPath>/data/small/YYYY_MM/` folder,
+where `YYYY_MM` is the month partition for the stored data. For example, `2022_11` is the partition for `parts`
+with [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) from `November 2022`.
+
+The `part` directory has the following name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`, where:
+
+- `rowsCount` - the number of [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) stored in the part
+- `blocksCount` - the number of blocks stored in the part (see details about blocks below)
+- `minTimestamp` and `maxTimestamp` - minimum and maximum timestamps across raw samples stored in the part
+
+Each `part` consists of `blocks` sorted by internal time series id (aka `TSID`).
+Each `block` contains up to 8K [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples),
+which belong to a single [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series).
+Raw samples in each block are sorted by `timestamp`. Blocks for the same time series are sorted
+by the `timestamp` of the first sample. Timestamps and values for all the blocks
+are stored in [compressed form](https://faun.pub/victoriametrics-achieving-better-compression-for-time-series-data-than-gorilla-317bc1f95932)
+in separate files under `part` directory - `timestamps.bin` and `values.bin`.
+
+The `part` directory also contains `index.bin` and `metaindex.bin` files - these files contain an index
+for fast lookups of blocks that belong to the given `TSID` and cover the given time range.
+
+`Parts` are periodically merged into bigger parts in the background. The background merge provides the following benefits:
+
+* keeping the number of data files under control, so they don't exceed limits on open files
+* improved data compression, since bigger parts are usually compressed better than smaller parts
+* improved query speed, since queries over smaller number of parts are executed faster
+* various background maintenance tasks such as [de-duplication](#deduplication), [downsampling](#downsampling)
+ and [freeing up disk space for the deleted time series](#how-to-delete-time-series) are performed during the merge
+
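+The background merge activity can be observed via metrics such as `vm_active_merges` and `vm_rows_merged_total`
+exposed by VictoriaMetrics (see [monitoring docs](#monitoring)). For example, the following queries (an illustrative sketch)
+show active merges and merge throughput per part type:
+
+```
+# currently active merges per part type
+sum(vm_active_merges) by (type)
+
+# merge throughput in rows per second per part type
+sum(rate(vm_rows_merged_total[5m])) by (type)
+```
+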
+Newly added `parts` either successfully appear in the storage or fail to appear.
+A newly added `part` is first created in a temporary directory under the `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` folder.
+When the new `part` is fully written and [fsynced](https://man7.org/linux/man-pages/man2/fsync.2.html)
+to the temporary directory, it is atomically moved to the storage directory.
+Thanks to this algorithm, the storage never contains partially created parts, even if a hardware power-off
+occurs in the middle of writing the `part` to disk - such incompletely written `parts`
+are automatically deleted on the next VictoriaMetrics start.
+
+The same applies to the merge process: `parts` are either fully merged into a new `part` or fail to merge,
+leaving the source `parts` untouched.
VictoriaMetrics doesn't merge parts if their summary size exceeds free disk space.
This prevents from potential out of disk space errors during merge.
@@ -1384,24 +1418,10 @@ This increases overhead during data querying, since VictoriaMetrics needs to rea
bigger number of parts per each request. That's why it is recommended to have at least 20%
of free disk space under directory pointed by `-storageDataPath` command-line flag.
-Information about merging process is available in [single-node VictoriaMetrics](https://grafana.com/dashboards/10229)
-and [clustered VictoriaMetrics](https://grafana.com/grafana/dashboards/11176) Grafana dashboards.
+Information about the merging process is available in [the dashboard for single-node VictoriaMetrics](https://grafana.com/dashboards/10229)
+and [the dashboard for VictoriaMetrics cluster](https://grafana.com/grafana/dashboards/11176).
See more details in [monitoring docs](#monitoring).
-The `merge` process improves compression rate and keeps number of `parts` on disk relatively low.
-Benefits of doing the merge process are the following:
-
-* it improves query performance, since lower number of `parts` are inspected with each query
-* it reduces the number of data files, since each `part` contains fixed number of files
-* various background maintenance tasks such as [de-duplication](#deduplication), [downsampling](#downsampling)
- and [freeing up disk space for the deleted time series](#how-to-delete-time-series) are performed during the merge.
-
-Newly added `parts` either appear in the storage or fail to appear.
-Storage never contains partially created parts. The same applies to merge process — `parts` are either fully
-merged into a new `part` or fail to merge. MergeTree doesn't contain partially merged `parts`.
-`Part` contents in MergeTree never change. Parts are immutable. They may be only deleted after the merge
-to a bigger `part` or when the `part` contents goes outside the configured `-retentionPeriod`.
-
See [this article](https://valyala.medium.com/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) for more details.
See also [how to work with snapshots](#how-to-work-with-snapshots).
@@ -1724,10 +1744,11 @@ and [cardinality explorer docs](#cardinality-explorer).
* VictoriaMetrics buffers incoming data in memory for up to a few seconds before flushing it to persistent storage.
This may lead to the following "issues":
- * Data becomes available for querying in a few seconds after inserting. It is possible to flush in-memory buffers to persistent storage
+ * Data becomes available for querying in a few seconds after inserting. It is possible to flush in-memory buffers to searchable parts
by requesting `/internal/force_flush` http handler. This handler is mostly needed for testing and debugging purposes.
* The last few seconds of inserted data may be lost on unclean shutdown (i.e. OOM, `kill -9` or hardware reset).
- See [this article for technical details](https://valyala.medium.com/wal-usage-looks-broken-in-modern-time-series-databases-b62a627ab704).
+ The `-inmemoryDataFlushInterval` command-line flag allows controlling the frequency of in-memory data flush to persistent storage.
+ See [storage docs](#storage) and [this article](https://valyala.medium.com/wal-usage-looks-broken-in-modern-time-series-databases-b62a627ab704) for more details.
* If VictoriaMetrics works slowly and eats more than a CPU core per 100K ingested data points per second,
then it is likely you have too many [active time series](https://docs.victoriametrics.com/FAQ.html#what-is-an-active-time-series) for the current amount of RAM.
@@ -2134,6 +2155,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metic name if InfluxDB line contains only a single field
-influxTrimTimestamp duration
Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
+ -inmemoryDataFlushInterval duration
+ The interval for guaranteed saving of in-memory data to disk. The saved data survives unclean shutdown such as OOM crash, hardware reset, SIGKILL, etc. Bigger intervals may help increasing lifetime of flash storage with limited write cycles (e.g. Raspberry PI). Smaller intervals increase disk IO load. Minimum supported value is 1s (default 5s)
-insert.maxQueueDuration duration
The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s)
-logNewSeries
diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md
index 7a9908aabd..aff18a8497 100644
--- a/docs/Single-server-VictoriaMetrics.md
+++ b/docs/Single-server-VictoriaMetrics.md
@@ -279,7 +279,7 @@ It also provides the following features:
- [query tracer](#query-tracing)
- [top queries explorer](#top-queries)
-Graphs in vmui support scrolling and zooming:
+Graphs in `vmui` support scrolling and zooming:
* Select the needed time range on the graph in order to zoom in into the selected time range. Hold `ctrl` (or `cmd` on MacOS) and scroll down in order to zoom out.
* Hold `ctrl` (or `cmd` on MacOS) and scroll up in order to zoom in the area under cursor.
@@ -297,6 +297,8 @@ VMUI allows investigating correlations between multiple queries on the same grap
enter an additional query in the newly appeared input field and press `Enter`.
Results for all the queries are displayed simultaneously on the same graph.
Graphs for a particular query can be temporarily hidden by clicking the `eye` icon on the right side of the input field.
+When the `eye` icon is clicked while holding the `ctrl` key, the results for all the queries
+except the current one become hidden.
See the [example VMUI at VictoriaMetrics playground](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/prometheus/graph/?g0.expr=100%20*%20sum(rate(process_cpu_seconds_total))%20by%20(job)&g0.range_input=1d).
@@ -1367,18 +1369,50 @@ It is recommended passing different `-promscrape.cluster.name` values to HA pair
## Storage
-VictoriaMetrics stores time series data in [MergeTree](https://en.wikipedia.org/wiki/Log-structured_merge-tree)-like
-data structures. On insert, VictoriaMetrics accumulates up to 1s of data and dumps it on disk to
-`<-storageDataPath>/data/small/YYYY_MM/` subdirectory forming a `part` with the following
-name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`. Each part consists of two "columns":
-values and timestamps. These are sorted and compressed raw time series values. Additionally, part contains
-index files for searching for specific series in the values and timestamps files.
+VictoriaMetrics buffers the ingested data in memory for up to a second. Then the buffered data is written to in-memory `parts`,
+which can be searched during queries. The in-memory `parts` are periodically persisted to disk, so they can survive an unclean shutdown
+such as an out-of-memory crash, hardware power loss or a `SIGKILL` signal. The interval for flushing the in-memory data to disk
+can be configured with the `-inmemoryDataFlushInterval` command-line flag (note that a too short flush interval may significantly increase disk IO).
-`Parts` are periodically merged into the bigger parts. The resulting `part` is constructed
-under `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` subdirectory.
-When the resulting `part` is complete, it is atomically moved from the `tmp`
-to its own subdirectory, while the source parts are atomically removed. The end result is that the source
-parts are substituted by a single resulting bigger `part` in the `<-storageDataPath>/data/{small,big}/YYYY_MM/` directory.
+In-memory parts are persisted to disk into `part` directories under the `<-storageDataPath>/data/small/YYYY_MM/` folder,
+where `YYYY_MM` is the month partition for the stored data. For example, `2022_11` is the partition for `parts`
+with [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) from `November 2022`.
+
+The `part` directory has the following name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`, where:
+
+- `rowsCount` - the number of [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) stored in the part
+- `blocksCount` - the number of blocks stored in the part (see details about blocks below)
+- `minTimestamp` and `maxTimestamp` - minimum and maximum timestamps across raw samples stored in the part
+
+Each `part` consists of `blocks` sorted by internal time series id (aka `TSID`).
+Each `block` contains up to 8K [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples),
+which belong to a single [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series).
+Raw samples in each block are sorted by `timestamp`. Blocks for the same time series are sorted
+by the `timestamp` of the first sample. Timestamps and values for all the blocks
+are stored in [compressed form](https://faun.pub/victoriametrics-achieving-better-compression-for-time-series-data-than-gorilla-317bc1f95932)
+in separate files under `part` directory - `timestamps.bin` and `values.bin`.
+
+The `part` directory also contains `index.bin` and `metaindex.bin` files - these files contain an index
+for fast lookups of blocks that belong to the given `TSID` and cover the given time range.
+
+`Parts` are periodically merged into bigger parts in the background. The background merge provides the following benefits:
+
+* keeping the number of data files under control, so they don't exceed limits on open files
+* improved data compression, since bigger parts are usually compressed better than smaller parts
+* improved query speed, since queries over smaller number of parts are executed faster
+* various background maintenance tasks such as [de-duplication](#deduplication), [downsampling](#downsampling)
+ and [freeing up disk space for the deleted time series](#how-to-delete-time-series) are performed during the merge
+
+Newly added `parts` either successfully appear in the storage or fail to appear.
+A newly added `part` is first created in a temporary directory under the `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` folder.
+When the new `part` is fully written and [fsynced](https://man7.org/linux/man-pages/man2/fsync.2.html)
+to the temporary directory, it is atomically moved to the storage directory.
+Thanks to this algorithm, the storage never contains partially created parts, even if a hardware power-off
+occurs in the middle of writing the `part` to disk - such incompletely written `parts`
+are automatically deleted on the next VictoriaMetrics start.
+
+The same applies to the merge process: `parts` are either fully merged into a new `part` or fail to merge,
+leaving the source `parts` untouched.
VictoriaMetrics doesn't merge parts if their summary size exceeds free disk space.
This prevents from potential out of disk space errors during merge.
@@ -1387,24 +1421,10 @@ This increases overhead during data querying, since VictoriaMetrics needs to rea
bigger number of parts per each request. That's why it is recommended to have at least 20%
of free disk space under directory pointed by `-storageDataPath` command-line flag.
-Information about merging process is available in [single-node VictoriaMetrics](https://grafana.com/dashboards/10229)
-and [clustered VictoriaMetrics](https://grafana.com/grafana/dashboards/11176) Grafana dashboards.
+Information about the merging process is available in [the dashboard for single-node VictoriaMetrics](https://grafana.com/dashboards/10229)
+and [the dashboard for VictoriaMetrics cluster](https://grafana.com/grafana/dashboards/11176).
See more details in [monitoring docs](#monitoring).
-The `merge` process improves compression rate and keeps number of `parts` on disk relatively low.
-Benefits of doing the merge process are the following:
-
-* it improves query performance, since lower number of `parts` are inspected with each query
-* it reduces the number of data files, since each `part` contains fixed number of files
-* various background maintenance tasks such as [de-duplication](#deduplication), [downsampling](#downsampling)
- and [freeing up disk space for the deleted time series](#how-to-delete-time-series) are performed during the merge.
-
-Newly added `parts` either appear in the storage or fail to appear.
-Storage never contains partially created parts. The same applies to merge process — `parts` are either fully
-merged into a new `part` or fail to merge. MergeTree doesn't contain partially merged `parts`.
-`Part` contents in MergeTree never change. Parts are immutable. They may be only deleted after the merge
-to a bigger `part` or when the `part` contents goes outside the configured `-retentionPeriod`.
-
See [this article](https://valyala.medium.com/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) for more details.
See also [how to work with snapshots](#how-to-work-with-snapshots).
@@ -1727,10 +1747,11 @@ and [cardinality explorer docs](#cardinality-explorer).
* VictoriaMetrics buffers incoming data in memory for up to a few seconds before flushing it to persistent storage.
This may lead to the following "issues":
- * Data becomes available for querying in a few seconds after inserting. It is possible to flush in-memory buffers to persistent storage
+ * Data becomes available for querying in a few seconds after inserting. It is possible to flush in-memory buffers to searchable parts
by requesting `/internal/force_flush` http handler. This handler is mostly needed for testing and debugging purposes.
* The last few seconds of inserted data may be lost on unclean shutdown (i.e. OOM, `kill -9` or hardware reset).
- See [this article for technical details](https://valyala.medium.com/wal-usage-looks-broken-in-modern-time-series-databases-b62a627ab704).
+ The `-inmemoryDataFlushInterval` command-line flag allows controlling the frequency of in-memory data flush to persistent storage.
+ See [storage docs](#storage) and [this article](https://valyala.medium.com/wal-usage-looks-broken-in-modern-time-series-databases-b62a627ab704) for more details.
* If VictoriaMetrics works slowly and eats more than a CPU core per 100K ingested data points per second,
then it is likely you have too many [active time series](https://docs.victoriametrics.com/FAQ.html#what-is-an-active-time-series) for the current amount of RAM.
@@ -2137,6 +2158,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metic name if InfluxDB line contains only a single field
-influxTrimTimestamp duration
Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms)
+ -inmemoryDataFlushInterval duration
+ The interval for guaranteed saving of in-memory data to disk. The saved data survives unclean shutdown such as OOM crash, hardware reset, SIGKILL, etc. Bigger intervals may help increasing lifetime of flash storage with limited write cycles (e.g. Raspberry PI). Smaller intervals increase disk IO load. Minimum supported value is 1s (default 5s)
-insert.maxQueueDuration duration
The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s)
-logNewSeries
diff --git a/docs/vmctl.md b/docs/vmctl.md
index 6f77c01f34..dbbaf6e2cd 100644
--- a/docs/vmctl.md
+++ b/docs/vmctl.md
@@ -837,6 +837,80 @@ Total: 16 B ↗ Speed: 186.32 KiB p/s
2022/08/30 19:48:24 Total time: 12.680582ms
```
+#### Cluster-to-cluster migration mode
+
+Using cluster-to-cluster migration mode helps to migrate all tenants' data in a single `vmctl` run.
+
+Cluster-to-cluster mode uses the `/admin/tenants` endpoint (available starting from [v1.84.0](https://docs.victoriametrics.com/CHANGELOG.html#v1840)) to discover the list of tenants in the source cluster.
+
+To use this mode, set the `--vm-intercluster` flag to `true`, the `--vm-native-src-addr` flag to `http://vmselect:8481/` and the `--vm-native-dst-addr` flag to `http://vminsert:8480/`:
+
+```console
+./bin/vmctl vm-native --vm-intercluster=true --vm-native-src-addr=http://localhost:8481/ --vm-native-dst-addr=http://172.17.0.3:8480/
+VictoriaMetrics Native import mode
+2022/12/05 21:20:06 Discovered tenants: [123:1 12812919:1 1289198:1 1289:1283 12:1 1:0 1:1 1:1231231 1:1271727 1:12819 1:281 812891298:1]
+2022/12/05 21:20:06 Initing export pipe from "http://localhost:8481/select/123:1/prometheus/api/v1/export/native" with filters:
+ filter: match[]={__name__!=""}
+Initing import process to "http://172.17.0.3:8480/insert/123:1/prometheus/api/v1/import/native":
+Total: 61.13 MiB ↖ Speed: 2.05 MiB p/s
+Total: 61.13 MiB ↗ Speed: 2.30 MiB p/s
+2022/12/05 21:20:33 Initing export pipe from "http://localhost:8481/select/12812919:1/prometheus/api/v1/export/native" with filters:
+ filter: match[]={__name__!=""}
+Initing import process to "http://172.17.0.3:8480/insert/12812919:1/prometheus/api/v1/import/native":
+Total: 43.14 MiB ↘ Speed: 1.86 MiB p/s
+Total: 43.14 MiB ↙ Speed: 2.36 MiB p/s
+2022/12/05 21:20:51 Initing export pipe from "http://localhost:8481/select/1289198:1/prometheus/api/v1/export/native" with filters:
+ filter: match[]={__name__!=""}
+Initing import process to "http://172.17.0.3:8480/insert/1289198:1/prometheus/api/v1/import/native":
+Total: 16.64 MiB ↗ Speed: 2.66 MiB p/s
+Total: 16.64 MiB ↘ Speed: 2.19 MiB p/s
+2022/12/05 21:20:59 Initing export pipe from "http://localhost:8481/select/1289:1283/prometheus/api/v1/export/native" with filters:
+ filter: match[]={__name__!=""}
+Initing import process to "http://172.17.0.3:8480/insert/1289:1283/prometheus/api/v1/import/native":
+Total: 43.33 MiB ↙ Speed: 1.94 MiB p/s
+Total: 43.33 MiB ↖ Speed: 2.35 MiB p/s
+2022/12/05 21:21:18 Initing export pipe from "http://localhost:8481/select/12:1/prometheus/api/v1/export/native" with filters:
+ filter: match[]={__name__!=""}
+Initing import process to "http://172.17.0.3:8480/insert/12:1/prometheus/api/v1/import/native":
+Total: 63.78 MiB ↙ Speed: 1.96 MiB p/s
+Total: 63.78 MiB ↖ Speed: 2.28 MiB p/s
+2022/12/05 21:21:46 Initing export pipe from "http://localhost:8481/select/1:0/prometheus/api/v1/export/native" with filters:
+ filter: match[]={__name__!=""}
+Initing import process to "http://172.17.0.3:8480/insert/1:0/prometheus/api/v1/import/native":
+2022/12/05 21:21:46 Import finished!
+Total: 330 B ↗ Speed: 3.53 MiB p/s
+2022/12/05 21:21:46 Initing export pipe from "http://localhost:8481/select/1:1/prometheus/api/v1/export/native" with filters:
+ filter: match[]={__name__!=""}
+Initing import process to "http://172.17.0.3:8480/insert/1:1/prometheus/api/v1/import/native":
+Total: 63.81 MiB ↙ Speed: 1.96 MiB p/s
+Total: 63.81 MiB ↖ Speed: 2.28 MiB p/s
+2022/12/05 21:22:14 Initing export pipe from "http://localhost:8481/select/1:1231231/prometheus/api/v1/export/native" with filters:
+ filter: match[]={__name__!=""}
+Initing import process to "http://172.17.0.3:8480/insert/1:1231231/prometheus/api/v1/import/native":
+Total: 63.84 MiB ↙ Speed: 1.93 MiB p/s
+Total: 63.84 MiB ↖ Speed: 2.29 MiB p/s
+2022/12/05 21:22:42 Initing export pipe from "http://localhost:8481/select/1:1271727/prometheus/api/v1/export/native" with filters:
+ filter: match[]={__name__!=""}
+Initing import process to "http://172.17.0.3:8480/insert/1:1271727/prometheus/api/v1/import/native":
+Total: 54.37 MiB ↘ Speed: 1.90 MiB p/s
+Total: 54.37 MiB ↙ Speed: 2.37 MiB p/s
+2022/12/05 21:23:05 Initing export pipe from "http://localhost:8481/select/1:12819/prometheus/api/v1/export/native" with filters:
+ filter: match[]={__name__!=""}
+Initing import process to "http://172.17.0.3:8480/insert/1:12819/prometheus/api/v1/import/native":
+Total: 17.01 MiB ↙ Speed: 1.75 MiB p/s
+Total: 17.01 MiB ↖ Speed: 2.15 MiB p/s
+2022/12/05 21:23:13 Initing export pipe from "http://localhost:8481/select/1:281/prometheus/api/v1/export/native" with filters:
+ filter: match[]={__name__!=""}
+Initing import process to "http://172.17.0.3:8480/insert/1:281/prometheus/api/v1/import/native":
+Total: 63.89 MiB ↘ Speed: 1.90 MiB p/s
+Total: 63.89 MiB ↙ Speed: 2.29 MiB p/s
+2022/12/05 21:23:42 Initing export pipe from "http://localhost:8481/select/812891298:1/prometheus/api/v1/export/native" with filters:
+ filter: match[]={__name__!=""}
+Initing import process to "http://172.17.0.3:8480/insert/812891298:1/prometheus/api/v1/import/native":
+Total: 63.84 MiB ↖ Speed: 1.99 MiB p/s
+Total: 63.84 MiB ↗ Speed: 2.26 MiB p/s
+2022/12/05 21:24:10 Total time: 4m4.1466565s
+```
## Verifying exported blocks from VictoriaMetrics
diff --git a/go.mod b/go.mod
index b3a7728c72..1cda345523 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/VictoriaMetrics/VictoriaMetrics
go 1.19
require (
- cloud.google.com/go/storage v1.28.0
+ cloud.google.com/go/storage v1.28.1
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1
github.com/VictoriaMetrics/fastcache v1.12.0
@@ -12,12 +12,12 @@ require (
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
github.com/VictoriaMetrics/fasthttp v1.1.0
github.com/VictoriaMetrics/metrics v1.23.0
- github.com/VictoriaMetrics/metricsql v0.49.1
- github.com/aws/aws-sdk-go-v2 v1.17.1
- github.com/aws/aws-sdk-go-v2/config v1.18.3
- github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.42
- github.com/aws/aws-sdk-go-v2/service/s3 v1.29.4
- github.com/cespare/xxhash/v2 v2.1.2
+ github.com/VictoriaMetrics/metricsql v0.50.0
+ github.com/aws/aws-sdk-go-v2 v1.17.2
+ github.com/aws/aws-sdk-go-v2/config v1.18.4
+ github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.43
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.29.5
+ github.com/cespare/xxhash/v2 v2.2.0
github.com/cheggaaa/pb/v3 v3.1.0
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/fatih/color v1.13.0 // indirect
@@ -31,44 +31,44 @@ require (
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/oklog/ulid v1.3.1
github.com/prometheus/common v0.37.0 // indirect
- github.com/prometheus/prometheus v0.40.4
- github.com/urfave/cli/v2 v2.23.5
+ github.com/prometheus/prometheus v0.40.5
+ github.com/urfave/cli/v2 v2.23.6
github.com/valyala/fastjson v1.6.3
github.com/valyala/fastrand v1.1.0
github.com/valyala/fasttemplate v1.2.2
github.com/valyala/gozstd v1.17.0
github.com/valyala/quicktemplate v1.7.0
- golang.org/x/net v0.2.0
+ golang.org/x/net v0.3.0
golang.org/x/oauth2 v0.2.0
- golang.org/x/sys v0.2.0
+ golang.org/x/sys v0.3.0
google.golang.org/api v0.103.0
gopkg.in/yaml.v2 v2.4.0
)
require (
cloud.google.com/go v0.107.0 // indirect
- cloud.google.com/go/compute v1.12.1 // indirect
- cloud.google.com/go/compute/metadata v0.2.1 // indirect
- cloud.google.com/go/iam v0.7.0 // indirect
+ cloud.google.com/go/compute v1.14.0 // indirect
+ cloud.google.com/go/compute/metadata v0.2.2 // indirect
+ cloud.google.com/go/iam v0.8.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
- github.com/aws/aws-sdk-go v1.44.149 // indirect
- github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.9 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.13.3 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19 // indirect
- github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26 // indirect
- github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.16 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.10 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.20 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.19 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.11.25 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.17.5 // indirect
- github.com/aws/smithy-go v1.13.4 // indirect
+ github.com/aws/aws-sdk-go v1.44.153 // indirect
+ github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.13.4 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.17 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.21 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.20 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 // indirect
+ github.com/aws/smithy-go v1.13.5 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dennwc/varint v1.0.0 // indirect
@@ -101,19 +101,19 @@ require (
github.com/valyala/histogram v1.2.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 // indirect
- go.opentelemetry.io/otel v1.11.1 // indirect
- go.opentelemetry.io/otel/metric v0.33.0 // indirect
- go.opentelemetry.io/otel/trace v1.11.1 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 // indirect
+ go.opentelemetry.io/otel v1.11.2 // indirect
+ go.opentelemetry.io/otel/metric v0.34.0 // indirect
+ go.opentelemetry.io/otel/trace v1.11.2 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/goleak v1.2.0 // indirect
- golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 // indirect
+ golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect
golang.org/x/sync v0.1.0 // indirect
- golang.org/x/text v0.4.0 // indirect
- golang.org/x/time v0.2.0 // indirect
+ golang.org/x/text v0.5.0 // indirect
+ golang.org/x/time v0.3.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
- google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect
+ google.golang.org/genproto v0.0.0-20221205194025-8222ab48f5fc // indirect
google.golang.org/grpc v1.51.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index 9a29ee29fe..40941e6879 100644
--- a/go.sum
+++ b/go.sum
@@ -21,14 +21,14 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0=
-cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
-cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
-cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
+cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0=
+cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
+cloud.google.com/go/compute/metadata v0.2.2 h1:aWKAjYaBaOSrpKl57+jnS/3fJRQnxL7TvR/u1VVbt6k=
+cloud.google.com/go/compute/metadata v0.2.2/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/iam v0.7.0 h1:k4MuwOsS7zGJJ+QfZ5vBK8SgHBAvYN/23BWsiihJ1vs=
-cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg=
+cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk=
+cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
@@ -39,8 +39,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.28.0 h1:DLrIZ6xkeZX6K70fU/boWx5INJumt6f+nwwWSHXzzGY=
-cloud.google.com/go/storage v1.28.0/go.mod h1:qlgZML35PXA3zoEnIkiPLY4/TOkUleufRlu6qmcf7sI=
+cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI=
+cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0 h1:sVW/AFBTGyJxDaMYlq0ct3jUXTtj12tQ6zE2GZUgVQw=
@@ -71,8 +71,8 @@ github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR
github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
github.com/VictoriaMetrics/metrics v1.23.0 h1:WzfqyzCaxUZip+OBbg1+lV33WChDSu4ssYII3nxtpeA=
github.com/VictoriaMetrics/metrics v1.23.0/go.mod h1:rAr/llLpEnAdTehiNlUxKgnjcOuROSzpw0GvjpEbvFc=
-github.com/VictoriaMetrics/metricsql v0.49.1 h1:9JAbpiZhlQnylclcf5xNtYRaBd5dr2CTPQ85RIoruuk=
-github.com/VictoriaMetrics/metricsql v0.49.1/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
+github.com/VictoriaMetrics/metricsql v0.50.0 h1:MCBhjn1qlfMqPGP6HiR9JgmEw7oTRGm/O8YwSeoaI1E=
+github.com/VictoriaMetrics/metricsql v0.50.0/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
@@ -89,54 +89,55 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.44.149 h1:zTWaUTbSjgMHvwhaQ91s/6ER8wMb3mA8M1GCZFO9QIo=
-github.com/aws/aws-sdk-go v1.44.149/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
-github.com/aws/aws-sdk-go-v2 v1.17.1 h1:02c72fDJr87N8RAC2s3Qu0YuvMRZKNZJ9F+lAehCazk=
-github.com/aws/aws-sdk-go-v2 v1.17.1/go.mod h1:JLnGeGONAyi2lWXI1p0PCIOIy333JMVK1U7Hf0aRFLw=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.9 h1:RKci2D7tMwpvGpDNZnGQw9wk6v7o/xSwFcUAuNPoB8k=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.9/go.mod h1:vCmV1q1VK8eoQJ5+aYE7PkK1K6v41qJ5pJdK3ggCDvg=
-github.com/aws/aws-sdk-go-v2/config v1.18.3 h1:3kfBKcX3votFX84dm00U8RGA1sCCh3eRMOGzg5dCWfU=
-github.com/aws/aws-sdk-go-v2/config v1.18.3/go.mod h1:BYdrbeCse3ZnOD5+2/VE/nATOK8fEUpBtmPMdKSyhMU=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.3 h1:ur+FHdp4NbVIv/49bUjBW+FE7e57HOo03ELodttmagk=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.3/go.mod h1:/rOMmqYBcFfNbRPU0iN9IgGqD5+V2yp3iWNmIlz0wI4=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19 h1:E3PXZSI3F2bzyj6XxUXdTIfvp425HHhwKsFvmzBwHgs=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19/go.mod h1:VihW95zQpeKQWVPGkwT+2+WJNQV8UXFfMTWdU6VErL8=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.42 h1:bxgBYvvBh+W1RnNYP4ROXEB8N+HSSucDszfE7Rb+kfU=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.42/go.mod h1:LHOsygMiW/14CkFxdXxvzKyMh3jbk/QfZVaDtCbLkl8=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25 h1:nBO/RFxeq/IS5G9Of+ZrgucRciie2qpLy++3UGZ+q2E=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25/go.mod h1:Zb29PYkf42vVYQY6pvSyJCJcFHlPIiY+YKdPtwnvMkY=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19 h1:oRHDrwCTVT8ZXi4sr9Ld+EXk7N/KGssOr2ygNeojEhw=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19/go.mod h1:6Q0546uHDp421okhmmGfbxzq2hBqbXFNpi4k+Q1JnQA=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26 h1:Mza+vlnZr+fPKFKRq/lKGVvM6B/8ZZmNdEopOwSQLms=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26/go.mod h1:Y2OJ+P+MC1u1VKnavT+PshiEuGPyh/7DqxoDNij4/bg=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.16 h1:2EXB7dtGwRYIN3XQ9qwIW504DVbKIw3r89xQnonGdsQ=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.16/go.mod h1:XH+3h395e3WVdd6T2Z3mPxuI+x/HVtdqVOREkTiyubs=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.10 h1:dpiPHgmFstgkLG07KaYAewvuptq5kvo52xn7tVSrtrQ=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.10/go.mod h1:9cBNUHI2aW4ho0A5T87O294iPDuuUOSIEDjnd1Lq/z0=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.20 h1:KSvtm1+fPXE0swe9GPjc6msyrdTT0LB/BP8eLugL1FI=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.20/go.mod h1:Mp4XI/CkWGD79AQxZ5lIFlgvC0A+gl+4BmyG1F+SfNc=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19 h1:GE25AWCdNUPh9AOJzI9KIJnja7IwUc1WyUqz/JTyJ/I=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19/go.mod h1:02CP6iuYP+IVnBX5HULVdSAku/85eHB2Y9EsFhrkEwU=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.19 h1:piDBAaWkaxkkVV3xJJbTehXCZRXYs49kvpi/LG6LR2o=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.19/go.mod h1:BmQWRVkLTmyNzYPFAZgon53qKLWBNSvonugD1MrSWUs=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.29.4 h1:QgmmWifaYZZcpaw3y1+ccRlgH6jAvLm4K/MBGUc7cNM=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.29.4/go.mod h1:/NHbqPRiwxSPVOB2Xr+StDEH+GWV/64WwnUjv4KYzV0=
-github.com/aws/aws-sdk-go-v2/service/sso v1.11.25 h1:GFZitO48N/7EsFDt8fMa5iYdmWqkUDDB3Eje6z3kbG0=
-github.com/aws/aws-sdk-go-v2/service/sso v1.11.25/go.mod h1:IARHuzTXmj1C0KS35vboR0FeJ89OkEy1M9mWbK2ifCI=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8 h1:jcw6kKZrtNfBPJkaHrscDOZoe5gvi9wjudnxvozYFJo=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8/go.mod h1:er2JHN+kBY6FcMfcBBKNGCT3CarImmdFzishsqBmSRI=
-github.com/aws/aws-sdk-go-v2/service/sts v1.17.5 h1:60SJ4lhvn///8ygCzYy2l53bFW/Q15bVfyjyAWo6zuw=
-github.com/aws/aws-sdk-go-v2/service/sts v1.17.5/go.mod h1:bXcN3koeVYiJcdDU89n3kCYILob7Y34AeLopUbZgLT4=
-github.com/aws/smithy-go v1.13.4 h1:/RN2z1txIJWeXeOkzX+Hk/4Uuvv7dWtCjbmVJcrskyk=
-github.com/aws/smithy-go v1.13.4/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
+github.com/aws/aws-sdk-go v1.44.153 h1:KfN5URb9O/Fk48xHrAinrPV2DzPcLa0cd9yo1ax5KGg=
+github.com/aws/aws-sdk-go v1.44.153/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go-v2 v1.17.2 h1:r0yRZInwiPBNpQ4aDy/Ssh3ROWsGtKDwar2JS8Lm+N8=
+github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
+github.com/aws/aws-sdk-go-v2/config v1.18.4 h1:VZKhr3uAADXHStS/Gf9xSYVmmaluTUfkc0dcbPiDsKE=
+github.com/aws/aws-sdk-go-v2/config v1.18.4/go.mod h1:EZxMPLSdGAZ3eAmkqXfYbRppZJTzFTkv8VyEzJhKko4=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.4 h1:nEbHIyJy7mCvQ/kzGG7VWHSBpRB4H6sJy3bWierWUtg=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.4/go.mod h1:/Cj5w9LRsNTLSwexsohwDME32OzJ6U81Zs33zr2ZWOM=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 h1:tpNOglTZ8kg9T38NpcGBxudqfUAwUzyUnLQ4XSd0CHE=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20/go.mod h1:d9xFpWd3qYwdIXM0fvu7deD08vvdRXyc/ueV+0SqaWE=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.43 h1:+bkAMTd5OGyHu2nwNOangjEsP65fR0uhMbZJA52sZ64=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.43/go.mod h1:sS2tu0VEspKuY5eM1vQgy7P/hpZX8F62o6qsghZExWc=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 h1:5WU31cY7m0tG+AiaXuXGoMzo2GBQ1IixtWa8Yywsgco=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26/go.mod h1:2E0LdbJW6lbeU4uxjum99GZzI0ZjDpAb0CoSCM0oeEY=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 h1:WW0qSzDWoiWU2FS5DbKpxGilFVlCEJPwx4YtjdfI0Jw=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20/go.mod h1:/+6lSiby8TBFpTVXZgKiN/rCfkYXEGvhlM4zCgPpt7w=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 h1:N2eKFw2S+JWRCtTt0IhIX7uoGGQciD4p6ba+SJv4WEU=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27/go.mod h1:RdwFVc7PBYWY33fa2+8T1mSqQ7ZEK4ILpM0wfioDC3w=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.17 h1:5tXbMJ7Jq0iG65oiMg6tCLsHkSaO2xLXa2EmZ29vaTA=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.17/go.mod h1:twV0fKMQuqLY4klyFH56aXNq3AFiA5LO0/frTczEOFE=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.21 h1:77b1GfaSuIok5yB/3HYbG+ypWvOJDQ2rVdq943D17R4=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.21/go.mod h1:sPOz31BVdqeeurKEuUpLNSve4tdCNPluE+070HNcEHI=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 h1:jlgyHbkZQAgAc7VIxJDmtouH8eNjOk2REVAQfVhdaiQ=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20/go.mod h1:Xs52xaLBqDEKRcAfX/hgjmD3YQ7c/W+BEyfamlO/W2E=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.20 h1:4K6dbmR0mlp3o4Bo78PnpvzHtYAqEeVMguvEenpMGsI=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.20/go.mod h1:1XpDcReIEOHsjwNToDKhIAO3qwLo1BnfbtSqWJa8j7g=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.29.5 h1:nRSEQj1JergKTVc8RGkhZvOEGgcvo4fWpDPwGDeg2ok=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.29.5/go.mod h1:wcaJTmjKFDW0s+Se55HBNIds6ghdAGoDDw+SGUdrfAk=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 h1:ActQgdTNQej/RuUJjB9uxYVLDOvRGtUreXF8L3c8wyg=
+github.com/aws/aws-sdk-go-v2/service/sso v1.11.26/go.mod h1:uB9tV79ULEZUXc6Ob18A46KSQ0JDlrplPni9XW6Ot60=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 h1:wihKuqYUlA2T/Rx+yu2s6NDAns8B9DgnRooB1PVhY+Q=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9/go.mod h1:2E/3D/mB8/r2J7nK42daoKP/ooCwbf0q1PznNc+DZTU=
+github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 h1:VQFOLQVL3BrKM/NLO/7FiS4vcp5bqK0mGMyk09xLoAY=
+github.com/aws/aws-sdk-go-v2/service/sts v1.17.6/go.mod h1:Az3OXXYGyfNwQNsK/31L4R75qFYnO641RZGAoV3uH1c=
+github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
+github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb/v3 v3.1.0 h1:3uouEsl32RL7gTiQsuaXD4Bzbfl5tGztXGUvXbs4O04=
github.com/cheggaaa/pb/v3 v3.1.0/go.mod h1:YjrevcBqadFDaGQKRdmZxTY42pXEqda48Ea3lt0K/BE=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -402,8 +403,8 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
-github.com/prometheus/prometheus v0.40.4 h1:6aLtQSvnhmC/uo5Tx910AQm3Fxq1nzaJA6uiYtsA6So=
-github.com/prometheus/prometheus v0.40.4/go.mod h1:bxgdmtoSNLmmIVPGmeTJ3OiP67VmuY4yalE4ZP6L/j8=
+github.com/prometheus/prometheus v0.40.5 h1:wmk5yNrQlkQ2OvZucMhUB4k78AVfG34szb1UtopS8Vc=
+github.com/prometheus/prometheus v0.40.5/go.mod h1:bxgdmtoSNLmmIVPGmeTJ3OiP67VmuY4yalE4ZP6L/j8=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
@@ -429,8 +430,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/urfave/cli/v2 v2.23.5 h1:xbrU7tAYviSpqeR3X4nEFWUdB/uDZ6DE+HxmRU7Xtyw=
-github.com/urfave/cli/v2 v2.23.5/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
+github.com/urfave/cli/v2 v2.23.6 h1:iWmtKD+prGo1nKUtLO0Wg4z9esfBM4rAV4QRLQiEmJ4=
+github.com/urfave/cli/v2 v2.23.6/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
@@ -462,14 +463,14 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4 h1:aUEBEdCa6iamGzg6fuYxDA8ThxvOG240mAvWDU+XLio=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4/go.mod h1:l2MdsbKTocpPS5nQZscqTR9jd8u96VYZdcpF8Sye7mA=
-go.opentelemetry.io/otel v1.11.1 h1:4WLLAmcfkmDk2ukNXJyq3/kiz/3UzCaYq6PskJsaou4=
-go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE=
-go.opentelemetry.io/otel/metric v0.33.0 h1:xQAyl7uGEYvrLAiV/09iTJlp1pZnQ9Wl793qbVvED1E=
-go.opentelemetry.io/otel/metric v0.33.0/go.mod h1:QlTYc+EnYNq/M2mNk1qDDMRLpqCOj2f/r5c7Fd5FYaI=
-go.opentelemetry.io/otel/trace v1.11.1 h1:ofxdnzsNrGBYXbP7t7zpUK281+go5rF7dvdIZXF8gdQ=
-go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 h1:yt2NKzK7Vyo6h0+X8BA4FpreZQTlVEIarnsBP/H5mzs=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:+ARmXlUlc51J7sZeCBkBJNdHGySrdOzgzxp6VWRWM1U=
+go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0=
+go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI=
+go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8=
+go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8=
+go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0=
+go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
@@ -493,8 +494,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 h1:yZNXmy+j/JpX19vZkVktWqAo7Gny4PBWYYK3zskGpx4=
-golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20221205204356-47842c84f3db h1:D/cFflL63o2KSLJIwjlcIt8PR064j/xsmdEJL/YvY/o=
+golang.org/x/exp v0.0.0-20221205204356-47842c84f3db/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -555,8 +556,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
-golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
-golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
+golang.org/x/net v0.3.0 h1:VWL6FNY2bEEmsGVKabSlHu5Irp34xmMRoqb/9lF9lxk=
+golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -627,12 +628,12 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
-golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
+golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -640,13 +641,14 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.2.0 h1:52I/1L54xyEQAYdtcSuxtiT84KGYTBGXwayxmIpNJhE=
-golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -752,8 +754,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c=
-google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221205194025-8222ab48f5fc h1:nUKKji0AarrQKh6XpFEpG3p1TNztxhe7C8TcUvDgXqw=
+google.golang.org/genproto v0.0.0-20221205194025-8222ab48f5fc/go.mod h1:1dOng4TWOomJrDGhpXjfCD35wQC6jnC7HpRmOFRqEV0=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
diff --git a/lib/backup/fscommon/fscommon.go b/lib/backup/fscommon/fscommon.go
index fd9e3506b9..37d688aeb9 100644
--- a/lib/backup/fscommon/fscommon.go
+++ b/lib/backup/fscommon/fscommon.go
@@ -45,7 +45,7 @@ func fsync(path string) error {
func AppendFiles(dst []string, dir string) ([]string, error) {
d, err := os.Open(dir)
if err != nil {
- return nil, fmt.Errorf("cannot open %q: %w", dir, err)
+ return nil, fmt.Errorf("cannot open directory: %w", err)
}
dst, err = appendFilesInternal(dst, d)
if err1 := d.Close(); err1 != nil {
diff --git a/lib/backup/fslocal/fslocal.go b/lib/backup/fslocal/fslocal.go
index 051182a624..055c38123c 100644
--- a/lib/backup/fslocal/fslocal.go
+++ b/lib/backup/fslocal/fslocal.go
@@ -159,7 +159,7 @@ func (fs *FS) DeletePath(path string) (uint64, error) {
// The file could be deleted earlier via symlink.
return 0, nil
}
- return 0, fmt.Errorf("cannot open %q at %q: %w", path, fullPath, err)
+ return 0, fmt.Errorf("cannot open %q: %w", path, err)
}
fi, err := f.Stat()
_ = f.Close()
diff --git a/lib/backup/fsremote/fsremote.go b/lib/backup/fsremote/fsremote.go
index 4e4939f912..d2a7ce8512 100644
--- a/lib/backup/fsremote/fsremote.go
+++ b/lib/backup/fsremote/fsremote.go
@@ -107,12 +107,12 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
// Cannot create hardlink. Just copy file contents
srcFile, err := os.Open(srcPath)
if err != nil {
- return fmt.Errorf("cannot open file %q: %w", srcPath, err)
+ return fmt.Errorf("cannot open source file: %w", err)
}
dstFile, err := os.Create(dstPath)
if err != nil {
_ = srcFile.Close()
- return fmt.Errorf("cannot create file %q: %w", dstPath, err)
+ return fmt.Errorf("cannot create destination file: %w", err)
}
n, err := io.Copy(dstFile, srcFile)
if err1 := dstFile.Close(); err1 != nil {
@@ -141,7 +141,7 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
path := fs.path(p)
r, err := os.Open(path)
if err != nil {
- return fmt.Errorf("cannot open %q: %w", path, err)
+ return err
}
n, err := io.Copy(w, r)
if err1 := r.Close(); err1 != nil && err == nil {
diff --git a/lib/filestream/filestream.go b/lib/filestream/filestream.go
index 8af10f5cec..2f0e4e5953 100644
--- a/lib/filestream/filestream.go
+++ b/lib/filestream/filestream.go
@@ -79,7 +79,7 @@ func OpenReaderAt(path string, offset int64, nocache bool) (*Reader, error) {
func Open(path string, nocache bool) (*Reader, error) {
f, err := os.Open(path)
if err != nil {
- return nil, fmt.Errorf("cannot open file %q: %w", path, err)
+ return nil, err
}
r := &Reader{
f: f,
@@ -179,7 +179,7 @@ type Writer struct {
func OpenWriterAt(path string, offset int64, nocache bool) (*Writer, error) {
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
- return nil, fmt.Errorf("cannot open %q: %w", path, err)
+ return nil, err
}
n, err := f.Seek(offset, io.SeekStart)
if err != nil {
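The simplified error returns in the backup and filestream packages above rely on the fact that os.Open, os.OpenFile and os.Create already return a *fs.PathError that embeds both the operation and the file path, so wrapping the error with the path again only duplicates information. A minimal standalone sketch of that assumption (the path below is hypothetical):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

func main() {
	// os.Open returns *fs.PathError, which already carries the operation and the path.
	_, err := os.Open("/nonexistent/example.txt")
	fmt.Println(err) // open /nonexistent/example.txt: no such file or directory

	// Callers can still recover the path from the returned error without extra wrapping.
	var pe *fs.PathError
	if errors.As(err, &pe) {
		fmt.Println("path:", pe.Path)
	}
}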
diff --git a/lib/fs/fs.go b/lib/fs/fs.go
index 14ae2d6823..92ae1e67d5 100644
--- a/lib/fs/fs.go
+++ b/lib/fs/fs.go
@@ -25,11 +25,38 @@ func MustSyncPath(path string) {
mustSyncPath(path)
}
+// WriteFileAndSync writes data to the file at path and then calls fsync on the created file.
+//
+// The fsync guarantees that the written data survives a hardware reset after a successful call.
+//
+// This function may leave the file at the path in an inconsistent state if the app crashes
+// in the middle of the write.
+// Use WriteFileAtomically if the file at the path must be either written in full
+// or not written at all when the app crashes in the middle of the write.
+func WriteFileAndSync(path string, data []byte) error {
+ f, err := filestream.Create(path, false)
+ if err != nil {
+ return err
+ }
+ if _, err := f.Write(data); err != nil {
+ f.MustClose()
+ // Do not call MustRemoveAll(path), so the user could inspect
+ // the file contents during investigation of the issue.
+ return fmt.Errorf("cannot write %d bytes to %q: %w", len(data), path, err)
+ }
+ // Sync and close the file.
+ f.MustClose()
+ return nil
+}
+
// WriteFileAtomically atomically writes data to the given file path.
//
-// WriteFileAtomically returns only after the file is fully written and synced
+// This function returns only after the file is fully written and synced
// to the underlying storage.
//
+// This function guarantees that the file at path is either fully written or not written at all
+// if the app crashes in the middle of the write.
+//
// If the file at path already exists, then the file is overwritten atomically if canOverwrite is true.
// Otherwise error is returned.
func WriteFileAtomically(path string, data []byte, canOverwrite bool) error {
@@ -40,26 +67,18 @@ func WriteFileAtomically(path string, data []byte, canOverwrite bool) error {
return fmt.Errorf("cannot create file %q, since it already exists", path)
}
+ // Write data to a temporary file.
n := atomic.AddUint64(&tmpFileNum, 1)
tmpPath := fmt.Sprintf("%s.tmp.%d", path, n)
- f, err := filestream.Create(tmpPath, false)
- if err != nil {
- return fmt.Errorf("cannot create file %q: %w", tmpPath, err)
- }
- if _, err := f.Write(data); err != nil {
- f.MustClose()
- MustRemoveAll(tmpPath)
- return fmt.Errorf("cannot write %d bytes to file %q: %w", len(data), tmpPath, err)
+ if err := WriteFileAndSync(tmpPath, data); err != nil {
+ return fmt.Errorf("cannot write data to temporary file: %w", err)
}
- // Sync and close the file.
- f.MustClose()
-
- // Atomically move the file from tmpPath to path.
+ // Atomically move the temporary file from tmpPath to path.
if err := os.Rename(tmpPath, path); err != nil {
// do not call MustRemoveAll(tmpPath) here, so the user could inspect
- // the file contents during investigating the issue.
- return fmt.Errorf("cannot move %q to %q: %w", tmpPath, path, err)
+ // the file contents during investigation of the issue.
+ return fmt.Errorf("cannot move temporary file %q to %q: %w", tmpPath, path, err)
}
// Sync the containing directory, so the file is guaranteed to appear in the directory.
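A usage sketch contrasting the two helpers touched above; the paths are hypothetical, and only the WriteFileAndSync and WriteFileAtomically signatures come from this diff:

package example

import "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"

func saveFiles(cache, meta []byte) error {
	// WriteFileAndSync is enough when a partially written file can be detected
	// and discarded on the next startup (e.g. a checksummed cache file).
	if err := fs.WriteFileAndSync("/var/lib/example/cache.bin", cache); err != nil {
		return err
	}
	// WriteFileAtomically is needed when readers must never observe a half-written
	// file: the data is written to a temporary file and then renamed over the target.
	return fs.WriteFileAtomically("/var/lib/example/metadata.json", meta, true)
}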
@@ -123,7 +142,7 @@ func RemoveDirContents(dir string) {
}
d, err := os.Open(dir)
if err != nil {
- logger.Panicf("FATAL: cannot open dir %q: %s", dir, err)
+ logger.Panicf("FATAL: cannot open dir: %s", err)
}
defer MustClose(d)
names, err := d.Readdirnames(-1)
@@ -185,7 +204,7 @@ func IsEmptyDir(path string) bool {
// See https://stackoverflow.com/a/30708914/274937
f, err := os.Open(path)
if err != nil {
- logger.Panicf("FATAL: unexpected error when opening directory %q: %s", path, err)
+ logger.Panicf("FATAL: cannot open dir: %s", err)
}
_, err = f.Readdirnames(1)
MustClose(f)
@@ -230,7 +249,7 @@ var atomicDirRemoveCounter = uint64(time.Now().UnixNano())
func MustRemoveTemporaryDirs(dir string) {
d, err := os.Open(dir)
if err != nil {
- logger.Panicf("FATAL: cannot open dir %q: %s", dir, err)
+ logger.Panicf("FATAL: cannot open dir: %s", err)
}
defer MustClose(d)
fis, err := d.Readdir(-1)
@@ -259,7 +278,7 @@ func HardLinkFiles(srcDir, dstDir string) error {
d, err := os.Open(srcDir)
if err != nil {
- return fmt.Errorf("cannot open srcDir=%q: %w", srcDir, err)
+ return fmt.Errorf("cannot open srcDir: %w", err)
}
defer func() {
if err := d.Close(); err != nil {
diff --git a/lib/fs/fs_solaris.go b/lib/fs/fs_solaris.go
index 8cddca829d..ac94ea406c 100644
--- a/lib/fs/fs_solaris.go
+++ b/lib/fs/fs_solaris.go
@@ -19,7 +19,7 @@ func mUnmap(data []byte) error {
func mustSyncPath(path string) {
d, err := os.Open(path)
if err != nil {
- logger.Panicf("FATAL: cannot open %q: %s", path, err)
+ logger.Panicf("FATAL: cannot open file for fsync: %s", err)
}
if err := d.Sync(); err != nil {
_ = d.Close()
@@ -51,7 +51,7 @@ func createFlockFile(flockFile string) (*os.File, error) {
func mustGetFreeSpace(path string) uint64 {
d, err := os.Open(path)
if err != nil {
- logger.Panicf("FATAL: cannot determine free disk space on %q: %s", path, err)
+ logger.Panicf("FATAL: cannot open dir for determining free disk space: %s", err)
}
defer MustClose(d)
diff --git a/lib/fs/fs_unix.go b/lib/fs/fs_unix.go
index 20cf6f6c08..bcb789c94a 100644
--- a/lib/fs/fs_unix.go
+++ b/lib/fs/fs_unix.go
@@ -22,7 +22,7 @@ func mUnmap(data []byte) error {
func mustSyncPath(path string) {
d, err := os.Open(path)
if err != nil {
- logger.Panicf("FATAL: cannot open %q: %s", path, err)
+ logger.Panicf("FATAL: cannot open file for fsync: %s", err)
}
if err := d.Sync(); err != nil {
_ = d.Close()
@@ -47,7 +47,7 @@ func createFlockFile(flockFile string) (*os.File, error) {
func mustGetFreeSpace(path string) uint64 {
d, err := os.Open(path)
if err != nil {
- logger.Panicf("FATAL: cannot determine free disk space on %q: %s", path, err)
+ logger.Panicf("FATAL: cannot open dir for determining free disk space: %s", err)
}
defer MustClose(d)
diff --git a/lib/fs/reader_at.go b/lib/fs/reader_at.go
index 53ccb44c37..abc1c46990 100644
--- a/lib/fs/reader_at.go
+++ b/lib/fs/reader_at.go
@@ -89,7 +89,7 @@ func (r *ReaderAt) MustFadviseSequentialRead(prefetch bool) {
func MustOpenReaderAt(path string) *ReaderAt {
f, err := os.Open(path)
if err != nil {
- logger.Panicf("FATAL: cannot open file %q for reading: %s", path, err)
+ logger.Panicf("FATAL: cannot open file for reading: %s", err)
}
var r ReaderAt
r.f = f
diff --git a/lib/mergeset/block_stream_writer.go b/lib/mergeset/block_stream_writer.go
index b25e473257..9c348fbc23 100644
--- a/lib/mergeset/block_stream_writer.go
+++ b/lib/mergeset/block_stream_writer.go
@@ -63,13 +63,10 @@ func (bsw *blockStreamWriter) reset() {
bsw.mrFirstItemCaught = false
}
-func (bsw *blockStreamWriter) InitFromInmemoryPart(mp *inmemoryPart) {
+func (bsw *blockStreamWriter) InitFromInmemoryPart(mp *inmemoryPart, compressLevel int) {
bsw.reset()
- // Use the minimum compression level for in-memory blocks,
- // since they are going to be re-compressed during the merge into file-based blocks.
- bsw.compressLevel = -5 // See https://github.com/facebook/zstd/releases/tag/v1.3.4
-
+ bsw.compressLevel = compressLevel
bsw.metaindexWriter = &mp.metaindexData
bsw.indexWriter = &mp.indexData
bsw.itemsWriter = &mp.itemsData
diff --git a/lib/mergeset/encoding.go b/lib/mergeset/encoding.go
index 09d1e0f762..b93383e817 100644
--- a/lib/mergeset/encoding.go
+++ b/lib/mergeset/encoding.go
@@ -47,7 +47,9 @@ func (it Item) String(data []byte) string {
return *(*string)(unsafe.Pointer(sh))
}
-func (ib *inmemoryBlock) Len() int { return len(ib.items) }
+func (ib *inmemoryBlock) Len() int {
+ return len(ib.items)
+}
func (ib *inmemoryBlock) Less(i, j int) bool {
items := ib.items
diff --git a/lib/mergeset/encoding_test.go b/lib/mergeset/encoding_test.go
index 48398ae2fc..8e6ecffd0f 100644
--- a/lib/mergeset/encoding_test.go
+++ b/lib/mergeset/encoding_test.go
@@ -115,7 +115,7 @@ func TestInmemoryBlockMarshalUnmarshal(t *testing.T) {
var itemsLen uint32
var mt marshalType
- for i := 0; i < 1000; i++ {
+ for i := 0; i < 1000; i += 10 {
var items []string
totalLen := 0
ib.Reset()
diff --git a/lib/mergeset/inmemory_part.go b/lib/mergeset/inmemory_part.go
index 9b41faca82..d8da08c973 100644
--- a/lib/mergeset/inmemory_part.go
+++ b/lib/mergeset/inmemory_part.go
@@ -1,8 +1,12 @@
package mergeset
import (
+ "fmt"
+ "path/filepath"
+
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
+ "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
@@ -28,6 +32,36 @@ func (mp *inmemoryPart) Reset() {
mp.lensData.Reset()
}
+// StoreToDisk stores mp to the given path on disk.
+func (mp *inmemoryPart) StoreToDisk(path string) error {
+ if err := fs.MkdirAllIfNotExist(path); err != nil {
+ return fmt.Errorf("cannot create directory %q: %w", path, err)
+ }
+ metaindexPath := path + "/metaindex.bin"
+ if err := fs.WriteFileAndSync(metaindexPath, mp.metaindexData.B); err != nil {
+ return fmt.Errorf("cannot store metaindex: %w", err)
+ }
+ indexPath := path + "/index.bin"
+ if err := fs.WriteFileAndSync(indexPath, mp.indexData.B); err != nil {
+ return fmt.Errorf("cannot store index: %w", err)
+ }
+ itemsPath := path + "/items.bin"
+ if err := fs.WriteFileAndSync(itemsPath, mp.itemsData.B); err != nil {
+ return fmt.Errorf("cannot store items: %w", err)
+ }
+ lensPath := path + "/lens.bin"
+ if err := fs.WriteFileAndSync(lensPath, mp.lensData.B); err != nil {
+ return fmt.Errorf("cannot store lens: %w", err)
+ }
+ if err := mp.ph.WriteMetadata(path); err != nil {
+ return fmt.Errorf("cannot store metadata: %w", err)
+ }
+ // Sync the parent directory in order to make sure the written files remain visible after a hardware reset
+ parentDirPath := filepath.Dir(path)
+ fs.MustSyncPath(parentDirPath)
+ return nil
+}
+
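StoreToDisk above writes each buffer to its own file and then syncs the parent directory so the new directory entries survive a hardware reset. A generic sketch of the same write-then-sync-parent pattern using only the standard library (file names and permissions are illustrative):

package main

import (
	"os"
	"path/filepath"
)

func writePartDir(partPath string, files map[string][]byte) error {
	if err := os.MkdirAll(partPath, 0o755); err != nil {
		return err
	}
	for name, data := range files {
		// Write and fsync every file so its contents survive a hardware reset.
		f, err := os.OpenFile(filepath.Join(partPath, name), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
		if err != nil {
			return err
		}
		if _, err := f.Write(data); err != nil {
			f.Close()
			return err
		}
		if err := f.Sync(); err != nil {
			f.Close()
			return err
		}
		if err := f.Close(); err != nil {
			return err
		}
	}
	// Syncing the parent directory persists the directory entries themselves.
	parent, err := os.Open(filepath.Dir(partPath))
	if err != nil {
		return err
	}
	defer parent.Close()
	return parent.Sync()
}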
// Init initializes mp from ib.
func (mp *inmemoryPart) Init(ib *inmemoryBlock) {
mp.Reset()
@@ -60,14 +94,14 @@ func (mp *inmemoryPart) Init(ib *inmemoryBlock) {
bb := inmemoryPartBytePool.Get()
bb.B = mp.bh.Marshal(bb.B[:0])
- mp.indexData.B = encoding.CompressZSTDLevel(mp.indexData.B[:0], bb.B, 0)
+ mp.indexData.B = encoding.CompressZSTDLevel(mp.indexData.B[:0], bb.B, compressLevel)
mp.mr.firstItem = append(mp.mr.firstItem[:0], mp.bh.firstItem...)
mp.mr.blockHeadersCount = 1
mp.mr.indexBlockOffset = 0
mp.mr.indexBlockSize = uint32(len(mp.indexData.B))
bb.B = mp.mr.Marshal(bb.B[:0])
- mp.metaindexData.B = encoding.CompressZSTDLevel(mp.metaindexData.B[:0], bb.B, 0)
+ mp.metaindexData.B = encoding.CompressZSTDLevel(mp.metaindexData.B[:0], bb.B, compressLevel)
inmemoryPartBytePool.Put(bb)
}
@@ -76,9 +110,8 @@ var inmemoryPartBytePool bytesutil.ByteBufferPool
// It is safe calling NewPart multiple times.
// It is unsafe re-using mp while the returned part is in use.
func (mp *inmemoryPart) NewPart() *part {
- ph := mp.ph
size := mp.size()
- p, err := newPart(&ph, "", size, mp.metaindexData.NewReader(), &mp.indexData, &mp.itemsData, &mp.lensData)
+ p, err := newPart(&mp.ph, "", size, mp.metaindexData.NewReader(), &mp.indexData, &mp.itemsData, &mp.lensData)
if err != nil {
logger.Panicf("BUG: cannot create a part from inmemoryPart: %s", err)
}
@@ -86,5 +119,5 @@ func (mp *inmemoryPart) NewPart() *part {
}
func (mp *inmemoryPart) size() uint64 {
- return uint64(len(mp.metaindexData.B) + len(mp.indexData.B) + len(mp.itemsData.B) + len(mp.lensData.B))
+ return uint64(cap(mp.metaindexData.B) + cap(mp.indexData.B) + cap(mp.itemsData.B) + cap(mp.lensData.B))
}
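The size() change above switches from len to cap so the reported size reflects the memory actually held by the underlying buffers rather than only the bytes currently written. A tiny illustration of the difference:

package main

import "fmt"

func main() {
	b := make([]byte, 0, 1024) // 1024 bytes are reserved up front
	b = append(b, "hello"...)  // only 5 bytes are in use

	fmt.Println(len(b)) // 5    - bytes written so far
	fmt.Println(cap(b)) // 1024 - bytes actually held in memory
}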
diff --git a/lib/mergeset/merge_test.go b/lib/mergeset/merge_test.go
index f042bae0fc..6ba874a676 100644
--- a/lib/mergeset/merge_test.go
+++ b/lib/mergeset/merge_test.go
@@ -30,14 +30,14 @@ func TestMultilevelMerge(t *testing.T) {
// First level merge
var dstIP1 inmemoryPart
var bsw1 blockStreamWriter
- bsw1.InitFromInmemoryPart(&dstIP1)
+ bsw1.InitFromInmemoryPart(&dstIP1, -5)
if err := mergeBlockStreams(&dstIP1.ph, &bsw1, bsrs[:5], nil, nil, &itemsMerged); err != nil {
t.Fatalf("cannot merge first level part 1: %s", err)
}
var dstIP2 inmemoryPart
var bsw2 blockStreamWriter
- bsw2.InitFromInmemoryPart(&dstIP2)
+ bsw2.InitFromInmemoryPart(&dstIP2, -5)
if err := mergeBlockStreams(&dstIP2.ph, &bsw2, bsrs[5:], nil, nil, &itemsMerged); err != nil {
t.Fatalf("cannot merge first level part 2: %s", err)
}
@@ -54,7 +54,7 @@ func TestMultilevelMerge(t *testing.T) {
newTestBlockStreamReader(&dstIP1),
newTestBlockStreamReader(&dstIP2),
}
- bsw.InitFromInmemoryPart(&dstIP)
+ bsw.InitFromInmemoryPart(&dstIP, 1)
if err := mergeBlockStreams(&dstIP.ph, &bsw, bsrsTop, nil, nil, &itemsMerged); err != nil {
t.Fatalf("cannot merge second level: %s", err)
}
@@ -73,7 +73,7 @@ func TestMergeForciblyStop(t *testing.T) {
bsrs, _ := newTestInmemoryBlockStreamReaders(20, 4000)
var dstIP inmemoryPart
var bsw blockStreamWriter
- bsw.InitFromInmemoryPart(&dstIP)
+ bsw.InitFromInmemoryPart(&dstIP, 1)
ch := make(chan struct{})
var itemsMerged uint64
close(ch)
@@ -120,7 +120,7 @@ func testMergeBlockStreamsSerial(blocksToMerge, maxItemsPerBlock int) error {
var itemsMerged uint64
var dstIP inmemoryPart
var bsw blockStreamWriter
- bsw.InitFromInmemoryPart(&dstIP)
+ bsw.InitFromInmemoryPart(&dstIP, -4)
if err := mergeBlockStreams(&dstIP.ph, &bsw, bsrs, nil, nil, &itemsMerged); err != nil {
return fmt.Errorf("cannot merge block streams: %w", err)
}
diff --git a/lib/mergeset/part_search_test.go b/lib/mergeset/part_search_test.go
index fb178bcde0..c042d44acd 100644
--- a/lib/mergeset/part_search_test.go
+++ b/lib/mergeset/part_search_test.go
@@ -149,7 +149,7 @@ func newTestPart(blocksCount, maxItemsPerBlock int) (*part, []string, error) {
var itemsMerged uint64
var ip inmemoryPart
var bsw blockStreamWriter
- bsw.InitFromInmemoryPart(&ip)
+ bsw.InitFromInmemoryPart(&ip, -3)
if err := mergeBlockStreams(&ip.ph, &bsw, bsrs, nil, nil, &itemsMerged); err != nil {
return nil, nil, fmt.Errorf("cannot merge blocks: %w", err)
}
diff --git a/lib/mergeset/table.go b/lib/mergeset/table.go
index aa4b595cca..08b1e4e378 100644
--- a/lib/mergeset/table.go
+++ b/lib/mergeset/table.go
@@ -22,10 +22,10 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/syncwg"
)
-// maxParts is the maximum number of parts in the table.
+// maxInmemoryParts is the maximum number of inmemory parts in the table.
//
// This number may be reached when the insertion pace outreaches merger pace.
-const maxParts = 512
+const maxInmemoryParts = 64
// Default number of parts to merge at once.
//
@@ -46,6 +46,24 @@ const finalPartsToMerge = 2
// The required time shouldn't exceed a day.
const maxPartSize = 400e9
+// The interval for flushing buffered data to parts, so it becomes visible to search.
+const pendingItemsFlushInterval = time.Second
+
+// The interval for guaranteed flush of recently ingested data from memory to on-disk parts,
+// so it survives a process crash.
+var dataFlushInterval = 5 * time.Second
+
+// SetDataFlushInterval sets the interval for guaranteed flush of recently ingested data from memory to disk.
+//
+// The data can be flushed from memory to disk more frequently if it doesn't fit the memory limit.
+//
+// This function must be called before initializing the indexdb.
+func SetDataFlushInterval(d time.Duration) {
+ if d > pendingItemsFlushInterval {
+ dataFlushInterval = d
+ }
+}
+
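A sketch of how the new SetDataFlushInterval knob could be wired up; the flag name below is a hypothetical example, and only mergeset.SetDataFlushInterval comes from this diff:

package main

import (
	"flag"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
)

// The flag name is illustrative, not part of this change.
var inmemoryDataFlushInterval = flag.Duration("inmemoryDataFlushInterval", 5*time.Second,
	"The interval for guaranteed saving of in-memory data to disk")

func main() {
	flag.Parse()
	// Must be called before the indexdb is initialized, as documented above.
	mergeset.SetDataFlushInterval(*inmemoryDataFlushInterval)
	// ... open tables and start the rest of the app ...
}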
// maxItemsPerCachedPart is the maximum items per created part by the merge,
// which must be cached in the OS page cache.
//
@@ -65,20 +83,23 @@ func maxItemsPerCachedPart() uint64 {
return maxItems
}
-// The interval for flushing (converting) recent raw items into parts,
-// so they become visible to search.
-const rawItemsFlushInterval = time.Second
-
// Table represents mergeset table.
type Table struct {
// Atomically updated counters must go first in the struct, so they are properly
// aligned to 8 bytes on 32-bit architectures.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/212
- activeMerges uint64
- mergesCount uint64
- itemsMerged uint64
- assistedMerges uint64
+ activeInmemoryMerges uint64
+ activeFileMerges uint64
+
+ inmemoryMergesCount uint64
+ fileMergesCount uint64
+
+ inmemoryItemsMerged uint64
+ fileItemsMerged uint64
+
+ assistedInmemoryMerges uint64
+
itemsAdded uint64
itemsAddedSizeBytes uint64
@@ -93,26 +114,27 @@ type Table struct {
prepareBlock PrepareBlockCallback
isReadOnly *uint32
- partsLock sync.Mutex
- parts []*partWrapper
-
// rawItems contains recently added items that haven't been converted to parts yet.
//
// rawItems aren't used in search for performance reasons
rawItems rawItemsShards
+ // partsLock protects inmemoryParts and fileParts.
+ partsLock sync.Mutex
+
+ // inmemoryParts contains inmemory parts.
+ inmemoryParts []*partWrapper
+
+ // fileParts contains file-backed parts.
+ fileParts []*partWrapper
+
snapshotLock sync.RWMutex
flockF *os.File
stopCh chan struct{}
- // Use syncwg instead of sync, since Add/Wait may be called from concurrent goroutines.
- partMergersWG syncwg.WaitGroup
-
- rawItemsFlusherWG sync.WaitGroup
-
- convertersWG sync.WaitGroup
+ wg sync.WaitGroup
// Use syncwg instead of sync, since Add/Wait may be called from concurrent goroutines.
rawItemsPendingFlushesWG syncwg.WaitGroup
@@ -143,12 +165,14 @@ func (riss *rawItemsShards) init() {
riss.shards = make([]rawItemsShard, rawItemsShardsPerTable)
}
-func (riss *rawItemsShards) addItems(tb *Table, items [][]byte) error {
- n := atomic.AddUint32(&riss.shardIdx, 1)
+func (riss *rawItemsShards) addItems(tb *Table, items [][]byte) {
shards := riss.shards
- idx := n % uint32(len(shards))
- shard := &shards[idx]
- return shard.addItems(tb, items)
+ shardsLen := uint32(len(shards))
+ for len(items) > 0 {
+ n := atomic.AddUint32(&riss.shardIdx, 1)
+ idx := n % shardsLen
+ items = shards[idx].addItems(tb, items)
+ }
}
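The rewritten rawItemsShards.addItems keeps round-robining over the shards with an atomically incremented index until every item has been placed, with each shard returning the tail of items it couldn't accept. A simplified standalone sketch of that pattern (in the real code a full shard is flushed to a part instead of being skipped):

package main

import (
	"fmt"
	"sync/atomic"
)

type shard struct{ items [][]byte }

// addItems appends items until the shard reaches max and returns the tail that didn't fit.
func (s *shard) addItems(items [][]byte, max int) [][]byte {
	for i, item := range items {
		if len(s.items) >= max {
			return items[i:]
		}
		s.items = append(s.items, item)
	}
	return nil
}

type shards struct {
	idx    uint32
	shards []shard
}

// addItems distributes items across shards in round-robin order until all of them are placed.
func (ss *shards) addItems(items [][]byte, maxPerShard int) {
	n := uint32(len(ss.shards))
	for len(items) > 0 {
		i := atomic.AddUint32(&ss.idx, 1) % n
		items = ss.shards[i].addItems(items, maxPerShard)
	}
}

func main() {
	ss := &shards{shards: make([]shard, 4)}
	ss.addItems([][]byte{[]byte("a"), []byte("b"), []byte("c"), []byte("d"), []byte("e")}, 2)
	total := 0
	for i := range ss.shards {
		total += len(ss.shards[i].items)
	}
	fmt.Println("placed items:", total) // placed items: 5
}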
func (riss *rawItemsShards) Len() int {
@@ -185,9 +209,9 @@ func (ris *rawItemsShard) Len() int {
return n
}
-func (ris *rawItemsShard) addItems(tb *Table, items [][]byte) error {
- var err error
- var blocksToFlush []*inmemoryBlock
+func (ris *rawItemsShard) addItems(tb *Table, items [][]byte) [][]byte {
+ var ibsToFlush []*inmemoryBlock
+ var tailItems [][]byte
ris.mu.Lock()
ibs := ris.ibs
@@ -197,30 +221,31 @@ func (ris *rawItemsShard) addItems(tb *Table, items [][]byte) error {
ris.ibs = ibs
}
ib := ibs[len(ibs)-1]
- for _, item := range items {
- if !ib.Add(item) {
- ib = getInmemoryBlock()
- if !ib.Add(item) {
- putInmemoryBlock(ib)
- err = fmt.Errorf("cannot insert an item %q into an empty inmemoryBlock; it looks like the item is too large? len(item)=%d", item, len(item))
- break
- }
+ for i, item := range items {
+ if ib.Add(item) {
+ continue
+ }
+ if len(ibs) >= maxBlocksPerShard {
+ ibsToFlush = ibs
+ ibs = make([]*inmemoryBlock, 0, maxBlocksPerShard)
+ tailItems = items[i:]
+ atomic.StoreUint64(&ris.lastFlushTime, fasttime.UnixTimestamp())
+ break
+ }
+ ib = getInmemoryBlock()
+ if ib.Add(item) {
ibs = append(ibs, ib)
- ris.ibs = ibs
+ continue
}
+ putInmemoryBlock(ib)
+ logger.Panicf("BUG: cannot insert a too big item into an empty inmemoryBlock; len(item)=%d; the caller is responsible for avoiding too big items", len(item))
}
- if len(ibs) >= maxBlocksPerShard {
- blocksToFlush = append(blocksToFlush, ibs...)
- for i := range ibs {
- ibs[i] = nil
- }
- ris.ibs = ibs[:0]
- atomic.StoreUint64(&ris.lastFlushTime, fasttime.UnixTimestamp())
- }
+ ris.ibs = ibs
ris.mu.Unlock()
- tb.mergeRawItemsBlocks(blocksToFlush, false)
- return err
+ tb.flushBlocksToParts(ibsToFlush, false)
+
+ return tailItems
}
type partWrapper struct {
@@ -231,6 +256,9 @@ type partWrapper struct {
refCount uint64
isInMerge bool
+
+ // The deadline when the in-memory part must be flushed to disk.
+ flushToDiskDeadline time.Time
}
func (pw *partWrapper) incRef() {
@@ -292,25 +320,18 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb
flushCallback: flushCallback,
prepareBlock: prepareBlock,
isReadOnly: isReadOnly,
- parts: pws,
+ fileParts: pws,
mergeIdx: uint64(time.Now().UnixNano()),
flockF: flockF,
stopCh: make(chan struct{}),
}
tb.rawItems.init()
- tb.startPartMergers()
- tb.startRawItemsFlusher()
+ tb.startBackgroundWorkers()
var m TableMetrics
tb.UpdateMetrics(&m)
logger.Infof("table %q has been opened in %.3f seconds; partsCount: %d; blocksCount: %d, itemsCount: %d; sizeBytes: %d",
- path, time.Since(startTime).Seconds(), m.PartsCount, m.BlocksCount, m.ItemsCount, m.SizeBytes)
-
- tb.convertersWG.Add(1)
- go func() {
- tb.convertToV1280()
- tb.convertersWG.Done()
- }()
+ path, time.Since(startTime).Seconds(), m.FilePartsCount, m.FileBlocksCount, m.FileItemsCount, m.FileSizeBytes)
if flushCallback != nil {
tb.flushCallbackWorkerWG.Add(1)
@@ -336,64 +357,43 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb
return tb, nil
}
+func (tb *Table) startBackgroundWorkers() {
+ tb.startMergeWorkers()
+ tb.startInmemoryPartsFlusher()
+ tb.startPendingItemsFlusher()
+}
+
// MustClose closes the table.
func (tb *Table) MustClose() {
close(tb.stopCh)
- logger.Infof("waiting for raw items flusher to stop on %q...", tb.path)
+ logger.Infof("waiting for background workers to stop on %q...", tb.path)
startTime := time.Now()
- tb.rawItemsFlusherWG.Wait()
- logger.Infof("raw items flusher stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), tb.path)
-
- logger.Infof("waiting for converters to stop on %q...", tb.path)
- startTime = time.Now()
- tb.convertersWG.Wait()
- logger.Infof("converters stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), tb.path)
-
- logger.Infof("waiting for part mergers to stop on %q...", tb.path)
- startTime = time.Now()
- tb.partMergersWG.Wait()
- logger.Infof("part mergers stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), tb.path)
+ tb.wg.Wait()
+ logger.Infof("background workers stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), tb.path)
logger.Infof("flushing inmemory parts to files on %q...", tb.path)
startTime = time.Now()
-
- // Flush raw items the last time before exit.
- tb.flushRawItems(true)
-
- // Flush inmemory parts to disk.
- var pws []*partWrapper
- tb.partsLock.Lock()
- for _, pw := range tb.parts {
- if pw.mp == nil {
- continue
- }
- if pw.isInMerge {
- logger.Panicf("BUG: the inmemory part %s mustn't be in merge after stopping parts merger in %q", &pw.mp.ph, tb.path)
- }
- pw.isInMerge = true
- pws = append(pws, pw)
- }
- tb.partsLock.Unlock()
-
- if err := tb.mergePartsOptimal(pws, nil); err != nil {
- logger.Panicf("FATAL: cannot flush inmemory parts to files in %q: %s", tb.path, err)
- }
- logger.Infof("%d inmemory parts have been flushed to files in %.3f seconds on %q", len(pws), time.Since(startTime).Seconds(), tb.path)
+ tb.flushInmemoryItems()
+ logger.Infof("inmemory parts have been successfully flushed to files in %.3f seconds at %q", time.Since(startTime).Seconds(), tb.path)
logger.Infof("waiting for flush callback worker to stop on %q...", tb.path)
startTime = time.Now()
tb.flushCallbackWorkerWG.Wait()
logger.Infof("flush callback worker stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), tb.path)
- // Remove references to parts from the tb, so they may be eventually closed
- // after all the searches are done.
+ // Remove references to parts from the tb, so they may be eventually closed after all the searches are done.
tb.partsLock.Lock()
- parts := tb.parts
- tb.parts = nil
+ inmemoryParts := tb.inmemoryParts
+ fileParts := tb.fileParts
+ tb.inmemoryParts = nil
+ tb.fileParts = nil
tb.partsLock.Unlock()
- for _, pw := range parts {
+ for _, pw := range inmemoryParts {
+ pw.decRef()
+ }
+ for _, pw := range fileParts {
pw.decRef()
}
@@ -410,20 +410,33 @@ func (tb *Table) Path() string {
// TableMetrics contains essential metrics for the Table.
type TableMetrics struct {
- ActiveMerges uint64
- MergesCount uint64
- ItemsMerged uint64
- AssistedMerges uint64
+ ActiveInmemoryMerges uint64
+ ActiveFileMerges uint64
+
+ InmemoryMergesCount uint64
+ FileMergesCount uint64
+
+ InmemoryItemsMerged uint64
+ FileItemsMerged uint64
+
+ AssistedInmemoryMerges uint64
+
ItemsAdded uint64
ItemsAddedSizeBytes uint64
PendingItems uint64
- PartsCount uint64
+ InmemoryPartsCount uint64
+ FilePartsCount uint64
- BlocksCount uint64
- ItemsCount uint64
- SizeBytes uint64
+ InmemoryBlocksCount uint64
+ FileBlocksCount uint64
+
+ InmemoryItemsCount uint64
+ FileItemsCount uint64
+
+ InmemorySizeBytes uint64
+ FileSizeBytes uint64
DataBlocksCacheSize uint64
DataBlocksCacheSizeBytes uint64
@@ -440,26 +453,46 @@ type TableMetrics struct {
PartsRefCount uint64
}
+// TotalItemsCount returns the total number of items in the table.
+func (tm *TableMetrics) TotalItemsCount() uint64 {
+ return tm.InmemoryItemsCount + tm.FileItemsCount
+}
+
// UpdateMetrics updates m with metrics from tb.
func (tb *Table) UpdateMetrics(m *TableMetrics) {
- m.ActiveMerges += atomic.LoadUint64(&tb.activeMerges)
- m.MergesCount += atomic.LoadUint64(&tb.mergesCount)
- m.ItemsMerged += atomic.LoadUint64(&tb.itemsMerged)
- m.AssistedMerges += atomic.LoadUint64(&tb.assistedMerges)
+ m.ActiveInmemoryMerges += atomic.LoadUint64(&tb.activeInmemoryMerges)
+ m.ActiveFileMerges += atomic.LoadUint64(&tb.activeFileMerges)
+
+ m.InmemoryMergesCount += atomic.LoadUint64(&tb.inmemoryMergesCount)
+ m.FileMergesCount += atomic.LoadUint64(&tb.fileMergesCount)
+
+ m.InmemoryItemsMerged += atomic.LoadUint64(&tb.inmemoryItemsMerged)
+ m.FileItemsMerged += atomic.LoadUint64(&tb.fileItemsMerged)
+
+ m.AssistedInmemoryMerges += atomic.LoadUint64(&tb.assistedInmemoryMerges)
+
m.ItemsAdded += atomic.LoadUint64(&tb.itemsAdded)
m.ItemsAddedSizeBytes += atomic.LoadUint64(&tb.itemsAddedSizeBytes)
m.PendingItems += uint64(tb.rawItems.Len())
tb.partsLock.Lock()
- m.PartsCount += uint64(len(tb.parts))
- for _, pw := range tb.parts {
+
+ m.InmemoryPartsCount += uint64(len(tb.inmemoryParts))
+ for _, pw := range tb.inmemoryParts {
p := pw.p
+ m.InmemoryBlocksCount += p.ph.blocksCount
+ m.InmemoryItemsCount += p.ph.itemsCount
+ m.InmemorySizeBytes += p.size
+ m.PartsRefCount += atomic.LoadUint64(&pw.refCount)
+ }
- m.BlocksCount += p.ph.blocksCount
- m.ItemsCount += p.ph.itemsCount
- m.SizeBytes += p.size
-
+ m.FilePartsCount += uint64(len(tb.fileParts))
+ for _, pw := range tb.fileParts {
+ p := pw.p
+ m.FileBlocksCount += p.ph.blocksCount
+ m.FileItemsCount += p.ph.itemsCount
+ m.FileSizeBytes += p.size
m.PartsRefCount += atomic.LoadUint64(&pw.refCount)
}
tb.partsLock.Unlock()
@@ -478,17 +511,17 @@ func (tb *Table) UpdateMetrics(m *TableMetrics) {
}
// AddItems adds the given items to the tb.
-func (tb *Table) AddItems(items [][]byte) error {
- if err := tb.rawItems.addItems(tb, items); err != nil {
- return fmt.Errorf("cannot insert data into %q: %w", tb.path, err)
- }
+//
+// The function panics when items contains an item with length exceeding maxInmemoryBlockSize.
+// It is the caller's responsibility to make sure there are no too long items.
+func (tb *Table) AddItems(items [][]byte) {
+ tb.rawItems.addItems(tb, items)
atomic.AddUint64(&tb.itemsAdded, uint64(len(items)))
n := 0
for _, item := range items {
n += len(item)
}
atomic.AddUint64(&tb.itemsAddedSizeBytes, uint64(n))
- return nil
}
// getParts appends parts snapshot to dst and returns it.
@@ -496,10 +529,14 @@ func (tb *Table) AddItems(items [][]byte) error {
// The appended parts must be released with putParts.
func (tb *Table) getParts(dst []*partWrapper) []*partWrapper {
tb.partsLock.Lock()
- for _, pw := range tb.parts {
+ for _, pw := range tb.inmemoryParts {
pw.incRef()
}
- dst = append(dst, tb.parts...)
+ for _, pw := range tb.fileParts {
+ pw.incRef()
+ }
+ dst = append(dst, tb.inmemoryParts...)
+ dst = append(dst, tb.fileParts...)
tb.partsLock.Unlock()
return dst
@@ -512,131 +549,142 @@ func (tb *Table) putParts(pws []*partWrapper) {
}
}
-func (tb *Table) startRawItemsFlusher() {
- tb.rawItemsFlusherWG.Add(1)
+func (tb *Table) mergePartsOptimal(pws []*partWrapper) error {
+ sortPartsForOptimalMerge(pws)
+ for len(pws) > 0 {
+ n := defaultPartsToMerge
+ if n > len(pws) {
+ n = len(pws)
+ }
+ pwsChunk := pws[:n]
+ pws = pws[n:]
+ err := tb.mergeParts(pwsChunk, nil, true)
+ if err == nil {
+ continue
+ }
+ tb.releasePartsToMerge(pws)
+ return fmt.Errorf("cannot optimally merge %d parts: %w", n, err)
+ }
+ return nil
+}
+
+// DebugFlush flushes all the added items to the storage, so they become visible to search.
+//
+// This function is only for debugging and testing.
+func (tb *Table) DebugFlush() {
+ tb.flushPendingItems(nil, true)
+
+ // Wait for background flushers to finish.
+ tb.rawItemsPendingFlushesWG.Wait()
+}
+
+func (tb *Table) startInmemoryPartsFlusher() {
+ tb.wg.Add(1)
go func() {
- tb.rawItemsFlusher()
- tb.rawItemsFlusherWG.Done()
+ tb.inmemoryPartsFlusher()
+ tb.wg.Done()
}()
}
-func (tb *Table) rawItemsFlusher() {
- ticker := time.NewTicker(rawItemsFlushInterval)
+func (tb *Table) startPendingItemsFlusher() {
+ tb.wg.Add(1)
+ go func() {
+ tb.pendingItemsFlusher()
+ tb.wg.Done()
+ }()
+}
+
+func (tb *Table) inmemoryPartsFlusher() {
+ ticker := time.NewTicker(dataFlushInterval)
defer ticker.Stop()
for {
select {
case <-tb.stopCh:
return
case <-ticker.C:
- tb.flushRawItems(false)
+ tb.flushInmemoryParts(false)
}
}
}
-const convertToV1280FileName = "converted-to-v1.28.0"
-
-func (tb *Table) convertToV1280() {
- // Convert tag->metricID rows into tag->metricIDs rows when upgrading to v1.28.0+.
- flagFilePath := tb.path + "/" + convertToV1280FileName
- if fs.IsPathExist(flagFilePath) {
- // The conversion has been already performed.
- return
- }
-
- getAllPartsForMerge := func() []*partWrapper {
- var pws []*partWrapper
- tb.partsLock.Lock()
- for _, pw := range tb.parts {
- if pw.isInMerge {
- continue
+func (tb *Table) pendingItemsFlusher() {
+ ticker := time.NewTicker(pendingItemsFlushInterval)
+ defer ticker.Stop()
+ var ibs []*inmemoryBlock
+ for {
+ select {
+ case <-tb.stopCh:
+ return
+ case <-ticker.C:
+ ibs = tb.flushPendingItems(ibs[:0], false)
+ for i := range ibs {
+ ibs[i] = nil
+ }
+ }
+ }
+}
+
+func (tb *Table) flushPendingItems(dst []*inmemoryBlock, isFinal bool) []*inmemoryBlock {
+ return tb.rawItems.flush(tb, dst, isFinal)
+}
+
+func (tb *Table) flushInmemoryItems() {
+ tb.rawItems.flush(tb, nil, true)
+ tb.flushInmemoryParts(true)
+}
+
+func (tb *Table) flushInmemoryParts(isFinal bool) {
+ for {
+ currentTime := time.Now()
+ var pws []*partWrapper
+
+ tb.partsLock.Lock()
+ for _, pw := range tb.inmemoryParts {
+ if !pw.isInMerge && (isFinal || pw.flushToDiskDeadline.Before(currentTime)) {
+ pw.isInMerge = true
+ pws = append(pws, pw)
}
- pw.isInMerge = true
- pws = append(pws, pw)
}
tb.partsLock.Unlock()
- return pws
- }
- pws := getAllPartsForMerge()
- if len(pws) > 0 {
- logger.Infof("started round 1 of background conversion of %q to v1.28.0 format; merge %d parts", tb.path, len(pws))
- startTime := time.Now()
- if err := tb.mergePartsOptimal(pws, tb.stopCh); err != nil {
- logger.Errorf("failed round 1 of background conversion of %q to v1.28.0 format: %s", tb.path, err)
+
+ if err := tb.mergePartsOptimal(pws); err != nil {
+ logger.Panicf("FATAL: cannot merge in-memory parts: %s", err)
+ }
+ if !isFinal {
return
}
- logger.Infof("finished round 1 of background conversion of %q to v1.28.0 format in %.3f seconds", tb.path, time.Since(startTime).Seconds())
-
- // The second round is needed in order to merge small blocks
- // with tag->metricIDs rows left after the first round.
- pws = getAllPartsForMerge()
- logger.Infof("started round 2 of background conversion of %q to v1.28.0 format; merge %d parts", tb.path, len(pws))
- startTime = time.Now()
- if len(pws) > 0 {
- if err := tb.mergePartsOptimal(pws, tb.stopCh); err != nil {
- logger.Errorf("failed round 2 of background conversion of %q to v1.28.0 format: %s", tb.path, err)
- return
- }
+ tb.partsLock.Lock()
+ n := len(tb.inmemoryParts)
+ tb.partsLock.Unlock()
+ if n == 0 {
+ // All the in-memory parts were flushed to disk.
+ return
}
- logger.Infof("finished round 2 of background conversion of %q to v1.28.0 format in %.3f seconds", tb.path, time.Since(startTime).Seconds())
- }
-
- if err := fs.WriteFileAtomically(flagFilePath, []byte("ok"), false); err != nil {
- logger.Panicf("FATAL: cannot create %q: %s", flagFilePath, err)
+ // Some parts weren't flushed to disk because they were being merged.
+ // Sleep for a while and try flushing them again.
+ time.Sleep(10 * time.Millisecond)
}
}
-func (tb *Table) mergePartsOptimal(pws []*partWrapper, stopCh <-chan struct{}) error {
- for len(pws) > defaultPartsToMerge {
- pwsChunk := pws[:defaultPartsToMerge]
- pws = pws[defaultPartsToMerge:]
- if err := tb.mergeParts(pwsChunk, stopCh, false); err != nil {
- tb.releasePartsToMerge(pws)
- return fmt.Errorf("cannot merge %d parts: %w", defaultPartsToMerge, err)
- }
- }
- if len(pws) == 0 {
- return nil
- }
- if err := tb.mergeParts(pws, stopCh, false); err != nil {
- return fmt.Errorf("cannot merge %d parts: %w", len(pws), err)
- }
- return nil
-}
-
-// DebugFlush flushes all the added items to the storage,
-// so they become visible to search.
-//
-// This function is only for debugging and testing.
-func (tb *Table) DebugFlush() {
- tb.flushRawItems(true)
-
- // Wait for background flushers to finish.
- tb.rawItemsPendingFlushesWG.Wait()
-}
-
-func (tb *Table) flushRawItems(isFinal bool) {
- tb.rawItems.flush(tb, isFinal)
-}
-
-func (riss *rawItemsShards) flush(tb *Table, isFinal bool) {
+func (riss *rawItemsShards) flush(tb *Table, dst []*inmemoryBlock, isFinal bool) []*inmemoryBlock {
tb.rawItemsPendingFlushesWG.Add(1)
defer tb.rawItemsPendingFlushesWG.Done()
- var blocksToFlush []*inmemoryBlock
for i := range riss.shards {
- blocksToFlush = riss.shards[i].appendBlocksToFlush(blocksToFlush, tb, isFinal)
+ dst = riss.shards[i].appendBlocksToFlush(dst, tb, isFinal)
}
- tb.mergeRawItemsBlocks(blocksToFlush, isFinal)
+ tb.flushBlocksToParts(dst, isFinal)
+ return dst
}
func (ris *rawItemsShard) appendBlocksToFlush(dst []*inmemoryBlock, tb *Table, isFinal bool) []*inmemoryBlock {
currentTime := fasttime.UnixTimestamp()
- flushSeconds := int64(rawItemsFlushInterval.Seconds())
+ flushSeconds := int64(pendingItemsFlushInterval.Seconds())
if flushSeconds <= 0 {
flushSeconds = 1
}
lastFlushTime := atomic.LoadUint64(&ris.lastFlushTime)
- if !isFinal && currentTime <= lastFlushTime+uint64(flushSeconds) {
+ if !isFinal && currentTime < lastFlushTime+uint64(flushSeconds) {
// Fast path - nothing to flush
return dst
}
@@ -653,29 +701,29 @@ func (ris *rawItemsShard) appendBlocksToFlush(dst []*inmemoryBlock, tb *Table, i
return dst
}
-func (tb *Table) mergeRawItemsBlocks(ibs []*inmemoryBlock, isFinal bool) {
+func (tb *Table) flushBlocksToParts(ibs []*inmemoryBlock, isFinal bool) {
if len(ibs) == 0 {
return
}
- tb.partMergersWG.Add(1)
- defer tb.partMergersWG.Done()
-
- pws := make([]*partWrapper, 0, (len(ibs)+defaultPartsToMerge-1)/defaultPartsToMerge)
var pwsLock sync.Mutex
- var wg sync.WaitGroup
+ pws := make([]*partWrapper, 0, (len(ibs)+defaultPartsToMerge-1)/defaultPartsToMerge)
+ wg := getWaitGroup()
for len(ibs) > 0 {
n := defaultPartsToMerge
if n > len(ibs) {
n = len(ibs)
}
wg.Add(1)
- go func(ibsPart []*inmemoryBlock) {
- defer wg.Done()
- pw := tb.mergeInmemoryBlocks(ibsPart)
+ flushConcurrencyCh <- struct{}{}
+ go func(ibsChunk []*inmemoryBlock) {
+ defer func() {
+ <-flushConcurrencyCh
+ wg.Done()
+ }()
+ pw := tb.createInmemoryPart(ibsChunk)
if pw == nil {
return
}
- pw.isInMerge = true
pwsLock.Lock()
pws = append(pws, pw)
pwsLock.Unlock()
@@ -683,48 +731,82 @@ func (tb *Table) mergeRawItemsBlocks(ibs []*inmemoryBlock, isFinal bool) {
ibs = ibs[n:]
}
wg.Wait()
- if len(pws) > 0 {
- if err := tb.mergeParts(pws, nil, true); err != nil {
- logger.Panicf("FATAL: cannot merge raw parts: %s", err)
- }
- if tb.flushCallback != nil {
- if isFinal {
- tb.flushCallback()
- } else {
- atomic.CompareAndSwapUint32(&tb.needFlushCallbackCall, 0, 1)
- }
+ putWaitGroup(wg)
+
+ tb.partsLock.Lock()
+ tb.inmemoryParts = append(tb.inmemoryParts, pws...)
+ tb.partsLock.Unlock()
+
+ flushConcurrencyCh <- struct{}{}
+ tb.assistedMergeForInmemoryParts()
+ <-flushConcurrencyCh
+ // There is no need for assisted merges of file parts,
+ // since the bottleneck can only occur at in-memory parts.
+
+ if tb.flushCallback != nil {
+ if isFinal {
+ tb.flushCallback()
+ } else {
+ atomic.CompareAndSwapUint32(&tb.needFlushCallbackCall, 0, 1)
}
}
+}
+var flushConcurrencyCh = make(chan struct{}, cgroup.AvailableCPUs())
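flushConcurrencyCh above is a buffered channel used as a counting semaphore: a goroutine sends into it to take a slot before flushing or assisting with merges, and receives from it to release the slot, so at most cgroup.AvailableCPUs() such goroutines run at once. The standalone sketch below shows the same pattern in isolation; it uses runtime.NumCPU() and made-up names instead of the package's own helpers.

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// concurrencyCh is a counting semaphore sized to the number of CPUs,
// mirroring the role of flushConcurrencyCh in the patch.
var concurrencyCh = make(chan struct{}, runtime.NumCPU())

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		concurrencyCh <- struct{}{} // acquire a slot; blocks when all slots are taken
		go func(n int) {
			defer func() {
				<-concurrencyCh // release the slot
				wg.Done()
			}()
			fmt.Println("flushing chunk", n)
		}(i)
	}
	wg.Wait()
}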
+
+func (tb *Table) assistedMergeForInmemoryParts() {
for {
tb.partsLock.Lock()
- ok := len(tb.parts) <= maxParts
+ ok := getNotInMergePartsCount(tb.inmemoryParts) < maxInmemoryParts
tb.partsLock.Unlock()
if ok {
return
}
- // The added part exceeds maxParts count. Assist with merging other parts.
- //
// Prioritize assisted merges over searches.
storagepacelimiter.Search.Inc()
- err := tb.mergeExistingParts(false)
+ err := tb.mergeInmemoryParts()
storagepacelimiter.Search.Dec()
if err == nil {
- atomic.AddUint64(&tb.assistedMerges, 1)
+ atomic.AddUint64(&tb.assistedInmemoryMerges, 1)
continue
}
- if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) || errors.Is(err, errReadOnlyMode) {
+ if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) {
return
}
- logger.Panicf("FATAL: cannot merge small parts: %s", err)
+ logger.Panicf("FATAL: cannot assist with merging inmemory parts: %s", err)
}
}
-func (tb *Table) mergeInmemoryBlocks(ibs []*inmemoryBlock) *partWrapper {
- atomic.AddUint64(&tb.mergesCount, 1)
- atomic.AddUint64(&tb.activeMerges, 1)
- defer atomic.AddUint64(&tb.activeMerges, ^uint64(0))
+func getNotInMergePartsCount(pws []*partWrapper) int {
+ n := 0
+ for _, pw := range pws {
+ if !pw.isInMerge {
+ n++
+ }
+ }
+ return n
+}
+
+func getWaitGroup() *sync.WaitGroup {
+ v := wgPool.Get()
+ if v == nil {
+ return &sync.WaitGroup{}
+ }
+ return v.(*sync.WaitGroup)
+}
+
+func putWaitGroup(wg *sync.WaitGroup) {
+ wgPool.Put(wg)
+}
+
+var wgPool sync.Pool
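getWaitGroup and putWaitGroup recycle *sync.WaitGroup values through a sync.Pool so the hot flush path doesn't allocate a fresh WaitGroup on every call. A minimal, self-contained sketch of how such a pool is intended to be used follows; the important detail is that the WaitGroup only goes back into the pool after Wait() has returned, when its counter is zero again.

package main

import (
	"fmt"
	"sync"
)

var wgPool sync.Pool

// getWaitGroup returns a pooled WaitGroup, allocating one only if the pool is empty.
func getWaitGroup() *sync.WaitGroup {
	v := wgPool.Get()
	if v == nil {
		return &sync.WaitGroup{}
	}
	return v.(*sync.WaitGroup)
}

func putWaitGroup(wg *sync.WaitGroup) {
	wgPool.Put(wg)
}

func main() {
	wg := getWaitGroup()
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			fmt.Println("worker", n)
		}(i)
	}
	wg.Wait()
	putWaitGroup(wg) // return only after Wait(), so the counter is back to zero
}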
+
+func (tb *Table) createInmemoryPart(ibs []*inmemoryBlock) *partWrapper {
+ outItemsCount := uint64(0)
+ for _, ib := range ibs {
+ outItemsCount += uint64(ib.Len())
+ }
// Prepare blockStreamReaders for source blocks.
bsrs := make([]*blockStreamReader, 0, len(ibs))
@@ -740,27 +822,29 @@ func (tb *Table) mergeInmemoryBlocks(ibs []*inmemoryBlock) *partWrapper {
if len(bsrs) == 0 {
return nil
}
+ flushToDiskDeadline := time.Now().Add(dataFlushInterval)
if len(bsrs) == 1 {
// Nothing to merge. Just return a single inmemory part.
+ bsr := bsrs[0]
mp := &inmemoryPart{}
- mp.Init(&bsrs[0].Block)
- p := mp.NewPart()
- return &partWrapper{
- p: p,
- mp: mp,
- refCount: 1,
- }
+ mp.Init(&bsr.Block)
+ putBlockStreamReader(bsr)
+ return newPartWrapperFromInmemoryPart(mp, flushToDiskDeadline)
}
// Prepare blockStreamWriter for destination part.
+ compressLevel := getCompressLevel(outItemsCount)
bsw := getBlockStreamWriter()
mpDst := &inmemoryPart{}
- bsw.InitFromInmemoryPart(mpDst)
+ bsw.InitFromInmemoryPart(mpDst, compressLevel)
// Merge parts.
// The merge shouldn't be interrupted by stopCh,
// since it may be final after stopCh is closed.
- err := mergeBlockStreams(&mpDst.ph, bsw, bsrs, tb.prepareBlock, nil, &tb.itemsMerged)
+ atomic.AddUint64(&tb.activeInmemoryMerges, 1)
+ err := mergeBlockStreams(&mpDst.ph, bsw, bsrs, tb.prepareBlock, nil, &tb.inmemoryItemsMerged)
+ atomic.AddUint64(&tb.activeInmemoryMerges, ^uint64(0))
+ atomic.AddUint64(&tb.inmemoryMergesCount, 1)
if err != nil {
logger.Panicf("FATAL: cannot merge inmemoryBlocks: %s", err)
}
@@ -768,33 +852,64 @@ func (tb *Table) mergeInmemoryBlocks(ibs []*inmemoryBlock) *partWrapper {
for _, bsr := range bsrs {
putBlockStreamReader(bsr)
}
+ return newPartWrapperFromInmemoryPart(mpDst, flushToDiskDeadline)
+}
- p := mpDst.NewPart()
+func newPartWrapperFromInmemoryPart(mp *inmemoryPart, flushToDiskDeadline time.Time) *partWrapper {
+ p := mp.NewPart()
return &partWrapper{
- p: p,
- mp: mpDst,
- refCount: 1,
+ p: p,
+ mp: mp,
+ refCount: 1,
+ flushToDiskDeadline: flushToDiskDeadline,
}
}
-func (tb *Table) startPartMergers() {
- for i := 0; i < mergeWorkersCount; i++ {
- tb.partMergersWG.Add(1)
+func (tb *Table) startMergeWorkers() {
+ for i := 0; i < cap(mergeWorkersLimitCh); i++ {
+ tb.wg.Add(1)
go func() {
- if err := tb.partMerger(); err != nil {
- logger.Panicf("FATAL: unrecoverable error when merging parts in %q: %s", tb.path, err)
- }
- tb.partMergersWG.Done()
+ tb.mergeWorker()
+ tb.wg.Done()
}()
}
}
+func getMaxInmemoryPartSize() uint64 {
+ // Allow up to 5% of memory for in-memory parts.
+ n := uint64(0.05 * float64(memory.Allowed()) / maxInmemoryParts)
+ if n < 1e6 {
+ n = 1e6
+ }
+ return n
+}
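For intuition, the limit is 5% of the allowed memory spread evenly across maxInmemoryParts slots, with a 1 MB floor. The tiny program below reproduces the arithmetic with hypothetical inputs (4 GiB of allowed memory and 64 in-memory part slots); the real values come from memory.Allowed() and the package constant, which aren't shown in this hunk.

package main

import "fmt"

func main() {
	// Hypothetical inputs: 4 GiB allowed memory and 64 in-memory part slots.
	allowedMemory := uint64(4 << 30)
	maxInmemoryParts := uint64(64)

	// Same arithmetic as getMaxInmemoryPartSize.
	n := uint64(0.05 * float64(allowedMemory) / float64(maxInmemoryParts))
	if n < 1e6 {
		n = 1e6 // never go below ~1 MB per in-memory part
	}
	fmt.Printf("max in-memory part size: %d bytes (~%.1f MB)\n", n, float64(n)/1e6)
}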
+
+func (tb *Table) getMaxFilePartSize() uint64 {
+ n := fs.MustGetFreeSpace(tb.path)
+ // Divide free space by the max number of concurrent merges.
+ maxOutBytes := n / uint64(cap(mergeWorkersLimitCh))
+ if maxOutBytes > maxPartSize {
+ maxOutBytes = maxPartSize
+ }
+ return maxOutBytes
+}
+
func (tb *Table) canBackgroundMerge() bool {
return atomic.LoadUint32(tb.isReadOnly) == 0
}
var errReadOnlyMode = fmt.Errorf("storage is in readonly mode")
+func (tb *Table) mergeInmemoryParts() error {
+ maxOutBytes := tb.getMaxFilePartSize()
+
+ tb.partsLock.Lock()
+ pws := getPartsToMerge(tb.inmemoryParts, maxOutBytes, false)
+ tb.partsLock.Unlock()
+
+ return tb.mergeParts(pws, tb.stopCh, false)
+}
+
func (tb *Table) mergeExistingParts(isFinal bool) error {
if !tb.canBackgroundMerge() {
// Do not perform background merge in read-only mode
@@ -802,32 +917,32 @@ func (tb *Table) mergeExistingParts(isFinal bool) error {
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2603
return errReadOnlyMode
}
- n := fs.MustGetFreeSpace(tb.path)
- // Divide free space by the max number of concurrent merges.
- maxOutBytes := n / uint64(mergeWorkersCount)
- if maxOutBytes > maxPartSize {
- maxOutBytes = maxPartSize
- }
+ maxOutBytes := tb.getMaxFilePartSize()
tb.partsLock.Lock()
- pws := getPartsToMerge(tb.parts, maxOutBytes, isFinal)
+ dst := make([]*partWrapper, 0, len(tb.inmemoryParts)+len(tb.fileParts))
+ dst = append(dst, tb.inmemoryParts...)
+ dst = append(dst, tb.fileParts...)
+ pws := getPartsToMerge(dst, maxOutBytes, isFinal)
tb.partsLock.Unlock()
- return tb.mergeParts(pws, tb.stopCh, false)
+ return tb.mergeParts(pws, tb.stopCh, isFinal)
}
const (
- minMergeSleepTime = time.Millisecond
- maxMergeSleepTime = time.Second
+ minMergeSleepTime = 10 * time.Millisecond
+ maxMergeSleepTime = 10 * time.Second
)
-func (tb *Table) partMerger() error {
+func (tb *Table) mergeWorker() {
sleepTime := minMergeSleepTime
var lastMergeTime uint64
isFinal := false
t := time.NewTimer(sleepTime)
for {
+ mergeWorkersLimitCh <- struct{}{}
err := tb.mergeExistingParts(isFinal)
+ <-mergeWorkersLimitCh
if err == nil {
// Try merging additional parts.
sleepTime = minMergeSleepTime
@@ -837,12 +952,13 @@ func (tb *Table) partMerger() error {
}
if errors.Is(err, errForciblyStopped) {
// The merger has been stopped.
- return nil
+ return
}
if !errors.Is(err, errNothingToMerge) && !errors.Is(err, errReadOnlyMode) {
- return err
+ // Unexpected error.
+ logger.Panicf("FATAL: unrecoverable error when merging inmemory parts in %q: %s", tb.path, err)
}
- if fasttime.UnixTimestamp()-lastMergeTime > 30 {
+ if finalMergeDelaySeconds > 0 && fasttime.UnixTimestamp()-lastMergeTime > finalMergeDelaySeconds {
// We have free time for merging into bigger parts.
// This should improve select performance.
lastMergeTime = fasttime.UnixTimestamp()
@@ -857,13 +973,27 @@ func (tb *Table) partMerger() error {
}
select {
case <-tb.stopCh:
- return nil
+ return
case <-t.C:
t.Reset(sleepTime)
}
}
}
+// Final merge is disabled by default, since it may lead to high disk IO and CPU usage
+// after a period of ingestion inactivity.
+var finalMergeDelaySeconds = uint64(0)
+
+// SetFinalMergeDelay sets the delay before performing a final merge for a Table that hasn't received newly ingested data.
+//
+// This function may be called only before Table initialization.
+func SetFinalMergeDelay(delay time.Duration) {
+ if delay <= 0 {
+ return
+ }
+ finalMergeDelaySeconds = uint64(delay.Seconds() + 1)
+}
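Since finalMergeDelaySeconds defaults to 0, final merges stay disabled unless a positive delay is configured before the first table is opened. A hedged usage sketch follows; the -finalMergeDelay flag name is invented for illustration and may not match the actual binary.

package main

import (
	"flag"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
)

// finalMergeDelay is a hypothetical flag; the real binary may expose this setting differently.
var finalMergeDelay = flag.Duration("finalMergeDelay", 0,
	"Delay before running final merges on tables without newly ingested data; 0 disables them")

func main() {
	flag.Parse()
	// Must be called before the first mergeset.OpenTable call; non-positive values are ignored.
	mergeset.SetFinalMergeDelay(*finalMergeDelay)
	// ... open tables / start the rest of the process here ...
}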
+
var errNothingToMerge = fmt.Errorf("nothing to merge")
func (tb *Table) releasePartsToMerge(pws []*partWrapper) {
@@ -877,155 +1007,318 @@ func (tb *Table) releasePartsToMerge(pws []*partWrapper) {
tb.partsLock.Unlock()
}
-// mergeParts merges pws.
+// mergeParts merges pws to a single resulting part.
//
// Merging is immediately stopped if stopCh is closed.
//
+// If isFinal is set, then the resulting part will be stored to disk.
+//
// All the parts inside pws must have isInMerge field set to true.
-func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterParts bool) error {
+func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isFinal bool) error {
if len(pws) == 0 {
// Nothing to merge.
return errNothingToMerge
}
defer tb.releasePartsToMerge(pws)
- atomic.AddUint64(&tb.mergesCount, 1)
- atomic.AddUint64(&tb.activeMerges, 1)
- defer atomic.AddUint64(&tb.activeMerges, ^uint64(0))
-
startTime := time.Now()
- // Prepare blockStreamReaders for source parts.
- bsrs := make([]*blockStreamReader, 0, len(pws))
- defer func() {
+ // Initialize destination paths.
+ dstPartType := getDstPartType(pws, isFinal)
+ tmpPartPath, mergeIdx := tb.getDstPartPaths(dstPartType)
+
+ if isFinal && len(pws) == 1 && pws[0].mp != nil {
+ // Fast path: flush a single in-memory part to disk.
+ mp := pws[0].mp
+ if tmpPartPath == "" {
+ logger.Panicf("BUG: tmpPartPath must be non-empty")
+ }
+ if err := mp.StoreToDisk(tmpPartPath); err != nil {
+ return fmt.Errorf("cannot store in-memory part to %q: %w", tmpPartPath, err)
+ }
+ pwNew, err := tb.openCreatedPart(&mp.ph, pws, nil, tmpPartPath, mergeIdx)
+ if err != nil {
+ return fmt.Errorf("cannot atomically register the created part: %w", err)
+ }
+ tb.swapSrcWithDstParts(pws, pwNew, dstPartType)
+ return nil
+ }
+
+ // Prepare BlockStreamReaders for source parts.
+ bsrs, err := openBlockStreamReaders(pws)
+ if err != nil {
+ return err
+ }
+ closeBlockStreamReaders := func() {
for _, bsr := range bsrs {
putBlockStreamReader(bsr)
}
- }()
+ bsrs = nil
+ }
+
+ // Prepare BlockStreamWriter for destination part.
+ srcSize := uint64(0)
+ srcItemsCount := uint64(0)
+ srcBlocksCount := uint64(0)
+ for _, pw := range pws {
+ srcSize += pw.p.size
+ srcItemsCount += pw.p.ph.itemsCount
+ srcBlocksCount += pw.p.ph.blocksCount
+ }
+ compressLevel := getCompressLevel(srcItemsCount)
+ bsw := getBlockStreamWriter()
+ var mpNew *inmemoryPart
+ if dstPartType == partInmemory {
+ mpNew = &inmemoryPart{}
+ bsw.InitFromInmemoryPart(mpNew, compressLevel)
+ } else {
+ if tmpPartPath == "" {
+ logger.Panicf("BUG: tmpPartPath must be non-empty")
+ }
+ nocache := srcItemsCount > maxItemsPerCachedPart()
+ if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil {
+ closeBlockStreamReaders()
+ return fmt.Errorf("cannot create destination part at %q: %w", tmpPartPath, err)
+ }
+ }
+
+ // Merge source parts to destination part.
+ ph, err := tb.mergePartsInternal(tmpPartPath, bsw, bsrs, dstPartType, stopCh)
+ putBlockStreamWriter(bsw)
+ closeBlockStreamReaders()
+ if err != nil {
+ return fmt.Errorf("cannot merge %d parts: %w", len(pws), err)
+ }
+ if mpNew != nil {
+ // Update partHeader for destination inmemory part after the merge.
+ mpNew.ph = *ph
+ }
+
+ // Atomically move the created part from tmpPartPath to its destination
+ // and swap the source parts with the newly created part.
+ pwNew, err := tb.openCreatedPart(ph, pws, mpNew, tmpPartPath, mergeIdx)
+ if err != nil {
+ return fmt.Errorf("cannot atomically register the created part: %w", err)
+ }
+ tb.swapSrcWithDstParts(pws, pwNew, dstPartType)
+
+ d := time.Since(startTime)
+ if d <= 30*time.Second {
+ return nil
+ }
+
+ // Log stats for long merges.
+ dstItemsCount := uint64(0)
+ dstBlocksCount := uint64(0)
+ dstSize := uint64(0)
+ dstPartPath := ""
+ if pwNew != nil {
+ pDst := pwNew.p
+ dstItemsCount = pDst.ph.itemsCount
+ dstBlocksCount = pDst.ph.blocksCount
+ dstSize = pDst.size
+ dstPartPath = pDst.path
+ }
+ durationSecs := d.Seconds()
+ itemsPerSec := int(float64(srcItemsCount) / durationSecs)
+ logger.Infof("merged (%d parts, %d items, %d blocks, %d bytes) into (1 part, %d items, %d blocks, %d bytes) in %.3f seconds at %d items/sec to %q",
+ len(pws), srcItemsCount, srcBlocksCount, srcSize, dstItemsCount, dstBlocksCount, dstSize, durationSecs, itemsPerSec, dstPartPath)
+
+ return nil
+}
+
+func getFlushToDiskDeadline(pws []*partWrapper) time.Time {
+ d := pws[0].flushToDiskDeadline
+ for _, pw := range pws[1:] {
+ if pw.flushToDiskDeadline.Before(d) {
+ d = pw.flushToDiskDeadline
+ }
+ }
+ return d
+}
+
+type partType int
+
+var (
+ partInmemory = partType(0)
+ partFile = partType(1)
+)
+
+func getDstPartType(pws []*partWrapper, isFinal bool) partType {
+ dstPartSize := getPartsSize(pws)
+ if isFinal || dstPartSize > getMaxInmemoryPartSize() {
+ return partFile
+ }
+ if !areAllInmemoryParts(pws) {
+ // If at least one source part is located on disk,
+ // then the destination part must be stored on disk for durability reasons.
+ return partFile
+ }
+ return partInmemory
+}
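In other words, the merge result goes to a file part when the merge is final, when the combined size exceeds the in-memory limit, or when any source part already lives on disk; only small merges of purely in-memory sources stay in memory. The standalone helper below restates that decision over plain arguments; the function and parameter names are hypothetical and exist only to make the rule explicit.

package main

import "fmt"

// destinationIsFile mirrors the decision in getDstPartType, expressed over plain
// booleans and sizes for illustration.
func destinationIsFile(isFinal bool, dstPartSize, maxInmemoryPartSize uint64, allSourcesInMemory bool) bool {
	if isFinal || dstPartSize > maxInmemoryPartSize {
		return true
	}
	// If any source part is already on disk, keep the result on disk for durability.
	return !allSourcesInMemory
}

func main() {
	fmt.Println(destinationIsFile(false, 1<<20, 4<<20, true))  // false: small result, all sources in memory
	fmt.Println(destinationIsFile(false, 8<<20, 4<<20, true))  // true: result too big for memory
	fmt.Println(destinationIsFile(false, 1<<20, 4<<20, false)) // true: a source part is on disk
	fmt.Println(destinationIsFile(true, 1<<20, 4<<20, true))   // true: final merges always go to disk
}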
+
+func (tb *Table) getDstPartPaths(dstPartType partType) (string, uint64) {
+ tmpPartPath := ""
+ mergeIdx := tb.nextMergeIdx()
+ switch dstPartType {
+ case partInmemory:
+ case partFile:
+ tmpPartPath = fmt.Sprintf("%s/tmp/%016X", tb.path, mergeIdx)
+ default:
+ logger.Panicf("BUG: unknown partType=%d", dstPartType)
+ }
+ return tmpPartPath, mergeIdx
+}
+
+func openBlockStreamReaders(pws []*partWrapper) ([]*blockStreamReader, error) {
+ bsrs := make([]*blockStreamReader, 0, len(pws))
for _, pw := range pws {
bsr := getBlockStreamReader()
if pw.mp != nil {
- if !isOuterParts {
- logger.Panicf("BUG: inmemory part must be always outer")
- }
bsr.InitFromInmemoryPart(pw.mp)
} else {
if err := bsr.InitFromFilePart(pw.p.path); err != nil {
- return fmt.Errorf("cannot open source part for merging: %w", err)
+ for _, bsr := range bsrs {
+ putBlockStreamReader(bsr)
+ }
+ return nil, fmt.Errorf("cannot open source part for merging: %w", err)
}
}
bsrs = append(bsrs, bsr)
}
+ return bsrs, nil
+}
- outItemsCount := uint64(0)
- outBlocksCount := uint64(0)
- for _, pw := range pws {
- outItemsCount += pw.p.ph.itemsCount
- outBlocksCount += pw.p.ph.blocksCount
- }
- nocache := true
- if outItemsCount < maxItemsPerCachedPart() {
- // Cache small (i.e. recent) output parts in OS file cache,
- // since there is high chance they will be read soon.
- nocache = false
- }
-
- // Prepare blockStreamWriter for destination part.
- mergeIdx := tb.nextMergeIdx()
- tmpPartPath := fmt.Sprintf("%s/tmp/%016X", tb.path, mergeIdx)
- bsw := getBlockStreamWriter()
- compressLevel := getCompressLevelForPartItems(outItemsCount, outBlocksCount)
- if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil {
- return fmt.Errorf("cannot create destination part %q: %w", tmpPartPath, err)
- }
-
- // Merge parts into a temporary location.
+func (tb *Table) mergePartsInternal(tmpPartPath string, bsw *blockStreamWriter, bsrs []*blockStreamReader, dstPartType partType, stopCh <-chan struct{}) (*partHeader, error) {
var ph partHeader
- err := mergeBlockStreams(&ph, bsw, bsrs, tb.prepareBlock, stopCh, &tb.itemsMerged)
- putBlockStreamWriter(bsw)
+ var itemsMerged *uint64
+ var mergesCount *uint64
+ var activeMerges *uint64
+ switch dstPartType {
+ case partInmemory:
+ itemsMerged = &tb.inmemoryItemsMerged
+ mergesCount = &tb.inmemoryMergesCount
+ activeMerges = &tb.activeInmemoryMerges
+ case partFile:
+ itemsMerged = &tb.fileItemsMerged
+ mergesCount = &tb.fileMergesCount
+ activeMerges = &tb.activeFileMerges
+ default:
+ logger.Panicf("BUG: unknown partType=%d", dstPartType)
+ }
+ atomic.AddUint64(activeMerges, 1)
+ err := mergeBlockStreams(&ph, bsw, bsrs, tb.prepareBlock, stopCh, itemsMerged)
+ atomic.AddUint64(activeMerges, ^uint64(0))
+ atomic.AddUint64(mergesCount, 1)
if err != nil {
- return fmt.Errorf("error when merging parts to %q: %w", tmpPartPath, err)
+ return nil, fmt.Errorf("cannot merge parts to %q: %w", tmpPartPath, err)
}
- if err := ph.WriteMetadata(tmpPartPath); err != nil {
- return fmt.Errorf("cannot write metadata to destination part %q: %w", tmpPartPath, err)
- }
-
- // Close bsrs (aka source parts).
- for _, bsr := range bsrs {
- putBlockStreamReader(bsr)
- }
- bsrs = nil
-
- // Create a transaction for atomic deleting old parts and moving
- // new part to its destination place.
- var bb bytesutil.ByteBuffer
- for _, pw := range pws {
- if pw.mp == nil {
- fmt.Fprintf(&bb, "%s\n", pw.p.path)
+ if tmpPartPath != "" {
+ if err := ph.WriteMetadata(tmpPartPath); err != nil {
+ return nil, fmt.Errorf("cannot write metadata to destination part %q: %w", tmpPartPath, err)
}
}
- dstPartPath := ph.Path(tb.path, mergeIdx)
- fmt.Fprintf(&bb, "%s -> %s\n", tmpPartPath, dstPartPath)
- txnPath := fmt.Sprintf("%s/txn/%016X", tb.path, mergeIdx)
- if err := fs.WriteFileAtomically(txnPath, bb.B, false); err != nil {
- return fmt.Errorf("cannot create transaction file %q: %w", txnPath, err)
- }
+ return &ph, nil
+}
- // Run the created transaction.
- if err := runTransaction(&tb.snapshotLock, tb.path, txnPath); err != nil {
- return fmt.Errorf("cannot execute transaction %q: %w", txnPath, err)
- }
+func (tb *Table) openCreatedPart(ph *partHeader, pws []*partWrapper, mpNew *inmemoryPart, tmpPartPath string, mergeIdx uint64) (*partWrapper, error) {
+ dstPartPath := ""
+ if mpNew == nil || !areAllInmemoryParts(pws) {
+ // Either source or destination parts are located on disk.
+ // Create a transaction for atomically deleting the old parts and moving the new part to its destination on disk.
+ var bb bytesutil.ByteBuffer
+ for _, pw := range pws {
+ if pw.mp == nil {
+ fmt.Fprintf(&bb, "%s\n", pw.p.path)
+ }
+ }
+ dstPartPath = ph.Path(tb.path, mergeIdx)
+ fmt.Fprintf(&bb, "%s -> %s\n", tmpPartPath, dstPartPath)
+ txnPath := fmt.Sprintf("%s/txn/%016X", tb.path, mergeIdx)
+ if err := fs.WriteFileAtomically(txnPath, bb.B, false); err != nil {
+ return nil, fmt.Errorf("cannot create transaction file %q: %w", txnPath, err)
+ }
- // Open the merged part.
- newP, err := openFilePart(dstPartPath)
+ // Run the created transaction.
+ if err := runTransaction(&tb.snapshotLock, tb.path, txnPath); err != nil {
+ return nil, fmt.Errorf("cannot execute transaction %q: %w", txnPath, err)
+ }
+ }
+ // Open the created part.
+ if mpNew != nil {
+ // Open the created part from memory.
+ flushToDiskDeadline := getFlushToDiskDeadline(pws)
+ pwNew := newPartWrapperFromInmemoryPart(mpNew, flushToDiskDeadline)
+ return pwNew, nil
+ }
+ // Open the created part from disk.
+ pNew, err := openFilePart(dstPartPath)
if err != nil {
- return fmt.Errorf("cannot open merged part %q: %w", dstPartPath, err)
+ return nil, fmt.Errorf("cannot open merged part %q: %w", dstPartPath, err)
}
- newPSize := newP.size
- newPW := &partWrapper{
- p: newP,
+ pwNew := &partWrapper{
+ p: pNew,
refCount: 1,
}
+ return pwNew, nil
+}
- // Atomically remove old parts and add new part.
+func areAllInmemoryParts(pws []*partWrapper) bool {
+ for _, pw := range pws {
+ if pw.mp == nil {
+ return false
+ }
+ }
+ return true
+}
+
+func (tb *Table) swapSrcWithDstParts(pws []*partWrapper, pwNew *partWrapper, dstPartType partType) {
+ // Atomically unregister old parts and add new part to tb.
m := make(map[*partWrapper]bool, len(pws))
for _, pw := range pws {
m[pw] = true
}
if len(m) != len(pws) {
- logger.Panicf("BUG: %d duplicate parts found in the merge of %d parts", len(pws)-len(m), len(pws))
+ logger.Panicf("BUG: %d duplicate parts found when merging %d parts", len(pws)-len(m), len(pws))
}
- removedParts := 0
+ removedInmemoryParts := 0
+ removedFileParts := 0
+
tb.partsLock.Lock()
- tb.parts, removedParts = removeParts(tb.parts, m)
- tb.parts = append(tb.parts, newPW)
+ tb.inmemoryParts, removedInmemoryParts = removeParts(tb.inmemoryParts, m)
+ tb.fileParts, removedFileParts = removeParts(tb.fileParts, m)
+ if pwNew != nil {
+ switch dstPartType {
+ case partInmemory:
+ tb.inmemoryParts = append(tb.inmemoryParts, pwNew)
+ case partFile:
+ tb.fileParts = append(tb.fileParts, pwNew)
+ default:
+ logger.Panicf("BUG: unknown partType=%d", dstPartType)
+ }
+ }
tb.partsLock.Unlock()
+
+ removedParts := removedInmemoryParts + removedFileParts
if removedParts != len(m) {
- if !isOuterParts {
- logger.Panicf("BUG: unexpected number of parts removed; got %d; want %d", removedParts, len(m))
- }
- if removedParts != 0 {
- logger.Panicf("BUG: removed non-zero outer parts: %d", removedParts)
- }
+ logger.Panicf("BUG: unexpected number of parts removed; got %d, want %d", removedParts, len(m))
}
- // Remove partition references from old parts.
+ // Remove references from old parts.
for _, pw := range pws {
pw.decRef()
}
-
- d := time.Since(startTime)
- if d > 30*time.Second {
- logger.Infof("merged %d items across %d blocks in %.3f seconds at %d items/sec to %q; sizeBytes: %d",
- outItemsCount, outBlocksCount, d.Seconds(), int(float64(outItemsCount)/d.Seconds()), dstPartPath, newPSize)
- }
-
- return nil
}
-func getCompressLevelForPartItems(itemsCount, blocksCount uint64) int {
- // There is no need in using blocksCount here, since mergeset blocks are usually full.
+func getPartsSize(pws []*partWrapper) uint64 {
+ n := uint64(0)
+ for _, pw := range pws {
+ n += pw.p.size
+ }
+ return n
+}
+func getCompressLevel(itemsCount uint64) int {
if itemsCount <= 1<<16 {
// -5 is the minimum supported compression for zstd.
// See https://github.com/facebook/zstd/releases/tag/v1.3.4
@@ -1059,7 +1352,7 @@ func (tb *Table) nextMergeIdx() uint64 {
return atomic.AddUint64(&tb.mergeIdx, 1)
}
-var mergeWorkersCount = cgroup.AvailableCPUs()
+var mergeWorkersLimitCh = make(chan struct{}, cgroup.AvailableCPUs())
func openParts(path string) ([]*partWrapper, error) {
// The path can be missing after restoring from backup, so create it if needed.
@@ -1164,7 +1457,7 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error {
}
// Flush inmemory items to disk.
- tb.flushRawItems(true)
+ tb.flushInmemoryItems()
// The snapshot must be created under the lock in order to prevent from
// concurrent modifications via runTransaction.
@@ -1188,16 +1481,7 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error {
for _, fi := range fis {
fn := fi.Name()
if !fs.IsDirOrSymlink(fi) {
- switch fn {
- case convertToV1280FileName:
- srcPath := srcDir + "/" + fn
- dstPath := dstDir + "/" + fn
- if err := os.Link(srcPath, dstPath); err != nil {
- return fmt.Errorf("cannot hard link from %q to %q: %w", srcPath, dstPath, err)
- }
- default:
- // Skip other non-directories.
- }
+ // Skip non-directories.
continue
}
if isSpecialDir(fn) {
@@ -1232,7 +1516,7 @@ func runTransactions(txnLock *sync.RWMutex, path string) error {
if os.IsNotExist(err) {
return nil
}
- return fmt.Errorf("cannot open %q: %w", txnDir, err)
+ return fmt.Errorf("cannot open transaction dir: %w", err)
}
defer fs.MustClose(d)
@@ -1412,8 +1696,7 @@ func appendPartsToMerge(dst, src []*partWrapper, maxPartsToMerge int, maxOutByte
}
src = tmp
- // Sort src parts by size.
- sort.Slice(src, func(i, j int) bool { return src[i].p.size < src[j].p.size })
+ sortPartsForOptimalMerge(src)
maxSrcParts := maxPartsToMerge
if maxSrcParts > len(src) {
@@ -1464,17 +1747,24 @@ func appendPartsToMerge(dst, src []*partWrapper, maxPartsToMerge int, maxOutByte
return append(dst, pws...)
}
+func sortPartsForOptimalMerge(pws []*partWrapper) {
+ // Sort src parts by size.
+ sort.Slice(pws, func(i, j int) bool {
+ return pws[i].p.size < pws[j].p.size
+ })
+}
+
func removeParts(pws []*partWrapper, partsToRemove map[*partWrapper]bool) ([]*partWrapper, int) {
- removedParts := 0
dst := pws[:0]
for _, pw := range pws {
if !partsToRemove[pw] {
dst = append(dst, pw)
- continue
}
- removedParts++
}
- return dst, removedParts
+ for i := len(dst); i < len(pws); i++ {
+ pws[i] = nil
+ }
+ return dst, len(pws) - len(dst)
}
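The rewritten removeParts filters pws in place and then nils out the tail of the original slice; without that step the shared backing array would keep pointers to removed *partWrapper values alive and delay their garbage collection. Below is a generic sketch of the same in-place filter pattern, using throwaway types rather than the package's part wrappers.

package main

import "fmt"

// filterInPlace keeps only the elements not present in drop, reusing the backing array.
func filterInPlace(items []*string, drop map[*string]bool) ([]*string, int) {
	dst := items[:0]
	for _, it := range items {
		if !drop[it] {
			dst = append(dst, it)
		}
	}
	// Zero the tail so dropped pointers become unreachable and can be collected.
	for i := len(dst); i < len(items); i++ {
		items[i] = nil
	}
	return dst, len(items) - len(dst)
}

func main() {
	a, b, c := "a", "b", "c"
	items := []*string{&a, &b, &c}
	kept, removed := filterInPlace(items, map[*string]bool{&b: true})
	fmt.Println(len(kept), removed) // 2 1
}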
func isSpecialDir(name string) bool {
diff --git a/lib/mergeset/table_search_test.go b/lib/mergeset/table_search_test.go
index 249aa3109a..f0ec1f8882 100644
--- a/lib/mergeset/table_search_test.go
+++ b/lib/mergeset/table_search_test.go
@@ -161,9 +161,7 @@ func newTestTable(path string, itemsCount int) (*Table, []string, error) {
items := make([]string, itemsCount)
for i := 0; i < itemsCount; i++ {
item := fmt.Sprintf("%d:%d", rand.Intn(1e9), i)
- if err := tb.AddItems([][]byte{[]byte(item)}); err != nil {
- return nil, nil, fmt.Errorf("cannot add item: %w", err)
- }
+ tb.AddItems([][]byte{[]byte(item)})
items[i] = item
}
tb.DebugFlush()
diff --git a/lib/mergeset/table_test.go b/lib/mergeset/table_test.go
index f99ab937c2..eff20bcb5f 100644
--- a/lib/mergeset/table_test.go
+++ b/lib/mergeset/table_test.go
@@ -7,8 +7,6 @@ import (
"sync"
"sync/atomic"
"testing"
-
- "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
func TestTableOpenClose(t *testing.T) {
@@ -31,7 +29,7 @@ func TestTableOpenClose(t *testing.T) {
tb.MustClose()
// Re-open created table multiple times.
- for i := 0; i < 10; i++ {
+ for i := 0; i < 4; i++ {
tb, err := OpenTable(path, nil, nil, &isReadOnly)
if err != nil {
t.Fatalf("cannot open created table: %s", err)
@@ -53,7 +51,7 @@ func TestTableOpenMultipleTimes(t *testing.T) {
}
defer tb1.MustClose()
- for i := 0; i < 10; i++ {
+ for i := 0; i < 4; i++ {
tb2, err := OpenTable(path, nil, nil, &isReadOnly)
if err == nil {
tb2.MustClose()
@@ -62,8 +60,8 @@ func TestTableOpenMultipleTimes(t *testing.T) {
}
}
-func TestTableAddItemSerial(t *testing.T) {
- const path = "TestTableAddItemSerial"
+func TestTableAddItemsSerial(t *testing.T) {
+ const path = "TestTableAddItemsSerial"
if err := os.RemoveAll(path); err != nil {
t.Fatalf("cannot remove %q: %s", path, err)
}
@@ -81,7 +79,7 @@ func TestTableAddItemSerial(t *testing.T) {
t.Fatalf("cannot open %q: %s", path, err)
}
- const itemsCount = 1e5
+ const itemsCount = 10e3
testAddItemsSerial(tb, itemsCount)
// Verify items count after pending items flush.
@@ -92,13 +90,13 @@ func TestTableAddItemSerial(t *testing.T) {
var m TableMetrics
tb.UpdateMetrics(&m)
- if m.ItemsCount != itemsCount {
- t.Fatalf("unexpected itemsCount; got %d; want %v", m.ItemsCount, itemsCount)
+ if n := m.TotalItemsCount(); n != itemsCount {
+ t.Fatalf("unexpected itemsCount; got %d; want %v", n, itemsCount)
}
tb.MustClose()
- // Re-open the table and make sure ItemsCount remains the same.
+ // Re-open the table and make sure itemsCount remains the same.
testReopenTable(t, path, itemsCount)
// Add more items in order to verify merge between inmemory parts and file-based parts.
@@ -110,7 +108,7 @@ func TestTableAddItemSerial(t *testing.T) {
testAddItemsSerial(tb, moreItemsCount)
tb.MustClose()
- // Re-open the table and verify ItemsCount again.
+ // Re-open the table and verify itemsCount again.
testReopenTable(t, path, itemsCount+moreItemsCount)
}
@@ -120,9 +118,7 @@ func testAddItemsSerial(tb *Table, itemsCount int) {
if len(item) > maxInmemoryBlockSize {
item = item[:maxInmemoryBlockSize]
}
- if err := tb.AddItems([][]byte{item}); err != nil {
- logger.Panicf("BUG: cannot add item to table: %s", err)
- }
+ tb.AddItems([][]byte{item})
}
}
@@ -146,9 +142,7 @@ func TestTableCreateSnapshotAt(t *testing.T) {
const itemsCount = 3e5
for i := 0; i < itemsCount; i++ {
item := []byte(fmt.Sprintf("item %d", i))
- if err := tb.AddItems([][]byte{item}); err != nil {
- t.Fatalf("cannot add item to table: %s", err)
- }
+ tb.AddItems([][]byte{item})
}
tb.DebugFlush()
@@ -221,9 +215,7 @@ func TestTableAddItemsConcurrent(t *testing.T) {
flushCallback := func() {
atomic.AddUint64(&flushes, 1)
}
- var itemsMerged uint64
prepareBlock := func(data []byte, items []Item) ([]byte, []Item) {
- atomic.AddUint64(&itemsMerged, uint64(len(items)))
return data, items
}
var isReadOnly uint32
@@ -232,7 +224,7 @@ func TestTableAddItemsConcurrent(t *testing.T) {
t.Fatalf("cannot open %q: %s", path, err)
}
- const itemsCount = 1e5
+ const itemsCount = 10e3
testAddItemsConcurrent(tb, itemsCount)
// Verify items count after pending items flush.
@@ -240,20 +232,16 @@ func TestTableAddItemsConcurrent(t *testing.T) {
if atomic.LoadUint64(&flushes) == 0 {
t.Fatalf("unexpected zero flushes")
}
- n := atomic.LoadUint64(&itemsMerged)
- if n < itemsCount {
- t.Fatalf("too low number of items merged; got %v; must be at least %v", n, itemsCount)
- }
var m TableMetrics
tb.UpdateMetrics(&m)
- if m.ItemsCount != itemsCount {
- t.Fatalf("unexpected itemsCount; got %d; want %v", m.ItemsCount, itemsCount)
+ if n := m.TotalItemsCount(); n != itemsCount {
+ t.Fatalf("unexpected itemsCount; got %d; want %v", n, itemsCount)
}
tb.MustClose()
- // Re-open the table and make sure ItemsCount remains the same.
+ // Re-open the table and make sure itemsCount remains the same.
testReopenTable(t, path, itemsCount)
// Add more items in order to verify merge between inmemory parts and file-based parts.
@@ -265,7 +253,7 @@ func TestTableAddItemsConcurrent(t *testing.T) {
testAddItemsConcurrent(tb, moreItemsCount)
tb.MustClose()
- // Re-open the table and verify ItemsCount again.
+ // Re-open the table and verify itemsCount again.
testReopenTable(t, path, itemsCount+moreItemsCount)
}
@@ -282,9 +270,7 @@ func testAddItemsConcurrent(tb *Table, itemsCount int) {
if len(item) > maxInmemoryBlockSize {
item = item[:maxInmemoryBlockSize]
}
- if err := tb.AddItems([][]byte{item}); err != nil {
- logger.Panicf("BUG: cannot add item to table: %s", err)
- }
+ tb.AddItems([][]byte{item})
}
}()
}
@@ -306,8 +292,8 @@ func testReopenTable(t *testing.T, path string, itemsCount int) {
}
var m TableMetrics
tb.UpdateMetrics(&m)
- if m.ItemsCount != uint64(itemsCount) {
- t.Fatalf("unexpected itemsCount after re-opening; got %d; want %v", m.ItemsCount, itemsCount)
+ if n := m.TotalItemsCount(); n != uint64(itemsCount) {
+ t.Fatalf("unexpected itemsCount after re-opening; got %d; want %v", n, itemsCount)
}
tb.MustClose()
}
diff --git a/lib/promrelabel/graphite.go b/lib/promrelabel/graphite.go
index 015f6bcdc2..de39634b2e 100644
--- a/lib/promrelabel/graphite.go
+++ b/lib/promrelabel/graphite.go
@@ -106,7 +106,7 @@ func (gmt *graphiteMatchTemplate) Match(dst []string, s string) ([]string, bool)
dst = append(dst, s)
return dst, true
}
- // Search for the the start of the next part.
+ // Search for the start of the next part.
p = parts[i+1]
i++
n := strings.Index(s, p)
diff --git a/lib/promscrape/config.go b/lib/promscrape/config.go
index aaa6f8a8a6..eba623caec 100644
--- a/lib/promscrape/config.go
+++ b/lib/promscrape/config.go
@@ -137,7 +137,7 @@ func (cfg *Config) mustRestart(prevCfg *Config) {
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2884
needGlobalRestart := !areEqualGlobalConfigs(&cfg.Global, &prevCfg.Global)
- // Loop over the the new jobs, start new ones and restart updated ones.
+ // Loop over the new jobs, start new ones and restart updated ones.
var started, stopped, restarted int
currentJobNames := make(map[string]struct{}, len(cfg.ScrapeConfigs))
for i, sc := range cfg.ScrapeConfigs {
diff --git a/lib/protoparser/datadog/parser.go b/lib/protoparser/datadog/parser.go
index 931617d5e2..7e36e931c2 100644
--- a/lib/protoparser/datadog/parser.go
+++ b/lib/protoparser/datadog/parser.go
@@ -67,6 +67,10 @@ type Series struct {
Metric string `json:"metric"`
Points []Point `json:"points"`
Tags []string `json:"tags"`
+ // The device field isn't documented in the Datadog API docs, but datadog-agent does send it:
+ // datadog-agent (at least v7) strips the "device" tag from Tags and submits it as a separate field.
+ // https://github.com/DataDog/datadog-agent/blob/0ada7a97fed6727838a6f4d9c87123d2aafde735/pkg/metrics/series.go#L84-L105
+ Device string `json:"device"`
// Do not decode Type, since it isn't used by VictoriaMetrics
// Type string `json:"type"`
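The new Device field simply captures the top-level "device" key that datadog-agent emits next to the tags, so it can later be attached to the series as an ordinary label. The snippet below shows such a payload being decoded with encoding/json; the series struct is a trimmed stand-in for the parser's real Series type, not the actual definition.

package main

import (
	"encoding/json"
	"fmt"
)

// series is a trimmed stand-in for the parser's Series type.
type series struct {
	Metric string      `json:"metric"`
	Device string      `json:"device"`
	Tags   []string    `json:"tags"`
	Points [][]float64 `json:"points"`
}

func main() {
	payload := []byte(`{"metric":"system.io.await","device":"/dev/sda","tags":["env:prod"],"points":[[1575317847,0.5]]}`)
	var s series
	if err := json.Unmarshal(payload, &s); err != nil {
		panic(err)
	}
	// The device value can then be treated as an extra tag, e.g. device=/dev/sda.
	fmt.Printf("%s device=%s tags=%v\n", s.Metric, s.Device, s.Tags)
}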
diff --git a/lib/protoparser/datadog/parser_test.go b/lib/protoparser/datadog/parser_test.go
index 3c472d91cc..b6b12230a5 100644
--- a/lib/protoparser/datadog/parser_test.go
+++ b/lib/protoparser/datadog/parser_test.go
@@ -56,6 +56,7 @@ func TestRequestUnmarshalSuccess(t *testing.T) {
"host": "test.example.com",
"interval": 20,
"metric": "system.load.1",
+ "device": "/dev/sda",
"points": [[
1575317847,
0.5
@@ -71,6 +72,7 @@ func TestRequestUnmarshalSuccess(t *testing.T) {
Series: []Series{{
Host: "test.example.com",
Metric: "system.load.1",
+ Device: "/dev/sda",
Points: []Point{{
1575317847,
0.5,
diff --git a/lib/storage/block_stream_reader.go b/lib/storage/block_stream_reader.go
index aa3149d5b2..2f9e6fc59d 100644
--- a/lib/storage/block_stream_reader.go
+++ b/lib/storage/block_stream_reader.go
@@ -252,7 +252,7 @@ func (bsr *blockStreamReader) readBlock() error {
if err == io.EOF {
return io.EOF
}
- return fmt.Errorf("cannot read index block from index data: %w", err)
+ return fmt.Errorf("cannot read index block: %w", err)
}
}
@@ -354,11 +354,11 @@ func (bsr *blockStreamReader) readIndexBlock() error {
// Read index block.
bsr.compressedIndexData = bytesutil.ResizeNoCopyMayOverallocate(bsr.compressedIndexData, int(bsr.mr.IndexBlockSize))
if err := fs.ReadFullData(bsr.indexReader, bsr.compressedIndexData); err != nil {
- return fmt.Errorf("cannot read index block from index data at offset %d: %w", bsr.indexBlockOffset, err)
+ return fmt.Errorf("cannot read index block at offset %d: %w", bsr.indexBlockOffset, err)
}
tmpData, err := encoding.DecompressZSTD(bsr.indexData[:0], bsr.compressedIndexData)
if err != nil {
- return fmt.Errorf("cannot decompress index block read at offset %d: %w", bsr.indexBlockOffset, err)
+ return fmt.Errorf("cannot decompress index block at offset %d: %w", bsr.indexBlockOffset, err)
}
bsr.indexData = tmpData
bsr.indexCursor = bsr.indexData
diff --git a/lib/storage/block_stream_writer.go b/lib/storage/block_stream_writer.go
index 790c363668..ff43aa3cd2 100644
--- a/lib/storage/block_stream_writer.go
+++ b/lib/storage/block_stream_writer.go
@@ -80,13 +80,10 @@ func (bsw *blockStreamWriter) reset() {
}
// InitFromInmemoryPart initializes bsw from inmemory part.
-func (bsw *blockStreamWriter) InitFromInmemoryPart(mp *inmemoryPart) {
+func (bsw *blockStreamWriter) InitFromInmemoryPart(mp *inmemoryPart, compressLevel int) {
bsw.reset()
- // Use the minimum compression level for in-memory blocks,
- // since they are going to be re-compressed during the merge into file-based blocks.
- bsw.compressLevel = -5 // See https://github.com/facebook/zstd/releases/tag/v1.3.4
-
+ bsw.compressLevel = compressLevel
bsw.timestampsWriter = &mp.timestampsData
bsw.valuesWriter = &mp.valuesData
bsw.indexWriter = &mp.indexData
diff --git a/lib/storage/block_stream_writer_timing_test.go b/lib/storage/block_stream_writer_timing_test.go
index 8e51d06a20..95ecbbe358 100644
--- a/lib/storage/block_stream_writer_timing_test.go
+++ b/lib/storage/block_stream_writer_timing_test.go
@@ -47,7 +47,7 @@ func benchmarkBlockStreamWriter(b *testing.B, ebs []Block, rowsCount int, writeR
}
}
- bsw.InitFromInmemoryPart(&mp)
+ bsw.InitFromInmemoryPart(&mp, -5)
for i := range ebsCopy {
bsw.WriteExternalBlock(&ebsCopy[i], &ph, &rowsMerged)
}
diff --git a/lib/storage/index_db.go b/lib/storage/index_db.go
index 29658f4144..a28a8492f4 100644
--- a/lib/storage/index_db.go
+++ b/lib/storage/index_db.go
@@ -400,12 +400,8 @@ func (is *indexSearch) maybeCreateIndexes(tsid *TSID, metricNameRaw []byte, date
return false, fmt.Errorf("cannot unmarshal metricNameRaw %q: %w", metricNameRaw, err)
}
mn.sortTags()
- if err := is.createGlobalIndexes(tsid, mn); err != nil {
- return false, fmt.Errorf("cannot create global indexes: %w", err)
- }
- if err := is.createPerDayIndexes(date, tsid.MetricID, mn); err != nil {
- return false, fmt.Errorf("cannot create per-day indexes for date=%s: %w", dateToString(date), err)
- }
+ is.createGlobalIndexes(tsid, mn)
+ is.createPerDayIndexes(date, tsid.MetricID, mn)
PutMetricName(mn)
atomic.AddUint64(&is.db.timeseriesRepopulated, 1)
return true, nil
@@ -599,12 +595,8 @@ func (is *indexSearch) createTSIDByName(dst *TSID, metricName, metricNameRaw []b
if err := is.db.s.registerSeriesCardinality(dst.MetricID, metricNameRaw); err != nil {
return err
}
- if err := is.createGlobalIndexes(dst, mn); err != nil {
- return fmt.Errorf("cannot create global indexes: %w", err)
- }
- if err := is.createPerDayIndexes(date, dst.MetricID, mn); err != nil {
- return fmt.Errorf("cannot create per-day indexes for date=%s: %w", dateToString(date), err)
- }
+ is.createGlobalIndexes(dst, mn)
+ is.createPerDayIndexes(date, dst.MetricID, mn)
// There is no need in invalidating tag cache, since it is invalidated
// on db.tb flush via invalidateTagFiltersCache flushCallback passed to OpenTable.
@@ -668,7 +660,7 @@ func generateTSID(dst *TSID, mn *MetricName) {
dst.MetricID = generateUniqueMetricID()
}
-func (is *indexSearch) createGlobalIndexes(tsid *TSID, mn *MetricName) error {
+func (is *indexSearch) createGlobalIndexes(tsid *TSID, mn *MetricName) {
// The order of index items is important.
// It guarantees index consistency.
@@ -699,7 +691,7 @@ func (is *indexSearch) createGlobalIndexes(tsid *TSID, mn *MetricName) error {
ii.registerTagIndexes(prefix.B, mn, tsid.MetricID)
kbPool.Put(prefix)
- return is.db.tb.AddItems(ii.Items)
+ is.db.tb.AddItems(ii.Items)
}
type indexItems struct {
@@ -1640,9 +1632,7 @@ func (db *indexDB) searchMetricNameWithCache(dst []byte, metricID uint64) ([]byt
// Mark the metricID as deleted, so it will be created again when new data point
// for the given time series will arrive.
- if err := db.deleteMetricIDs([]uint64{metricID}); err != nil {
- return dst, fmt.Errorf("cannot delete metricID for missing metricID->metricName entry; metricID=%d; error: %w", metricID, err)
- }
+ db.deleteMetricIDs([]uint64{metricID})
return dst, io.EOF
}
@@ -1669,9 +1659,7 @@ func (db *indexDB) DeleteTSIDs(qt *querytracer.Tracer, tfss []*TagFilters) (int,
if err != nil {
return 0, err
}
- if err := db.deleteMetricIDs(metricIDs); err != nil {
- return 0, err
- }
+ db.deleteMetricIDs(metricIDs)
// Delete TSIDs in the extDB.
deletedCount := len(metricIDs)
@@ -1689,10 +1677,10 @@ func (db *indexDB) DeleteTSIDs(qt *querytracer.Tracer, tfss []*TagFilters) (int,
return deletedCount, nil
}
-func (db *indexDB) deleteMetricIDs(metricIDs []uint64) error {
+func (db *indexDB) deleteMetricIDs(metricIDs []uint64) {
if len(metricIDs) == 0 {
// Nothing to delete
- return nil
+ return
}
// atomically add deleted metricIDs to an inmemory map.
@@ -1717,9 +1705,8 @@ func (db *indexDB) deleteMetricIDs(metricIDs []uint64) error {
items.B = encoding.MarshalUint64(items.B, metricID)
items.Next()
}
- err := db.tb.AddItems(items.Items)
+ db.tb.AddItems(items.Items)
putIndexItems(items)
- return err
}
func (db *indexDB) loadDeletedMetricIDs() (*uint64set.Set, error) {
@@ -2793,7 +2780,7 @@ const (
int64Max = int64((1 << 63) - 1)
)
-func (is *indexSearch) createPerDayIndexes(date, metricID uint64, mn *MetricName) error {
+func (is *indexSearch) createPerDayIndexes(date, metricID uint64, mn *MetricName) {
ii := getIndexItems()
defer putIndexItems(ii)
@@ -2808,11 +2795,8 @@ func (is *indexSearch) createPerDayIndexes(date, metricID uint64, mn *MetricName
kb.B = marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs)
kb.B = encoding.MarshalUint64(kb.B, date)
ii.registerTagIndexes(kb.B, mn, metricID)
- if err := is.db.tb.AddItems(ii.Items); err != nil {
- return fmt.Errorf("cannot add per-day entires for metricID %d: %w", metricID, err)
- }
+ is.db.tb.AddItems(ii.Items)
is.db.s.dateMetricIDCache.Set(date, metricID)
- return nil
}
func (ii *indexItems) registerTagIndexes(prefix []byte, mn *MetricName, metricID uint64) {
diff --git a/lib/storage/index_db_test.go b/lib/storage/index_db_test.go
index 9f00a579cb..4bf20312b2 100644
--- a/lib/storage/index_db_test.go
+++ b/lib/storage/index_db_test.go
@@ -523,22 +523,13 @@ func TestIndexDB(t *testing.T) {
}
}()
- if err := testIndexDBBigMetricName(db); err != nil {
- t.Fatalf("unexpected error: %s", err)
- }
mns, tsids, err := testIndexDBGetOrCreateTSIDByName(db, metricGroups)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
- if err := testIndexDBBigMetricName(db); err != nil {
- t.Fatalf("unexpected error: %s", err)
- }
if err := testIndexDBCheckTSIDByName(db, mns, tsids, false); err != nil {
t.Fatalf("unexpected error: %s", err)
}
- if err := testIndexDBBigMetricName(db); err != nil {
- t.Fatalf("unexpected error: %s", err)
- }
// Re-open the db and verify it works as expected.
db.MustClose()
@@ -546,15 +537,9 @@ func TestIndexDB(t *testing.T) {
if err != nil {
t.Fatalf("cannot open indexDB: %s", err)
}
- if err := testIndexDBBigMetricName(db); err != nil {
- t.Fatalf("unexpected error: %s", err)
- }
if err := testIndexDBCheckTSIDByName(db, mns, tsids, false); err != nil {
t.Fatalf("unexpected error: %s", err)
}
- if err := testIndexDBBigMetricName(db); err != nil {
- t.Fatalf("unexpected error: %s", err)
- }
})
t.Run("concurrent", func(t *testing.T) {
@@ -577,27 +562,15 @@ func TestIndexDB(t *testing.T) {
ch := make(chan error, 3)
for i := 0; i < cap(ch); i++ {
go func() {
- if err := testIndexDBBigMetricName(db); err != nil {
- ch <- err
- return
- }
mns, tsid, err := testIndexDBGetOrCreateTSIDByName(db, metricGroups)
if err != nil {
ch <- err
return
}
- if err := testIndexDBBigMetricName(db); err != nil {
- ch <- err
- return
- }
if err := testIndexDBCheckTSIDByName(db, mns, tsid, true); err != nil {
ch <- err
return
}
- if err := testIndexDBBigMetricName(db); err != nil {
- ch <- err
- return
- }
ch <- nil
}()
}
@@ -618,74 +591,6 @@ func TestIndexDB(t *testing.T) {
})
}
-func testIndexDBBigMetricName(db *indexDB) error {
- var bigBytes []byte
- for i := 0; i < 128*1000; i++ {
- bigBytes = append(bigBytes, byte(i))
- }
- var mn MetricName
- var tsid TSID
-
- is := db.getIndexSearch(noDeadline)
- defer db.putIndexSearch(is)
-
- // Try creating too big metric group
- mn.Reset()
- mn.MetricGroup = append(mn.MetricGroup[:0], bigBytes...)
- mn.sortTags()
- metricName := mn.Marshal(nil)
- metricNameRaw := mn.marshalRaw(nil)
- if err := is.GetOrCreateTSIDByName(&tsid, metricName, metricNameRaw, 0); err == nil {
- return fmt.Errorf("expecting non-nil error on an attempt to insert metric with too big MetricGroup")
- }
-
- // Try creating too big tag key
- mn.Reset()
- mn.MetricGroup = append(mn.MetricGroup[:0], "xxx"...)
- mn.Tags = []Tag{{
- Key: append([]byte(nil), bigBytes...),
- Value: []byte("foobar"),
- }}
- mn.sortTags()
- metricName = mn.Marshal(nil)
- metricNameRaw = mn.marshalRaw(nil)
- if err := is.GetOrCreateTSIDByName(&tsid, metricName, metricNameRaw, 0); err == nil {
- return fmt.Errorf("expecting non-nil error on an attempt to insert metric with too big tag key")
- }
-
- // Try creating too big tag value
- mn.Reset()
- mn.MetricGroup = append(mn.MetricGroup[:0], "xxx"...)
- mn.Tags = []Tag{{
- Key: []byte("foobar"),
- Value: append([]byte(nil), bigBytes...),
- }}
- mn.sortTags()
- metricName = mn.Marshal(nil)
- metricNameRaw = mn.marshalRaw(nil)
- if err := is.GetOrCreateTSIDByName(&tsid, metricName, metricNameRaw, 0); err == nil {
- return fmt.Errorf("expecting non-nil error on an attempt to insert metric with too big tag value")
- }
-
- // Try creating metric name with too many tags
- mn.Reset()
- mn.MetricGroup = append(mn.MetricGroup[:0], "xxx"...)
- for i := 0; i < 60000; i++ {
- mn.Tags = append(mn.Tags, Tag{
- Key: []byte(fmt.Sprintf("foobar %d", i)),
- Value: []byte(fmt.Sprintf("sdfjdslkfj %d", i)),
- })
- }
- mn.sortTags()
- metricName = mn.Marshal(nil)
- metricNameRaw = mn.marshalRaw(nil)
- if err := is.GetOrCreateTSIDByName(&tsid, metricName, metricNameRaw, 0); err == nil {
- return fmt.Errorf("expecting non-nil error on an attempt to insert metric with too many tags")
- }
-
- return nil
-}
-
func testIndexDBGetOrCreateTSIDByName(db *indexDB, metricGroups int) ([]MetricName, []TSID, error) {
// Create tsids.
var mns []MetricName
@@ -727,9 +632,7 @@ func testIndexDBGetOrCreateTSIDByName(db *indexDB, metricGroups int) ([]MetricNa
date := uint64(timestampFromTime(time.Now())) / msecPerDay
for i := range tsids {
tsid := &tsids[i]
- if err := is.createPerDayIndexes(date, tsid.MetricID, &mns[i]); err != nil {
- return nil, nil, fmt.Errorf("error in createPerDayIndexes(%d, %d): %w", date, tsid.MetricID, err)
- }
+ is.createPerDayIndexes(date, tsid.MetricID, &mns[i])
}
// Flush index to disk, so it becomes visible for search
@@ -1549,11 +1452,10 @@ func TestMatchTagFilters(t *testing.T) {
func TestIndexDBRepopulateAfterRotation(t *testing.T) {
path := "TestIndexRepopulateAfterRotation"
- s, err := OpenStorage(path, 0, 1e5, 1e5)
+ s, err := OpenStorage(path, msecsPerMonth, 1e5, 1e5)
if err != nil {
t.Fatalf("cannot open storage: %s", err)
}
- s.retentionMsecs = msecsPerMonth
defer func() {
s.MustClose()
if err := os.RemoveAll(path); err != nil {
@@ -1578,8 +1480,8 @@ func TestIndexDBRepopulateAfterRotation(t *testing.T) {
// verify the storage contains rows.
var m Metrics
s.UpdateMetrics(&m)
- if m.TableMetrics.SmallRowsCount < uint64(metricRowsN) {
- t.Fatalf("expecting at least %d rows in the table; got %d", metricRowsN, m.TableMetrics.SmallRowsCount)
+ if rowsCount := m.TableMetrics.TotalRowsCount(); rowsCount < uint64(metricRowsN) {
+ t.Fatalf("expecting at least %d rows in the table; got %d", metricRowsN, rowsCount)
}
// check new series were registered in indexDB
@@ -1721,9 +1623,7 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
for i := range tsids {
tsid := &tsids[i]
metricIDs.Add(tsid.MetricID)
- if err := is.createPerDayIndexes(date, tsid.MetricID, &mns[i]); err != nil {
- t.Fatalf("error in createPerDayIndexes(%d, %d): %s", date, tsid.MetricID, err)
- }
+ is.createPerDayIndexes(date, tsid.MetricID, &mns[i])
}
allMetricIDs.Union(&metricIDs)
perDayMetricIDs[date] = &metricIDs
diff --git a/lib/storage/inmemory_part.go b/lib/storage/inmemory_part.go
index 3f78e24b7b..70f05c15fb 100644
--- a/lib/storage/inmemory_part.go
+++ b/lib/storage/inmemory_part.go
@@ -1,9 +1,13 @@
package storage
import (
+ "fmt"
+ "path/filepath"
+
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
+ "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
@@ -31,6 +35,36 @@ func (mp *inmemoryPart) Reset() {
mp.creationTime = 0
}
+// StoreToDisk stores the mp to the given path on disk.
+func (mp *inmemoryPart) StoreToDisk(path string) error {
+ if err := fs.MkdirAllIfNotExist(path); err != nil {
+ return fmt.Errorf("cannot create directory %q: %w", path, err)
+ }
+ timestampsPath := path + "/timestamps.bin"
+ if err := fs.WriteFileAndSync(timestampsPath, mp.timestampsData.B); err != nil {
+ return fmt.Errorf("cannot store timestamps: %w", err)
+ }
+ valuesPath := path + "/values.bin"
+ if err := fs.WriteFileAndSync(valuesPath, mp.valuesData.B); err != nil {
+ return fmt.Errorf("cannot store values: %w", err)
+ }
+ indexPath := path + "/index.bin"
+ if err := fs.WriteFileAndSync(indexPath, mp.indexData.B); err != nil {
+ return fmt.Errorf("cannot store index: %w", err)
+ }
+ metaindexPath := path + "/metaindex.bin"
+ if err := fs.WriteFileAndSync(metaindexPath, mp.metaindexData.B); err != nil {
+ return fmt.Errorf("cannot store metaindex: %w", err)
+ }
+ if err := mp.ph.writeMinDedupInterval(path); err != nil {
+ return fmt.Errorf("cannot store min dedup interval: %w", err)
+ }
+ // Sync the parent directory in order to make sure the written files remain visible after a hardware reset.
+ parentDirPath := filepath.Dir(path)
+ fs.MustSyncPath(parentDirPath)
+ return nil
+}
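StoreToDisk writes each buffer to its own file and then syncs the parent directory so the freshly created entries survive a power loss. The sketch below shows the same write-then-sync-the-parent pattern using only the standard library; the file names and paths are illustrative and it doesn't attempt to mirror the part layout exactly.

package main

import (
	"os"
	"path/filepath"
)

// writeFileDurable writes data to path and fsyncs both the file and its parent
// directory, so the file contents and the directory entry survive a crash.
func writeFileDurable(path string, data []byte) error {
	f, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o644)
	if err != nil {
		return err
	}
	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	if err := f.Sync(); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	d, err := os.Open(filepath.Dir(path))
	if err != nil {
		return err
	}
	defer d.Close()
	return d.Sync() // persist the directory entry as well
}

func main() {
	dir := filepath.Join(os.TempDir(), "example-part")
	if err := os.MkdirAll(dir, 0o755); err != nil {
		panic(err)
	}
	if err := writeFileDurable(filepath.Join(dir, "timestamps.bin"), []byte("data")); err != nil {
		panic(err)
	}
}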
+
// InitFromRows initializes mp from the given rows.
func (mp *inmemoryPart) InitFromRows(rows []rawRow) {
if len(rows) == 0 {
@@ -49,9 +83,12 @@ func (mp *inmemoryPart) InitFromRows(rows []rawRow) {
// It is safe calling NewPart multiple times.
// It is unsafe re-using mp while the returned part is in use.
func (mp *inmemoryPart) NewPart() (*part, error) {
- ph := mp.ph
- size := uint64(len(mp.timestampsData.B) + len(mp.valuesData.B) + len(mp.indexData.B) + len(mp.metaindexData.B))
- return newPart(&ph, "", size, mp.metaindexData.NewReader(), &mp.timestampsData, &mp.valuesData, &mp.indexData)
+ size := mp.size()
+ return newPart(&mp.ph, "", size, mp.metaindexData.NewReader(), &mp.timestampsData, &mp.valuesData, &mp.indexData)
+}
+
+func (mp *inmemoryPart) size() uint64 {
+ return uint64(cap(mp.timestampsData.B) + cap(mp.valuesData.B) + cap(mp.indexData.B) + cap(mp.metaindexData.B))
}
func getInmemoryPart() *inmemoryPart {
diff --git a/lib/storage/merge.go b/lib/storage/merge.go
index 7207fd2135..8759c09926 100644
--- a/lib/storage/merge.go
+++ b/lib/storage/merge.go
@@ -178,6 +178,10 @@ func mergeBlocks(ob, ib1, ib2 *Block, retentionDeadline int64, rowsDeleted *uint
}
func skipSamplesOutsideRetention(b *Block, retentionDeadline int64, rowsDeleted *uint64) {
+ if b.bh.MinTimestamp >= retentionDeadline {
+ // Fast path - the block contains only samples with timestamps not older than retentionDeadline, so nothing needs to be skipped.
+ return
+ }
timestamps := b.timestamps
nextIdx := b.nextIdx
nextIdxOrig := nextIdx
diff --git a/lib/storage/merge_test.go b/lib/storage/merge_test.go
index 189935c41f..276abd3aa5 100644
--- a/lib/storage/merge_test.go
+++ b/lib/storage/merge_test.go
@@ -361,7 +361,7 @@ func TestMergeForciblyStop(t *testing.T) {
var mp inmemoryPart
var bsw blockStreamWriter
- bsw.InitFromInmemoryPart(&mp)
+ bsw.InitFromInmemoryPart(&mp, -5)
ch := make(chan struct{})
var rowsMerged, rowsDeleted uint64
close(ch)
@@ -384,7 +384,7 @@ func testMergeBlockStreams(t *testing.T, bsrs []*blockStreamReader, expectedBloc
var mp inmemoryPart
var bsw blockStreamWriter
- bsw.InitFromInmemoryPart(&mp)
+ bsw.InitFromInmemoryPart(&mp, -5)
strg := newTestStorage()
var rowsMerged, rowsDeleted uint64
diff --git a/lib/storage/merge_timing_test.go b/lib/storage/merge_timing_test.go
index 5cbbe54552..cfc440c705 100644
--- a/lib/storage/merge_timing_test.go
+++ b/lib/storage/merge_timing_test.go
@@ -41,7 +41,7 @@ func benchmarkMergeBlockStreams(b *testing.B, mps []*inmemoryPart, rowsPerLoop i
bsrs[i].InitFromInmemoryPart(mp)
}
mpOut.Reset()
- bsw.InitFromInmemoryPart(&mpOut)
+ bsw.InitFromInmemoryPart(&mpOut, -5)
if err := mergeBlockStreams(&mpOut.ph, &bsw, bsrs, nil, strg, 0, &rowsMerged, &rowsDeleted); err != nil {
panic(fmt.Errorf("cannot merge block streams: %w", err))
}
diff --git a/lib/storage/part_search.go b/lib/storage/part_search.go
index 1ceacd8284..88e92c501b 100644
--- a/lib/storage/part_search.go
+++ b/lib/storage/part_search.go
@@ -228,24 +228,29 @@ func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) {
}
func (ps *partSearch) searchBHS() bool {
- for i := range ps.bhs {
- bh := &ps.bhs[i]
-
- nextTSID:
- if bh.TSID.Less(&ps.BlockRef.bh.TSID) {
- // Skip blocks with small tsid values.
- continue
+ bhs := ps.bhs
+ for len(bhs) > 0 {
+ // Skip block headers with tsids smaller than the given tsid.
+ tsid := &ps.BlockRef.bh.TSID
+ n := sort.Search(len(bhs), func(i int) bool {
+ return !bhs[i].TSID.Less(tsid)
+ })
+ if n == len(bhs) {
+ // Nothing found.
+ break
}
+ bhs = bhs[n:]
- // Invariant: ps.BlockRef.bh.TSID <= bh.TSID
+ // Invariant: tsid <= bh.TSID
- if bh.TSID.MetricID != ps.BlockRef.bh.TSID.MetricID {
- // ps.BlockRef.bh.TSID < bh.TSID: no more blocks with the given tsid.
+ bh := &bhs[0]
+ if bh.TSID.MetricID != tsid.MetricID {
+ // tsid < bh.TSID: no more blocks with the given tsid.
// Proceed to the next (bigger) tsid.
if !ps.nextTSID() {
return false
}
- goto nextTSID
+ continue
}
// Found the block with the given tsid. Verify timestamp range.
@@ -254,6 +259,7 @@ func (ps *partSearch) searchBHS() bool {
// So use linear search instead of binary search.
if bh.MaxTimestamp < ps.tr.MinTimestamp {
// Skip the block with too small timestamps.
+ bhs = bhs[1:]
continue
}
if bh.MinTimestamp > ps.tr.MaxTimestamp {
@@ -269,10 +275,9 @@ func (ps *partSearch) searchBHS() bool {
// Read it.
ps.BlockRef.init(ps.p, bh)
- ps.bhs = ps.bhs[i+1:]
+ ps.bhs = bhs[1:]
return true
}
-
ps.bhs = nil
return false
}
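The rewritten searchBHS relies on the block headers being sorted by TSID and uses sort.Search to jump straight to the first header whose TSID is not less than the one being looked up, instead of skipping headers one by one. Below is a standalone illustration of that sort.Search idiom on plain integers; the slice stands in for the sorted block headers.

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Sorted "TSIDs" standing in for sorted block headers.
	tsids := []int{3, 7, 7, 9, 15, 21}
	want := 9

	// Index of the first element >= want; len(tsids) means nothing matches.
	n := sort.Search(len(tsids), func(i int) bool { return tsids[i] >= want })
	if n == len(tsids) || tsids[n] != want {
		fmt.Println("no block for", want)
		return
	}
	fmt.Println("first matching block index:", n) // 3
}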
diff --git a/lib/storage/partition.go b/lib/storage/partition.go
index 4b45d6557d..639b1b3779 100644
--- a/lib/storage/partition.go
+++ b/lib/storage/partition.go
@@ -19,33 +19,19 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
+ "github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storagepacelimiter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/syncwg"
)
-func maxSmallPartSize() uint64 {
- // Small parts are cached in the OS page cache,
- // so limit their size by the remaining free RAM.
- mem := memory.Remaining()
- // It is expected no more than defaultPartsToMerge/2 parts exist
- // in the OS page cache before they are merged into bigger part.
- // Half of the remaining RAM must be left for lib/mergeset parts,
- // so the maxItems is calculated using the below code:
- maxSize := uint64(mem) / defaultPartsToMerge
- if maxSize < 10e6 {
- maxSize = 10e6
- }
- return maxSize
-}
-
// The maximum size of big part.
//
// This number limits the maximum time required for building big part.
// This time shouldn't exceed a few days.
const maxBigPartSize = 1e12
-// The maximum number of small parts in the partition.
-const maxSmallPartsPerPartition = 256
+// The maximum number of inmemory parts in the partition.
+const maxInmemoryPartsPerPartition = 32
// Default number of parts to merge at once.
//
@@ -65,6 +51,25 @@ const finalPartsToMerge = 3
// Higher number of shards reduces CPU contention and increases the max bandwidth on multi-core systems.
var rawRowsShardsPerPartition = (cgroup.AvailableCPUs() + 1) / 2
+// The interval for flushing buffered rows into parts, so they become visible to search.
+const pendingRowsFlushInterval = time.Second
+
+// The interval for guaranteed flush of recently ingested data from memory to on-disk parts,
+// so the data survives a process crash.
+var dataFlushInterval = 5 * time.Second
+
+// SetDataFlushInterval sets the interval for guaranteed flush of recently ingested data from memory to disk.
+//
+// The data can be flushed from memory to disk more frequently if it doesn't fit the memory limit.
+//
+// This function must be called before initializing the storage.
+func SetDataFlushInterval(d time.Duration) {
+ if d > pendingRowsFlushInterval {
+ dataFlushInterval = d
+ mergeset.SetDataFlushInterval(d)
+ }
+}
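SetDataFlushInterval only takes effect for intervals longer than pendingRowsFlushInterval (one second), must be called before the storage is opened, and forwards the value to lib/mergeset so row and index data follow the same durability guarantee. A hedged usage sketch is below; the -inmemoryDataFlushInterval flag name is made up for illustration and may not match the actual binary.

package main

import (
	"flag"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

// inmemoryDataFlushInterval is a hypothetical flag name; the real binary may differ.
var inmemoryDataFlushInterval = flag.Duration("inmemoryDataFlushInterval", 5*time.Second,
	"How often recently ingested in-memory data is persisted to disk")

func main() {
	flag.Parse()
	// Must be called before storage.OpenStorage; values of one second or less are ignored.
	storage.SetDataFlushInterval(*inmemoryDataFlushInterval)
	// ... storage.OpenStorage(...) and the rest of the startup sequence ...
}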
+
// getMaxRawRowsPerShard returns the maximum number of rows that haven't been converted into parts yet.
func getMaxRawRowsPerShard() int {
maxRawRowsPerPartitionOnce.Do(func() {
@@ -85,32 +90,30 @@ var (
maxRawRowsPerPartitionOnce sync.Once
)
-// The interval for flushing (converting) recent raw rows into parts,
-// so they become visible to search.
-const rawRowsFlushInterval = time.Second
-
-// The interval for flushing inmemory parts to persistent storage,
-// so they survive process crash.
-const inmemoryPartsFlushInterval = 5 * time.Second
-
// partition represents a partition.
type partition struct {
// Put atomic counters to the top of struct, so they are aligned to 8 bytes on 32-bit arch.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/212
- activeBigMerges uint64
- activeSmallMerges uint64
- bigMergesCount uint64
- smallMergesCount uint64
- bigRowsMerged uint64
- smallRowsMerged uint64
- bigRowsDeleted uint64
- smallRowsDeleted uint64
+ activeInmemoryMerges uint64
+ activeSmallMerges uint64
+ activeBigMerges uint64
- smallAssistedMerges uint64
+ inmemoryMergesCount uint64
+ smallMergesCount uint64
+ bigMergesCount uint64
- smallMergeNeedFreeDiskSpace uint64
- bigMergeNeedFreeDiskSpace uint64
+ inmemoryRowsMerged uint64
+ smallRowsMerged uint64
+ bigRowsMerged uint64
+
+ inmemoryRowsDeleted uint64
+ smallRowsDeleted uint64
+ bigRowsDeleted uint64
+
+ inmemoryAssistedMerges uint64
+
+ mergeNeedFreeDiskSpace uint64
mergeIdx uint64
@@ -126,30 +129,29 @@ type partition struct {
// The time range for the partition. Usually this is a whole month.
tr TimeRange
- // partsLock protects smallParts and bigParts.
+ // rawRows contains recently added rows that haven't been converted into parts yet.
+ // rawRows are periodically converted into inmemoryParts.
+ // rawRows aren't used in search for performance reasons.
+ rawRows rawRowsShards
+
+ // partsLock protects inmemoryParts, smallParts and bigParts.
partsLock sync.Mutex
- // Contains all the inmemoryPart plus file-based parts
- // with small number of items (up to maxRowsCountPerSmallPart).
+ // Contains inmemory parts with recently ingested data.
+ // It must be merged into either smallParts or bigParts to become visible to search.
+ inmemoryParts []*partWrapper
+
+ // Contains file-based parts with small number of items.
smallParts []*partWrapper
// Contains file-based parts with big number of items.
bigParts []*partWrapper
- // rawRows contains recently added rows that haven't been converted into parts yet.
- //
- // rawRows aren't used in search for performance reasons.
- rawRows rawRowsShards
-
snapshotLock sync.RWMutex
stopCh chan struct{}
- smallPartsMergerWG sync.WaitGroup
- bigPartsMergerWG sync.WaitGroup
- rawRowsFlusherWG sync.WaitGroup
- inmemoryPartsFlusherWG sync.WaitGroup
- stalePartsRemoverWG sync.WaitGroup
+ wg sync.WaitGroup
}
// partWrapper is a wrapper for the part.
@@ -168,6 +170,9 @@ type partWrapper struct {
// Whether the part is in merge now.
isInMerge bool
+
+ // The deadline when in-memory part must be flushed to disk.
+ // The deadline when the in-memory part must be flushed to disk.
}
func (pw *partWrapper) incRef() {
@@ -208,16 +213,20 @@ func createPartition(timestamp int64, smallPartitionsPath, bigPartitionsPath str
pt := newPartition(name, smallPartsPath, bigPartsPath, s)
pt.tr.fromPartitionTimestamp(timestamp)
- pt.startMergeWorkers()
- pt.startRawRowsFlusher()
- pt.startInmemoryPartsFlusher()
- pt.startStalePartsRemover()
+ pt.startBackgroundWorkers()
logger.Infof("partition %q has been created", name)
return pt, nil
}
+func (pt *partition) startBackgroundWorkers() {
+ pt.startMergeWorkers()
+ pt.startInmemoryPartsFlusher()
+ pt.startPendingRowsFlusher()
+ pt.startStalePartsRemover()
+}
+
// Drop drops all the data on the storage for the given pt.
//
// The pt must be detached from table before calling pt.Drop.
@@ -262,10 +271,7 @@ func openPartition(smallPartsPath, bigPartsPath string, s *Storage) (*partition,
if err := pt.tr.fromPartitionName(name); err != nil {
return nil, fmt.Errorf("cannot obtain partition time range from smallPartsPath %q: %w", smallPartsPath, err)
}
- pt.startMergeWorkers()
- pt.startRawRowsFlusher()
- pt.startInmemoryPartsFlusher()
- pt.startStalePartsRemover()
+ pt.startBackgroundWorkers()
return pt, nil
}
@@ -295,67 +301,83 @@ type partitionMetrics struct {
IndexBlocksCacheRequests uint64
IndexBlocksCacheMisses uint64
- BigSizeBytes uint64
- SmallSizeBytes uint64
+ InmemorySizeBytes uint64
+ SmallSizeBytes uint64
+ BigSizeBytes uint64
- BigRowsCount uint64
- SmallRowsCount uint64
+ InmemoryRowsCount uint64
+ SmallRowsCount uint64
+ BigRowsCount uint64
- BigBlocksCount uint64
- SmallBlocksCount uint64
+ InmemoryBlocksCount uint64
+ SmallBlocksCount uint64
+ BigBlocksCount uint64
- BigPartsCount uint64
- SmallPartsCount uint64
+ InmemoryPartsCount uint64
+ SmallPartsCount uint64
+ BigPartsCount uint64
- ActiveBigMerges uint64
- ActiveSmallMerges uint64
+ ActiveInmemoryMerges uint64
+ ActiveSmallMerges uint64
+ ActiveBigMerges uint64
- BigMergesCount uint64
- SmallMergesCount uint64
+ InmemoryMergesCount uint64
+ SmallMergesCount uint64
+ BigMergesCount uint64
- BigRowsMerged uint64
- SmallRowsMerged uint64
+ InmemoryRowsMerged uint64
+ SmallRowsMerged uint64
+ BigRowsMerged uint64
- BigRowsDeleted uint64
- SmallRowsDeleted uint64
+ InmemoryRowsDeleted uint64
+ SmallRowsDeleted uint64
+ BigRowsDeleted uint64
- BigPartsRefCount uint64
- SmallPartsRefCount uint64
+ InmemoryPartsRefCount uint64
+ SmallPartsRefCount uint64
+ BigPartsRefCount uint64
- SmallAssistedMerges uint64
+ InmemoryAssistedMerges uint64
- SmallMergeNeedFreeDiskSpace uint64
- BigMergeNeedFreeDiskSpace uint64
+ MergeNeedFreeDiskSpace uint64
+}
+
+// TotalRowsCount returns the total number of rows in pm.
+func (pm *partitionMetrics) TotalRowsCount() uint64 {
+ return pm.PendingRows + pm.InmemoryRowsCount + pm.SmallRowsCount + pm.BigRowsCount
}
// UpdateMetrics updates m with metrics from pt.
func (pt *partition) UpdateMetrics(m *partitionMetrics) {
- rawRowsLen := uint64(pt.rawRows.Len())
- m.PendingRows += rawRowsLen
- m.SmallRowsCount += rawRowsLen
+ m.PendingRows += uint64(pt.rawRows.Len())
pt.partsLock.Lock()
+ for _, pw := range pt.inmemoryParts {
+ p := pw.p
+ m.InmemoryRowsCount += p.ph.RowsCount
+ m.InmemoryBlocksCount += p.ph.BlocksCount
+ m.InmemorySizeBytes += p.size
+ m.InmemoryPartsRefCount += atomic.LoadUint64(&pw.refCount)
+ }
+ for _, pw := range pt.smallParts {
+ p := pw.p
+ m.SmallRowsCount += p.ph.RowsCount
+ m.SmallBlocksCount += p.ph.BlocksCount
+ m.SmallSizeBytes += p.size
+ m.SmallPartsRefCount += atomic.LoadUint64(&pw.refCount)
+ }
for _, pw := range pt.bigParts {
p := pw.p
-
m.BigRowsCount += p.ph.RowsCount
m.BigBlocksCount += p.ph.BlocksCount
m.BigSizeBytes += p.size
m.BigPartsRefCount += atomic.LoadUint64(&pw.refCount)
}
- for _, pw := range pt.smallParts {
- p := pw.p
-
- m.SmallRowsCount += p.ph.RowsCount
- m.SmallBlocksCount += p.ph.BlocksCount
- m.SmallSizeBytes += p.size
- m.SmallPartsRefCount += atomic.LoadUint64(&pw.refCount)
- }
-
- m.BigPartsCount += uint64(len(pt.bigParts))
+ m.InmemoryPartsCount += uint64(len(pt.inmemoryParts))
m.SmallPartsCount += uint64(len(pt.smallParts))
+ m.BigPartsCount += uint64(len(pt.bigParts))
pt.partsLock.Unlock()
@@ -365,22 +387,25 @@ func (pt *partition) UpdateMetrics(m *partitionMetrics) {
m.IndexBlocksCacheRequests = ibCache.Requests()
m.IndexBlocksCacheMisses = ibCache.Misses()
- m.ActiveBigMerges += atomic.LoadUint64(&pt.activeBigMerges)
+ m.ActiveInmemoryMerges += atomic.LoadUint64(&pt.activeInmemoryMerges)
m.ActiveSmallMerges += atomic.LoadUint64(&pt.activeSmallMerges)
+ m.ActiveBigMerges += atomic.LoadUint64(&pt.activeBigMerges)
- m.BigMergesCount += atomic.LoadUint64(&pt.bigMergesCount)
+ m.InmemoryMergesCount += atomic.LoadUint64(&pt.inmemoryMergesCount)
m.SmallMergesCount += atomic.LoadUint64(&pt.smallMergesCount)
+ m.BigMergesCount += atomic.LoadUint64(&pt.bigMergesCount)
- m.BigRowsMerged += atomic.LoadUint64(&pt.bigRowsMerged)
+ m.InmemoryRowsMerged += atomic.LoadUint64(&pt.inmemoryRowsMerged)
m.SmallRowsMerged += atomic.LoadUint64(&pt.smallRowsMerged)
+ m.BigRowsMerged += atomic.LoadUint64(&pt.bigRowsMerged)
- m.BigRowsDeleted += atomic.LoadUint64(&pt.bigRowsDeleted)
+ m.InmemoryRowsDeleted += atomic.LoadUint64(&pt.inmemoryRowsDeleted)
m.SmallRowsDeleted += atomic.LoadUint64(&pt.smallRowsDeleted)
+ m.BigRowsDeleted += atomic.LoadUint64(&pt.bigRowsDeleted)
- m.SmallAssistedMerges += atomic.LoadUint64(&pt.smallAssistedMerges)
+ m.InmemoryAssistedMerges += atomic.LoadUint64(&pt.inmemoryAssistedMerges)
- m.SmallMergeNeedFreeDiskSpace += atomic.LoadUint64(&pt.smallMergeNeedFreeDiskSpace)
- m.BigMergeNeedFreeDiskSpace += atomic.LoadUint64(&pt.bigMergeNeedFreeDiskSpace)
+ m.MergeNeedFreeDiskSpace += atomic.LoadUint64(&pt.mergeNeedFreeDiskSpace)
}
// AddRows adds the given rows to the partition pt.
@@ -418,11 +443,13 @@ func (rrss *rawRowsShards) init() {
}
func (rrss *rawRowsShards) addRows(pt *partition, rows []rawRow) {
- n := atomic.AddUint32(&rrss.shardIdx, 1)
shards := rrss.shards
- idx := n % uint32(len(shards))
- shard := &shards[idx]
- shard.addRows(pt, rows)
+ shardsLen := uint32(len(shards))
+ for len(rows) > 0 {
+ n := atomic.AddUint32(&rrss.shardIdx, 1)
+ idx := n % shardsLen
+ rows = shards[idx].addRows(pt, rows)
+ }
}
func (rrss *rawRowsShards) Len() int {
@@ -456,8 +483,8 @@ func (rrs *rawRowsShard) Len() int {
return n
}
-func (rrs *rawRowsShard) addRows(pt *partition, rows []rawRow) {
- var rowsToFlush []rawRow
+func (rrs *rawRowsShard) addRows(pt *partition, rows []rawRow) []rawRow {
+ var rrb *rawRowsBlock
rrs.mu.Lock()
if cap(rrs.rows) == 0 {
@@ -467,23 +494,25 @@ func (rrs *rawRowsShard) addRows(pt *partition, rows []rawRow) {
rrs.rows = rrs.rows[:len(rrs.rows)+n]
rows = rows[n:]
if len(rows) > 0 {
- // Slow path - rows did't fit rrs.rows capacity.
- // Convert rrs.rows to rowsToFlush and convert it to a part,
- // then try moving the remaining rows to rrs.rows.
- rowsToFlush = rrs.rows
- rrs.rows = newRawRowsBlock()
- if len(rows) <= n {
- rrs.rows = append(rrs.rows[:0], rows...)
- } else {
- // The slowest path - rows do not fit rrs.rows capacity.
- // So append them directly to rowsToFlush.
- rowsToFlush = append(rowsToFlush, rows...)
- }
+ rrb = getRawRowsBlock()
+ rrb.rows, rrs.rows = rrs.rows, rrb.rows
+ n = copy(rrs.rows[:cap(rrs.rows)], rows)
+ rrs.rows = rrs.rows[:n]
+ rows = rows[n:]
atomic.StoreUint64(&rrs.lastFlushTime, fasttime.UnixTimestamp())
}
rrs.mu.Unlock()
- pt.flushRowsToParts(rowsToFlush)
+ if rrb != nil {
+ pt.flushRowsToParts(rrb.rows)
+ putRawRowsBlock(rrb)
+ }
+
+ return rows
+}
+
+type rawRowsBlock struct {
+ rows []rawRow
}
func newRawRowsBlock() []rawRow {
@@ -491,8 +520,30 @@ func newRawRowsBlock() []rawRow {
return make([]rawRow, 0, n)
}
+func getRawRowsBlock() *rawRowsBlock {
+ v := rawRowsBlockPool.Get()
+ if v == nil {
+ return &rawRowsBlock{
+ rows: newRawRowsBlock(),
+ }
+ }
+ return v.(*rawRowsBlock)
+}
+
+func putRawRowsBlock(rrb *rawRowsBlock) {
+ rrb.rows = rrb.rows[:0]
+ rawRowsBlockPool.Put(rrb)
+}
+
+var rawRowsBlockPool sync.Pool
+
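A note on the design choice above, as I read it: the []rawRow slice is pooled behind a *rawRowsBlock pointer rather than put into sync.Pool directly, because storing a plain slice value in the pool copies the slice header into an interface and allocates on every Put, while a pooled pointer avoids that allocation. The same idiom in a generic, self-contained form:

package main

import "sync"

// block wraps the reusable buffer so only a pointer crosses the pool boundary.
type block struct{ buf []byte }

var blockPool = sync.Pool{
	New: func() interface{} { return &block{buf: make([]byte, 0, 64*1024)} },
}

func getBlock() *block { return blockPool.Get().(*block) }

func putBlock(b *block) {
	b.buf = b.buf[:0] // keep the capacity, drop the contents
	blockPool.Put(b)
}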
func (pt *partition) flushRowsToParts(rows []rawRow) {
+ if len(rows) == 0 {
+ return
+ }
maxRows := getMaxRawRowsPerShard()
+ var pwsLock sync.Mutex
+ pws := make([]*partWrapper, 0, (len(rows)+maxRows-1)/maxRows)
wg := getWaitGroup()
for len(rows) > 0 {
n := maxRows
@@ -500,14 +551,73 @@ func (pt *partition) flushRowsToParts(rows []rawRow) {
n = len(rows)
}
wg.Add(1)
- go func(rowsPart []rawRow) {
- defer wg.Done()
- pt.addRowsPart(rowsPart)
+ flushConcurrencyCh <- struct{}{}
+ go func(rowsChunk []rawRow) {
+ defer func() {
+ <-flushConcurrencyCh
+ wg.Done()
+ }()
+ pw := pt.createInmemoryPart(rowsChunk)
+ if pw == nil {
+ return
+ }
+ pwsLock.Lock()
+ pws = append(pws, pw)
+ pwsLock.Unlock()
}(rows[:n])
rows = rows[n:]
}
wg.Wait()
putWaitGroup(wg)
+
+ pt.partsLock.Lock()
+ pt.inmemoryParts = append(pt.inmemoryParts, pws...)
+ pt.partsLock.Unlock()
+
+ flushConcurrencyCh <- struct{}{}
+ pt.assistedMergeForInmemoryParts()
+ <-flushConcurrencyCh
+ // There is no need for assisted merges of small and big parts,
+ // since the bottleneck can only occur at inmemory parts.
+}
+
+var flushConcurrencyCh = make(chan struct{}, cgroup.AvailableCPUs())
+
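flushConcurrencyCh above is a counting semaphore sized to the number of available CPUs: a goroutine sends into the channel before doing CPU-bound work and receives from it afterwards, so at most cap(flushConcurrencyCh) flush or assisted-merge operations run at the same time. A self-contained sketch of the same pattern, independent of the types in this file:

package main

import (
	"fmt"
	"runtime"
	"sync"
)

var sem = make(chan struct{}, runtime.NumCPU()) // one slot per CPU

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		sem <- struct{}{} // blocks while all slots are taken
		go func(n int) {
			defer func() {
				<-sem // release the slot
				wg.Done()
			}()
			fmt.Println("processing chunk", n)
		}(i)
	}
	wg.Wait()
}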
+func (pt *partition) assistedMergeForInmemoryParts() {
+ for {
+ pt.partsLock.Lock()
+ ok := getNotInMergePartsCount(pt.inmemoryParts) < maxInmemoryPartsPerPartition
+ pt.partsLock.Unlock()
+ if ok {
+ return
+ }
+
+ // There are too many unmerged inmemory parts.
+ // This usually means that the app cannot keep up with the data ingestion rate.
+ // Assist with merging inmemory parts.
+ // Prioritize assisted merges over searches.
+ storagepacelimiter.Search.Inc()
+ err := pt.mergeInmemoryParts()
+ storagepacelimiter.Search.Dec()
+ if err == nil {
+ atomic.AddUint64(&pt.inmemoryAssistedMerges, 1)
+ continue
+ }
+ if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) {
+ return
+ }
+ logger.Panicf("FATAL: cannot merge inmemory parts: %s", err)
+ }
+}
+
+func getNotInMergePartsCount(pws []*partWrapper) int {
+ n := 0
+ for _, pw := range pws {
+ if !pw.isInMerge {
+ n++
+ }
+ }
+ return n
}
func getWaitGroup() *sync.WaitGroup {
@@ -524,11 +634,10 @@ func putWaitGroup(wg *sync.WaitGroup) {
var wgPool sync.Pool
-func (pt *partition) addRowsPart(rows []rawRow) {
+func (pt *partition) createInmemoryPart(rows []rawRow) *partWrapper {
if len(rows) == 0 {
- return
+ return nil
}
-
mp := getInmemoryPart()
mp.InitFromRows(rows)
@@ -545,40 +654,22 @@ func (pt *partition) addRowsPart(rows []rawRow) {
logger.Panicf("BUG: the part %q cannot be added to partition %q because of too big MaxTimestamp; got %d; want at least %d",
&mp.ph, pt.smallPartsPath, mp.ph.MaxTimestamp, pt.tr.MaxTimestamp)
}
+ flushToDiskDeadline := time.Now().Add(dataFlushInterval)
+ return newPartWrapperFromInmemoryPart(mp, flushToDiskDeadline)
+}
+func newPartWrapperFromInmemoryPart(mp *inmemoryPart, flushToDiskDeadline time.Time) *partWrapper {
p, err := mp.NewPart()
if err != nil {
logger.Panicf("BUG: cannot create part from %q: %s", &mp.ph, err)
}
-
pw := &partWrapper{
- p: p,
- mp: mp,
- refCount: 1,
+ p: p,
+ mp: mp,
+ refCount: 1,
+ flushToDiskDeadline: flushToDiskDeadline,
}
-
- pt.partsLock.Lock()
- pt.smallParts = append(pt.smallParts, pw)
- ok := len(pt.smallParts) <= maxSmallPartsPerPartition
- pt.partsLock.Unlock()
- if ok {
- return
- }
-
- // The added part exceeds available limit. Help merging parts.
- //
- // Prioritize assisted merges over searches.
- storagepacelimiter.Search.Inc()
- err = pt.mergeSmallParts(false)
- storagepacelimiter.Search.Dec()
- if err == nil {
- atomic.AddUint64(&pt.smallAssistedMerges, 1)
- return
- }
- if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) || errors.Is(err, errReadOnlyMode) {
- return
- }
- logger.Panicf("FATAL: cannot merge small parts: %s", err)
+ return pw
}
// HasTimestamp returns true if the pt contains the given timestamp.
@@ -591,6 +682,10 @@ func (pt *partition) HasTimestamp(timestamp int64) bool {
// The appended parts must be released with PutParts.
func (pt *partition) GetParts(dst []*partWrapper) []*partWrapper {
pt.partsLock.Lock()
+ for _, pw := range pt.inmemoryParts {
+ pw.incRef()
+ }
+ dst = append(dst, pt.inmemoryParts...)
for _, pw := range pt.smallParts {
pw.incRef()
}
@@ -620,121 +715,139 @@ func (pt *partition) MustClose() {
// Wait until all the pending transaction deletions are finished.
pendingTxnDeletionsWG.Wait()
- logger.Infof("waiting for stale parts remover to stop on %q...", pt.smallPartsPath)
+ logger.Infof("waiting for service workers to stop on %q...", pt.smallPartsPath)
startTime := time.Now()
- pt.stalePartsRemoverWG.Wait()
- logger.Infof("stale parts remover stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath)
-
- logger.Infof("waiting for inmemory parts flusher to stop on %q...", pt.smallPartsPath)
- startTime = time.Now()
- pt.inmemoryPartsFlusherWG.Wait()
- logger.Infof("inmemory parts flusher stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath)
-
- logger.Infof("waiting for raw rows flusher to stop on %q...", pt.smallPartsPath)
- startTime = time.Now()
- pt.rawRowsFlusherWG.Wait()
- logger.Infof("raw rows flusher stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath)
-
- logger.Infof("waiting for small part mergers to stop on %q...", pt.smallPartsPath)
- startTime = time.Now()
- pt.smallPartsMergerWG.Wait()
- logger.Infof("small part mergers stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath)
-
- logger.Infof("waiting for big part mergers to stop on %q...", pt.bigPartsPath)
- startTime = time.Now()
- pt.bigPartsMergerWG.Wait()
- logger.Infof("big part mergers stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.bigPartsPath)
+ pt.wg.Wait()
+ logger.Infof("service workers stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath)
logger.Infof("flushing inmemory parts to files on %q...", pt.smallPartsPath)
startTime = time.Now()
+ pt.flushInmemoryRows()
+ logger.Infof("inmemory parts have been flushed to files in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath)
- // Flush raw rows the last time before exit.
- pt.flushRawRows(true)
-
- // Flush inmemory parts to disk.
- var pws []*partWrapper
- pt.partsLock.Lock()
- for _, pw := range pt.smallParts {
- if pw.mp == nil {
- continue
- }
- if pw.isInMerge {
- logger.Panicf("BUG: the inmemory part %q mustn't be in merge after stopping small parts merger in the partition %q", &pw.mp.ph, pt.smallPartsPath)
- }
- pw.isInMerge = true
- pws = append(pws, pw)
- }
- pt.partsLock.Unlock()
-
- if err := pt.mergePartsOptimal(pws, nil); err != nil {
- logger.Panicf("FATAL: cannot flush %d inmemory parts to files on %q: %s", len(pws), pt.smallPartsPath, err)
- }
- logger.Infof("%d inmemory parts have been flushed to files in %.3f seconds on %q", len(pws), time.Since(startTime).Seconds(), pt.smallPartsPath)
-
- // Remove references to smallParts from the pt, so they may be eventually closed
+ // Remove references from inmemoryParts, smallParts and bigParts, so they may be eventually closed
// after all the searches are done.
pt.partsLock.Lock()
+ inmemoryParts := pt.inmemoryParts
smallParts := pt.smallParts
- pt.smallParts = nil
- pt.partsLock.Unlock()
-
- for _, pw := range smallParts {
- pw.decRef()
- }
-
- // Remove references to bigParts from the pt, so they may be eventually closed
- // after all the searches are done.
- pt.partsLock.Lock()
bigParts := pt.bigParts
+ pt.inmemoryParts = nil
+ pt.smallParts = nil
pt.bigParts = nil
pt.partsLock.Unlock()
+ for _, pw := range inmemoryParts {
+ pw.decRef()
+ }
+ for _, pw := range smallParts {
+ pw.decRef()
+ }
for _, pw := range bigParts {
pw.decRef()
}
}
-func (pt *partition) startRawRowsFlusher() {
- pt.rawRowsFlusherWG.Add(1)
+func (pt *partition) startInmemoryPartsFlusher() {
+ pt.wg.Add(1)
go func() {
- pt.rawRowsFlusher()
- pt.rawRowsFlusherWG.Done()
+ pt.inmemoryPartsFlusher()
+ pt.wg.Done()
}()
}
-func (pt *partition) rawRowsFlusher() {
- ticker := time.NewTicker(rawRowsFlushInterval)
+func (pt *partition) startPendingRowsFlusher() {
+ pt.wg.Add(1)
+ go func() {
+ pt.pendingRowsFlusher()
+ pt.wg.Done()
+ }()
+}
+
+func (pt *partition) inmemoryPartsFlusher() {
+ ticker := time.NewTicker(dataFlushInterval)
defer ticker.Stop()
for {
select {
case <-pt.stopCh:
return
case <-ticker.C:
- pt.flushRawRows(false)
+ pt.flushInmemoryParts(false)
}
}
}
-func (pt *partition) flushRawRows(isFinal bool) {
- pt.rawRows.flush(pt, isFinal)
+func (pt *partition) pendingRowsFlusher() {
+ ticker := time.NewTicker(pendingRowsFlushInterval)
+ defer ticker.Stop()
+ var rows []rawRow
+ for {
+ select {
+ case <-pt.stopCh:
+ return
+ case <-ticker.C:
+ rows = pt.flushPendingRows(rows[:0], false)
+ }
+ }
}
-func (rrss *rawRowsShards) flush(pt *partition, isFinal bool) {
- var rowsToFlush []rawRow
- for i := range rrss.shards {
- rowsToFlush = rrss.shards[i].appendRawRowsToFlush(rowsToFlush, pt, isFinal)
+func (pt *partition) flushPendingRows(dst []rawRow, isFinal bool) []rawRow {
+ return pt.rawRows.flush(pt, dst, isFinal)
+}
+
+func (pt *partition) flushInmemoryRows() {
+ pt.rawRows.flush(pt, nil, true)
+ pt.flushInmemoryParts(true)
+}
+
+func (pt *partition) flushInmemoryParts(isFinal bool) {
+ for {
+ currentTime := time.Now()
+ var pws []*partWrapper
+
+ pt.partsLock.Lock()
+ for _, pw := range pt.inmemoryParts {
+ if !pw.isInMerge && (isFinal || pw.flushToDiskDeadline.Before(currentTime)) {
+ pw.isInMerge = true
+ pws = append(pws, pw)
+ }
+ }
+ pt.partsLock.Unlock()
+
+ if err := pt.mergePartsOptimal(pws, nil); err != nil {
+ logger.Panicf("FATAL: cannot merge in-memory parts: %s", err)
+ }
+ if !isFinal {
+ return
+ }
+ pt.partsLock.Lock()
+ n := len(pt.inmemoryParts)
+ pt.partsLock.Unlock()
+ if n == 0 {
+ // All the in-memory parts were flushed to disk.
+ return
+ }
+ // Some parts weren't flushed to disk because they were being merged.
+ // Sleep for a while and try flushing them again.
+ time.Sleep(10 * time.Millisecond)
}
- pt.flushRowsToParts(rowsToFlush)
+}
+
+func (rrss *rawRowsShards) flush(pt *partition, dst []rawRow, isFinal bool) []rawRow {
+ for i := range rrss.shards {
+ dst = rrss.shards[i].appendRawRowsToFlush(dst, pt, isFinal)
+ }
+ pt.flushRowsToParts(dst)
+ return dst
}
func (rrs *rawRowsShard) appendRawRowsToFlush(dst []rawRow, pt *partition, isFinal bool) []rawRow {
currentTime := fasttime.UnixTimestamp()
- flushSeconds := int64(rawRowsFlushInterval.Seconds())
+ flushSeconds := int64(pendingRowsFlushInterval.Seconds())
if flushSeconds <= 0 {
flushSeconds = 1
}
lastFlushTime := atomic.LoadUint64(&rrs.lastFlushTime)
- if !isFinal && currentTime <= lastFlushTime+uint64(flushSeconds) {
+ if !isFinal && currentTime < lastFlushTime+uint64(flushSeconds) {
// Fast path - nothing to flush
return dst
}
@@ -747,112 +860,73 @@ func (rrs *rawRowsShard) appendRawRowsToFlush(dst []rawRow, pt *partition, isFin
return dst
}
-func (pt *partition) startInmemoryPartsFlusher() {
- pt.inmemoryPartsFlusherWG.Add(1)
- go func() {
- pt.inmemoryPartsFlusher()
- pt.inmemoryPartsFlusherWG.Done()
- }()
-}
-
-func (pt *partition) inmemoryPartsFlusher() {
- ticker := time.NewTicker(inmemoryPartsFlushInterval)
- defer ticker.Stop()
- var pwsBuf []*partWrapper
- var err error
- for {
- select {
- case <-pt.stopCh:
- return
- case <-ticker.C:
- pwsBuf, err = pt.flushInmemoryParts(pwsBuf[:0], false)
- if err != nil {
- logger.Panicf("FATAL: cannot flush inmemory parts: %s", err)
- }
+func (pt *partition) mergePartsOptimal(pws []*partWrapper, stopCh <-chan struct{}) error {
+ sortPartsForOptimalMerge(pws)
+ for len(pws) > 0 {
+ n := defaultPartsToMerge
+ if n > len(pws) {
+ n = len(pws)
}
- }
-}
-
-func (pt *partition) flushInmemoryParts(dstPws []*partWrapper, force bool) ([]*partWrapper, error) {
- currentTime := fasttime.UnixTimestamp()
- flushSeconds := int64(inmemoryPartsFlushInterval.Seconds())
- if flushSeconds <= 0 {
- flushSeconds = 1
- }
-
- // Inmemory parts may present only in small parts.
- pt.partsLock.Lock()
- for _, pw := range pt.smallParts {
- if pw.mp == nil || pw.isInMerge {
+ pwsChunk := pws[:n]
+ pws = pws[n:]
+ err := pt.mergeParts(pwsChunk, stopCh, true)
+ if err == nil {
continue
}
- if force || currentTime-pw.mp.creationTime >= uint64(flushSeconds) {
- pw.isInMerge = true
- dstPws = append(dstPws, pw)
+ pt.releasePartsToMerge(pws)
+ if errors.Is(err, errForciblyStopped) {
+ return nil
}
- }
- pt.partsLock.Unlock()
-
- if err := pt.mergePartsOptimal(dstPws, nil); err != nil {
- return dstPws, fmt.Errorf("cannot merge %d inmemory parts: %w", len(dstPws), err)
- }
- return dstPws, nil
-}
-
-func (pt *partition) mergePartsOptimal(pws []*partWrapper, stopCh <-chan struct{}) error {
- for len(pws) > defaultPartsToMerge {
- pwsChunk := pws[:defaultPartsToMerge]
- pws = pws[defaultPartsToMerge:]
- if err := pt.mergeParts(pwsChunk, stopCh); err != nil {
- pt.releasePartsToMerge(pws)
- return fmt.Errorf("cannot merge %d parts: %w", defaultPartsToMerge, err)
- }
- }
- if len(pws) == 0 {
- return nil
- }
- if err := pt.mergeParts(pws, stopCh); err != nil {
- return fmt.Errorf("cannot merge %d parts: %w", len(pws), err)
+ return fmt.Errorf("cannot merge parts optimally: %w", err)
}
return nil
}
-// ForceMergeAllParts runs merge for all the parts in pt - small and big.
+// ForceMergeAllParts runs merge for all the parts in pt.
func (pt *partition) ForceMergeAllParts() error {
- var pws []*partWrapper
- pt.partsLock.Lock()
- if !hasActiveMerges(pt.smallParts) && !hasActiveMerges(pt.bigParts) {
- pws = appendAllPartsToMerge(pws, pt.smallParts)
- pws = appendAllPartsToMerge(pws, pt.bigParts)
- }
- pt.partsLock.Unlock()
-
+ pws := pt.getAllPartsForMerge()
if len(pws) == 0 {
// Nothing to merge.
return nil
}
+ for {
+ // Check whether there is enough disk space for merging pws.
+ newPartSize := getPartsSize(pws)
+ maxOutBytes := fs.MustGetFreeSpace(pt.bigPartsPath)
+ if newPartSize > maxOutBytes {
+ freeSpaceNeededBytes := newPartSize - maxOutBytes
+ forceMergeLogger.Warnf("cannot initiate force merge for the partition %s; additional space needed: %d bytes", pt.name, freeSpaceNeededBytes)
+ return nil
+ }
- // Check whether there is enough disk space for merging pws.
- newPartSize := getPartsSize(pws)
- maxOutBytes := fs.MustGetFreeSpace(pt.bigPartsPath)
- if newPartSize > maxOutBytes {
- freeSpaceNeededBytes := newPartSize - maxOutBytes
- forceMergeLogger.Warnf("cannot initiate force merge for the partition %s; additional space needed: %d bytes", pt.name, freeSpaceNeededBytes)
- return nil
+ // If len(pws) == 1, then the merge must run anyway.
+ // This allows applying the configured retention, removing the deleted series
+ // and performing de-duplication if needed.
+ if err := pt.mergePartsOptimal(pws, pt.stopCh); err != nil {
+ return fmt.Errorf("cannot force merge %d parts from partition %q: %w", len(pws), pt.name, err)
+ }
+ pws = pt.getAllPartsForMerge()
+ if len(pws) <= 1 {
+ return nil
+ }
}
-
- // If len(pws) == 1, then the merge must run anyway.
- // This allows applying the configured retention, removing the deleted series
- // and performing de-duplication if needed.
- if err := pt.mergePartsOptimal(pws, pt.stopCh); err != nil {
- return fmt.Errorf("cannot force merge %d parts from partition %q: %w", len(pws), pt.name, err)
- }
- return nil
}
var forceMergeLogger = logger.WithThrottler("forceMerge", time.Minute)
-func appendAllPartsToMerge(dst, src []*partWrapper) []*partWrapper {
+func (pt *partition) getAllPartsForMerge() []*partWrapper {
+ var pws []*partWrapper
+ pt.partsLock.Lock()
+ if !hasActiveMerges(pt.inmemoryParts) && !hasActiveMerges(pt.smallParts) && !hasActiveMerges(pt.bigParts) {
+ pws = appendAllPartsForMerge(pws, pt.inmemoryParts)
+ pws = appendAllPartsForMerge(pws, pt.smallParts)
+ pws = appendAllPartsForMerge(pws, pt.bigParts)
+ }
+ pt.partsLock.Unlock()
+ return pws
+}
+
+func appendAllPartsForMerge(dst, src []*partWrapper) []*partWrapper {
for _, pw := range src {
if pw.isInMerge {
logger.Panicf("BUG: part %q is already in merge", pw.p.path)
@@ -872,10 +946,9 @@ func hasActiveMerges(pws []*partWrapper) bool {
return false
}
-var (
- bigMergeWorkersCount = getDefaultMergeConcurrency(4)
- smallMergeWorkersCount = getDefaultMergeConcurrency(16)
-)
+var mergeWorkersLimitCh = make(chan struct{}, getDefaultMergeConcurrency(16))
+
+var bigMergeWorkersLimitCh = make(chan struct{}, getDefaultMergeConcurrency(4))
func getDefaultMergeConcurrency(max int) int {
v := (cgroup.AvailableCPUs() + 1) / 2
@@ -893,47 +966,28 @@ func SetBigMergeWorkersCount(n int) {
// Do nothing
return
}
- bigMergeWorkersCount = n
+ bigMergeWorkersLimitCh = make(chan struct{}, n)
}
-// SetSmallMergeWorkersCount sets the maximum number of concurrent mergers for small blocks.
+// SetMergeWorkersCount sets the maximum number of concurrent mergers for parts.
//
// The function must be called before opening or creating any storage.
-func SetSmallMergeWorkersCount(n int) {
+func SetMergeWorkersCount(n int) {
if n <= 0 {
// Do nothing
return
}
- smallMergeWorkersCount = n
+ mergeWorkersLimitCh = make(chan struct{}, n)
}
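A note on ordering, inferred from the code rather than stated anywhere: startMergeWorkers below sizes its goroutine pool from cap(mergeWorkersLimitCh) at the moment a partition is created or opened, and swapping the channel while merges are in flight would race with concurrent sends and receives, so both setters are only safe before any storage is opened. A hypothetical startup sketch, assuming the lib/storage import:

// setupMergeLimits must run before any partition is created or opened,
// since startMergeWorkers reads cap(mergeWorkersLimitCh) at that point.
func setupMergeLimits(workers int) {
	storage.SetMergeWorkersCount(workers)        // recreates mergeWorkersLimitCh
	storage.SetBigMergeWorkersCount(workers / 4) // recreates bigMergeWorkersLimitCh
}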
func (pt *partition) startMergeWorkers() {
- for i := 0; i < smallMergeWorkersCount; i++ {
- pt.smallPartsMergerWG.Add(1)
+ for i := 0; i < cap(mergeWorkersLimitCh); i++ {
+ pt.wg.Add(1)
go func() {
- pt.smallPartsMerger()
- pt.smallPartsMergerWG.Done()
+ pt.mergeWorker()
+ pt.wg.Done()
}()
}
- for i := 0; i < bigMergeWorkersCount; i++ {
- pt.bigPartsMergerWG.Add(1)
- go func() {
- pt.bigPartsMerger()
- pt.bigPartsMergerWG.Done()
- }()
- }
-}
-
-func (pt *partition) bigPartsMerger() {
- if err := pt.partsMerger(pt.mergeBigParts); err != nil {
- logger.Panicf("FATAL: unrecoverable error when merging big parts in the partition %q: %s", pt.bigPartsPath, err)
- }
-}
-
-func (pt *partition) smallPartsMerger() {
- if err := pt.partsMerger(pt.mergeSmallParts); err != nil {
- logger.Panicf("FATAL: unrecoverable error when merging small parts in the partition %q: %s", pt.smallPartsPath, err)
- }
}
const (
@@ -941,13 +995,16 @@ const (
maxMergeSleepTime = 10 * time.Second
)
-func (pt *partition) partsMerger(mergerFunc func(isFinal bool) error) error {
+func (pt *partition) mergeWorker() {
sleepTime := minMergeSleepTime
var lastMergeTime uint64
isFinal := false
t := time.NewTimer(sleepTime)
for {
- err := mergerFunc(isFinal)
+ // Limit the number of concurrent calls to mergeExistingParts, since the total number of merge workers across all partitions may exceed cap(mergeWorkersLimitCh).
+ mergeWorkersLimitCh <- struct{}{}
+ err := pt.mergeExistingParts(isFinal)
+ <-mergeWorkersLimitCh
if err == nil {
// Try merging additional parts.
sleepTime = minMergeSleepTime
@@ -957,10 +1014,11 @@ func (pt *partition) partsMerger(mergerFunc func(isFinal bool) error) error {
}
if errors.Is(err, errForciblyStopped) {
// The merger has been stopped.
- return nil
+ return
}
if !errors.Is(err, errNothingToMerge) && !errors.Is(err, errReadOnlyMode) {
- return err
+ // Unexpected error.
+ logger.Panicf("FATAL: unrecoverable error when merging parts in the partition (%q, %q): %s", pt.smallPartsPath, pt.bigPartsPath, err)
}
if finalMergeDelaySeconds > 0 && fasttime.UnixTimestamp()-lastMergeTime > finalMergeDelaySeconds {
// We have free time for merging into bigger parts.
@@ -977,7 +1035,7 @@ func (pt *partition) partsMerger(mergerFunc func(isFinal bool) error) error {
}
select {
case <-pt.stopCh:
- return nil
+ return
case <-t.C:
t.Reset(sleepTime)
}
@@ -996,6 +1054,40 @@ func SetFinalMergeDelay(delay time.Duration) {
return
}
finalMergeDelaySeconds = uint64(delay.Seconds() + 1)
+ mergeset.SetFinalMergeDelay(delay)
+}
+
+func getMaxInmemoryPartSize() uint64 {
+ // Allocate 10% of allowed memory for in-memory parts.
+ n := uint64(0.1 * float64(memory.Allowed()) / maxInmemoryPartsPerPartition)
+ if n < 1e6 {
+ n = 1e6
+ }
+ return n
+}
+
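A quick worked example of the sizing above, with a hypothetical memory budget: if memory.Allowed() is 4 GiB, each in-memory part is capped at 0.1 * 4 GiB / 32, about 13.4 MB, and the floor keeps the cap at 1 MB or more on small machines. With at most maxInmemoryPartsPerPartition such parts, the unmerged in-memory data of a partition stays around 10% of the allowed memory.

// Illustrative numbers only: 0.1 * 4 GiB / 32 parts ≈ 13.4 MB per in-memory part.
n := uint64(0.1 * float64(4<<30) / maxInmemoryPartsPerPartition)
_ = n // 13421772 bytes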
+func (pt *partition) getMaxSmallPartSize() uint64 {
+ // Small parts are cached in the OS page cache,
+ // so limit their size by the remaining free RAM.
+ mem := memory.Remaining()
+ // It is expected that no more than defaultPartsToMerge/2 parts exist
+ // in the OS page cache before they are merged into a bigger part.
+ // Half of the remaining RAM must be left for lib/mergeset parts,
+ // so the limit is calculated with the code below:
+ n := uint64(mem) / defaultPartsToMerge
+ if n < 10e6 {
+ n = 10e6
+ }
+ // Make sure the output part fits available disk space for small parts.
+ sizeLimit := getMaxOutBytes(pt.smallPartsPath, cap(mergeWorkersLimitCh))
+ if n > sizeLimit {
+ n = sizeLimit
+ }
+ return n
+}
+
+func (pt *partition) getMaxBigPartSize() uint64 {
+ return getMaxOutBytes(pt.bigPartsPath, cap(bigMergeWorkersLimitCh))
}
func getMaxOutBytes(path string, workersCount int) uint64 {
@@ -1017,56 +1109,35 @@ func (pt *partition) canBackgroundMerge() bool {
var errReadOnlyMode = fmt.Errorf("storage is in readonly mode")
-func (pt *partition) mergeBigParts(isFinal bool) error {
- if !pt.canBackgroundMerge() {
- // Do not perform merge in read-only mode, since this may result in disk space shortage.
- // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2603
- return errReadOnlyMode
- }
- maxOutBytes := getMaxOutBytes(pt.bigPartsPath, bigMergeWorkersCount)
+func (pt *partition) mergeInmemoryParts() error {
+ maxOutBytes := pt.getMaxBigPartSize()
pt.partsLock.Lock()
- pws, needFreeSpace := getPartsToMerge(pt.bigParts, maxOutBytes, isFinal)
+ pws, needFreeSpace := getPartsToMerge(pt.inmemoryParts, maxOutBytes, false)
pt.partsLock.Unlock()
- atomicSetBool(&pt.bigMergeNeedFreeDiskSpace, needFreeSpace)
- return pt.mergeParts(pws, pt.stopCh)
+ atomicSetBool(&pt.mergeNeedFreeDiskSpace, needFreeSpace)
+ return pt.mergeParts(pws, pt.stopCh, false)
}
-func (pt *partition) mergeSmallParts(isFinal bool) error {
+func (pt *partition) mergeExistingParts(isFinal bool) error {
if !pt.canBackgroundMerge() {
// Do not perform merge in read-only mode, since this may result in disk space shortage.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2603
return errReadOnlyMode
}
- // Try merging small parts to a big part at first.
- maxBigPartOutBytes := getMaxOutBytes(pt.bigPartsPath, bigMergeWorkersCount)
+ maxOutBytes := pt.getMaxBigPartSize()
+
pt.partsLock.Lock()
- pws, needFreeSpace := getPartsToMerge(pt.smallParts, maxBigPartOutBytes, isFinal)
+ dst := make([]*partWrapper, 0, len(pt.inmemoryParts)+len(pt.smallParts)+len(pt.bigParts))
+ dst = append(dst, pt.inmemoryParts...)
+ dst = append(dst, pt.smallParts...)
+ dst = append(dst, pt.bigParts...)
+ pws, needFreeSpace := getPartsToMerge(dst, maxOutBytes, isFinal)
pt.partsLock.Unlock()
- atomicSetBool(&pt.bigMergeNeedFreeDiskSpace, needFreeSpace)
- outSize := getPartsSize(pws)
- if outSize > maxSmallPartSize() {
- // Merge small parts to a big part.
- return pt.mergeParts(pws, pt.stopCh)
- }
-
- // Make sure that the output small part fits small parts storage.
- maxSmallPartOutBytes := getMaxOutBytes(pt.smallPartsPath, smallMergeWorkersCount)
- if outSize <= maxSmallPartOutBytes {
- // Merge small parts to a small part.
- return pt.mergeParts(pws, pt.stopCh)
- }
-
- // The output small part doesn't fit small parts storage. Try merging small parts according to maxSmallPartOutBytes limit.
- pt.releasePartsToMerge(pws)
- pt.partsLock.Lock()
- pws, needFreeSpace = getPartsToMerge(pt.smallParts, maxSmallPartOutBytes, isFinal)
- pt.partsLock.Unlock()
- atomicSetBool(&pt.smallMergeNeedFreeDiskSpace, needFreeSpace)
-
- return pt.mergeParts(pws, pt.stopCh)
+ atomicSetBool(&pt.mergeNeedFreeDiskSpace, needFreeSpace)
+ return pt.mergeParts(pws, pt.stopCh, isFinal)
}
func (pt *partition) releasePartsToMerge(pws []*partWrapper) {
@@ -1128,12 +1199,14 @@ func getMinDedupInterval(pws []*partWrapper) int64 {
return dMin
}
-// mergeParts merges pws.
+// mergeParts merges pws to a single resulting part.
//
// Merging is immediately stopped if stopCh is closed.
//
+// If isFinal is set, then the resulting part will be saved to disk.
+//
// All the parts inside pws must have isInMerge field set to true.
-func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) error {
+func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isFinal bool) error {
if len(pws) == 0 {
// Nothing to merge.
return errNothingToMerge
@@ -1142,187 +1215,355 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro
startTime := time.Now()
+ // Initialize destination paths.
+ dstPartType := pt.getDstPartType(pws, isFinal)
+ ptPath, tmpPartPath, mergeIdx := pt.getDstPartPaths(dstPartType)
+
+ if dstPartType == partBig {
+ bigMergeWorkersLimitCh <- struct{}{}
+ defer func() {
+ <-bigMergeWorkersLimitCh
+ }()
+ }
+
+ if isFinal && len(pws) == 1 && pws[0].mp != nil {
+ // Fast path: flush a single in-memory part to disk.
+ mp := pws[0].mp
+ if tmpPartPath == "" {
+ logger.Panicf("BUG: tmpPartPath must be non-empty")
+ }
+ if err := mp.StoreToDisk(tmpPartPath); err != nil {
+ return fmt.Errorf("cannot store in-memory part to %q: %w", tmpPartPath, err)
+ }
+ pwNew, err := pt.openCreatedPart(&mp.ph, pws, nil, ptPath, tmpPartPath, mergeIdx)
+ if err != nil {
+ return fmt.Errorf("cannot atomically register the created part: %w", err)
+ }
+ pt.swapSrcWithDstParts(pws, pwNew, dstPartType)
+ return nil
+ }
+
// Prepare BlockStreamReaders for source parts.
- bsrs := make([]*blockStreamReader, 0, len(pws))
- defer func() {
+ bsrs, err := openBlockStreamReaders(pws)
+ if err != nil {
+ return err
+ }
+ closeBlockStreamReaders := func() {
for _, bsr := range bsrs {
putBlockStreamReader(bsr)
}
- }()
+ bsrs = nil
+ }
+
+ // Prepare BlockStreamWriter for destination part.
+ srcSize := uint64(0)
+ srcRowsCount := uint64(0)
+ srcBlocksCount := uint64(0)
+ for _, pw := range pws {
+ srcSize += pw.p.size
+ srcRowsCount += pw.p.ph.RowsCount
+ srcBlocksCount += pw.p.ph.BlocksCount
+ }
+ rowsPerBlock := float64(srcRowsCount) / float64(srcBlocksCount)
+ compressLevel := getCompressLevel(rowsPerBlock)
+ bsw := getBlockStreamWriter()
+ var mpNew *inmemoryPart
+ if dstPartType == partInmemory {
+ mpNew = getInmemoryPart()
+ bsw.InitFromInmemoryPart(mpNew, compressLevel)
+ } else {
+ if tmpPartPath == "" {
+ logger.Panicf("BUG: tmpPartPath must be non-empty")
+ }
+ nocache := dstPartType == partBig
+ if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil {
+ closeBlockStreamReaders()
+ return fmt.Errorf("cannot create destination part at %q: %w", tmpPartPath, err)
+ }
+ }
+
+ // Merge source parts to destination part.
+ ph, err := pt.mergePartsInternal(tmpPartPath, bsw, bsrs, dstPartType, stopCh)
+ putBlockStreamWriter(bsw)
+ closeBlockStreamReaders()
+ if err != nil {
+ return fmt.Errorf("cannot merge %d parts: %w", len(pws), err)
+ }
+ if mpNew != nil {
+ // Update partHeader for destination inmemory part after the merge.
+ mpNew.ph = *ph
+ }
+
+ // Atomically move the created part from tmpPartPath to its destination
+ // and swap the source parts with the newly created part.
+ pwNew, err := pt.openCreatedPart(ph, pws, mpNew, ptPath, tmpPartPath, mergeIdx)
+ if err != nil {
+ return fmt.Errorf("cannot atomically register the created part: %w", err)
+ }
+ pt.swapSrcWithDstParts(pws, pwNew, dstPartType)
+
+ d := time.Since(startTime)
+ if d <= 30*time.Second {
+ return nil
+ }
+
+ // Log stats for long merges.
+ dstRowsCount := uint64(0)
+ dstBlocksCount := uint64(0)
+ dstSize := uint64(0)
+ dstPartPath := ""
+ if pwNew != nil {
+ pDst := pwNew.p
+ dstRowsCount = pDst.ph.RowsCount
+ dstBlocksCount = pDst.ph.BlocksCount
+ dstSize = pDst.size
+ dstPartPath = pDst.String()
+ }
+ durationSecs := d.Seconds()
+ rowsPerSec := int(float64(srcRowsCount) / durationSecs)
+ logger.Infof("merged (%d parts, %d rows, %d blocks, %d bytes) into (1 part, %d rows, %d blocks, %d bytes) in %.3f seconds at %d rows/sec to %q",
+ len(pws), srcRowsCount, srcBlocksCount, srcSize, dstRowsCount, dstBlocksCount, dstSize, durationSecs, rowsPerSec, dstPartPath)
+
+ return nil
+}
+
+func getFlushToDiskDeadline(pws []*partWrapper) time.Time {
+ d := pws[0].flushToDiskDeadline
+ for _, pw := range pws[1:] {
+ if pw.flushToDiskDeadline.Before(d) {
+ d = pw.flushToDiskDeadline
+ }
+ }
+ return d
+}
+
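The helper above propagates the earliest deadline of the source parts: merging three in-memory parts whose deadlines are, say, t+5s, t+7s and t+9s yields a part that still has to reach disk by t+5s. In other words, merging in memory never extends the window during which recently ingested data could be lost on a crash, so the guarantee configured via SetDataFlushInterval is preserved.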
+type partType int
+
+var (
+ partInmemory = partType(0)
+ partSmall = partType(1)
+ partBig = partType(2)
+)
+
+func (pt *partition) getDstPartType(pws []*partWrapper, isFinal bool) partType {
+ dstPartSize := getPartsSize(pws)
+ if dstPartSize > pt.getMaxSmallPartSize() {
+ return partBig
+ }
+ if isFinal || dstPartSize > getMaxInmemoryPartSize() {
+ return partSmall
+ }
+ if !areAllInmemoryParts(pws) {
+ // If at least one source part is file-based,
+ // then the destination part must also be file-based for durability reasons.
+ return partSmall
+ }
+ return partInmemory
+}
+
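To make the tiering rule above concrete, a hypothetical walk-through; the two limits are illustrative values, not what any real machine would report:

// Assume getMaxInmemoryPartSize() ≈ 13 MB and pt.getMaxSmallPartSize() ≈ 300 MB.
//   five 2 MB in-memory parts (10 MB total, all sources in memory) -> partInmemory
//   source parts totaling 50 MB                                    -> partSmall
//   source parts totaling 2 GB                                     -> partBig
//   isFinal with 1 MB of in-memory sources                         -> partSmall (data must reach disk)
//   10 MB total, but one source is already a file-based part       -> partSmall (durability)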
+func (pt *partition) getDstPartPaths(dstPartType partType) (string, string, uint64) {
+ ptPath := ""
+ switch dstPartType {
+ case partSmall:
+ ptPath = pt.smallPartsPath
+ case partBig:
+ ptPath = pt.bigPartsPath
+ case partInmemory:
+ ptPath = pt.smallPartsPath
+ default:
+ logger.Panicf("BUG: unknown partType=%d", dstPartType)
+ }
+ ptPath = filepath.Clean(ptPath)
+ mergeIdx := pt.nextMergeIdx()
+ tmpPartPath := ""
+ if dstPartType != partInmemory {
+ tmpPartPath = fmt.Sprintf("%s/tmp/%016X", ptPath, mergeIdx)
+ }
+ return ptPath, tmpPartPath, mergeIdx
+}
+
+func openBlockStreamReaders(pws []*partWrapper) ([]*blockStreamReader, error) {
+ bsrs := make([]*blockStreamReader, 0, len(pws))
for _, pw := range pws {
bsr := getBlockStreamReader()
if pw.mp != nil {
bsr.InitFromInmemoryPart(pw.mp)
} else {
if err := bsr.InitFromFilePart(pw.p.path); err != nil {
- return fmt.Errorf("cannot open source part for merging: %w", err)
+ for _, bsr := range bsrs {
+ putBlockStreamReader(bsr)
+ }
+ return nil, fmt.Errorf("cannot open source part for merging: %w", err)
}
}
bsrs = append(bsrs, bsr)
}
+ return bsrs, nil
+}
- outSize := uint64(0)
- outRowsCount := uint64(0)
- outBlocksCount := uint64(0)
- for _, pw := range pws {
- outSize += pw.p.size
- outRowsCount += pw.p.ph.RowsCount
- outBlocksCount += pw.p.ph.BlocksCount
- }
- isBigPart := outSize > maxSmallPartSize()
- nocache := isBigPart
-
- // Prepare BlockStreamWriter for destination part.
- ptPath := pt.smallPartsPath
- if isBigPart {
- ptPath = pt.bigPartsPath
- }
- ptPath = filepath.Clean(ptPath)
- mergeIdx := pt.nextMergeIdx()
- tmpPartPath := fmt.Sprintf("%s/tmp/%016X", ptPath, mergeIdx)
- bsw := getBlockStreamWriter()
- compressLevel := getCompressLevelForRowsCount(outRowsCount, outBlocksCount)
- if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil {
- return fmt.Errorf("cannot create destination part %q: %w", tmpPartPath, err)
- }
-
- // Merge parts.
+func (pt *partition) mergePartsInternal(tmpPartPath string, bsw *blockStreamWriter, bsrs []*blockStreamReader, dstPartType partType, stopCh <-chan struct{}) (*partHeader, error) {
var ph partHeader
- rowsMerged := &pt.smallRowsMerged
- rowsDeleted := &pt.smallRowsDeleted
- if isBigPart {
+ var rowsMerged *uint64
+ var rowsDeleted *uint64
+ var mergesCount *uint64
+ var activeMerges *uint64
+ switch dstPartType {
+ case partInmemory:
+ rowsMerged = &pt.inmemoryRowsMerged
+ rowsDeleted = &pt.inmemoryRowsDeleted
+ mergesCount = &pt.inmemoryMergesCount
+ activeMerges = &pt.activeInmemoryMerges
+ case partSmall:
+ rowsMerged = &pt.smallRowsMerged
+ rowsDeleted = &pt.smallRowsDeleted
+ mergesCount = &pt.smallMergesCount
+ activeMerges = &pt.activeSmallMerges
+ case partBig:
rowsMerged = &pt.bigRowsMerged
rowsDeleted = &pt.bigRowsDeleted
- atomic.AddUint64(&pt.bigMergesCount, 1)
- atomic.AddUint64(&pt.activeBigMerges, 1)
- } else {
- atomic.AddUint64(&pt.smallMergesCount, 1)
- atomic.AddUint64(&pt.activeSmallMerges, 1)
+ mergesCount = &pt.bigMergesCount
+ activeMerges = &pt.activeBigMerges
+ default:
+ logger.Panicf("BUG: unknown partType=%d", dstPartType)
}
- retentionDeadline := timestampFromTime(startTime) - pt.s.retentionMsecs
+ retentionDeadline := timestampFromTime(time.Now()) - pt.s.retentionMsecs
+ atomic.AddUint64(activeMerges, 1)
err := mergeBlockStreams(&ph, bsw, bsrs, stopCh, pt.s, retentionDeadline, rowsMerged, rowsDeleted)
- if isBigPart {
- atomic.AddUint64(&pt.activeBigMerges, ^uint64(0))
- } else {
- atomic.AddUint64(&pt.activeSmallMerges, ^uint64(0))
- }
- putBlockStreamWriter(bsw)
+ atomic.AddUint64(activeMerges, ^uint64(0))
+ atomic.AddUint64(mergesCount, 1)
if err != nil {
- return fmt.Errorf("error when merging parts to %q: %w", tmpPartPath, err)
+ return nil, fmt.Errorf("cannot merge parts to %q: %w", tmpPartPath, err)
}
-
- // Close bsrs.
- for _, bsr := range bsrs {
- putBlockStreamReader(bsr)
+ if tmpPartPath != "" {
+ ph.MinDedupInterval = GetDedupInterval(ph.MaxTimestamp)
+ if err := ph.writeMinDedupInterval(tmpPartPath); err != nil {
+ return nil, fmt.Errorf("cannot store min dedup interval: %w", err)
+ }
}
- bsrs = nil
+ return &ph, nil
+}
- ph.MinDedupInterval = GetDedupInterval(ph.MaxTimestamp)
- if err := ph.writeMinDedupInterval(tmpPartPath); err != nil {
- return fmt.Errorf("cannot store min dedup interval for part %q: %w", tmpPartPath, err)
+func (pt *partition) openCreatedPart(ph *partHeader, pws []*partWrapper, mpNew *inmemoryPart, ptPath, tmpPartPath string, mergeIdx uint64) (*partWrapper, error) {
+ dstPartPath := ""
+ if mpNew == nil || !areAllInmemoryParts(pws) {
+ // Either source or destination parts are located on disk.
+ // Create a transaction for atomically deleting old parts and moving the new part to its destination on disk.
+ var bb bytesutil.ByteBuffer
+ for _, pw := range pws {
+ if pw.mp == nil {
+ fmt.Fprintf(&bb, "%s\n", pw.p.path)
+ }
+ }
+ if ph.RowsCount > 0 {
+ // The destination part may have no rows if they are deleted during the merge.
+ dstPartPath = ph.Path(ptPath, mergeIdx)
+ }
+ fmt.Fprintf(&bb, "%s -> %s\n", tmpPartPath, dstPartPath)
+ txnPath := fmt.Sprintf("%s/txn/%016X", ptPath, mergeIdx)
+ if err := fs.WriteFileAtomically(txnPath, bb.B, false); err != nil {
+ return nil, fmt.Errorf("cannot create transaction file %q: %w", txnPath, err)
+ }
+
+ // Run the created transaction.
+ if err := runTransaction(&pt.snapshotLock, pt.smallPartsPath, pt.bigPartsPath, txnPath); err != nil {
+ return nil, fmt.Errorf("cannot execute transaction %q: %w", txnPath, err)
+ }
}
+ // Open the created part.
+ if ph.RowsCount == 0 {
+ // The created part is empty.
+ return nil, nil
+ }
+ if mpNew != nil {
+ // Open the created part from memory.
+ flushToDiskDeadline := getFlushToDiskDeadline(pws)
+ pwNew := newPartWrapperFromInmemoryPart(mpNew, flushToDiskDeadline)
+ return pwNew, nil
+ }
+ // Open the created part from disk.
+ pNew, err := openFilePart(dstPartPath)
+ if err != nil {
+ return nil, fmt.Errorf("cannot open merged part %q: %w", dstPartPath, err)
+ }
+ pwNew := &partWrapper{
+ p: pNew,
+ refCount: 1,
+ }
+ return pwNew, nil
+}
- // Create a transaction for atomic deleting old parts and moving
- // new part to its destination place.
- var bb bytesutil.ByteBuffer
+func areAllInmemoryParts(pws []*partWrapper) bool {
for _, pw := range pws {
if pw.mp == nil {
- fmt.Fprintf(&bb, "%s\n", pw.p.path)
+ return false
}
}
- dstPartPath := ""
- if ph.RowsCount > 0 {
- // The destination part may have no rows if they are deleted
- // during the merge due to deleted time series.
- dstPartPath = ph.Path(ptPath, mergeIdx)
- }
- fmt.Fprintf(&bb, "%s -> %s\n", tmpPartPath, dstPartPath)
- txnPath := fmt.Sprintf("%s/txn/%016X", ptPath, mergeIdx)
- if err := fs.WriteFileAtomically(txnPath, bb.B, false); err != nil {
- return fmt.Errorf("cannot create transaction file %q: %w", txnPath, err)
- }
+ return true
+}
- // Run the created transaction.
- if err := runTransaction(&pt.snapshotLock, pt.smallPartsPath, pt.bigPartsPath, txnPath); err != nil {
- return fmt.Errorf("cannot execute transaction %q: %w", txnPath, err)
- }
-
- var newPW *partWrapper
- var newPSize uint64
- if len(dstPartPath) > 0 {
- // Open the merged part if it is non-empty.
- newP, err := openFilePart(dstPartPath)
- if err != nil {
- return fmt.Errorf("cannot open merged part %q: %w", dstPartPath, err)
- }
- newPSize = newP.size
- newPW = &partWrapper{
- p: newP,
- refCount: 1,
- }
- }
-
- // Atomically remove old parts and add new part.
+func (pt *partition) swapSrcWithDstParts(pws []*partWrapper, pwNew *partWrapper, dstPartType partType) {
+ // Atomically unregister old parts and add new part to pt.
m := make(map[*partWrapper]bool, len(pws))
for _, pw := range pws {
m[pw] = true
}
if len(m) != len(pws) {
- logger.Panicf("BUG: %d duplicate parts found in the merge of %d parts", len(pws)-len(m), len(pws))
+ logger.Panicf("BUG: %d duplicate parts found when merging %d parts", len(pws)-len(m), len(pws))
}
+ removedInmemoryParts := 0
removedSmallParts := 0
removedBigParts := 0
+
pt.partsLock.Lock()
- pt.smallParts, removedSmallParts = removeParts(pt.smallParts, m, false)
- pt.bigParts, removedBigParts = removeParts(pt.bigParts, m, true)
- if newPW != nil {
- if isBigPart {
- pt.bigParts = append(pt.bigParts, newPW)
- } else {
- pt.smallParts = append(pt.smallParts, newPW)
+ pt.inmemoryParts, removedInmemoryParts = removeParts(pt.inmemoryParts, m)
+ pt.smallParts, removedSmallParts = removeParts(pt.smallParts, m)
+ pt.bigParts, removedBigParts = removeParts(pt.bigParts, m)
+ if pwNew != nil {
+ switch dstPartType {
+ case partInmemory:
+ pt.inmemoryParts = append(pt.inmemoryParts, pwNew)
+ case partSmall:
+ pt.smallParts = append(pt.smallParts, pwNew)
+ case partBig:
+ pt.bigParts = append(pt.bigParts, pwNew)
+ default:
+ logger.Panicf("BUG: unknown partType=%d", dstPartType)
}
}
pt.partsLock.Unlock()
- if removedSmallParts+removedBigParts != len(m) {
- logger.Panicf("BUG: unexpected number of parts removed; got %d, want %d", removedSmallParts+removedBigParts, len(m))
+
+ removedParts := removedInmemoryParts + removedSmallParts + removedBigParts
+ if removedParts != len(m) {
+ logger.Panicf("BUG: unexpected number of parts removed; got %d, want %d", removedParts, len(m))
}
// Remove partition references from old parts.
for _, pw := range pws {
pw.decRef()
}
-
- d := time.Since(startTime)
- if d > 30*time.Second {
- logger.Infof("merged %d rows across %d blocks in %.3f seconds at %d rows/sec to %q; sizeBytes: %d",
- outRowsCount, outBlocksCount, d.Seconds(), int(float64(outRowsCount)/d.Seconds()), dstPartPath, newPSize)
- }
-
- return nil
}
-func getCompressLevelForRowsCount(rowsCount, blocksCount uint64) int {
- avgRowsPerBlock := rowsCount / blocksCount
+func getCompressLevel(rowsPerBlock float64) int {
// See https://github.com/facebook/zstd/releases/tag/v1.3.4 about negative compression levels.
- if avgRowsPerBlock <= 10 {
+ if rowsPerBlock <= 10 {
return -5
}
- if avgRowsPerBlock <= 50 {
+ if rowsPerBlock <= 50 {
return -2
}
- if avgRowsPerBlock <= 200 {
+ if rowsPerBlock <= 200 {
return -1
}
- if avgRowsPerBlock <= 500 {
+ if rowsPerBlock <= 500 {
return 1
}
- if avgRowsPerBlock <= 1000 {
+ if rowsPerBlock <= 1000 {
return 2
}
- if avgRowsPerBlock <= 2000 {
+ if rowsPerBlock <= 2000 {
return 3
}
- if avgRowsPerBlock <= 4000 {
+ if rowsPerBlock <= 4000 {
return 4
}
return 5
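For orientation, the rowsPerBlock value passed into getCompressLevel by mergeParts is srcRowsCount divided by srcBlocksCount, so for example 8192 rows spread over 10 blocks gives roughly 819 rows per block and level 2, while sparse data at around 8 rows per block gets the negative level -5, which trades compression ratio for speed.

// Illustrative: 8192 rows in 10 blocks -> ~819.2 rows per block -> level 2.
level := getCompressLevel(float64(8192) / float64(10))
_ = level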
@@ -1332,24 +1573,24 @@ func (pt *partition) nextMergeIdx() uint64 {
return atomic.AddUint64(&pt.mergeIdx, 1)
}
-func removeParts(pws []*partWrapper, partsToRemove map[*partWrapper]bool, isBig bool) ([]*partWrapper, int) {
- removedParts := 0
+func removeParts(pws []*partWrapper, partsToRemove map[*partWrapper]bool) ([]*partWrapper, int) {
dst := pws[:0]
for _, pw := range pws {
if !partsToRemove[pw] {
dst = append(dst, pw)
- continue
}
- removedParts++
}
- return dst, removedParts
+ for i := len(dst); i < len(pws); i++ {
+ pws[i] = nil
+ }
+ return dst, len(pws) - len(dst)
}
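A side note on the rewritten removeParts, as I read it: the slice is filtered in place and the trailing slots are set to nil, so removed *partWrapper pointers are not kept alive by the backing array and can be collected once their reference count drops. The same idiom in a generic, self-contained form:

package main

type item struct{ keep bool }

// filterInPlace keeps matching elements and zeroes the tail so removed
// pointers do not linger in the shared backing array.
func filterInPlace(ptrs []*item) []*item {
	dst := ptrs[:0]
	for _, p := range ptrs {
		if p.keep {
			dst = append(dst, p)
		}
	}
	for i := len(dst); i < len(ptrs); i++ {
		ptrs[i] = nil
	}
	return dst
}

func main() {
	items := []*item{{keep: true}, {keep: false}, {keep: true}}
	items = filterInPlace(items) // len(items) == 2
	_ = items
}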
func (pt *partition) startStalePartsRemover() {
- pt.stalePartsRemoverWG.Add(1)
+ pt.wg.Add(1)
go func() {
pt.stalePartsRemover()
- pt.stalePartsRemoverWG.Done()
+ pt.wg.Done()
}()
}
@@ -1372,9 +1613,9 @@ func (pt *partition) removeStaleParts() {
retentionDeadline := timestampFromTime(startTime) - pt.s.retentionMsecs
pt.partsLock.Lock()
- for _, pw := range pt.bigParts {
+ for _, pw := range pt.inmemoryParts {
if !pw.isInMerge && pw.p.ph.MaxTimestamp < retentionDeadline {
- atomic.AddUint64(&pt.bigRowsDeleted, pw.p.ph.RowsCount)
+ atomic.AddUint64(&pt.inmemoryRowsDeleted, pw.p.ph.RowsCount)
m[pw] = true
}
}
@@ -1384,28 +1625,38 @@ func (pt *partition) removeStaleParts() {
m[pw] = true
}
}
+ for _, pw := range pt.bigParts {
+ if !pw.isInMerge && pw.p.ph.MaxTimestamp < retentionDeadline {
+ atomic.AddUint64(&pt.bigRowsDeleted, pw.p.ph.RowsCount)
+ m[pw] = true
+ }
+ }
+ removedInmemoryParts := 0
removedSmallParts := 0
removedBigParts := 0
if len(m) > 0 {
- pt.smallParts, removedSmallParts = removeParts(pt.smallParts, m, false)
- pt.bigParts, removedBigParts = removeParts(pt.bigParts, m, true)
+ pt.inmemoryParts, removedInmemoryParts = removeParts(pt.inmemoryParts, m)
+ pt.smallParts, removedSmallParts = removeParts(pt.smallParts, m)
+ pt.bigParts, removedBigParts = removeParts(pt.bigParts, m)
}
pt.partsLock.Unlock()
- if removedSmallParts+removedBigParts != len(m) {
- logger.Panicf("BUG: unexpected number of stale parts removed; got %d, want %d", removedSmallParts+removedBigParts, len(m))
+ removedParts := removedInmemoryParts + removedSmallParts + removedBigParts
+ if removedParts != len(m) {
+ logger.Panicf("BUG: unexpected number of stale parts removed; got %d, want %d", removedParts, len(m))
}
// Physically remove stale parts under snapshotLock in order to provide
// consistent snapshots with table.CreateSnapshot().
pt.snapshotLock.RLock()
for pw := range m {
- logger.Infof("removing part %q, since its data is out of the configured retention (%d secs)", pw.p.path, pt.s.retentionMsecs/1000)
- fs.MustRemoveDirAtomic(pw.p.path)
+ if pw.mp == nil {
+ logger.Infof("removing part %q, since its data is out of the configured retention (%d secs)", pw.p.path, pt.s.retentionMsecs/1000)
+ fs.MustRemoveDirAtomic(pw.p.path)
+ }
}
// There is no need in calling fs.MustSyncPath() on pt.smallPartsPath and pt.bigPartsPath,
// since they should be automatically called inside fs.MustRemoveDirAtomic().
-
pt.snapshotLock.RUnlock()
// Remove partition references from removed parts.
@@ -1481,16 +1732,7 @@ func appendPartsToMerge(dst, src []*partWrapper, maxPartsToMerge int, maxOutByte
src = tmp
needFreeSpace := skippedBigParts > 1
- // Sort src parts by size and backwards timestamp.
- // This should improve adjanced points' locality in the merged parts.
- sort.Slice(src, func(i, j int) bool {
- a := src[i].p
- b := src[j].p
- if a.size == b.size {
- return a.ph.MinTimestamp > b.ph.MinTimestamp
- }
- return a.size < b.size
- })
+ sortPartsForOptimalMerge(src)
maxSrcParts := maxPartsToMerge
if maxSrcParts > len(src) {
@@ -1541,6 +1783,19 @@ func appendPartsToMerge(dst, src []*partWrapper, maxPartsToMerge int, maxOutByte
return append(dst, pws...), needFreeSpace
}
+func sortPartsForOptimalMerge(pws []*partWrapper) {
+ // Sort src parts by size and backwards timestamp.
+ // This should improve adjacent points' locality in the merged parts.
+ sort.Slice(pws, func(i, j int) bool {
+ a := pws[i].p
+ b := pws[j].p
+ if a.size == b.size {
+ return a.ph.MinTimestamp > b.ph.MinTimestamp
+ }
+ return a.size < b.size
+ })
+}
+
func getPartsSize(pws []*partWrapper) uint64 {
n := uint64(0)
for _, pw := range pws {
@@ -1557,7 +1812,7 @@ func openParts(pathPrefix1, pathPrefix2, path string) ([]*partWrapper, error) {
fs.MustRemoveTemporaryDirs(path)
d, err := os.Open(path)
if err != nil {
- return nil, fmt.Errorf("cannot open directory %q: %w", path, err)
+ return nil, fmt.Errorf("cannot open partition directory: %w", err)
}
defer fs.MustClose(d)
@@ -1639,10 +1894,7 @@ func (pt *partition) CreateSnapshotAt(smallPath, bigPath string) error {
startTime := time.Now()
// Flush inmemory data to disk.
- pt.flushRawRows(true)
- if _, err := pt.flushInmemoryParts(nil, true); err != nil {
- return fmt.Errorf("cannot flush inmemory parts: %w", err)
- }
+ pt.flushInmemoryRows()
// The snapshot must be created under the lock in order to prevent from
// concurrent modifications via runTransaction.
@@ -1668,13 +1920,13 @@ func (pt *partition) createSnapshot(srcDir, dstDir string) error {
d, err := os.Open(srcDir)
if err != nil {
- return fmt.Errorf("cannot open difrectory: %w", err)
+ return fmt.Errorf("cannot open partition directory: %w", err)
}
defer fs.MustClose(d)
fis, err := d.Readdir(-1)
if err != nil {
- return fmt.Errorf("cannot read directory: %w", err)
+ return fmt.Errorf("cannot read partition directory: %w", err)
}
for _, fi := range fis {
fn := fi.Name()
@@ -1723,7 +1975,7 @@ func runTransactions(txnLock *sync.RWMutex, pathPrefix1, pathPrefix2, path strin
if os.IsNotExist(err) {
return nil
}
- return fmt.Errorf("cannot open %q: %w", txnDir, err)
+ return fmt.Errorf("cannot open transaction directory: %w", err)
}
defer fs.MustClose(d)
@@ -1787,30 +2039,32 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix1, pathPrefix2, txnPath str
// Move the new part to new directory.
srcPath := mvPaths[0]
dstPath := mvPaths[1]
- srcPath, err = validatePath(pathPrefix1, pathPrefix2, srcPath)
- if err != nil {
- return fmt.Errorf("invalid source path to rename: %w", err)
- }
- if len(dstPath) > 0 {
- // Move srcPath to dstPath.
- dstPath, err = validatePath(pathPrefix1, pathPrefix2, dstPath)
+ if len(srcPath) > 0 {
+ srcPath, err = validatePath(pathPrefix1, pathPrefix2, srcPath)
if err != nil {
- return fmt.Errorf("invalid destination path to rename: %w", err)
+ return fmt.Errorf("invalid source path to rename: %w", err)
}
- if fs.IsPathExist(srcPath) {
- if err := os.Rename(srcPath, dstPath); err != nil {
- return fmt.Errorf("cannot rename %q to %q: %w", srcPath, dstPath, err)
+ if len(dstPath) > 0 {
+ // Move srcPath to dstPath.
+ dstPath, err = validatePath(pathPrefix1, pathPrefix2, dstPath)
+ if err != nil {
+ return fmt.Errorf("invalid destination path to rename: %w", err)
}
- } else if !fs.IsPathExist(dstPath) {
- // Emit info message for the expected condition after unclean shutdown on NFS disk.
- // The dstPath part may be missing because it could be already merged into bigger part
- // while old source parts for the current txn weren't still deleted due to NFS locks.
- logger.Infof("cannot find both source and destination paths: %q -> %q; this may be the case after unclean shutdown (OOM, `kill -9`, hard reset) on NFS disk",
- srcPath, dstPath)
+ if fs.IsPathExist(srcPath) {
+ if err := os.Rename(srcPath, dstPath); err != nil {
+ return fmt.Errorf("cannot rename %q to %q: %w", srcPath, dstPath, err)
+ }
+ } else if !fs.IsPathExist(dstPath) {
+ // Emit info message for the expected condition after unclean shutdown on NFS disk.
+ // The dstPath part may be missing because it could be already merged into bigger part
+ // while old source parts for the current txn weren't still deleted due to NFS locks.
+ logger.Infof("cannot find both source and destination paths: %q -> %q; this may be the case after unclean shutdown "+
+ "(OOM, `kill -9`, hard reset) on NFS disk", srcPath, dstPath)
+ }
+ } else {
+ // Just remove srcPath.
+ fs.MustRemoveDirAtomic(srcPath)
}
- } else {
- // Just remove srcPath.
- fs.MustRemoveDirAtomic(srcPath)
}
// Flush pathPrefix* directory metadata to the underlying storage,
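For readability, here is a standalone sketch of the rename-or-remove decision that the restructured runTransaction hunk above implements: an empty source path is now tolerated, a present destination triggers a rename when the source still exists, a missing source/destination pair is only logged because it is expected after an unclean shutdown on NFS, and an empty destination means the source is simply removed. The helper name and the plain os/log calls are illustrative stand-ins, not the project's actual fs helpers.

```go
package sketch

import (
	"fmt"
	"log"
	"os"
)

// applyMovePaths mirrors the restructured transaction logic in simplified form.
func applyMovePaths(srcPath, dstPath string) error {
	if len(srcPath) == 0 {
		// Nothing to move or remove for this transaction entry.
		return nil
	}
	if len(dstPath) == 0 {
		// No destination given: just remove the source part directory.
		return os.RemoveAll(srcPath)
	}
	if _, err := os.Stat(srcPath); err == nil {
		// Both paths are known and the source still exists: move it.
		if err := os.Rename(srcPath, dstPath); err != nil {
			return fmt.Errorf("cannot rename %q to %q: %w", srcPath, dstPath, err)
		}
		return nil
	}
	if _, err := os.Stat(dstPath); err != nil {
		// Neither path exists; expected after an unclean shutdown on NFS,
		// so log instead of failing the whole transaction.
		log.Printf("cannot find both source and destination paths: %q -> %q", srcPath, dstPath)
	}
	return nil
}
```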
diff --git a/lib/storage/partition_search_test.go b/lib/storage/partition_search_test.go
index 9649770dfd..afd5b7fce6 100644
--- a/lib/storage/partition_search_test.go
+++ b/lib/storage/partition_search_test.go
@@ -181,11 +181,12 @@ func testPartitionSearchEx(t *testing.T, ptt int64, tr TimeRange, partsCount, ma
t.Fatalf("cannot remove big parts directory: %s", err)
}
}()
+ var tmpRows []rawRow
for _, rows := range rowss {
pt.AddRows(rows)
- // Flush just added rows to a separate partition.
- pt.flushRawRows(true)
+ // Flush just added rows to separate partitions.
+ tmpRows = pt.flushPendingRows(tmpRows[:0], true)
}
testPartitionSearch(t, pt, tsids, tr, rbsExpected, -1)
pt.MustClose()
@@ -232,8 +233,7 @@ func testPartitionSearchSerial(pt *partition, tsids []TSID, tr TimeRange, rbsExp
// due to the race with raw rows flusher.
var m partitionMetrics
pt.UpdateMetrics(&m)
- rowsCount := m.BigRowsCount + m.SmallRowsCount
- if rowsCount != uint64(rowsCountExpected) {
+ if rowsCount := m.TotalRowsCount(); rowsCount != uint64(rowsCountExpected) {
return fmt.Errorf("unexpected rows count; got %d; want %d", rowsCount, rowsCountExpected)
}
}
@@ -258,8 +258,7 @@ func testPartitionSearchSerial(pt *partition, tsids []TSID, tr TimeRange, rbsExp
if rowsCountExpected >= 0 {
var m partitionMetrics
pt.UpdateMetrics(&m)
- rowsCount := m.BigRowsCount + m.SmallRowsCount
- if rowsCount != uint64(rowsCountExpected) {
+ if rowsCount := m.TotalRowsCount(); rowsCount != uint64(rowsCountExpected) {
return fmt.Errorf("unexpected rows count after search; got %d; want %d", rowsCount, rowsCountExpected)
}
}
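Several hunks in this test file, and in the table tests further below, replace the ad-hoc `m.BigRowsCount + m.SmallRowsCount` sum with `m.TotalRowsCount()`. The real accessor lives on the partition/table metrics types in this commit; the following is only a minimal sketch of what such a method looks like, assuming the metrics now also carry an in-memory rows counter (field names here are illustrative).

```go
// partitionMetricsSketch is a simplified stand-in for the real metrics struct.
type partitionMetricsSketch struct {
	InmemoryRowsCount uint64
	SmallRowsCount    uint64
	BigRowsCount      uint64
}

// TotalRowsCount returns the number of rows across all part types, so callers
// no longer need to remember which counters to add up.
func (m *partitionMetricsSketch) TotalRowsCount() uint64 {
	return m.InmemoryRowsCount + m.SmallRowsCount + m.BigRowsCount
}
```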
diff --git a/lib/storage/raw_row.go b/lib/storage/raw_row.go
index b1d978d333..206805d628 100644
--- a/lib/storage/raw_row.go
+++ b/lib/storage/raw_row.go
@@ -86,7 +86,10 @@ func (rrm *rawRowsMarshaler) marshalToInmemoryPart(mp *inmemoryPart, rows []rawR
logger.Panicf("BUG: rows count must be smaller than 2^32; got %d", len(rows))
}
- rrm.bsw.InitFromInmemoryPart(mp)
+ // Use the minimum compression level for first-level in-memory blocks,
+ // since they are going to be re-compressed during subsequent merges.
+ const compressLevel = -5 // See https://github.com/facebook/zstd/releases/tag/v1.3.4
+ rrm.bsw.InitFromInmemoryPart(mp, compressLevel)
ph := &mp.ph
ph.Reset()
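The comment in the hunk above relies on zstd's support for negative (fast) compression levels: level -5 favors speed over ratio, which is acceptable for first-level in-memory blocks since they get re-compressed during later merges anyway. As a hedged illustration, a per-size choice of level might look like the sketch below; the function name and thresholds are made up and do not reflect the project's actual compression-level logic.

```go
// compressLevelForRows picks a zstd compression level from the amount of data
// in a block: small first-level blocks use a fast negative level, while larger
// blocks produced by merges use progressively stronger compression.
func compressLevelForRows(rowsCount uint64) int {
	switch {
	case rowsCount <= 1<<10:
		return -5 // fastest; see https://github.com/facebook/zstd/releases/tag/v1.3.4
	case rowsCount <= 1<<20:
		return 1
	default:
		return 3
	}
}
```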
diff --git a/lib/storage/storage.go b/lib/storage/storage.go
index 1a50316f3c..1b686140bb 100644
--- a/lib/storage/storage.go
+++ b/lib/storage/storage.go
@@ -306,7 +306,7 @@ func (s *Storage) updateDeletedMetricIDs(metricIDs *uint64set.Set) {
// DebugFlush flushes recently added storage data, so it becomes visible to search.
func (s *Storage) DebugFlush() {
- s.tb.flushRawRows()
+ s.tb.flushPendingRows()
s.idb().tb.DebugFlush()
}
@@ -378,13 +378,13 @@ func (s *Storage) ListSnapshots() ([]string, error) {
snapshotsPath := s.path + "/snapshots"
d, err := os.Open(snapshotsPath)
if err != nil {
- return nil, fmt.Errorf("cannot open %q: %w", snapshotsPath, err)
+ return nil, fmt.Errorf("cannot open snapshots directory: %w", err)
}
defer fs.MustClose(d)
fnames, err := d.Readdirnames(-1)
if err != nil {
- return nil, fmt.Errorf("cannot read contents of %q: %w", snapshotsPath, err)
+ return nil, fmt.Errorf("cannot read snapshots directory at %q: %w", snapshotsPath, err)
}
snapshotNames := make([]string, 0, len(fnames))
for _, fname := range fnames {
@@ -2070,12 +2070,7 @@ func (s *Storage) updatePerDateData(rows []rawRow, mrs []*MetricRow) error {
continue
}
mn.sortTags()
- if err := is.createPerDayIndexes(date, metricID, mn); err != nil {
- if firstError == nil {
- firstError = fmt.Errorf("error when storing per-date inverted index for (date=%s, metricID=%d): %w", dateToString(date), metricID, err)
- }
- continue
- }
+ is.createPerDayIndexes(date, metricID, mn)
}
dateMetricIDsForCache = append(dateMetricIDsForCache, dateMetricID{
date: date,
diff --git a/lib/storage/storage_test.go b/lib/storage/storage_test.go
index d4d330a7e7..4dbc627007 100644
--- a/lib/storage/storage_test.go
+++ b/lib/storage/storage_test.go
@@ -454,7 +454,7 @@ func TestStorageOpenMultipleTimes(t *testing.T) {
func TestStorageRandTimestamps(t *testing.T) {
path := "TestStorageRandTimestamps"
- retentionMsecs := int64(60 * msecsPerMonth)
+ retentionMsecs := int64(10 * msecsPerMonth)
s, err := OpenStorage(path, retentionMsecs, 0, 0)
if err != nil {
t.Fatalf("cannot open storage: %s", err)
@@ -462,10 +462,13 @@ func TestStorageRandTimestamps(t *testing.T) {
t.Run("serial", func(t *testing.T) {
for i := 0; i < 3; i++ {
if err := testStorageRandTimestamps(s); err != nil {
- t.Fatal(err)
+ t.Fatalf("error on iteration %d: %s", i, err)
}
s.MustClose()
s, err = OpenStorage(path, retentionMsecs, 0, 0)
+ if err != nil {
+ t.Fatalf("cannot open storage on iteration %d: %s", i, err)
+ }
}
})
t.Run("concurrent", func(t *testing.T) {
@@ -479,14 +482,15 @@ func TestStorageRandTimestamps(t *testing.T) {
ch <- err
}()
}
+ tt := time.NewTimer(time.Second * 10)
for i := 0; i < cap(ch); i++ {
select {
case err := <-ch:
if err != nil {
- t.Fatal(err)
+ t.Fatalf("error on iteration %d: %s", i, err)
}
- case <-time.After(time.Second * 10):
- t.Fatal("timeout")
+ case <-tt.C:
+ t.Fatalf("timeout on iteration %d", i)
}
}
})
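The change above swaps `time.After` inside the select loop for a single `time.NewTimer`. This avoids allocating a fresh timer on every iteration and turns the timeout into one overall deadline for collecting all worker results rather than a per-result timeout. A self-contained sketch of the same pattern, with illustrative names:

```go
package sketch

import (
	"fmt"
	"time"
)

// collectErrors waits for one result from each of n workers on ch, using a
// single timer as an overall deadline instead of re-arming time.After on
// every loop iteration.
func collectErrors(ch <-chan error, n int, deadline time.Duration) error {
	tt := time.NewTimer(deadline)
	defer tt.Stop()
	for i := 0; i < n; i++ {
		select {
		case err := <-ch:
			if err != nil {
				return fmt.Errorf("error on iteration %d: %w", i, err)
			}
		case <-tt.C:
			return fmt.Errorf("timeout on iteration %d", i)
		}
	}
	return nil
}
```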
@@ -497,9 +501,9 @@ func TestStorageRandTimestamps(t *testing.T) {
}
func testStorageRandTimestamps(s *Storage) error {
- const rowsPerAdd = 1e3
- const addsCount = 2
- typ := reflect.TypeOf(int64(0))
+ currentTime := timestampFromTime(time.Now())
+ const rowsPerAdd = 5e3
+ const addsCount = 3
rnd := rand.New(rand.NewSource(1))
for i := 0; i < addsCount; i++ {
@@ -512,15 +516,8 @@ func testStorageRandTimestamps(s *Storage) error {
for j := 0; j < rowsPerAdd; j++ {
mn.MetricGroup = []byte(fmt.Sprintf("metric_%d", rand.Intn(100)))
metricNameRaw := mn.marshalRaw(nil)
- timestamp := int64(rnd.NormFloat64() * 1e12)
- if j%2 == 0 {
- ts, ok := quick.Value(typ, rnd)
- if !ok {
- return fmt.Errorf("cannot create random timestamp via quick.Value")
- }
- timestamp = ts.Interface().(int64)
- }
- value := rnd.NormFloat64() * 1e12
+ timestamp := currentTime - int64((rnd.Float64()-0.2)*float64(2*s.retentionMsecs))
+ value := rnd.NormFloat64() * 1e11
mr := MetricRow{
MetricNameRaw: metricNameRaw,
@@ -540,8 +537,8 @@ func testStorageRandTimestamps(s *Storage) error {
// Verify the storage contains rows.
var m Metrics
s.UpdateMetrics(&m)
- if m.TableMetrics.SmallRowsCount == 0 {
- return fmt.Errorf("expecting at least one row in the table")
+ if rowsCount := m.TableMetrics.TotalRowsCount(); rowsCount == 0 {
+ return fmt.Errorf("expecting at least one row in storage")
}
return nil
}
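The rewritten generator above no longer draws arbitrary int64 timestamps via testing/quick. Instead, `currentTime - int64((rnd.Float64()-0.2)*float64(2*s.retentionMsecs))` spreads timestamps from roughly 1.6 retention periods in the past to 0.4 retention periods in the future, so the test exercises both rows inside the retention window and rows that fall outside it. The same idea as a small sketch (the helper name is hypothetical):

```go
package sketch

import "math/rand"

// randTimestampAroundNow returns a timestamp between about 1.6 retention
// periods in the past and 0.4 retention periods in the future, mirroring the
// expression used in the test above.
func randTimestampAroundNow(rnd *rand.Rand, nowMsecs, retentionMsecs int64) int64 {
	offset := int64((rnd.Float64() - 0.2) * float64(2*retentionMsecs))
	return nowMsecs - offset
}
```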
@@ -592,14 +589,15 @@ func TestStorageDeleteSeries(t *testing.T) {
ch <- err
}(i)
}
+ tt := time.NewTimer(30 * time.Second)
for i := 0; i < cap(ch); i++ {
select {
case err := <-ch:
if err != nil {
- t.Fatalf("unexpected error: %s", err)
+ t.Fatalf("unexpected error on iteration %d: %s", i, err)
}
- case <-time.After(30 * time.Second):
- t.Fatalf("timeout")
+ case <-tt.C:
+ t.Fatalf("timeout on iteration %d", i)
}
}
})
@@ -932,7 +930,8 @@ func testStorageRegisterMetricNames(s *Storage) error {
func TestStorageAddRowsSerial(t *testing.T) {
path := "TestStorageAddRowsSerial"
- s, err := OpenStorage(path, 0, 1e5, 1e5)
+ retentionMsecs := int64(msecsPerMonth * 10)
+ s, err := OpenStorage(path, retentionMsecs, 1e5, 1e5)
if err != nil {
t.Fatalf("cannot open storage: %s", err)
}
@@ -947,7 +946,8 @@ func TestStorageAddRowsSerial(t *testing.T) {
func TestStorageAddRowsConcurrent(t *testing.T) {
path := "TestStorageAddRowsConcurrent"
- s, err := OpenStorage(path, 0, 1e5, 1e5)
+ retentionMsecs := int64(msecsPerMonth * 10)
+ s, err := OpenStorage(path, retentionMsecs, 1e5, 1e5)
if err != nil {
t.Fatalf("cannot open storage: %s", err)
}
@@ -1000,8 +1000,10 @@ func testStorageAddRows(s *Storage) error {
const rowsPerAdd = 1e3
const addsCount = 10
+ maxTimestamp := timestampFromTime(time.Now())
+ minTimestamp := maxTimestamp - s.retentionMsecs
for i := 0; i < addsCount; i++ {
- mrs := testGenerateMetricRows(rowsPerAdd, 0, 1e10)
+ mrs := testGenerateMetricRows(rowsPerAdd, minTimestamp, maxTimestamp)
if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
return fmt.Errorf("unexpected error when adding mrs: %w", err)
}
@@ -1011,8 +1013,8 @@ func testStorageAddRows(s *Storage) error {
minRowsExpected := uint64(rowsPerAdd * addsCount)
var m Metrics
s.UpdateMetrics(&m)
- if m.TableMetrics.SmallRowsCount < minRowsExpected {
- return fmt.Errorf("expecting at least %d rows in the table; got %d", minRowsExpected, m.TableMetrics.SmallRowsCount)
+ if rowsCount := m.TableMetrics.TotalRowsCount(); rowsCount < minRowsExpected {
+ return fmt.Errorf("expecting at least %d rows in the table; got %d", minRowsExpected, rowsCount)
}
// Try creating a snapshot from the storage.
@@ -1040,8 +1042,8 @@ func testStorageAddRows(s *Storage) error {
// Verify the snapshot contains rows
var m1 Metrics
s1.UpdateMetrics(&m1)
- if m1.TableMetrics.SmallRowsCount < minRowsExpected {
- return fmt.Errorf("snapshot %q must contain at least %d rows; got %d", snapshotPath, minRowsExpected, m1.TableMetrics.SmallRowsCount)
+ if rowsCount := m1.TableMetrics.TotalRowsCount(); rowsCount < minRowsExpected {
+ return fmt.Errorf("snapshot %q must contain at least %d rows; got %d", snapshotPath, minRowsExpected, rowsCount)
}
// Verify that force merge for the snapshot leaves only a single part per partition.
@@ -1155,22 +1157,25 @@ func testStorageAddMetrics(s *Storage, workerNum int) error {
minRowsExpected := uint64(rowsCount)
var m Metrics
s.UpdateMetrics(&m)
- if m.TableMetrics.SmallRowsCount < minRowsExpected {
- return fmt.Errorf("expecting at least %d rows in the table; got %d", minRowsExpected, m.TableMetrics.SmallRowsCount)
+ if rowsCount := m.TableMetrics.TotalRowsCount(); rowsCount < minRowsExpected {
+ return fmt.Errorf("expecting at least %d rows in the table; got %d", minRowsExpected, rowsCount)
}
return nil
}
func TestStorageDeleteStaleSnapshots(t *testing.T) {
path := "TestStorageDeleteStaleSnapshots"
- s, err := OpenStorage(path, 0, 1e5, 1e5)
+ retentionMsecs := int64(msecsPerMonth * 10)
+ s, err := OpenStorage(path, retentionMsecs, 1e5, 1e5)
if err != nil {
t.Fatalf("cannot open storage: %s", err)
}
const rowsPerAdd = 1e3
const addsCount = 10
+ maxTimestamp := timestampFromTime(time.Now())
+ minTimestamp := maxTimestamp - s.retentionMsecs
for i := 0; i < addsCount; i++ {
- mrs := testGenerateMetricRows(rowsPerAdd, 0, 1e10)
+ mrs := testGenerateMetricRows(rowsPerAdd, minTimestamp, maxTimestamp)
if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
t.Fatalf("unexpected error when adding mrs: %s", err)
}
diff --git a/lib/storage/table.go b/lib/storage/table.go
index 5b7ffbce71..5eca5e4a3a 100644
--- a/lib/storage/table.go
+++ b/lib/storage/table.go
@@ -215,15 +215,16 @@ func (tb *table) MustClose() {
}
}
-// flushRawRows flushes all the pending rows, so they become visible to search.
+// flushPendingRows flushes all the pending raw rows, so they become visible to search.
//
// This function is for debug purposes only.
-func (tb *table) flushRawRows() {
+func (tb *table) flushPendingRows() {
ptws := tb.GetPartitions(nil)
defer tb.PutPartitions(ptws)
+ var rows []rawRow
for _, ptw := range ptws {
- ptw.pt.flushRawRows(true)
+ rows = ptw.pt.flushPendingRows(rows[:0], true)
}
}
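`flushPendingRows` now takes a destination slice and returns it, so the caller above can reuse a single `[]rawRow` buffer across all partitions (`rows = ptw.pt.flushPendingRows(rows[:0], true)`) instead of letting every partition allocate its own. This is the usual Go idiom of appending into a caller-provided buffer; a generic sketch with illustrative types:

```go
// flushInto appends all pending items from each source into dst and returns
// the grown slice. Calling it as dst = flushInto(dst[:0], ...) lets the same
// backing array be reused across calls.
func flushInto(dst []int, sources ...[]int) []int {
	for _, src := range sources {
		dst = append(dst, src...)
	}
	return dst
}
```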
@@ -524,7 +525,7 @@ func openPartitions(smallPartitionsPath, bigPartitionsPath string, s *Storage) (
func populatePartitionNames(partitionsPath string, ptNames map[string]bool) error {
d, err := os.Open(partitionsPath)
if err != nil {
- return fmt.Errorf("cannot open directory with partitions %q: %w", partitionsPath, err)
+ return fmt.Errorf("cannot open directory with partitions: %w", err)
}
defer fs.MustClose(d)
diff --git a/lib/storage/table_search_test.go b/lib/storage/table_search_test.go
index c9b1119dcf..fb28939d62 100644
--- a/lib/storage/table_search_test.go
+++ b/lib/storage/table_search_test.go
@@ -35,7 +35,7 @@ func TestTableSearch(t *testing.T) {
MinTimestamp: trData.MinTimestamp + 4e3,
MaxTimestamp: trData.MaxTimestamp - 4e3,
}
- testTableSearchEx(t, trData, trSearch, 12, 100, 1, 10)
+ testTableSearchEx(t, trData, trSearch, 12, 20, 1, 10)
})
t.Run("SingleTSID", func(t *testing.T) {
@@ -51,7 +51,7 @@ func TestTableSearch(t *testing.T) {
MinTimestamp: trData.MinTimestamp + 4e3,
MaxTimestamp: trData.MaxTimestamp - 4e3,
}
- testTableSearchEx(t, trData, trSearch, 60, 20, 30, 20)
+ testTableSearchEx(t, trData, trSearch, 20, 10, 30, 20)
})
t.Run("ManyTSIDs", func(t *testing.T) {
@@ -197,7 +197,7 @@ func testTableSearchEx(t *testing.T, trData, trSearch TimeRange, partitionsCount
}
// Flush rows to parts.
- tb.flushRawRows()
+ tb.flushPendingRows()
}
testTableSearch(t, tb, tsids, trSearch, rbsExpected, -1)
tb.MustClose()
@@ -244,8 +244,7 @@ func testTableSearchSerial(tb *table, tsids []TSID, tr TimeRange, rbsExpected []
// they may race with raw rows flusher.
var m TableMetrics
tb.UpdateMetrics(&m)
- rowsCount := m.BigRowsCount + m.SmallRowsCount
- if rowsCount != uint64(rowsCountExpected) {
+ if rowsCount := m.TotalRowsCount(); rowsCount != uint64(rowsCountExpected) {
return fmt.Errorf("unexpected rows count in the table; got %d; want %d", rowsCount, rowsCountExpected)
}
}
@@ -270,8 +269,7 @@ func testTableSearchSerial(tb *table, tsids []TSID, tr TimeRange, rbsExpected []
if rowsCountExpected >= 0 {
var m TableMetrics
tb.UpdateMetrics(&m)
- rowsCount := m.BigRowsCount + m.SmallRowsCount
- if rowsCount != uint64(rowsCountExpected) {
+ if rowsCount := m.TotalRowsCount(); rowsCount != uint64(rowsCountExpected) {
return fmt.Errorf("unexpected rows count in the table; got %d; want %d", rowsCount, rowsCountExpected)
}
}
diff --git a/lib/storage/table_search_timing_test.go b/lib/storage/table_search_timing_test.go
index cf046c5137..eb1a72b67f 100644
--- a/lib/storage/table_search_timing_test.go
+++ b/lib/storage/table_search_timing_test.go
@@ -55,9 +55,8 @@ func openBenchTable(b *testing.B, startTimestamp int64, rowsPerInsert, rowsCount
rowsCountExpected := insertsCount * uint64(rowsPerInsert)
var m TableMetrics
tb.UpdateMetrics(&m)
- rowsCountActual := m.BigRowsCount + m.SmallRowsCount
- if rowsCountActual != rowsCountExpected {
- b.Fatalf("unexpected rows count in the table %q; got %d; want %d", path, rowsCountActual, rowsCountExpected)
+ if rowsCount := m.TotalRowsCount(); rowsCount != rowsCountExpected {
+ b.Fatalf("unexpected rows count in the table %q; got %d; want %d", path, rowsCount, rowsCountExpected)
}
return tb
diff --git a/lib/storage/table_timing_test.go b/lib/storage/table_timing_test.go
index 0f7ae00d1c..11a3766fdd 100644
--- a/lib/storage/table_timing_test.go
+++ b/lib/storage/table_timing_test.go
@@ -101,8 +101,7 @@ func benchmarkTableAddRows(b *testing.B, rowsPerInsert, tsidsCount int) {
}
var m TableMetrics
tb.UpdateMetrics(&m)
- rowsCount := m.BigRowsCount + m.SmallRowsCount
- if rowsCount != uint64(rowsCountExpected) {
+ if rowsCount := m.TotalRowsCount(); rowsCount != uint64(rowsCountExpected) {
b.Fatalf("unexpected rows count in the final table %q: got %d; want %d", tablePath, rowsCount, rowsCountExpected)
}
tb.MustClose()
diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go
index 5ac4a843e1..c9ba91825c 100644
--- a/vendor/cloud.google.com/go/compute/internal/version.go
+++ b/vendor/cloud.google.com/go/compute/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.12.1"
+const Version = "1.14.0"
diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
index 8631b6d6d2..6e3ee8d6ab 100644
--- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
+++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
@@ -1,5 +1,12 @@
# Changes
+## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430)
+
## [0.1.0] (2022-10-26)
Initial release of metadata being it's own module.
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
index 50538b1d34..d4aad9bf39 100644
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -70,6 +70,7 @@ func newDefaultHTTPClient() *http.Client {
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
+ IdleConnTimeout: 60 * time.Second,
},
Timeout: 5 * time.Second,
}
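The vendored metadata client now sets `IdleConnTimeout` on its transport, so idle keep-alive connections to the metadata server are closed after a minute rather than lingering for the life of the process. A standalone sketch of the resulting client construction, with the timeout values taken from the hunk above (this is not the library's exported API):

```go
package sketch

import (
	"net"
	"net/http"
	"time"
)

// newMetadataHTTPClient builds an HTTP client with short dial and overall
// timeouts plus an idle-connection timeout, matching the hunk above.
func newMetadataHTTPClient() *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			Dial: (&net.Dialer{
				Timeout:   2 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
			IdleConnTimeout: 60 * time.Second,
		},
		Timeout: 5 * time.Second,
	}
}
```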
diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md
index b0a46ed978..ced217827b 100644
--- a/vendor/cloud.google.com/go/iam/CHANGES.md
+++ b/vendor/cloud.google.com/go/iam/CHANGES.md
@@ -1,5 +1,12 @@
# Changes
+## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.7.0...iam/v0.8.0) (2022-12-05)
+
+
+### Features
+
+* **iam:** Start generating and refresh some libraries ([#7089](https://github.com/googleapis/google-cloud-go/issues/7089)) ([a9045ff](https://github.com/googleapis/google-cloud-go/commit/a9045ff191a711089c37f1d94a63522d9939ce38))
+
## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.6.0...iam/v0.7.0) (2022-11-03)
diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
similarity index 99%
rename from vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go
rename to vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
index 6fbf54f448..2793098aab 100644
--- a/vendor/google.golang.org/genproto/googleapis/iam/v1/iam_policy.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
@@ -15,10 +15,10 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.12.2
+// protoc v3.21.5
// source: google/iam/v1/iam_policy.proto
-package iam
+package iampb
import (
context "context"
diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
similarity index 99%
rename from vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go
rename to vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
index abea46d9bc..835f217199 100644
--- a/vendor/google.golang.org/genproto/googleapis/iam/v1/options.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
@@ -15,10 +15,10 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.12.2
+// protoc v3.21.5
// source: google/iam/v1/options.proto
-package iam
+package iampb
import (
reflect "reflect"
diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
similarity index 94%
rename from vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go
rename to vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
index 5869d92070..ec7777a768 100644
--- a/vendor/google.golang.org/genproto/googleapis/iam/v1/policy.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
@@ -15,10 +15,10 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.12.2
+// protoc v3.21.5
// source: google/iam/v1/policy.proto
-package iam
+package iampb
import (
reflect "reflect"
@@ -279,11 +279,11 @@ type Policy struct {
// Any operation that affects conditional role bindings must specify version
// `3`. This requirement applies to the following operations:
//
- // * Getting a policy that includes a conditional role binding
- // * Adding a conditional role binding to a policy
- // * Changing a conditional role binding in a policy
- // * Removing any role binding, with or without a condition, from a policy
- // that includes conditions
+ // - Getting a policy that includes a conditional role binding
+ // - Adding a conditional role binding to a policy
+ // - Changing a conditional role binding in a policy
+ // - Removing any role binding, with or without a condition, from a policy
+ // that includes conditions
//
// **Important:** If you use IAM Conditions, you must include the `etag` field
// whenever you call `setIamPolicy`. If you omit this field, then IAM allows
@@ -396,47 +396,43 @@ type Binding struct {
// Specifies the principals requesting access for a Cloud Platform resource.
// `members` can have the following values:
//
- // * `allUsers`: A special identifier that represents anyone who is
- // on the internet; with or without a Google account.
+ // - `allUsers`: A special identifier that represents anyone who is
+ // on the internet; with or without a Google account.
//
- // * `allAuthenticatedUsers`: A special identifier that represents anyone
- // who is authenticated with a Google account or a service account.
+ // - `allAuthenticatedUsers`: A special identifier that represents anyone
+ // who is authenticated with a Google account or a service account.
//
- // * `user:{emailid}`: An email address that represents a specific Google
- // account. For example, `alice@example.com` .
+ // - `user:{emailid}`: An email address that represents a specific Google
+ // account. For example, `alice@example.com` .
//
+ // - `serviceAccount:{emailid}`: An email address that represents a service
+ // account. For example, `my-other-app@appspot.gserviceaccount.com`.
//
- // * `serviceAccount:{emailid}`: An email address that represents a service
- // account. For example, `my-other-app@appspot.gserviceaccount.com`.
+ // - `group:{emailid}`: An email address that represents a Google group.
+ // For example, `admins@example.com`.
//
- // * `group:{emailid}`: An email address that represents a Google group.
- // For example, `admins@example.com`.
+ // - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
+ // identifier) representing a user that has been recently deleted. For
+ // example, `alice@example.com?uid=123456789012345678901`. If the user is
+ // recovered, this value reverts to `user:{emailid}` and the recovered user
+ // retains the role in the binding.
//
- // * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
- // identifier) representing a user that has been recently deleted. For
- // example, `alice@example.com?uid=123456789012345678901`. If the user is
- // recovered, this value reverts to `user:{emailid}` and the recovered user
- // retains the role in the binding.
- //
- // * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
- // unique identifier) representing a service account that has been recently
- // deleted. For example,
- // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
- // If the service account is undeleted, this value reverts to
- // `serviceAccount:{emailid}` and the undeleted service account retains the
- // role in the binding.
- //
- // * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
- // identifier) representing a Google group that has been recently
- // deleted. For example, `admins@example.com?uid=123456789012345678901`. If
- // the group is recovered, this value reverts to `group:{emailid}` and the
- // recovered group retains the role in the binding.
- //
- //
- // * `domain:{domain}`: The G Suite domain (primary) that represents all the
- // users of that domain. For example, `google.com` or `example.com`.
+ // - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
+ // unique identifier) representing a service account that has been recently
+ // deleted. For example,
+ // `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
+ // If the service account is undeleted, this value reverts to
+ // `serviceAccount:{emailid}` and the undeleted service account retains the
+ // role in the binding.
//
+ // - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
+ // identifier) representing a Google group that has been recently
+ // deleted. For example, `admins@example.com?uid=123456789012345678901`. If
+ // the group is recovered, this value reverts to `group:{emailid}` and the
+ // recovered group retains the role in the binding.
//
+ // - `domain:{domain}`: The G Suite domain (primary) that represents all the
+ // users of that domain. For example, `google.com` or `example.com`.
Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
// The condition that is associated with this binding.
//
diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md
index 580202cf84..f12da250ef 100644
--- a/vendor/cloud.google.com/go/storage/CHANGES.md
+++ b/vendor/cloud.google.com/go/storage/CHANGES.md
@@ -1,6 +1,13 @@
# Changes
+## [1.28.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.0...storage/v1.28.1) (2022-12-02)
+
+
+### Bug Fixes
+
+* **storage:** downgrade some dependencies ([7540152](https://github.com/googleapis/google-cloud-go/commit/754015236d5af7c82a75da218b71a87b9ead6eb5))
+
## [1.28.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.27.0...storage/v1.28.0) (2022-11-03)
diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go
index f1e551b6fd..4a44cee8b6 100644
--- a/vendor/cloud.google.com/go/storage/grpc_client.go
+++ b/vendor/cloud.google.com/go/storage/grpc_client.go
@@ -792,14 +792,15 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
s := callSettings(c.settings, opts...)
obj := req.dstObject.attrs.toProtoObject("")
call := &storagepb.RewriteObjectRequest{
- SourceBucket: bucketResourceName(globalProjectAlias, req.srcObject.bucket),
- SourceObject: req.srcObject.name,
- RewriteToken: req.token,
- DestinationBucket: bucketResourceName(globalProjectAlias, req.dstObject.bucket),
- DestinationName: req.dstObject.name,
- Destination: obj,
- DestinationKmsKey: req.dstObject.keyName,
- DestinationPredefinedAcl: req.predefinedACL,
+ SourceBucket: bucketResourceName(globalProjectAlias, req.srcObject.bucket),
+ SourceObject: req.srcObject.name,
+ RewriteToken: req.token,
+ DestinationBucket: bucketResourceName(globalProjectAlias, req.dstObject.bucket),
+ DestinationName: req.dstObject.name,
+ Destination: obj,
+ DestinationKmsKey: req.dstObject.keyName,
+ DestinationPredefinedAcl: req.predefinedACL,
+ CommonObjectRequestParams: toProtoCommonObjectRequestParams(req.dstObject.encryptionKey),
}
// The userProject, whether source or destination project, is decided by the code calling the interface.
@@ -863,10 +864,10 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
}
b := bucketResourceName(globalProjectAlias, params.bucket)
- // TODO(noahdietz): Use encryptionKey to set relevant request fields.
req := &storagepb.ReadObjectRequest{
- Bucket: b,
- Object: params.object,
+ Bucket: b,
+ Object: params.object,
+ CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey),
}
// The default is a negative value, which means latest.
if params.gen >= 0 {
@@ -1008,8 +1009,6 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage
return
}
- // TODO(noahdietz): Send encryption key via CommonObjectRequestParams.
-
// The chunk buffer is full, but there is no end in sight. This
// means that a resumable upload will need to be used to send
// multiple chunks, until we are done reading data. Start a
@@ -1499,7 +1498,8 @@ func (w *gRPCWriter) startResumableUpload() error {
}
return run(w.ctx, func() error {
upres, err := w.c.raw.StartResumableWrite(w.ctx, &storagepb.StartResumableWriteRequest{
- WriteObjectSpec: spec,
+ WriteObjectSpec: spec,
+ CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey),
})
w.upid = upres.GetUploadId()
return err
@@ -1511,7 +1511,9 @@ func (w *gRPCWriter) startResumableUpload() error {
func (w *gRPCWriter) queryProgress() (int64, error) {
var persistedSize int64
err := run(w.ctx, func() error {
- q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{UploadId: w.upid})
+ q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{
+ UploadId: w.upid,
+ })
persistedSize = q.GetPersistedSize()
return err
}, w.settings.retry, true, setRetryHeaderGRPC(w.ctx))
@@ -1582,6 +1584,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
req.FirstMessage = &storagepb.WriteObjectRequest_WriteObjectSpec{
WriteObjectSpec: spec,
}
+ req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(w.encryptionKey)
}
// TODO: Currently the checksums are only sent on the first message
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go
index 13bbdb4c96..c36634b1a1 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.12.2
+// protoc v3.21.9
// source: google/storage/v2/storage.proto
package storage
@@ -25,17 +25,17 @@ import (
reflect "reflect"
sync "sync"
- empty "github.com/golang/protobuf/ptypes/empty"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
_ "google.golang.org/genproto/googleapis/api/annotations"
v1 "google.golang.org/genproto/googleapis/iam/v1"
date "google.golang.org/genproto/googleapis/type/date"
- field_mask "google.golang.org/genproto/protobuf/field_mask"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
)
const (
@@ -264,7 +264,7 @@ type GetBucketRequest struct {
// Mask specifying which fields to read.
// A "*" field may be used to indicate all fields.
// If no mask is specified, will default to all fields.
- ReadMask *field_mask.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
}
func (x *GetBucketRequest) Reset() {
@@ -320,7 +320,7 @@ func (x *GetBucketRequest) GetIfMetagenerationNotMatch() int64 {
return 0
}
-func (x *GetBucketRequest) GetReadMask() *field_mask.FieldMask {
+func (x *GetBucketRequest) GetReadMask() *fieldmaskpb.FieldMask {
if x != nil {
return x.ReadMask
}
@@ -443,7 +443,7 @@ type ListBucketsRequest struct {
// If no mask is specified, will default to all fields except items.owner,
// items.acl, and items.default_object_acl.
// * may be used to mean "all fields".
- ReadMask *field_mask.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
}
func (x *ListBucketsRequest) Reset() {
@@ -506,7 +506,7 @@ func (x *ListBucketsRequest) GetPrefix() string {
return ""
}
-func (x *ListBucketsRequest) GetReadMask() *field_mask.FieldMask {
+func (x *ListBucketsRequest) GetReadMask() *fieldmaskpb.FieldMask {
if x != nil {
return x.ReadMask
}
@@ -664,7 +664,7 @@ type UpdateBucketRequest struct {
// Not specifying any fields is an error.
// Not specifying a field while setting that field to a non-default value is
// an error.
- UpdateMask *field_mask.FieldMask `protobuf:"bytes,6,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,6,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
}
func (x *UpdateBucketRequest) Reset() {
@@ -734,7 +734,7 @@ func (x *UpdateBucketRequest) GetPredefinedDefaultObjectAcl() string {
return ""
}
-func (x *UpdateBucketRequest) GetUpdateMask() *field_mask.FieldMask {
+func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
if x != nil {
return x.UpdateMask
}
@@ -1408,7 +1408,7 @@ type ReadObjectRequest struct {
// If no mask is specified, will default to all fields except metadata.owner
// and metadata.acl.
// * may be used to mean "all fields".
- ReadMask *field_mask.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
}
func (x *ReadObjectRequest) Reset() {
@@ -1513,7 +1513,7 @@ func (x *ReadObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestP
return nil
}
-func (x *ReadObjectRequest) GetReadMask() *field_mask.FieldMask {
+func (x *ReadObjectRequest) GetReadMask() *fieldmaskpb.FieldMask {
if x != nil {
return x.ReadMask
}
@@ -1554,7 +1554,7 @@ type GetObjectRequest struct {
// If no mask is specified, will default to all fields except metadata.acl and
// metadata.owner.
// * may be used to mean "all fields".
- ReadMask *field_mask.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
}
func (x *GetObjectRequest) Reset() {
@@ -1645,7 +1645,7 @@ func (x *GetObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestPa
return nil
}
-func (x *GetObjectRequest) GetReadMask() *field_mask.FieldMask {
+func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask {
if x != nil {
return x.ReadMask
}
@@ -2158,7 +2158,7 @@ type ListObjectsRequest struct {
// If no mask is specified, will default to all fields except items.acl and
// items.owner.
// * may be used to mean "all fields".
- ReadMask *field_mask.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
// Filter results to objects whose names are lexicographically equal to or
// after lexicographic_start. If lexicographic_end is also set, the objects
// listed have names between lexicographic_start (inclusive) and
@@ -2252,7 +2252,7 @@ func (x *ListObjectsRequest) GetVersions() bool {
return false
}
-func (x *ListObjectsRequest) GetReadMask() *field_mask.FieldMask {
+func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask {
if x != nil {
return x.ReadMask
}
@@ -2952,7 +2952,7 @@ type UpdateObjectRequest struct {
// Not specifying any fields is an error.
// Not specifying a field while setting that field to a non-default value is
// an error.
- UpdateMask *field_mask.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
// A set of parameters common to Storage API requests concerning an object.
CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
}
@@ -3031,7 +3031,7 @@ func (x *UpdateObjectRequest) GetPredefinedAcl() string {
return ""
}
-func (x *UpdateObjectRequest) GetUpdateMask() *field_mask.FieldMask {
+func (x *UpdateObjectRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
if x != nil {
return x.UpdateMask
}
@@ -3497,7 +3497,7 @@ type UpdateHmacKeyRequest struct {
// Update mask for hmac_key.
// Not specifying any fields will mean only the `state` field is updated to
// the value specified in `hmac_key`.
- UpdateMask *field_mask.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
}
func (x *UpdateHmacKeyRequest) Reset() {
@@ -3539,7 +3539,7 @@ func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata {
return nil
}
-func (x *UpdateHmacKeyRequest) GetUpdateMask() *field_mask.FieldMask {
+func (x *UpdateHmacKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
if x != nil {
return x.UpdateMask
}
@@ -3716,14 +3716,14 @@ type Bucket struct {
// Output only. The creation time of the bucket.
// Attempting to set or update this field will result in a
// [FieldViolation][google.rpc.BadRequest.FieldViolation].
- CreateTime *timestamp.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
+ CreateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// The bucket's [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing]
// (CORS) config.
Cors []*Bucket_Cors `protobuf:"bytes,12,rep,name=cors,proto3" json:"cors,omitempty"`
// Output only. The modification time of the bucket.
// Attempting to set or update this field will result in a
// [FieldViolation][google.rpc.BadRequest.FieldViolation].
- UpdateTime *timestamp.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
+ UpdateTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
// The default value for event-based hold on newly created objects in this
// bucket. Event-based hold is a way to retain objects indefinitely until an
// event occurs, signified by the
@@ -3894,7 +3894,7 @@ func (x *Bucket) GetLifecycle() *Bucket_Lifecycle {
return nil
}
-func (x *Bucket) GetCreateTime() *timestamp.Timestamp {
+func (x *Bucket) GetCreateTime() *timestamppb.Timestamp {
if x != nil {
return x.CreateTime
}
@@ -3908,7 +3908,7 @@ func (x *Bucket) GetCors() []*Bucket_Cors {
return nil
}
-func (x *Bucket) GetUpdateTime() *timestamp.Timestamp {
+func (x *Bucket) GetUpdateTime() *timestamppb.Timestamp {
if x != nil {
return x.UpdateTime
}
@@ -4296,9 +4296,9 @@ type HmacKeyMetadata struct {
// Writable, can be updated by UpdateHmacKey operation.
State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"`
// Output only. The creation time of the HMAC key.
- CreateTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
+ CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// Output only. The last modification time of the HMAC key metadata.
- UpdateTime *timestamp.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
+ UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
// The etag of the HMAC key.
Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"`
}
@@ -4370,14 +4370,14 @@ func (x *HmacKeyMetadata) GetState() string {
return ""
}
-func (x *HmacKeyMetadata) GetCreateTime() *timestamp.Timestamp {
+func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp {
if x != nil {
return x.CreateTime
}
return nil
}
-func (x *HmacKeyMetadata) GetUpdateTime() *timestamp.Timestamp {
+func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp {
if x != nil {
return x.UpdateTime
}
@@ -4624,7 +4624,7 @@ type Object struct {
// version of the object has been deleted.
// Attempting to set or update this field will result in a
// [FieldViolation][google.rpc.BadRequest.FieldViolation].
- DeleteTime *timestamp.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"`
+ DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"`
// Content-Type of the object data, matching
// [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5].
// If an object is stored without a Content-Type, it is served as
@@ -4633,7 +4633,7 @@ type Object struct {
// Output only. The creation time of the object.
// Attempting to set or update this field will result in a
// [FieldViolation][google.rpc.BadRequest.FieldViolation].
- CreateTime *timestamp.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
+ CreateTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// Output only. Number of underlying components that make up this object. Components are
// accumulated by compose operations.
// Attempting to set or update this field will result in a
@@ -4649,7 +4649,7 @@ type Object struct {
// Object Lifecycle Configuration.
// Attempting to set or update this field will result in a
// [FieldViolation][google.rpc.BadRequest.FieldViolation].
- UpdateTime *timestamp.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
+ UpdateTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
// Cloud KMS Key used to encrypt this object, if the object is encrypted by
// such a key.
KmsKey string `protobuf:"bytes,18,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"`
@@ -4657,7 +4657,7 @@ type Object struct {
// object is initially created, it will be set to time_created.
// Attempting to set or update this field will result in a
// [FieldViolation][google.rpc.BadRequest.FieldViolation].
- UpdateStorageClassTime *timestamp.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"`
+ UpdateStorageClassTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"`
// Whether an object is under temporary hold. While this flag is set to true,
// the object is protected against deletion and overwrites. A common use case
// of this flag is regulatory investigations where objects need to be retained
@@ -4671,7 +4671,7 @@ type Object struct {
// Note 2: This value can be provided even when temporary hold is set (so that
// the user can reason about policy without having to first unset the
// temporary hold).
- RetentionExpireTime *timestamp.Timestamp `protobuf:"bytes,21,opt,name=retention_expire_time,json=retentionExpireTime,proto3" json:"retention_expire_time,omitempty"`
+ RetentionExpireTime *timestamppb.Timestamp `protobuf:"bytes,21,opt,name=retention_expire_time,json=retentionExpireTime,proto3" json:"retention_expire_time,omitempty"`
// User-provided metadata, in key/value pairs.
Metadata map[string]string `protobuf:"bytes,22,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Whether an object is under event-based hold.
@@ -4694,7 +4694,7 @@ type Object struct {
// such a key.
CustomerEncryption *CustomerEncryption `protobuf:"bytes,25,opt,name=customer_encryption,json=customerEncryption,proto3" json:"customer_encryption,omitempty"`
// A user-specified timestamp set on an object.
- CustomTime *timestamp.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"`
+ CustomTime *timestamppb.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"`
}
func (x *Object) Reset() {
@@ -4813,7 +4813,7 @@ func (x *Object) GetContentLanguage() string {
return ""
}
-func (x *Object) GetDeleteTime() *timestamp.Timestamp {
+func (x *Object) GetDeleteTime() *timestamppb.Timestamp {
if x != nil {
return x.DeleteTime
}
@@ -4827,7 +4827,7 @@ func (x *Object) GetContentType() string {
return ""
}
-func (x *Object) GetCreateTime() *timestamp.Timestamp {
+func (x *Object) GetCreateTime() *timestamppb.Timestamp {
if x != nil {
return x.CreateTime
}
@@ -4848,7 +4848,7 @@ func (x *Object) GetChecksums() *ObjectChecksums {
return nil
}
-func (x *Object) GetUpdateTime() *timestamp.Timestamp {
+func (x *Object) GetUpdateTime() *timestamppb.Timestamp {
if x != nil {
return x.UpdateTime
}
@@ -4862,7 +4862,7 @@ func (x *Object) GetKmsKey() string {
return ""
}
-func (x *Object) GetUpdateStorageClassTime() *timestamp.Timestamp {
+func (x *Object) GetUpdateStorageClassTime() *timestamppb.Timestamp {
if x != nil {
return x.UpdateStorageClassTime
}
@@ -4876,7 +4876,7 @@ func (x *Object) GetTemporaryHold() bool {
return false
}
-func (x *Object) GetRetentionExpireTime() *timestamp.Timestamp {
+func (x *Object) GetRetentionExpireTime() *timestamppb.Timestamp {
if x != nil {
return x.RetentionExpireTime
}
@@ -4911,7 +4911,7 @@ func (x *Object) GetCustomerEncryption() *CustomerEncryption {
return nil
}
-func (x *Object) GetCustomTime() *timestamp.Timestamp {
+func (x *Object) GetCustomTime() *timestamppb.Timestamp {
if x != nil {
return x.CustomTime
}
@@ -5845,7 +5845,7 @@ type Bucket_RetentionPolicy struct {
// Server-determined value that indicates the time from which policy was
// enforced and effective.
- EffectiveTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"`
+ EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"`
// Once locked, an object retention policy cannot be modified.
IsLocked bool `protobuf:"varint,2,opt,name=is_locked,json=isLocked,proto3" json:"is_locked,omitempty"`
// The duration in seconds that objects need to be retained. Retention
@@ -5887,7 +5887,7 @@ func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 6}
}
-func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamp.Timestamp {
+func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp {
if x != nil {
return x.EffectiveTime
}
@@ -6089,7 +6089,7 @@ type Bucket_Autoclass struct {
// disabled/unconfigured or set to false after being enabled. If Autoclass
// is enabled when the bucket is created, the toggle_time is set to the
// bucket creation time.
- ToggleTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"`
+ ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"`
}
func (x *Bucket_Autoclass) Reset() {
@@ -6131,7 +6131,7 @@ func (x *Bucket_Autoclass) GetEnabled() bool {
return false
}
-func (x *Bucket_Autoclass) GetToggleTime() *timestamp.Timestamp {
+func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp {
if x != nil {
return x.ToggleTime
}
@@ -6150,7 +6150,7 @@ type Bucket_IamConfig_UniformBucketLevelAccess struct {
// The deadline time for changing
// `iamConfig.uniformBucketLevelAccess.enabled` from `true` to `false`.
// Mutable until the specified deadline is reached, but not afterward.
- LockTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"`
+ LockTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"`
}
func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() {
@@ -6192,7 +6192,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool {
return false
}
-func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamp.Timestamp {
+func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamppb.Timestamp {
if x != nil {
return x.LockTime
}
@@ -8184,13 +8184,13 @@ var file_google_storage_v2_storage_proto_goTypes = []interface{}{
(*Bucket_Lifecycle_Rule_Condition)(nil), // 72: google.storage.v2.Bucket.Lifecycle.Rule.Condition
nil, // 73: google.storage.v2.Notification.CustomAttributesEntry
nil, // 74: google.storage.v2.Object.MetadataEntry
- (*field_mask.FieldMask)(nil), // 75: google.protobuf.FieldMask
- (*timestamp.Timestamp)(nil), // 76: google.protobuf.Timestamp
+ (*fieldmaskpb.FieldMask)(nil), // 75: google.protobuf.FieldMask
+ (*timestamppb.Timestamp)(nil), // 76: google.protobuf.Timestamp
(*date.Date)(nil), // 77: google.type.Date
(*v1.GetIamPolicyRequest)(nil), // 78: google.iam.v1.GetIamPolicyRequest
(*v1.SetIamPolicyRequest)(nil), // 79: google.iam.v1.SetIamPolicyRequest
(*v1.TestIamPermissionsRequest)(nil), // 80: google.iam.v1.TestIamPermissionsRequest
- (*empty.Empty)(nil), // 81: google.protobuf.Empty
+ (*emptypb.Empty)(nil), // 81: google.protobuf.Empty
(*v1.Policy)(nil), // 82: google.iam.v1.Policy
(*v1.TestIamPermissionsResponse)(nil), // 83: google.iam.v1.TestIamPermissionsResponse
}
@@ -9272,7 +9272,7 @@ const _ = grpc.SupportPackageIsVersion6
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type StorageClient interface {
// Permanently deletes an empty bucket.
- DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+ DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Returns metadata for the specified bucket.
GetBucket(ctx context.Context, in *GetBucketRequest, opts ...grpc.CallOption) (*Bucket, error)
// Creates a new bucket.
@@ -9291,7 +9291,7 @@ type StorageClient interface {
// Updates a bucket. Equivalent to JSON API's storage.buckets.patch method.
UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error)
// Permanently deletes a notification subscription.
- DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+ DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// View a notification config.
GetNotification(ctx context.Context, in *GetNotificationRequest, opts ...grpc.CallOption) (*Notification, error)
// Creates a notification subscription for a given bucket.
@@ -9306,7 +9306,7 @@ type StorageClient interface {
ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error)
// Deletes an object and its metadata. Deletions are permanent if versioning
// is not enabled for the bucket, or if the `generation` parameter is used.
- DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+ DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Cancels an in-progress resumable upload.
CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error)
// Retrieves an object's metadata.
@@ -9397,7 +9397,7 @@ type StorageClient interface {
// Creates a new HMAC key for the given service account.
CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error)
// Deletes a given HMAC key. Key must be in an INACTIVE state.
- DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*empty.Empty, error)
+ DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Gets an existing HMAC key metadata for the given id.
GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error)
// Lists HMAC keys under a given project with the additional filters provided.
@@ -9414,8 +9414,8 @@ func NewStorageClient(cc grpc.ClientConnInterface) StorageClient {
return &storageClient{cc}
}
-func (c *storageClient) DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
- out := new(empty.Empty)
+func (c *storageClient) DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteBucket", in, out, opts...)
if err != nil {
return nil, err
@@ -9495,8 +9495,8 @@ func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketReques
return out, nil
}
-func (c *storageClient) DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
- out := new(empty.Empty)
+func (c *storageClient) DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotification", in, out, opts...)
if err != nil {
return nil, err
@@ -9540,8 +9540,8 @@ func (c *storageClient) ComposeObject(ctx context.Context, in *ComposeObjectRequ
return out, nil
}
-func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
- out := new(empty.Empty)
+func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteObject", in, out, opts...)
if err != nil {
return nil, err
@@ -9696,8 +9696,8 @@ func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequ
return out, nil
}
-func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
- out := new(empty.Empty)
+func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...)
if err != nil {
return nil, err
@@ -9735,7 +9735,7 @@ func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequ
// StorageServer is the server API for Storage service.
type StorageServer interface {
// Permanently deletes an empty bucket.
- DeleteBucket(context.Context, *DeleteBucketRequest) (*empty.Empty, error)
+ DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error)
// Returns metadata for the specified bucket.
GetBucket(context.Context, *GetBucketRequest) (*Bucket, error)
// Creates a new bucket.
@@ -9754,7 +9754,7 @@ type StorageServer interface {
// Updates a bucket. Equivalent to JSON API's storage.buckets.patch method.
UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error)
// Permanently deletes a notification subscription.
- DeleteNotification(context.Context, *DeleteNotificationRequest) (*empty.Empty, error)
+ DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error)
// View a notification config.
GetNotification(context.Context, *GetNotificationRequest) (*Notification, error)
// Creates a notification subscription for a given bucket.
@@ -9769,7 +9769,7 @@ type StorageServer interface {
ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error)
// Deletes an object and its metadata. Deletions are permanent if versioning
// is not enabled for the bucket, or if the `generation` parameter is used.
- DeleteObject(context.Context, *DeleteObjectRequest) (*empty.Empty, error)
+ DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error)
// Cancels an in-progress resumable upload.
CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error)
// Retrieves an object's metadata.
@@ -9860,7 +9860,7 @@ type StorageServer interface {
// Creates a new HMAC key for the given service account.
CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error)
// Deletes a given HMAC key. Key must be in an INACTIVE state.
- DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*empty.Empty, error)
+ DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error)
// Gets an existing HMAC key metadata for the given id.
GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error)
// Lists HMAC keys under a given project with the additional filters provided.
@@ -9873,7 +9873,7 @@ type StorageServer interface {
type UnimplementedStorageServer struct {
}
-func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*empty.Empty, error) {
+func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteBucket not implemented")
}
func (*UnimplementedStorageServer) GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) {
@@ -9900,7 +9900,7 @@ func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *v1.TestI
func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented")
}
-func (*UnimplementedStorageServer) DeleteNotification(context.Context, *DeleteNotificationRequest) (*empty.Empty, error) {
+func (*UnimplementedStorageServer) DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteNotification not implemented")
}
func (*UnimplementedStorageServer) GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) {
@@ -9915,7 +9915,7 @@ func (*UnimplementedStorageServer) ListNotifications(context.Context, *ListNotif
func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) {
return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented")
}
-func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*empty.Empty, error) {
+func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented")
}
func (*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) {
@@ -9951,7 +9951,7 @@ func (*UnimplementedStorageServer) GetServiceAccount(context.Context, *GetServic
func (*UnimplementedStorageServer) CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method CreateHmacKey not implemented")
}
-func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*empty.Empty, error) {
+func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteHmacKey not implemented")
}
func (*UnimplementedStorageServer) GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) {
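For context on the regenerated stubs above: the vendored Google Cloud Storage gRPC code switches from the deprecated `empty.Empty` type (package `github.com/golang/protobuf/ptypes/empty`) to the canonical well-known type `emptypb.Empty` (package `google.golang.org/protobuf/types/known/emptypb`). A minimal sketch of the same swap in ordinary Go code; the `deleter` interface below is illustrative and not taken from the patch:

```go
package example

import (
	"context"

	// The regenerated stubs import the canonical well-known type instead of
	// the deprecated "github.com/golang/protobuf/ptypes/empty" alias.
	"google.golang.org/protobuf/types/known/emptypb"
)

// A delete-style RPC now returns *emptypb.Empty rather than *empty.Empty;
// the message is wire-compatible, only the Go package changes.
type deleter interface {
	DeleteBucket(ctx context.Context, bucket string) (*emptypb.Empty, error)
}
```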
diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go
index 50f34dc83c..008568b405 100644
--- a/vendor/cloud.google.com/go/storage/internal/version.go
+++ b/vendor/cloud.google.com/go/storage/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.28.0"
+const Version = "1.28.1"
diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go
index 855792f474..b5c10efc8a 100644
--- a/vendor/cloud.google.com/go/storage/storage.go
+++ b/vendor/cloud.google.com/go/storage/storage.go
@@ -1412,12 +1412,13 @@ func newObjectFromProto(o *storagepb.Object) *ObjectAttrs {
Generation: o.Generation,
Metageneration: o.Metageneration,
StorageClass: o.StorageClass,
- CustomerKeySHA256: string(o.GetCustomerEncryption().GetKeySha256Bytes()),
- KMSKeyName: o.GetKmsKey(),
- Created: convertProtoTime(o.GetCreateTime()),
- Deleted: convertProtoTime(o.GetDeleteTime()),
- Updated: convertProtoTime(o.GetUpdateTime()),
- CustomTime: convertProtoTime(o.GetCustomTime()),
+ // CustomerKeySHA256 needs to be presented as base64 encoded, but the response from gRPC is not.
+ CustomerKeySHA256: base64.StdEncoding.EncodeToString(o.GetCustomerEncryption().GetKeySha256Bytes()),
+ KMSKeyName: o.GetKmsKey(),
+ Created: convertProtoTime(o.GetCreateTime()),
+ Deleted: convertProtoTime(o.GetDeleteTime()),
+ Updated: convertProtoTime(o.GetUpdateTime()),
+ CustomTime: convertProtoTime(o.GetCustomTime()),
}
}
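The added comment above explains the fix: the JSON API exposes the customer-supplied encryption key's SHA-256 digest already base64-encoded, while the gRPC response carries raw digest bytes, so `newObjectFromProto` now encodes them before filling `CustomerKeySHA256`. A minimal sketch of that encoding step using only the standard library (the `key` value is a placeholder, not from the patch):

```go
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	// A customer-supplied encryption key (CSEK) is 32 raw bytes.
	key := make([]byte, 32)

	// The gRPC response carries the raw SHA-256 digest of the key...
	digest := sha256.Sum256(key)

	// ...while ObjectAttrs.CustomerKeySHA256 is expected to hold the
	// base64 (standard encoding) form, matching the JSON API behaviour.
	fmt.Println(base64.StdEncoding.EncodeToString(digest[:]))
}
```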
diff --git a/vendor/github.com/VictoriaMetrics/metricsql/optimizer.go b/vendor/github.com/VictoriaMetrics/metricsql/optimizer.go
index 3a432e63e0..3415285863 100644
--- a/vendor/github.com/VictoriaMetrics/metricsql/optimizer.go
+++ b/vendor/github.com/VictoriaMetrics/metricsql/optimizer.go
@@ -392,7 +392,7 @@ func getTransformArgIdxForOptimization(funcName string, args []Expr) int {
return -1
case "limit_offset":
return 2
- case "buckets_limit", "histogram_quantile", "histogram_share", "range_quantile":
+ case "buckets_limit", "histogram_quantile", "histogram_share", "range_quantile", "range_trim_spikes":
return 1
case "histogram_quantiles":
return len(args) - 1
diff --git a/vendor/github.com/VictoriaMetrics/metricsql/transform.go b/vendor/github.com/VictoriaMetrics/metricsql/transform.go
index 5876c82908..31029f2c3e 100644
--- a/vendor/github.com/VictoriaMetrics/metricsql/transform.go
+++ b/vendor/github.com/VictoriaMetrics/metricsql/transform.go
@@ -81,6 +81,7 @@ var transformFuncs = map[string]bool{
"range_stddev": true,
"range_stdvar": true,
"range_sum": true,
+ "range_trim_spikes": true,
"remove_resets": true,
"round": true,
"running_avg": true,
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md
index 56b641cf7b..fcf2947ba5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md
@@ -1,3 +1,571 @@
+# Release (2022-12-02)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.17.0](service/appsync/CHANGELOG.md#v1170-2022-12-02)
+ * **Feature**: Fixes the URI for the evaluatecode endpoint to include the /v1 prefix (i.e. "/v1/dataplane-evaluatecode").
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.20.1](service/ecs/CHANGELOG.md#v1201-2022-12-02)
+ * **Documentation**: Documentation updates for Amazon ECS
+* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.21.0](service/fms/CHANGELOG.md#v1210-2022-12-02)
+ * **Feature**: AWS Firewall Manager now supports Fortigate Cloud Native Firewall as a Service as a third-party policy type.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.28.0](service/mediaconvert/CHANGELOG.md#v1280-2022-12-02)
+ * **Feature**: The AWS Elemental MediaConvert SDK has added support for configurable ID3 eMSG box attributes and the ability to signal them with InbandEventStream tags in DASH and CMAF outputs.
+* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.25.0](service/medialive/CHANGELOG.md#v1250-2022-12-02)
+ * **Feature**: Updates to Event Signaling and Management (ESAM) API and documentation.
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.21.0](service/polly/CHANGELOG.md#v1210-2022-12-02)
+ * **Feature**: Add language code for Finnish (fi-FI)
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.18.0](service/proton/CHANGELOG.md#v1180-2022-12-02)
+ * **Feature**: CreateEnvironmentAccountConnection RoleArn input is now optional
+* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.3.0](service/redshiftserverless/CHANGELOG.md#v130-2022-12-02)
+ * **Feature**: Add Table Level Restore operations for Amazon Redshift Serverless. Add multi-port support for Amazon Redshift Serverless endpoints. Add Tagging support to Snapshots and Recovery Points in Amazon Redshift Serverless.
+* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.18.7](service/sns/CHANGELOG.md#v1187-2022-12-02)
+ * **Documentation**: This release adds the message payload-filtering feature to the SNS Subscribe, SetSubscriptionAttributes, and GetSubscriptionAttributes API actions
+
+# Release (2022-12-01)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.0.0](service/codecatalyst/CHANGELOG.md#v100-2022-12-01)
+ * **Release**: New AWS service client module
+ * **Feature**: This release adds operations that support customers using the AWS Toolkits and Amazon CodeCatalyst, a unified software development service that helps developers develop, deploy, and maintain applications in the cloud. For more information, see the documentation.
+* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.20.0](service/comprehend/CHANGELOG.md#v1200-2022-12-01)
+ * **Feature**: Comprehend now supports semi-structured documents (such as PDF files or image files) as inputs for custom analysis using the synchronous APIs (ClassifyDocument and DetectEntities).
+* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.16.0](service/gamelift/CHANGELOG.md#v1160-2022-12-01)
+ * **Feature**: GameLift introduces a new feature, GameLift Anywhere. GameLift Anywhere allows you to integrate your own compute resources with GameLift. You can also use GameLift Anywhere to iteratively test your game servers without uploading the build to GameLift for every iteration.
+* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.0.0](service/pipes/CHANGELOG.md#v100-2022-12-01)
+ * **Release**: New AWS service client module
+ * **Feature**: AWS introduces new Amazon EventBridge Pipes which allow you to connect sources (SQS, Kinesis, DDB, Kafka, MQ) to Targets (14+ EventBridge Targets) without any code, with filtering, batching, input transformation, and an optional Enrichment stage (Lambda, StepFunctions, ApiGateway, ApiDestinations)
+* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.16.0](service/sfn/CHANGELOG.md#v1160-2022-12-01)
+ * **Feature**: This release adds support for the AWS Step Functions Map state in Distributed mode. The changes include a new MapRun resource and several new and modified APIs.
+
+# Release (2022-11-30)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.18.0](service/accessanalyzer/CHANGELOG.md#v1180-2022-11-30)
+ * **Feature**: This release adds support for S3 cross account access points. IAM Access Analyzer will now produce public or cross account findings when it detects bucket delegation to external account access points.
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.20.0](service/athena/CHANGELOG.md#v1200-2022-11-30)
+ * **Feature**: This release includes support for using Apache Spark in Amazon Athena.
+* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.17.0](service/dataexchange/CHANGELOG.md#v1170-2022-11-30)
+ * **Feature**: This release enables data providers to license direct access to data in their Amazon S3 buckets or AWS Lake Formation data lakes through AWS Data Exchange. Subscribers get read-only access to the data and can use it in downstream AWS services, like Amazon Athena, without creating or managing copies.
+* `github.com/aws/aws-sdk-go-v2/service/docdbelastic`: [v1.0.0](service/docdbelastic/CHANGELOG.md#v100-2022-11-30)
+ * **Release**: New AWS service client module
+ * **Feature**: Launched Amazon DocumentDB Elastic Clusters. You can now use the SDK to create, list, update and delete Amazon DocumentDB Elastic Cluster resources
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.37.0](service/glue/CHANGELOG.md#v1370-2022-11-30)
+ * **Feature**: This release adds support for AWS Glue Data Quality, which helps you evaluate and monitor the quality of your data and includes the API for creating, deleting, or updating data quality rulesets, runs and evaluations.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.28.0](service/s3control/CHANGELOG.md#v1280-2022-11-30)
+ * **Feature**: Amazon S3 now supports cross-account access points. S3 bucket owners can now allow trusted AWS accounts to create access points associated with their bucket.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.56.0](service/sagemaker/CHANGELOG.md#v1560-2022-11-30)
+ * **Feature**: Added Models as part of the Search API. Added Model shadow deployments in realtime inference, and shadow testing in managed inference. Added support for shared spaces, geospatial APIs, Model Cards, AutoMLJobStep in pipelines, Git repositories on user profiles and domains, Model sharing in Jumpstart.
+* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.0.0](service/sagemakergeospatial/CHANGELOG.md#v100-2022-11-30)
+ * **Release**: New AWS service client module
+ * **Feature**: This release provides Amazon SageMaker geospatial APIs to build, train, deploy and visualize geospatial models.
+
+# Release (2022-11-29.2)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.74.0](service/ec2/CHANGELOG.md#v1740-2022-11-292)
+ * **Feature**: This release adds support for AWS Verified Access and the Hpc6id Amazon EC2 compute optimized instance type, which features 3rd generation Intel Xeon Scalable processors.
+* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.15.0](service/firehose/CHANGELOG.md#v1150-2022-11-292)
+ * **Feature**: Allow support for the Serverless offering for Amazon OpenSearch Service as a Kinesis Data Firehose delivery destination.
+* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.19.0](service/kms/CHANGELOG.md#v1190-2022-11-292)
+ * **Feature**: AWS KMS introduces the External Key Store (XKS), a new feature for customers who want to protect their data with encryption keys stored in an external key management system under their control.
+* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.0.0](service/omics/CHANGELOG.md#v100-2022-11-292)
+ * **Release**: New AWS service client module
+ * **Feature**: Amazon Omics is a new, purpose-built service that can be used by healthcare and life science organizations to store, query, and analyze omics data. The insights from that data can be used to accelerate scientific discoveries and improve healthcare.
+* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.0.0](service/opensearchserverless/CHANGELOG.md#v100-2022-11-292)
+ * **Release**: New AWS service client module
+ * **Feature**: Publish SDK for Amazon OpenSearch Serverless
+* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.0.0](service/securitylake/CHANGELOG.md#v100-2022-11-292)
+ * **Release**: New AWS service client module
+ * **Feature**: Amazon Security Lake automatically centralizes security data from cloud, on-premises, and custom sources into a purpose-built data lake stored in your account. Security Lake makes it easier to analyze security data, so you can improve the protection of your workloads, applications, and data
+* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.0.0](service/simspaceweaver/CHANGELOG.md#v100-2022-11-292)
+ * **Release**: New AWS service client module
+ * **Feature**: AWS SimSpace Weaver is a new service that helps customers build spatial simulations at new levels of scale - resulting in virtual worlds with millions of dynamic entities. See the AWS SimSpace Weaver developer guide for more details on how to get started. https://docs.aws.amazon.com/simspaceweaver
+
+# Release (2022-11-29)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/arczonalshift`: [v1.0.0](service/arczonalshift/CHANGELOG.md#v100-2022-11-29)
+ * **Release**: New AWS service client module
+ * **Feature**: Amazon Route 53 Application Recovery Controller Zonal Shift is a new service that makes it easy to shift traffic away from an Availability Zone in a Region. See the developer guide for more information: https://docs.aws.amazon.com/r53recovery/latest/dg/what-is-route53-recovery.html
+* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.18.0](service/computeoptimizer/CHANGELOG.md#v1180-2022-11-29)
+ * **Feature**: Adds support for a new recommendation preference that makes it possible for customers to optimize their EC2 recommendations by utilizing an external metrics ingestion service to provide metrics.
+* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.28.0](service/configservice/CHANGELOG.md#v1280-2022-11-29)
+ * **Feature**: With this release, you can use AWS Config to evaluate your resources for compliance with Config rules before they are created or updated. Using Config rules in proactive mode enables you to test and build compliant resource templates or check resource configurations at the time they are provisioned.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.73.0](service/ec2/CHANGELOG.md#v1730-2022-11-29)
+ * **Feature**: Introduces ENA Express, which uses AWS SRD and dynamic routing to increase throughput and minimize latency, adds support for trust relationships between Reachability Analyzer and AWS Organizations to enable cross-account analysis, and adds support for Infrastructure Performance metric subscriptions.
+* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.24.0](service/eks/CHANGELOG.md#v1240-2022-11-29)
+ * **Feature**: Adds support for additional EKS add-ons metadata and filtering fields
+* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.26.0](service/fsx/CHANGELOG.md#v1260-2022-11-29)
+ * **Feature**: This release adds support for 4GB/s / 160K PIOPS FSx for ONTAP file systems and 10GB/s / 350K PIOPS FSx for OpenZFS file systems (Single_AZ_2). For FSx for ONTAP, this also adds support for DP volumes, snapshot policy, copy tags to backups, and Multi-AZ route table updates.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.36.0](service/glue/CHANGELOG.md#v1360-2022-11-29)
+ * **Feature**: This release allows the creation of Custom Visual Transforms (Dynamic Transforms) to be created via AWS Glue CLI/SDK.
+* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.9.0](service/inspector2/CHANGELOG.md#v190-2022-11-29)
+ * **Feature**: This release adds support for Inspector to scan AWS Lambda.
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.26.0](service/lambda/CHANGELOG.md#v1260-2022-11-29)
+ * **Feature**: Adds support for Lambda SnapStart, which helps improve the startup performance of functions. Customers can now manage SnapStart based functions via CreateFunction and UpdateFunctionConfiguration APIs
+* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.1.0](service/licensemanagerusersubscriptions/CHANGELOG.md#v110-2022-11-29)
+ * **Feature**: AWS now offers fully-compliant, Amazon-provided licenses for Microsoft Office Professional Plus 2021 Amazon Machine Images (AMIs) on Amazon EC2. These AMIs are now available on the Amazon EC2 console and on AWS Marketplace to launch instances on-demand without any long-term licensing commitments.
+* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.24.0](service/macie2/CHANGELOG.md#v1240-2022-11-29)
+ * **Feature**: Added support for configuring Macie to continually sample objects from S3 buckets and inspect them for sensitive data. Results appear in statistics, findings, and other data that Macie provides.
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.28.0](service/quicksight/CHANGELOG.md#v1280-2022-11-29)
+ * **Feature**: This release adds new Describe APIs and updates Create and Update APIs to support the data model for Dashboards, Analyses, and Templates.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.27.0](service/s3control/CHANGELOG.md#v1270-2022-11-29)
+ * **Feature**: Added two new APIs to support Amazon S3 Multi-Region Access Point failover controls: GetMultiRegionAccessPointRoutes and SubmitMultiRegionAccessPointRoutes. The failover control APIs are supported in the following Regions: us-east-1, us-west-2, eu-west-1, ap-southeast-2, and ap-northeast-1.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.25.0](service/securityhub/CHANGELOG.md#v1250-2022-11-29)
+ * **Feature**: Adding StandardsManagedBy field to DescribeStandards API response
+
+# Release (2022-11-28)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.18.0](service/backup/CHANGELOG.md#v1180-2022-11-28)
+ * **Feature**: AWS Backup introduces support for legal hold and application stack backups. AWS Backup Audit Manager introduces support for cross-Region, cross-account reports.
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.22.0](service/cloudwatch/CHANGELOG.md#v1220-2022-11-28)
+ * **Feature**: Adds cross-account support to the GetMetricData API. Adds cross-account support to the ListMetrics API through the usage of the IncludeLinkedAccounts flag and the new OwningAccounts field.
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.17.0](service/cloudwatchlogs/CHANGELOG.md#v1170-2022-11-28)
+ * **Feature**: Updates to support CloudWatch Logs data protection and CloudWatch cross-account observability
+* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.9.0](service/drs/CHANGELOG.md#v190-2022-11-28)
+ * **Feature**: Non breaking changes to existing APIs, and additional APIs added to support in-AWS failing back using AWS Elastic Disaster Recovery.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.20.0](service/ecs/CHANGELOG.md#v1200-2022-11-28)
+ * **Feature**: This release adds support for ECS Service Connect, a new capability that simplifies writing and operating resilient distributed applications. This release updates the TaskDefinition, Cluster, Service mutation APIs with Service connect constructs and also adds a new ListServicesByNamespace API.
+* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.18.0](service/efs/CHANGELOG.md#v1180-2022-11-28)
+ * **Feature**: This release adds elastic as a new ThroughputMode value for EFS file systems and adds AFTER_1_DAY as a value for TransitionToIARules.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.32.0](service/iot/CHANGELOG.md#v1320-2022-11-28)
+ * **Feature**: Job scheduling enables the scheduled rollout of a Job with start and end times and a customizable end behavior when end time is reached. This is available for continuous and snapshot jobs. Added support for MQTT5 properties to AWS IoT TopicRule Republish Action.
+* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.13.0](service/iotdataplane/CHANGELOG.md#v1130-2022-11-28)
+ * **Feature**: This release adds support for MQTT5 properties to AWS IoT HTTP Publish API.
+* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.23.0](service/iotwireless/CHANGELOG.md#v1230-2022-11-28)
+ * **Feature**: This release includes a new feature for customers to calculate the position of their devices by adding three new APIs: UpdateResourcePosition, GetResourcePosition, and GetPositionEstimate.
+* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.36.0](service/kendra/CHANGELOG.md#v1360-2022-11-28)
+ * **Feature**: Amazon Kendra now supports preview of table information from HTML tables in the search results. The most relevant cells with their corresponding rows, columns are displayed as a preview in the search result. The most relevant table cell or cells are also highlighted in table preview.
+* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.16.0](service/mgn/CHANGELOG.md#v1160-2022-11-28)
+ * **Feature**: This release adds support for Application and Wave management. We also now support custom post-launch actions.
+* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.0.0](service/oam/CHANGELOG.md#v100-2022-11-28)
+ * **Release**: New AWS service client module
+ * **Feature**: Amazon CloudWatch Observability Access Manager is a new service that allows configuration of the CloudWatch cross-account observability feature.
+* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.17.0](service/organizations/CHANGELOG.md#v1170-2022-11-28)
+ * **Feature**: This release introduces delegated administrator for AWS Organizations, a new feature to help you delegate the management of your Organizations policies, enabling you to govern your AWS organization in a decentralized way. You can now allow member accounts to manage Organizations policies.
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.31.0](service/rds/CHANGELOG.md#v1310-2022-11-28)
+ * **Feature**: This release enables new Aurora and RDS feature called Blue/Green Deployments that makes updates to databases safer, simpler and faster.
+* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.19.0](service/textract/CHANGELOG.md#v1190-2022-11-28)
+ * **Feature**: This release adds support for classifying and splitting lending documents by type, and extracting information by using the Analyze Lending APIs. This release also includes support for summarized information of the processed lending document package, in addition to per document results.
+* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.22.0](service/transcribe/CHANGELOG.md#v1220-2022-11-28)
+ * **Feature**: This release adds support for 'inputType' for post-call and real-time (streaming) Call Analytics within Amazon Transcribe.
+* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.8.0](service/transcribestreaming/CHANGELOG.md#v180-2022-11-28)
+ * **Feature**: This release adds support for real-time (streaming) and post-call Call Analytics within Amazon Transcribe.
+
+# Release (2022-11-23)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.10.0](service/grafana/CHANGELOG.md#v1100-2022-11-23)
+ * **Feature**: This release includes support for configuring a Grafana workspace to connect to a datasource within a VPC as well as new APIs for configuring Grafana settings.
+* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.7.0](service/rbin/CHANGELOG.md#v170-2022-11-23)
+ * **Feature**: This release adds support for Rule Lock for Recycle Bin, which allows you to lock retention rules so that they can no longer be modified or deleted.
+
+# Release (2022-11-22)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.21.0](service/appflow/CHANGELOG.md#v1210-2022-11-22)
+ * **Feature**: Adding support for Amazon AppFlow to transfer the data to Amazon Redshift databases through Amazon Redshift Data API service. This feature will support the Redshift destination connector on both public and private accessible Amazon Redshift Clusters and Amazon Redshift Serverless.
+* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.15.0](service/kinesisanalyticsv2/CHANGELOG.md#v1150-2022-11-22)
+ * **Feature**: Support for Apache Flink 1.15 in Kinesis Data Analytics.
+
+# Release (2022-11-21)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.25.0](service/route53/CHANGELOG.md#v1250-2022-11-21)
+ * **Feature**: Amazon Route 53 now supports the Asia Pacific (Hyderabad) Region (ap-south-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region.
+
+# Release (2022-11-18.2)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.0.1](service/ssmsap/CHANGELOG.md#v101-2022-11-182)
+ * **Bug Fix**: Removes old model file for ssm sap and uses the new model file to regenerate client
+
+# Release (2022-11-18)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.20.0](service/appflow/CHANGELOG.md#v1200-2022-11-18)
+ * **Feature**: AppFlow provides a new API called UpdateConnectorRegistration to update a custom connector that customers have previously registered. With this API, customers no longer need to unregister and then register a connector to make an update.
+* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.21.0](service/auditmanager/CHANGELOG.md#v1210-2022-11-18)
+ * **Feature**: This release introduces a new feature for Audit Manager: Evidence finder. You can now use evidence finder to quickly query your evidence, and add the matching evidence results to an assessment report.
+* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.0.0](service/chimesdkvoice/CHANGELOG.md#v100-2022-11-18)
+ * **Release**: New AWS service client module
+ * **Feature**: Amazon Chime Voice Connector, Voice Connector Group and PSTN Audio Service APIs are now available in the Amazon Chime SDK Voice namespace. See https://docs.aws.amazon.com/chime-sdk/latest/dg/sdk-available-regions.html for more details.
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.21.0](service/cloudfront/CHANGELOG.md#v1210-2022-11-18)
+ * **Feature**: CloudFront API support for staging distributions and associated traffic management policies.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.38.0](service/connect/CHANGELOG.md#v1380-2022-11-18)
+ * **Feature**: Added AllowedAccessControlTags and TagRestrictedResource for Tag Based Access Control on Amazon Connect Webpage
+* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.17.6](service/dynamodb/CHANGELOG.md#v1176-2022-11-18)
+ * **Documentation**: Updated minor fixes for DynamoDB documentation.
+* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.13.25](service/dynamodbstreams/CHANGELOG.md#v11325-2022-11-18)
+ * **Documentation**: Updated minor fixes for DynamoDB documentation.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.72.0](service/ec2/CHANGELOG.md#v1720-2022-11-18)
+ * **Feature**: This release adds support for copying an Amazon Machine Image's tags when copying an AMI.
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.35.0](service/glue/CHANGELOG.md#v1350-2022-11-18)
+ * **Feature**: AWSGlue Crawler - Adding support for Table and Column level Comments with database level datatypes for JDBC based crawler.
+* `github.com/aws/aws-sdk-go-v2/service/iotroborunner`: [v1.0.0](service/iotroborunner/CHANGELOG.md#v100-2022-11-18)
+ * **Release**: New AWS service client module
+ * **Feature**: AWS IoT RoboRunner is a new service that makes it easy to build applications that help multi-vendor robots work together seamlessly. See the IoT RoboRunner developer guide for more details on getting started. https://docs.aws.amazon.com/iotroborunner/latest/dev/iotroborunner-welcome.html
+* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.27.0](service/quicksight/CHANGELOG.md#v1270-2022-11-18)
+ * **Feature**: This release adds the following: 1) Asset management for centralized assets governance 2) QuickSight Q now supports public embedding 3) New Termination protection flag to mitigate accidental deletes 4) Athena data sources now accept a custom IAM role 5) QuickSight supports connectivity to Databricks
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.55.0](service/sagemaker/CHANGELOG.md#v1550-2022-11-18)
+ * **Feature**: Added DisableProfiler flag as a new field in ProfilerConfig
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.15.0](service/servicecatalog/CHANGELOG.md#v1150-2022-11-18)
+ * **Feature**: This release 1. adds support for Principal Name Sharing with Service Catalog portfolio sharing. 2. Introduces repo sourced products which are created and managed with existing SC APIs. These products are synced to external repos and auto create new product versions based on changes in the repo.
+* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.15.0](service/sfn/CHANGELOG.md#v1150-2022-11-18)
+ * **Feature**: This release adds support for using Step Functions service integrations to invoke any cross-account AWS resource, even if that service doesn't support resource-based policies or cross-account calls. See https://docs.aws.amazon.com/step-functions/latest/dg/concepts-access-cross-acct-resources.html
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.25.0](service/transfer/CHANGELOG.md#v1250-2022-11-18)
+ * **Feature**: Adds a NONE encryption algorithm type to AS2 connectors, providing support for skipping encryption of the AS2 message body when an HTTPS URL is also specified.
+
+# Release (2022-11-17)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.12.0](service/amplify/CHANGELOG.md#v1120-2022-11-17)
+ * **Feature**: Adds a new value (WEB_COMPUTE) to the Platform enum that allows customers to create Amplify Apps with Server-Side Rendering support.
+* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.19.0](service/appflow/CHANGELOG.md#v1190-2022-11-17)
+ * **Feature**: AppFlow simplifies the preparation and cataloging of SaaS data into the AWS Glue Data Catalog where your data can be discovered and accessed by AWS analytics and ML services. AppFlow now also supports data field partitioning and file size optimization to improve query performance and reduce cost.
+* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.16.0](service/appsync/CHANGELOG.md#v1160-2022-11-17)
+ * **Feature**: This release introduces the APPSYNC_JS runtime, and adds support for JavaScript in AppSync functions and AppSync pipeline resolvers.
+* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.22.0](service/databasemigrationservice/CHANGELOG.md#v1220-2022-11-17)
+ * **Feature**: Adds support for Internet Protocol Version 6 (IPv6) on DMS Replication Instances
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.71.0](service/ec2/CHANGELOG.md#v1710-2022-11-17)
+ * **Feature**: This release adds a new optional parameter "privateIpAddress" for the CreateNatGateway API. PrivateIPAddress will allow customers to select a custom Private IPv4 address instead of having it be auto-assigned.
+* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.18.25](service/elasticloadbalancingv2/CHANGELOG.md#v11825-2022-11-17)
+ * **Documentation**: Provides new target group attributes to turn on/off cross zone load balancing and configure target group health for Network Load Balancers and Application Load Balancers. Provides improvements to health check configuration for Network Load Balancers.
+* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.4.0](service/emrserverless/CHANGELOG.md#v140-2022-11-17)
+ * **Feature**: Adds support for AWS Graviton2 based applications. You can now select CPU architecture when creating new applications or updating existing ones.
+* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.1.0](service/ivschat/CHANGELOG.md#v110-2022-11-17)
+ * **Feature**: Adds LoggingConfiguration APIs for IVS Chat - a feature that allows customers to store and record sent messages in a chat room to S3 buckets, CloudWatch logs, or Kinesis firehose.
+* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.25.0](service/lambda/CHANGELOG.md#v1250-2022-11-17)
+ * **Feature**: Add Node 18 (nodejs18.x) support to AWS Lambda.
+* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.22.0](service/personalize/CHANGELOG.md#v1220-2022-11-17)
+ * **Feature**: This release provides support for creation and use of metric attributions in AWS Personalize
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.20.0](service/polly/CHANGELOG.md#v1200-2022-11-17)
+ * **Feature**: Add two new neural voices - Ola (pl-PL) and Hala (ar-AE).
+* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.8.0](service/rum/CHANGELOG.md#v180-2022-11-17)
+ * **Feature**: CloudWatch RUM now supports custom events. To use custom events, create an app monitor or update an app monitor with CustomEvent Status as ENABLED.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.26.0](service/s3control/CHANGELOG.md#v1260-2022-11-17)
+ * **Feature**: Added 34 new S3 Storage Lens metrics to support additional customer use cases.
+* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.16.7](service/secretsmanager/CHANGELOG.md#v1167-2022-11-17)
+ * **Documentation**: Documentation updates for Secrets Manager.
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.24.0](service/securityhub/CHANGELOG.md#v1240-2022-11-17)
+ * **Feature**: Added SourceLayerArn and SourceLayerHash field for security findings. Updated AwsLambdaFunction Resource detail
+* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.15.0](service/servicecatalogappregistry/CHANGELOG.md#v1150-2022-11-17)
+ * **Feature**: This release adds support for tagged resource associations, which allows you to associate a group of resources with a defined resource tag key and value to the application.
+* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.17.4](service/sts/CHANGELOG.md#v1174-2022-11-17)
+ * **Documentation**: Documentation updates for AWS Security Token Service.
+* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.18.0](service/textract/CHANGELOG.md#v1180-2022-11-17)
+ * **Feature**: This release adds support for specifying and extracting information from documents using the Signatures feature within Analyze Document API
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.27.0](service/workspaces/CHANGELOG.md#v1270-2022-11-17)
+ * **Feature**: The release introduces CreateStandbyWorkspaces, an API that allows you to create standby WorkSpaces associated with a primary WorkSpace in another Region. DescribeWorkspaces now includes related WorkSpaces properties. DescribeWorkspaceBundles and CreateWorkspaceBundle now return more bundle details.
+
+# Release (2022-11-16)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.19.1](service/batch/CHANGELOG.md#v1191-2022-11-16)
+ * **Documentation**: Documentation updates related to Batch on EKS
+* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.2.0](service/billingconductor/CHANGELOG.md#v120-2022-11-16)
+ * **Feature**: This release adds a new feature BillingEntity pricing rule.
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.24.0](service/cloudformation/CHANGELOG.md#v1240-2022-11-16)
+ * **Feature**: Added UnsupportedTarget HandlerErrorCode for use with CFN Resource Hooks
+* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.14.0](service/comprehendmedical/CHANGELOG.md#v1140-2022-11-16)
+ * **Feature**: This release supports new set of entities and traits. It also adds new category (BEHAVIORAL_ENVIRONMENTAL_SOCIAL).
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.37.0](service/connect/CHANGELOG.md#v1370-2022-11-16)
+ * **Feature**: This release adds a new MonitorContact API for initiating monitoring of ongoing Voice and Chat contacts.
+* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.23.0](service/eks/CHANGELOG.md#v1230-2022-11-16)
+ * **Feature**: Adds support for customer-provided placement groups for Kubernetes control plane instances when creating local EKS clusters on Outposts
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.24.0](service/elasticache/CHANGELOG.md#v1240-2022-11-16)
+ * **Feature**: ElastiCache for Redis now supports AWS Identity and Access Management authentication access to Redis clusters, starting with Redis engine version 7.0
+* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.8.0](service/iottwinmaker/CHANGELOG.md#v180-2022-11-16)
+ * **Feature**: This release adds the following: 1) ExecuteQuery API allows users to query their AWS IoT TwinMaker Knowledge Graph 2) Pricing plan APIs allow users to configure and manage their pricing mode 3) Support for property groups and tabular property values in existing AWS IoT TwinMaker APIs.
+* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.12.0](service/personalizeevents/CHANGELOG.md#v1120-2022-11-16)
+ * **Feature**: This release provides support for creation and use of metric attributions in AWS Personalize
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.17.0](service/proton/CHANGELOG.md#v1170-2022-11-16)
+ * **Feature**: Add support for sorting and filtering in ListServiceInstances
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.30.0](service/rds/CHANGELOG.md#v1300-2022-11-16)
+ * **Feature**: This release adds support for container databases (CDBs) to Amazon RDS Custom for Oracle. A CDB contains one PDB at creation. You can add more PDBs using Oracle SQL. You can also customize your database installation by setting the Oracle base, Oracle home, and the OS user name and group.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.33.0](service/ssm/CHANGELOG.md#v1330-2022-11-16)
+ * **Feature**: This release adds support for cross account access in CreateOpsItem, UpdateOpsItem and GetOpsItem. It introduces new APIs to setup resource policies for SSM resources: PutResourcePolicy, GetResourcePolicies and DeleteResourcePolicy.
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.19.0](service/ssmincidents/CHANGELOG.md#v1190-2022-11-16)
+ * **Feature**: Add support for PagerDuty integrations on ResponsePlan, IncidentRecord, and RelatedItem APIs
+* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.24.0](service/transfer/CHANGELOG.md#v1240-2022-11-16)
+ * **Feature**: Allow additional operations to throw ThrottlingException
+* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.15.0](service/xray/CHANGELOG.md#v1150-2022-11-16)
+ * **Feature**: This release adds new APIs - PutResourcePolicy, DeleteResourcePolicy, ListResourcePolicies for supporting resource based policies for AWS X-Ray.
+
+# Release (2022-11-15)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.36.0](service/connect/CHANGELOG.md#v1360-2022-11-15)
+ * **Feature**: This release updates the APIs: UpdateInstanceAttribute, DescribeInstanceAttribute, and ListInstanceAttributes. You can use it to programmatically enable/disable enhanced contact monitoring using attribute type ENHANCED_CONTACT_MONITORING on the specified Amazon Connect instance.
+* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.20.0](service/greengrassv2/CHANGELOG.md#v1200-2022-11-15)
+ * **Feature**: Adds new parent target ARN parameter to CreateDeployment, GetDeployment, and ListDeployments APIs for the new subdeployments feature.
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.24.0](service/route53/CHANGELOG.md#v1240-2022-11-15)
+ * **Feature**: Amazon Route 53 now supports the Europe (Spain) Region (eu-south-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region.
+* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.0.0](service/ssmsap/CHANGELOG.md#v100-2022-11-15)
+ * **Release**: New AWS service client module
+ * **Feature**: AWS Systems Manager for SAP provides simplified operations and management of SAP applications such as SAP HANA. With this release, SAP customers and partners can automate and simplify their SAP system administration tasks such as backup/restore of SAP HANA.
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.26.0](service/workspaces/CHANGELOG.md#v1260-2022-11-15)
+ * **Feature**: This release introduces ModifyCertificateBasedAuthProperties, a new API that allows control of certificate-based auth properties associated with a WorkSpaces directory. The DescribeWorkspaceDirectories API will now additionally return certificate-based auth properties in its responses.
+
+# Release (2022-11-14)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.20.0](service/customerprofiles/CHANGELOG.md#v1200-2022-11-14)
+ * **Feature**: This release enhances the SearchProfiles API by providing functionality to search for profiles using multiple keys and logical operators.
+* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.18.0](service/lakeformation/CHANGELOG.md#v1180-2022-11-14)
+ * **Feature**: This release adds a new parameter "Parameters" in the DataLakeSettings.
+* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.13.3](service/managedblockchain/CHANGELOG.md#v1133-2022-11-14)
+ * **Documentation**: Updates the API docs for the NetworkEthereumAttributes data type and the DeleteNode and CreateNode operations to also include the supported Goerli network.
+* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.16.0](service/proton/CHANGELOG.md#v1160-2022-11-14)
+ * **Feature**: Add support for CodeBuild Provisioning
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.29.0](service/rds/CHANGELOG.md#v1290-2022-11-14)
+ * **Feature**: This release adds support for restoring an RDS Multi-AZ DB cluster snapshot to a Single-AZ deployment or a Multi-AZ DB instance deployment.
+* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.12.0](service/workdocs/CHANGELOG.md#v1120-2022-11-14)
+ * **Feature**: Added 2 new document related operations, DeleteDocumentVersion and RestoreDocumentVersions.
+* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.14.0](service/xray/CHANGELOG.md#v1140-2022-11-14)
+ * **Feature**: This release enhances GetServiceGraph API to support new type of edge to represent links between SQS and Lambda in event-driven applications.
+
+# Release (2022-11-11)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/config`: [v1.18.0](config/CHANGELOG.md#v1180-2022-11-11)
+ * **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
+ * **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
+* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.13.0](credentials/CHANGELOG.md#v1130-2022-11-11)
+ * **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
+ * **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.34.1](service/glue/CHANGELOG.md#v1341-2022-11-11)
+ * **Documentation**: Added links related to enabling job bookmarks.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.31.0](service/iot/CHANGELOG.md#v1310-2022-11-11)
+ * **Feature**: This release adds a new API, listRelatedResourcesForAuditFinding, and a new member type, IssuerCertificates, for AWS IoT Device Defender audits.
+* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.16.0](service/licensemanager/CHANGELOG.md#v1160-2022-11-11)
+ * **Feature**: AWS License Manager now supports onboarded Management Accounts or Delegated Admins to view granted licenses aggregated from all accounts in the organization.
+* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.14.0](service/marketplacecatalog/CHANGELOG.md#v1140-2022-11-11)
+ * **Feature**: Added three new APIs to support tagging and tag-based authorization: TagResource, UntagResource, and ListTagsForResource. Added optional parameters to the StartChangeSet API to support tagging a resource while making a request to create it.
+* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.21.0](service/rekognition/CHANGELOG.md#v1210-2022-11-11)
+ * **Feature**: Adding support for ImageProperties feature to detect dominant colors and image brightness, sharpness, and contrast, inclusion and exclusion filters for labels and label categories, new fields to the API response, "aliases" and "categories"
+* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.23.8](service/securityhub/CHANGELOG.md#v1238-2022-11-11)
+ * **Documentation**: Documentation updates for Security Hub
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.18.0](service/ssmincidents/CHANGELOG.md#v1180-2022-11-11)
+ * **Feature**: RelatedItems now have an ID field which can be used for referencing them else where. Introducing event references in TimelineEvent API and increasing maximum length of "eventData" to 12K characters.
+
+# Release (2022-11-10)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.24.1](service/autoscaling/CHANGELOG.md#v1241-2022-11-10)
+ * **Documentation**: This release adds a new price capacity optimized allocation strategy for Spot Instances to help customers optimize provisioning of Spot Instances via EC2 Auto Scaling, EC2 Fleet, and Spot Fleet. It allocates Spot Instances based on both spare capacity availability and Spot Instance price.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.70.0](service/ec2/CHANGELOG.md#v1700-2022-11-10)
+ * **Feature**: This release adds a new price capacity optimized allocation strategy for Spot Instances to help customers optimize provisioning of Spot Instances via EC2 Auto Scaling, EC2 Fleet, and Spot Fleet. It allocates Spot Instances based on both spare capacity availability and Spot Instance price.
+* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.19.0](service/ecs/CHANGELOG.md#v1190-2022-11-10)
+ * **Feature**: This release adds support for task scale-in protection with updateTaskProtection and getTaskProtection APIs. UpdateTaskProtection API can be used to protect a service managed task from being terminated by scale-in events and getTaskProtection API to get the scale-in protection status of a task.
+* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.17.0](service/elasticsearchservice/CHANGELOG.md#v1170-2022-11-10)
+ * **Feature**: Amazon OpenSearch Service now offers managed VPC endpoints to connect to your Amazon OpenSearch Service VPC-enabled domain in a Virtual Private Cloud (VPC). This feature allows you to privately access OpenSearch Service domain without using public IPs or requiring traffic to traverse the Internet.
+* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.0.1](service/resourceexplorer2/CHANGELOG.md#v101-2022-11-10)
+ * **Documentation**: Text only updates to some Resource Explorer descriptions.
+* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.0.0](service/scheduler/CHANGELOG.md#v100-2022-11-10)
+ * **Release**: New AWS service client module
+ * **Feature**: AWS introduces the new Amazon EventBridge Scheduler. EventBridge Scheduler is a serverless scheduler that allows you to create, run, and manage tasks from one central, managed service.
+
+# Release (2022-11-09)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.35.0](service/connect/CHANGELOG.md#v1350-2022-11-09)
+ * **Feature**: This release adds new fields SignInUrl, UserArn, and UserId to GetFederationToken response payload.
+* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.1.0](service/connectcases/CHANGELOG.md#v110-2022-11-09)
+ * **Feature**: This release adds the ability to disable templates through the UpdateTemplate API. Disabling templates prevents customers from creating cases using the template. For more information see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.69.0](service/ec2/CHANGELOG.md#v1690-2022-11-09)
+ * **Feature**: Amazon EC2 Trn1 instances, powered by AWS Trainium chips, are purpose built for high-performance deep learning training. u-24tb1.112xlarge and u-18tb1.112xlarge High Memory instances are purpose-built to run large in-memory databases.
+* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.14.0](service/groundstation/CHANGELOG.md#v1140-2022-11-09)
+ * **Feature**: This release adds the preview of customer-provided ephemeris support for AWS Ground Station, allowing space vehicle owners to provide their own position and trajectory information for a satellite.
+* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.19.0](service/mediapackagevod/CHANGELOG.md#v1190-2022-11-09)
+ * **Feature**: This release adds "IncludeIframeOnlyStream" for Dash endpoints.
+* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.7.0](service/transcribestreaming/CHANGELOG.md#v170-2022-11-09)
+ * **Feature**: This release adds support for the hi-IN and th-TH languages
+
+# Release (2022-11-08)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.16.0](service/acm/CHANGELOG.md#v1160-2022-11-08)
+ * **Feature**: Support added for requesting elliptic curve certificate key algorithm types P-256 (EC_prime256v1) and P-384 (EC_secp384r1).
+* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.1.0](service/billingconductor/CHANGELOG.md#v110-2022-11-08)
+ * **Feature**: This release adds the Recurring Custom Line Item feature along with a new API ListCustomLineItemVersions.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.68.0](service/ec2/CHANGELOG.md#v1680-2022-11-08)
+ * **Feature**: This release enables sharing of EC2 Placement Groups across accounts and within AWS Organizations using Resource Access Manager
+* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.20.0](service/fms/CHANGELOG.md#v1200-2022-11-08)
+ * **Feature**: AWS Firewall Manager now supports importing existing AWS Network Firewall firewalls into Firewall Manager policies.
+* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.24.0](service/lightsail/CHANGELOG.md#v1240-2022-11-08)
+ * **Feature**: This release adds support for Amazon Lightsail to automate the delegation of domains registered through Amazon Route 53 to Lightsail DNS management and to automate record creation for DNS validation of Lightsail SSL/TLS certificates.
+* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.11.0](service/opensearch/CHANGELOG.md#v1110-2022-11-08)
+ * **Feature**: Amazon OpenSearch Service now offers managed VPC endpoints to connect to your Amazon OpenSearch Service VPC-enabled domain in a Virtual Private Cloud (VPC). This feature allows you to privately access OpenSearch Service domain without using public IPs or requiring traffic to traverse the Internet.
+* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.19.0](service/polly/CHANGELOG.md#v1190-2022-11-08)
+ * **Feature**: Amazon Polly adds new voices: Elin (sv-SE), Ida (nb-NO), Laura (nl-NL) and Suvi (fi-FI). They are available as neural voices only.
+* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.0.0](service/resourceexplorer2/CHANGELOG.md#v100-2022-11-08)
+ * **Release**: New AWS service client module
+ * **Feature**: This is the initial SDK release for AWS Resource Explorer. AWS Resource Explorer lets your users search for and discover your AWS resources across the AWS Regions in your account.
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.23.0](service/route53/CHANGELOG.md#v1230-2022-11-08)
+ * **Feature**: Amazon Route 53 now supports the Europe (Zurich) Region (eu-central-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region.
+
+# Release (2022-11-07)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.19.0](service/athena/CHANGELOG.md#v1190-2022-11-07)
+ * **Feature**: Adds support for using Query Result Reuse
+* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.24.0](service/autoscaling/CHANGELOG.md#v1240-2022-11-07)
+ * **Feature**: This release adds support for two new attributes for attribute-based instance type selection - NetworkBandwidthGbps and AllowedInstanceTypes.
+* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.20.0](service/cloudtrail/CHANGELOG.md#v1200-2022-11-07)
+ * **Feature**: This release includes support for configuring a delegated administrator to manage an AWS Organizations organization CloudTrail trails and event data stores, and AWS Key Management Service encryption of CloudTrail Lake event data stores.
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.67.0](service/ec2/CHANGELOG.md#v1670-2022-11-07)
+ * **Feature**: This release adds support for two new attributes for attribute-based instance type selection - NetworkBandwidthGbps and AllowedInstanceTypes.
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.23.0](service/elasticache/CHANGELOG.md#v1230-2022-11-07)
+ * **Feature**: Added support for IPv6 and dual stack for Memcached and Redis clusters. Customers can now launch new Redis and Memcached clusters with IPv6 and dual stack networking support.
+* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.26.0](service/lexmodelsv2/CHANGELOG.md#v1260-2022-11-07)
+ * **Feature**: Amazon Lex now supports new APIs for viewing and editing Custom Vocabulary in bots.
+* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.27.0](service/mediaconvert/CHANGELOG.md#v1270-2022-11-07)
+ * **Feature**: The AWS Elemental MediaConvert SDK has added support for setting the SDR reference white point for HDR conversions and conversion of HDR10 to DolbyVision without mastering metadata.
+* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.32.0](service/ssm/CHANGELOG.md#v1320-2022-11-07)
+ * **Feature**: This release includes support for applying a CloudWatch alarm to multi-account, multi-region Systems Manager Automation
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.23.1](service/wafv2/CHANGELOG.md#v1231-2022-11-07)
+ * **Documentation**: The geo match statement now adds labels for country and region. You can match requests at the region level by combining a geo match statement with label match statements.
+* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.17.0](service/wellarchitected/CHANGELOG.md#v1170-2022-11-07)
+ * **Feature**: This release adds support for integrations with AWS Trusted Advisor and AWS Service Catalog AppRegistry to improve workload discovery and speed up your workload reviews.
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.25.0](service/workspaces/CHANGELOG.md#v1250-2022-11-07)
+ * **Feature**: This release adds a protocols attribute to the workspaces properties data type. This enables customers to migrate workspaces from PC over IP (PCoIP) to WorkSpaces Streaming Protocol (WSP) using the create and modify workspaces public APIs.
+
+# Release (2022-11-04)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.16.1](service/cloudwatchlogs/CHANGELOG.md#v1161-2022-11-04)
+ * **Documentation**: Doc-only update for bug fixes and support of export to buckets encrypted with SSE-KMS
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.66.0](service/ec2/CHANGELOG.md#v1660-2022-11-04)
+ * **Feature**: This release adds API support for the recipient of an AMI account share to remove shared AMI launch permissions.
+* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.15.0](service/emrcontainers/CHANGELOG.md#v1150-2022-11-04)
+ * **Feature**: Adding support for Job templates. Job templates allow you to create and store templates to configure Spark application parameters. This helps you ensure consistent settings across applications by reusing and enforcing configuration overrides in data pipelines.
+* `github.com/aws/aws-sdk-go-v2/service/internal/eventstreamtesting`: [v1.0.37](service/internal/eventstreamtesting/CHANGELOG.md#v1037-2022-11-04)
+ * **Dependency Update**: update golang.org/x/net dependency to 0.1.0
+
+# Release (2022-11-03)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.10.0](service/memorydb/CHANGELOG.md#v1100-2022-11-03)
+ * **Feature**: Adding support for r6gd instances for MemoryDB Redis with data tiering. In a cluster with data tiering enabled, when available memory capacity is exhausted, the least recently used data is automatically tiered to solid state drives for cost-effective capacity scaling with minimal performance impact.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.54.0](service/sagemaker/CHANGELOG.md#v1540-2022-11-03)
+ * **Feature**: Amazon SageMaker now supports running training jobs on ml.trn1 instance types.
+
+# Release (2022-11-02)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.26.0](service/iotsitewise/CHANGELOG.md#v1260-2022-11-02)
+ * **Feature**: This release adds the ListAssetModelProperties and ListAssetProperties APIs. You can list all properties that belong to a single asset model or asset using these two new APIs.
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.25.0](service/s3control/CHANGELOG.md#v1250-2022-11-02)
+ * **Feature**: S3 on Outposts launches support for Lifecycle configuration for Outposts buckets. With S3 Lifecycle configuration, you can manage objects so they are stored cost-effectively. You can manage objects using size-based rules and specify how many noncurrent versions a bucket will retain.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.53.0](service/sagemaker/CHANGELOG.md#v1530-2022-11-02)
+ * **Feature**: This release updates the Framework model regex for ModelPackage to support the new Framework versions xgboost and sklearn.
+* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.17.0](service/ssmincidents/CHANGELOG.md#v1170-2022-11-02)
+ * **Feature**: Adds support for tagging replication-set on creation.
+
+# Release (2022-11-01)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.28.0](service/rds/CHANGELOG.md#v1280-2022-11-01)
+ * **Feature**: Relational Database Service - This release adds support for configuring Storage Throughput on RDS database instances.
+* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.17.0](service/textract/CHANGELOG.md#v1170-2022-11-01)
+ * **Feature**: Add OCR results in AnalyzeIDResponse as blocks
+
+# Release (2022-10-31)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.15.0](service/apprunner/CHANGELOG.md#v1150-2022-10-31)
+ * **Feature**: This release adds support for private App Runner services. Services may now be configured to be made private and only accessible from a VPC. The changes include a new VpcIngressConnection resource and several new and modified APIs.
+* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.16.0](service/cloudwatchlogs/CHANGELOG.md#v1160-2022-10-31)
+ * **Feature**: SDK release to support tagging for destinations and log groups with TagResource. Also supports tag on create with PutDestination.
+* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.34.0](service/connect/CHANGELOG.md#v1340-2022-10-31)
+ * **Feature**: Amazon Connect now supports a new API, DismissUserContact, to dismiss or remove terminated contacts in the Agent CCP
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.65.0](service/ec2/CHANGELOG.md#v1650-2022-10-31)
+ * **Feature**: Elastic IP transfer is a new Amazon VPC feature that allows you to transfer your Elastic IP addresses from one AWS Account to another.
+* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.30.0](service/iot/CHANGELOG.md#v1300-2022-10-31)
+ * **Feature**: This release adds the Amazon Location action to IoT Rules Engine.
+* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.15.0](service/sesv2/CHANGELOG.md#v1150-2022-10-31)
+ * **Feature**: This release includes support for interacting with the Virtual Deliverability Manager, allowing you to opt in/out of the feature and to retrieve recommendations and metric data.
+* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.16.0](service/textract/CHANGELOG.md#v1160-2022-10-31)
+ * **Feature**: This release introduces additional support for 30+ normalized fields such as vendor address and currency. It also includes OCR output in the response and accuracy improvements for the fields already supported in the previous version
+
+# Release (2022-10-28)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.14.0](service/apprunner/CHANGELOG.md#v1140-2022-10-28)
+ * **Feature**: AWS App Runner adds .NET 6, Go 1, PHP 8.1 and Ruby 3.1 runtimes.
+* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.18.0](service/appstream/CHANGELOG.md#v1180-2022-10-28)
+ * **Feature**: This release includes CertificateBasedAuthProperties in CreateDirectoryConfig and UpdateDirectoryConfig.
+* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.16.20](service/cloud9/CHANGELOG.md#v11620-2022-10-28)
+ * **Documentation**: Update to the documentation section of the Cloud9 API Reference guide.
+* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.23.0](service/cloudformation/CHANGELOG.md#v1230-2022-10-28)
+ * **Feature**: This release adds more fields to improve visibility of AWS CloudFormation StackSets information in the following APIs: ListStackInstances, DescribeStackInstance, ListStackSetOperationResults, ListStackSetOperations, DescribeStackSetOperation.
+* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.19.0](service/mediatailor/CHANGELOG.md#v1190-2022-10-28)
+ * **Feature**: This release introduces support for SCTE-35 segmentation descriptor messages which can be sent within time signal messages.
+
+# Release (2022-10-27)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.64.0](service/ec2/CHANGELOG.md#v1640-2022-10-27)
+ * **Feature**: This feature supports replacing the instance root volume using an updated AMI without requiring customers to stop their instance.
+* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.19.0](service/fms/CHANGELOG.md#v1190-2022-10-27)
+ * **Feature**: Add support for the NetworkFirewall Managed Rule Group Override flag in the GetViolationDetails API
+* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.34.0](service/glue/CHANGELOG.md#v1340-2022-10-27)
+ * **Feature**: Added support for custom datatypes when using a custom CSV classifier.
+* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.26.13](service/redshift/CHANGELOG.md#v12613-2022-10-27)
+ * **Documentation**: This release clarifies use for the ElasticIp parameter of the CreateCluster and RestoreFromClusterSnapshot APIs.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.52.0](service/sagemaker/CHANGELOG.md#v1520-2022-10-27)
+ * **Feature**: This change allows customers to provide a custom entrypoint script for the docker container to be run while executing training jobs, and provide custom arguments to the entrypoint script.
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.23.0](service/wafv2/CHANGELOG.md#v1230-2022-10-27)
+ * **Feature**: This release adds the following: Challenge rule action, to silently verify client browsers; rule group rule action override to any valid rule action, not just Count; token sharing between protected applications for challenge/CAPTCHA token; targeted rules option for Bot Control managed rule group.
+
+# Release (2022-10-26)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.18.23](service/iam/CHANGELOG.md#v11823-2022-10-26)
+ * **Documentation**: Doc only update that corrects instances of CLI not using an entity.
+* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.18.0](service/kafka/CHANGELOG.md#v1180-2022-10-26)
+ * **Feature**: This release adds support for Tiered Storage. UpdateStorage allows you to control the Storage Mode for supported storage tiers.
+* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.18.0](service/neptune/CHANGELOG.md#v1180-2022-10-26)
+ * **Feature**: Added a new cluster-level attribute to set the capacity range for Neptune Serverless instances.
+* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.51.0](service/sagemaker/CHANGELOG.md#v1510-2022-10-26)
+ * **Feature**: Amazon SageMaker Automatic Model Tuning now supports specifying Grid Search strategy for tuning jobs, which evaluates all hyperparameter combinations exhaustively based on the categorical hyperparameters provided.
+
+# Release (2022-10-25)
+
+## Module Highlights
+* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.17.0](service/accessanalyzer/CHANGELOG.md#v1170-2022-10-25)
+ * **Feature**: This release adds support for six new resource types in IAM Access Analyzer to help you easily identify public and cross-account access to your AWS resources. Updated service API, documentation, and paginators.
+* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.19.3](service/location/CHANGELOG.md#v1193-2022-10-25)
+ * **Documentation**: Added new map styles with satellite imagery for map resources using HERE as a data provider.
+* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.18.0](service/mediatailor/CHANGELOG.md#v1180-2022-10-25)
+ * **Feature**: This release is a documentation update
+* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.27.0](service/rds/CHANGELOG.md#v1270-2022-10-25)
+ * **Feature**: Relational Database Service - This release adds support for exporting DB cluster data to Amazon S3.
+* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.24.0](service/workspaces/CHANGELOG.md#v1240-2022-10-25)
+ * **Feature**: This release adds new enums for supporting Workspaces Core features, including creating Manual running mode workspaces, importing regular Workspaces Core images and importing g4dn Workspaces Core images.
+
# Release (2022-10-24)
## General Highlights
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/Makefile b/vendor/github.com/aws/aws-sdk-go-v2/Makefile
index 4b761e771a..4bc9dfaf01 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/Makefile
+++ b/vendor/github.com/aws/aws-sdk-go-v2/Makefile
@@ -120,6 +120,7 @@ gen-config-asserts:
gen-internal-codegen:
@echo "Generating internal/codegen"
cd internal/codegen \
+ && go mod tidy \
&& go generate
gen-repo-mod-replace:
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
index 41d23512a4..6d936cd505 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
@@ -3,4 +3,4 @@
package aws
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.17.1"
+const goModuleVersion = "1.17.2"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md
index d81093cad4..c95d493ea8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.4.10 (2022-12-02)
+
+* No change notes available for this release.
+
# v1.4.9 (2022-10-24)
* No change notes available for this release.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go
index 35adfcc20c..0ca5492a3e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go
@@ -3,4 +3,4 @@
package eventstream
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.4.9"
+const goModuleVersion = "1.4.10"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
index 0386bcf7f4..e02d957c4a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.18.4 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.18.3 (2022-11-22)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
index 1a1aaed58a..44b6e16dcd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
@@ -3,4 +3,4 @@
package config
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.18.3"
+const goModuleVersion = "1.18.4"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
index 953ce67f3c..613d814926 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.13.4 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.13.3 (2022-11-22)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
index 0bcacb3963..9866ca36f8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
@@ -3,4 +3,4 @@
package credentials
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.13.3"
+const goModuleVersion = "1.13.4"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
index 0dfb44be1a..f0ab4cd76d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.12.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.12.19 (2022-10-24)
* **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
index 9fc713a7cb..4da2bd2c18 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
@@ -3,4 +3,4 @@
package imds
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.12.19"
+const goModuleVersion = "1.12.20"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md
index 1602d22925..9f446c501c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.11.43 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.11.42 (2022-11-22)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
index e481cd689d..475e01773b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
@@ -3,4 +3,4 @@
package manager
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.11.42"
+const goModuleVersion = "1.11.43"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
index ab6184058b..41d589b381 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.1.26 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.1.25 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
index b9d5ca7fae..58b3ba7ad8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
@@ -3,4 +3,4 @@
package configsources
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.1.25"
+const goModuleVersion = "1.1.26"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
index 90e3d662d0..678f6634f2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v2.4.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v2.4.19 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
index d839c6d9b6..ec010e0aae 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
@@ -3,4 +3,4 @@
package endpoints
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "2.4.19"
+const goModuleVersion = "2.4.20"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
index 2cac3297b3..fc5b9781b5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.3.27 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.3.26 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
index 6d796b3100..e4c947fecc 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
@@ -3,4 +3,4 @@
package ini
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.3.26"
+const goModuleVersion = "1.3.27"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
index cc8edf2eb5..bc55796348 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.0.17 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.0.16 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
index 2b1401a3fd..be1f79e20f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
@@ -3,4 +3,4 @@
package v4a
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.0.16"
+const goModuleVersion = "1.0.17"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml
index d869782145..b6d07cdd6d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml
+++ b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml
@@ -1,10 +1,10 @@
[dependencies]
"github.com/aws/aws-sdk-go" = "v1.44.28"
- "github.com/aws/smithy-go" = "v1.13.4"
+ "github.com/aws/smithy-go" = "v1.13.5"
"github.com/google/go-cmp" = "v0.5.8"
"github.com/jmespath/go-jmespath" = "v0.4.0"
- "golang.org/x/net" = "v0.0.0-20220127200216-cd36cc0744dd"
+ "golang.org/x/net" = "v0.1.0"
[modules]
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
index a92035e29b..b3998b28b2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.9.11 (2022-12-02)
+
+* No change notes available for this release.
+
# v1.9.10 (2022-10-24)
* No change notes available for this release.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
index 036a0c08e5..f49fa9218d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
@@ -3,4 +3,4 @@
package acceptencoding
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.9.10"
+const goModuleVersion = "1.9.11"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md
index 531f193805..27d70fe1fd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.1.21 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.1.20 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go
index 0cf97a5652..c923037772 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go
@@ -3,4 +3,4 @@
package checksum
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.1.20"
+const goModuleVersion = "1.1.21"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
index 89832ca1d0..a2dfc457c1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.9.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.9.19 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
index c10027df60..3b99e9c4f6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
@@ -3,4 +3,4 @@
package presignedurl
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.9.19"
+const goModuleVersion = "1.9.20"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md
index 782b3a3adc..5a91105868 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.13.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.13.19 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
index f0495ea2eb..b6e0f39a15 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go
@@ -3,4 +3,4 @@
package s3shared
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.13.19"
+const goModuleVersion = "1.13.20"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md
index 3fd73499d4..7ba549c467 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.29.5 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.29.4 (2022-11-22)
* No change notes available for this release.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go
index d2204ecd02..0dbd3f1be8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go
@@ -3,4 +3,4 @@
package s3
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.29.4"
+const goModuleVersion = "1.29.5"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
index 2f8860d2f2..49b4e31d6b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.11.26 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.11.25 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
index e2de3ea315..cbfe45ee1a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
@@ -3,4 +3,4 @@
package sso
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.11.25"
+const goModuleVersion = "1.11.26"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
index 4245e8d9fd..b3b019177d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.13.9 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.13.8 (2022-10-24)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
index 9c79d16f41..a5a50c97fa 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
@@ -3,4 +3,4 @@
package ssooidc
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.13.8"
+const goModuleVersion = "1.13.9"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
index 6255c0bc5d..106016915f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
@@ -1,3 +1,7 @@
+# v1.17.6 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.17.5 (2022-11-22)
* No change notes available for this release.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
index 9e6b85cc41..ae6f9e766d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
@@ -3,4 +3,4 @@
package sts
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.17.5"
+const goModuleVersion = "1.17.6"
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 0e53126722..6a8bac059f 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -927,6 +927,25 @@ var awsPartition = partition{
}: endpoint{},
},
},
+ "aoss": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"api.detective": service{
Defaults: endpointDefaults{
defaultKey{}: endpoint{
@@ -1456,6 +1475,26 @@ var awsPartition = partition{
},
},
},
+ "api.ecr-public": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{
+ Hostname: "api.ecr-public.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{
+ Hostname: "api.ecr-public.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
+ },
+ },
"api.elastic-inference": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -4940,6 +4979,17 @@ var awsPartition = partition{
},
},
},
+ "codecatalyst": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "aws-global",
+ }: endpoint{
+ Hostname: "codecatalyst.global.api.aws",
+ },
+ },
+ },
"codecommit": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -12088,22 +12138,6 @@ var awsPartition = partition{
endpointKey{
Region: "ca-central-1",
}: endpoint{},
- endpointKey{
- Region: "dataplane-ap-south-1",
- }: endpoint{
- Hostname: "greengrass-ats.iot.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "dataplane-us-east-2",
- }: endpoint{
- Hostname: "greengrass-ats.iot.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
endpointKey{
Region: "eu-central-1",
}: endpoint{},
@@ -18164,6 +18198,79 @@ var awsPartition = partition{
},
},
},
+ "pipes": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "af-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-northeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "ap-southeast-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "ca-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-north-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "eu-west-3",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-central-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "me-south-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "sa-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-east-2",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-1",
+ }: endpoint{},
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"polly": service{
Endpoints: serviceEndpoints{
endpointKey{
@@ -21690,6 +21797,13 @@ var awsPartition = partition{
}: endpoint{},
},
},
+ "sagemaker-geospatial": service{
+ Endpoints: serviceEndpoints{
+ endpointKey{
+ Region: "us-west-2",
+ }: endpoint{},
+ },
+ },
"savingsplans": service{
PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse,
@@ -28147,14 +28261,6 @@ var awscnPartition = partition{
endpointKey{
Region: "cn-north-1",
}: endpoint{},
- endpointKey{
- Region: "dataplane-cn-north-1",
- }: endpoint{
- Hostname: "greengrass.ats.iot.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
},
},
"guardduty": service{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index 41fe3cf0e3..f726e2dce8 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.44.149"
+const SDKVersion = "1.44.153"
diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md
index 41bbcfac3a..1e23bf95b3 100644
--- a/vendor/github.com/aws/smithy-go/CHANGELOG.md
+++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md
@@ -1,3 +1,7 @@
+# Release (2022-12-02)
+
+* No change notes available for this release.
+
# Release (2022-10-24)
## Module Highlights
diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md
index 789b378896..a4bb43fbe9 100644
--- a/vendor/github.com/aws/smithy-go/README.md
+++ b/vendor/github.com/aws/smithy-go/README.md
@@ -2,7 +2,7 @@
[](https://github.com/aws/smithy-go/actions/workflows/go.yml)[](https://github.com/aws/smithy-go/actions/workflows/codegen.yml)
-Smithy code generators for Go.
+[Smithy](https://smithy.io/) code generators for Go.
**WARNING: All interfaces are subject to change.**
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
index d6e1e41e16..f9200093e8 100644
--- a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
@@ -2,7 +2,7 @@
Package xml holds the XMl encoder utility. This utility is written in accordance to our design to delegate to
shape serializer function in which a xml.Value will be passed around.
-Resources followed: https://awslabs.github.io/smithy/1.0/spec/core/xml-traits.html#
+Resources followed: https://smithy.io/2.0/spec/protocol-traits.html#xml-bindings
Member Element
diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go
index 4ed5881885..8eaac41e7a 100644
--- a/vendor/github.com/aws/smithy-go/go_module_metadata.go
+++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go
@@ -3,4 +3,4 @@
package smithy
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.13.4"
+const goModuleVersion = "1.13.5"
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
index 792b4a60b3..8bf0e5b781 100644
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -3,8 +3,7 @@
[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[](https://github.com/cespare/xxhash/actions/workflows/test.yml)
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.
@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/
## Compatibility
@@ -45,19 +47,20 @@ I recommend using the latest release of Go.
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+| input size | purego | asm |
+| ---------- | --------- | --------- |
+| 4 B | 1.3 GB/s | 1.2 GB/s |
+| 16 B | 2.9 GB/s | 3.5 GB/s |
+| 100 B | 6.9 GB/s | 8.1 GB/s |
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
+| 10 MB | 12.0 GB/s | 17.3 GB/s |
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```
## Projects using this package
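For orientation, here is a minimal usage sketch of the github.com/cespare/xxhash/v2 API summarized in the README hunk above: the one-shot Sum64/Sum64String helpers and the streaming Digest, which implements hash.Hash64.

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice or a string.
	fmt.Println(xxhash.Sum64([]byte("hello, world")))
	fmt.Println(xxhash.Sum64String("hello, world"))

	// Streaming hashing via Digest; the final Sum64 matches the
	// one-shot result for the same input.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.WriteString("world")
	fmt.Println(d.Sum64())
}
```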
diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh
new file mode 100644
index 0000000000..94b9c44398
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/testall.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -eu -o pipefail
+
+# Small convenience script for running the tests with various combinations of
+# arch/tags. This assumes we're running on amd64 and have qemu available.
+
+go test ./...
+go test -tags purego ./...
+GOARCH=arm64 go test
+GOARCH=arm64 go test -tags purego
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
index 15c835d541..a9e0d45c9d 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -16,19 +16,11 @@ const (
prime5 uint64 = 2870177450012600261
)
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
- prime1v = prime1
- prime2v = prime2
- prime3v = prime3
- prime4v = prime4
- prime5v = prime5
-)
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array of the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// Digest implements hash.Hash64.
type Digest struct {
@@ -50,10 +42,10 @@ func New() *Digest {
// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
- d.v1 = prime1v + prime2
+ d.v1 = primes[0] + prime2
d.v2 = prime2
d.v3 = 0
- d.v4 = -prime1v
+ d.v4 = -primes[0]
d.total = 0
d.n = 0
}
@@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
n = len(b)
d.total += uint64(n)
+ memleft := d.mem[d.n&(len(d.mem)-1):]
+
if d.n+n < 32 {
// This new data doesn't even fill the current block.
- copy(d.mem[d.n:], b)
+ copy(memleft, b)
d.n += n
return
}
if d.n > 0 {
// Finish off the partial block.
- copy(d.mem[d.n:], b)
+ c := copy(memleft, b)
d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32]))
- b = b[32-d.n:]
+ b = b[c:]
d.n = 0
}
@@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {
h += d.total
- i, end := 0, d.n
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(d.mem[i:i+8]))
+ b := d.mem[:d.n&(len(d.mem)-1)]
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
- if i+4 <= end {
- h ^= uint64(u32(d.mem[i:i+4])) * prime1
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
- i += 4
+ b = b[4:]
}
- for i < end {
- h ^= uint64(d.mem[i]) * prime5
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
- i++
}
h ^= h >> 33
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
index be8db5bf79..3e8b132579 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
@@ -1,215 +1,209 @@
+//go:build !appengine && gc && !purego
// +build !appengine
// +build gc
// +build !purego
#include "textflag.h"
-// Register allocation:
-// AX h
-// SI pointer to advance through b
-// DX n
-// BX loop end
-// R8 v1, k1
-// R9 v2
-// R10 v3
-// R11 v4
-// R12 tmp
-// R13 prime1v
-// R14 prime2v
-// DI prime4v
+// Registers:
+#define h AX
+#define d AX
+#define p SI // pointer to advance through b
+#define n DX
+#define end BX // loop end
+#define v1 R8
+#define v2 R9
+#define v3 R10
+#define v4 R11
+#define x R12
+#define prime1 R13
+#define prime2 R14
+#define prime4 DI
-// round reads from and advances the buffer pointer in SI.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
- MOVQ (SI), R12 \
- ADDQ $8, SI \
- IMULQ R14, R12 \
- ADDQ R12, r \
- ROLQ $31, r \
- IMULQ R13, r
+#define round(acc, x) \
+ IMULQ prime2, x \
+ ADDQ x, acc \
+ ROLQ $31, acc \
+ IMULQ prime1, acc
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
-#define mergeRound(acc, val) \
- IMULQ R14, val \
- ROLQ $31, val \
- IMULQ R13, val \
- XORQ val, acc \
- IMULQ R13, acc \
- ADDQ DI, acc
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ IMULQ prime2, x \
+ ROLQ $31, x \
+ IMULQ prime1, x
+
+// mergeRound applies a merge round on the two registers acc and x.
+// It assumes that prime1, prime2, and prime4 have been loaded.
+#define mergeRound(acc, x) \
+ round0(x) \
+ XORQ x, acc \
+ IMULQ prime1, acc \
+ ADDQ prime4, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
+// to process.
+#define blockLoop() \
+loop: \
+ MOVQ +0(p), x \
+ round(v1, x) \
+ MOVQ +8(p), x \
+ round(v2, x) \
+ MOVQ +16(p), x \
+ round(v3, x) \
+ MOVQ +24(p), x \
+ round(v4, x) \
+ ADDQ $32, p \
+ CMPQ p, end \
+ JLE loop
// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
// Load fixed primes.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), DI
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+ MOVQ ·primes+24(SB), prime4
// Load slice.
- MOVQ b_base+0(FP), SI
- MOVQ b_len+8(FP), DX
- LEAQ (SI)(DX*1), BX
+ MOVQ b_base+0(FP), p
+ MOVQ b_len+8(FP), n
+ LEAQ (p)(n*1), end
// The first loop limit will be len(b)-32.
- SUBQ $32, BX
+ SUBQ $32, end
// Check whether we have at least one block.
- CMPQ DX, $32
+ CMPQ n, $32
JLT noBlocks
// Set up initial state (v1, v2, v3, v4).
- MOVQ R13, R8
- ADDQ R14, R8
- MOVQ R14, R9
- XORQ R10, R10
- XORQ R11, R11
- SUBQ R13, R11
+ MOVQ prime1, v1
+ ADDQ prime2, v1
+ MOVQ prime2, v2
+ XORQ v3, v3
+ XORQ v4, v4
+ SUBQ prime1, v4
- // Loop until SI > BX.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
+ blockLoop()
- CMPQ SI, BX
- JLE blockLoop
+ MOVQ v1, h
+ ROLQ $1, h
+ MOVQ v2, x
+ ROLQ $7, x
+ ADDQ x, h
+ MOVQ v3, x
+ ROLQ $12, x
+ ADDQ x, h
+ MOVQ v4, x
+ ROLQ $18, x
+ ADDQ x, h
- MOVQ R8, AX
- ROLQ $1, AX
- MOVQ R9, R12
- ROLQ $7, R12
- ADDQ R12, AX
- MOVQ R10, R12
- ROLQ $12, R12
- ADDQ R12, AX
- MOVQ R11, R12
- ROLQ $18, R12
- ADDQ R12, AX
-
- mergeRound(AX, R8)
- mergeRound(AX, R9)
- mergeRound(AX, R10)
- mergeRound(AX, R11)
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
JMP afterBlocks
noBlocks:
- MOVQ ·prime5v(SB), AX
+ MOVQ ·primes+32(SB), h
afterBlocks:
- ADDQ DX, AX
+ ADDQ n, h
- // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
- ADDQ $24, BX
+ ADDQ $24, end
+ CMPQ p, end
+ JG try4
- CMPQ SI, BX
- JG fourByte
+loop8:
+ MOVQ (p), x
+ ADDQ $8, p
+ round0(x)
+ XORQ x, h
+ ROLQ $27, h
+ IMULQ prime1, h
+ ADDQ prime4, h
-wordLoop:
- // Calculate k1.
- MOVQ (SI), R8
- ADDQ $8, SI
- IMULQ R14, R8
- ROLQ $31, R8
- IMULQ R13, R8
+ CMPQ p, end
+ JLE loop8
- XORQ R8, AX
- ROLQ $27, AX
- IMULQ R13, AX
- ADDQ DI, AX
+try4:
+ ADDQ $4, end
+ CMPQ p, end
+ JG try1
- CMPQ SI, BX
- JLE wordLoop
+ MOVL (p), x
+ ADDQ $4, p
+ IMULQ prime1, x
+ XORQ x, h
-fourByte:
- ADDQ $4, BX
- CMPQ SI, BX
- JG singles
+ ROLQ $23, h
+ IMULQ prime2, h
+ ADDQ ·primes+16(SB), h
- MOVL (SI), R8
- ADDQ $4, SI
- IMULQ R13, R8
- XORQ R8, AX
-
- ROLQ $23, AX
- IMULQ R14, AX
- ADDQ ·prime3v(SB), AX
-
-singles:
- ADDQ $4, BX
- CMPQ SI, BX
+try1:
+ ADDQ $4, end
+ CMPQ p, end
JGE finalize
-singlesLoop:
- MOVBQZX (SI), R12
- ADDQ $1, SI
- IMULQ ·prime5v(SB), R12
- XORQ R12, AX
+loop1:
+ MOVBQZX (p), x
+ ADDQ $1, p
+ IMULQ ·primes+32(SB), x
+ XORQ x, h
+ ROLQ $11, h
+ IMULQ prime1, h
- ROLQ $11, AX
- IMULQ R13, AX
-
- CMPQ SI, BX
- JL singlesLoop
+ CMPQ p, end
+ JL loop1
finalize:
- MOVQ AX, R12
- SHRQ $33, R12
- XORQ R12, AX
- IMULQ R14, AX
- MOVQ AX, R12
- SHRQ $29, R12
- XORQ R12, AX
- IMULQ ·prime3v(SB), AX
- MOVQ AX, R12
- SHRQ $32, R12
- XORQ R12, AX
+ MOVQ h, x
+ SHRQ $33, x
+ XORQ x, h
+ IMULQ prime2, h
+ MOVQ h, x
+ SHRQ $29, x
+ XORQ x, h
+ IMULQ ·primes+16(SB), h
+ MOVQ h, x
+ SHRQ $32, x
+ XORQ x, h
- MOVQ AX, ret+24(FP)
+ MOVQ h, ret+24(FP)
RET
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
// func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
// Load fixed primes needed for round.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
// Load slice.
- MOVQ b_base+8(FP), SI
- MOVQ b_len+16(FP), DX
- LEAQ (SI)(DX*1), BX
- SUBQ $32, BX
+ MOVQ b_base+8(FP), p
+ MOVQ b_len+16(FP), n
+ LEAQ (p)(n*1), end
+ SUBQ $32, end
// Load vN from d.
- MOVQ d+0(FP), AX
- MOVQ 0(AX), R8 // v1
- MOVQ 8(AX), R9 // v2
- MOVQ 16(AX), R10 // v3
- MOVQ 24(AX), R11 // v4
+ MOVQ s+0(FP), d
+ MOVQ 0(d), v1
+ MOVQ 8(d), v2
+ MOVQ 16(d), v3
+ MOVQ 24(d), v4
// We don't need to check the loop condition here; this function is
// always called with at least one block of data to process.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ SI, BX
- JLE blockLoop
+ blockLoop()
// Copy vN back to d.
- MOVQ R8, 0(AX)
- MOVQ R9, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R11, 24(AX)
+ MOVQ v1, 0(d)
+ MOVQ v2, 8(d)
+ MOVQ v3, 16(d)
+ MOVQ v4, 24(d)
- // The number of bytes written is SI minus the old base pointer.
- SUBQ b_base+8(FP), SI
- MOVQ SI, ret+32(FP)
+ // The number of bytes written is p minus the old base pointer.
+ SUBQ b_base+8(FP), p
+ MOVQ p, ret+32(FP)
RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
new file mode 100644
index 0000000000..7e3145a221
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s
@@ -0,0 +1,183 @@
+//go:build !appengine && gc && !purego
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Registers:
+#define digest R1
+#define h R2 // return value
+#define p R3 // input pointer
+#define n R4 // input length
+#define nblocks R5 // n / 32
+#define prime1 R7
+#define prime2 R8
+#define prime3 R9
+#define prime4 R10
+#define prime5 R11
+#define v1 R12
+#define v2 R13
+#define v3 R14
+#define v4 R15
+#define x1 R20
+#define x2 R21
+#define x3 R22
+#define x4 R23
+
+#define round(acc, x) \
+ MADD prime2, acc, x, acc \
+ ROR $64-31, acc \
+ MUL prime1, acc
+
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ MUL prime2, x \
+ ROR $64-31, x \
+ MUL prime1, x
+
+#define mergeRound(acc, x) \
+ round0(x) \
+ EOR x, acc \
+ MADD acc, prime4, prime1, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
+#define blockLoop() \
+ LSR $5, n, nblocks \
+ PCALIGN $16 \
+ loop: \
+ LDP.P 16(p), (x1, x2) \
+ LDP.P 16(p), (x3, x4) \
+ round(v1, x1) \
+ round(v2, x2) \
+ round(v3, x3) \
+ round(v4, x4) \
+ SUB $1, nblocks \
+ CBNZ nblocks, loop
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ LDP b_base+0(FP), (p, n)
+
+ LDP ·primes+0(SB), (prime1, prime2)
+ LDP ·primes+16(SB), (prime3, prime4)
+ MOVD ·primes+32(SB), prime5
+
+ CMP $32, n
+ CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
+ BLT afterLoop
+
+ ADD prime1, prime2, v1
+ MOVD prime2, v2
+ MOVD $0, v3
+ NEG prime1, v4
+
+ blockLoop()
+
+ ROR $64-1, v1, x1
+ ROR $64-7, v2, x2
+ ADD x1, x2
+ ROR $64-12, v3, x3
+ ROR $64-18, v4, x4
+ ADD x3, x4
+ ADD x2, x4, h
+
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
+
+afterLoop:
+ ADD n, h
+
+ TBZ $4, n, try8
+ LDP.P 16(p), (x1, x2)
+
+ round0(x1)
+
+ // NOTE: here and below, sequencing the EOR after the ROR (using a
+ // rotated register) is worth a small but measurable speedup for small
+ // inputs.
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+ round0(x2)
+ ROR $64-27, h
+ EOR x2 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try8:
+ TBZ $3, n, try4
+ MOVD.P 8(p), x1
+
+ round0(x1)
+ ROR $64-27, h
+ EOR x1 @> 64-27, h, h
+ MADD h, prime4, prime1, h
+
+try4:
+ TBZ $2, n, try2
+ MOVWU.P 4(p), x2
+
+ MUL prime1, x2
+ ROR $64-23, h
+ EOR x2 @> 64-23, h, h
+ MADD h, prime3, prime2, h
+
+try2:
+ TBZ $1, n, try1
+ MOVHU.P 2(p), x3
+ AND $255, x3, x1
+ LSR $8, x3, x2
+
+ MUL prime5, x1
+ ROR $64-11, h
+ EOR x1 @> 64-11, h, h
+ MUL prime1, h
+
+ MUL prime5, x2
+ ROR $64-11, h
+ EOR x2 @> 64-11, h, h
+ MUL prime1, h
+
+try1:
+ TBZ $0, n, finalize
+ MOVBU (p), x4
+
+ MUL prime5, x4
+ ROR $64-11, h
+ EOR x4 @> 64-11, h, h
+ MUL prime1, h
+
+finalize:
+ EOR h >> 33, h
+ MUL prime2, h
+ EOR h >> 29, h
+ MUL prime3, h
+ EOR h >> 32, h
+
+ MOVD h, ret+24(FP)
+ RET
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ LDP ·primes+0(SB), (prime1, prime2)
+
+ // Load state. Assume v[1-4] are stored contiguously.
+ MOVD d+0(FP), digest
+ LDP 0(digest), (v1, v2)
+ LDP 16(digest), (v3, v4)
+
+ LDP b_base+8(FP), (p, n)
+
+ blockLoop()
+
+ // Store updated state.
+ STP (v1, v2), 0(digest)
+ STP (v3, v4), 16(digest)
+
+ BIC $31, n
+ MOVD n, ret+32(FP)
+ RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
similarity index 73%
rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
rename to vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
index ad14b807f4..9216e0a40c 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
@@ -1,3 +1,5 @@
+//go:build (amd64 || arm64) && !appengine && gc && !purego
+// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
index 4a5a821603..26df13bba4 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -1,4 +1,5 @@
-// +build !amd64 appengine !gc purego
+//go:build (!amd64 && !arm64) || appengine || !gc || purego
+// +build !amd64,!arm64 appengine !gc purego
package xxhash
@@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
var h uint64
if n >= 32 {
- v1 := prime1v + prime2
+ v1 := primes[0] + prime2
v2 := prime2
v3 := uint64(0)
- v4 := -prime1v
+ v4 := -primes[0]
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
@@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {
h += uint64(n)
- i, end := 0, len(b)
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(b[i:i+8:len(b)]))
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
- if i+4 <= end {
- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
- i += 4
+ b = b[4:]
}
- for ; i < end; i++ {
- h ^= uint64(b[i]) * prime5
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
index fc9bea7a31..e86f1b5fd8 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -1,3 +1,4 @@
+//go:build appengine
// +build appengine
// This file contains the safe implementations of otherwise unsafe-using code.
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
index 376e0ca2e4..1c1638fd88 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -1,3 +1,4 @@
+//go:build !appengine
// +build !appengine
// This file encapsulates usage of unsafe.
@@ -11,7 +12,7 @@ import (
// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
-// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
+// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
deleted file mode 100644
index 16686a6552..0000000000
--- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/empty/empty.proto
-
-package empty
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- emptypb "google.golang.org/protobuf/types/known/emptypb"
- reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/empty.proto.
-
-type Empty = emptypb.Empty
-
-var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{
- 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d,
- 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() }
-func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() {
- if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 0,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes,
- DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs,
- }.Build()
- File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File
- file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil
- file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil
- file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go
index 6e326888af..e57b96db90 100644
--- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go
+++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go
@@ -624,9 +624,9 @@ func (it *histogramIterator) Err() error {
}
func (it *histogramIterator) Reset(b []byte) {
- // The first 2 bytes contain chunk headers.
+ // The first 3 bytes contain chunk headers.
// We skip that for actual samples.
- it.br = newBReader(b[2:])
+ it.br = newBReader(b[3:])
it.numTotal = binary.BigEndian.Uint16(b)
it.numRead = 0
diff --git a/vendor/github.com/urfave/cli/v2/app.go b/vendor/github.com/urfave/cli/v2/app.go
index e7f79c5130..10198f4332 100644
--- a/vendor/github.com/urfave/cli/v2/app.go
+++ b/vendor/github.com/urfave/cli/v2/app.go
@@ -107,6 +107,8 @@ type App struct {
CustomAppHelpTemplate string
// SliceFlagSeparator is used to customize the separator for SliceFlag, the default is ","
SliceFlagSeparator string
+ // DisableSliceFlagSeparator is used to disable SliceFlagSeparator, the default is false
+ DisableSliceFlagSeparator bool
// Boolean to enable short-option handling so user can combine several
// single-character bool arguments into one
// i.e. foobar -o -v -> foobar -ov
@@ -264,6 +266,8 @@ func (a *App) Setup() {
if len(a.SliceFlagSeparator) != 0 {
defaultSliceFlagSeparator = a.SliceFlagSeparator
}
+
+ disableSliceFlagSeparator = a.DisableSliceFlagSeparator
}
func (a *App) newRootCommand() *Command {
diff --git a/vendor/github.com/urfave/cli/v2/command.go b/vendor/github.com/urfave/cli/v2/command.go
index c5939d4ec8..b8a944d641 100644
--- a/vendor/github.com/urfave/cli/v2/command.go
+++ b/vendor/github.com/urfave/cli/v2/command.go
@@ -203,7 +203,7 @@ func (c *Command) Run(cCtx *Context, arguments ...string) (err error) {
cerr := cCtx.checkRequiredFlags(c.Flags)
if cerr != nil {
- _ = ShowSubcommandHelp(cCtx)
+ _ = helpCommand.Action(cCtx)
return cerr
}
diff --git a/vendor/github.com/urfave/cli/v2/flag.go b/vendor/github.com/urfave/cli/v2/flag.go
index b66a75da5e..5c0a8b7328 100644
--- a/vendor/github.com/urfave/cli/v2/flag.go
+++ b/vendor/github.com/urfave/cli/v2/flag.go
@@ -15,7 +15,10 @@ import (
const defaultPlaceholder = "value"
-var defaultSliceFlagSeparator = ","
+var (
+ defaultSliceFlagSeparator = ","
+ disableSliceFlagSeparator = false
+)
var (
slPfx = fmt.Sprintf("sl:::%d:::", time.Now().UTC().UnixNano())
@@ -380,5 +383,9 @@ func flagFromEnvOrFile(envVars []string, filePath string) (value string, fromWhe
}
func flagSplitMultiValues(val string) []string {
+ if disableSliceFlagSeparator {
+ return []string{val}
+ }
+
return strings.Split(val, defaultSliceFlagSeparator)
}
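Together with the new `DisableSliceFlagSeparator` field on `App`, `flagSplitMultiValues` now returns the raw value as a single element when splitting is disabled. A minimal sketch of how a caller might opt in; the app name, flag name, and action are hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "demo",
		// With the separator disabled, a value such as "a,b,c" stays a
		// single slice element instead of being split on ",".
		DisableSliceFlagSeparator: true,
		Flags: []cli.Flag{
			&cli.StringSliceFlag{Name: "tag"},
		},
		Action: func(c *cli.Context) error {
			fmt.Println(c.StringSlice("tag"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```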
diff --git a/vendor/github.com/urfave/cli/v2/godoc-current.txt b/vendor/github.com/urfave/cli/v2/godoc-current.txt
index b8dbf6ad0a..6afd244f25 100644
--- a/vendor/github.com/urfave/cli/v2/godoc-current.txt
+++ b/vendor/github.com/urfave/cli/v2/godoc-current.txt
@@ -318,6 +318,8 @@ type App struct {
CustomAppHelpTemplate string
// SliceFlagSeparator is used to customize the separator for SliceFlag, the default is ","
SliceFlagSeparator string
+ // DisableSliceFlagSeparator is used to disable SliceFlagSeparator, the default is false
+ DisableSliceFlagSeparator bool
// Boolean to enable short-option handling so user can combine several
// single-character bool arguments into one
// i.e. foobar -o -v -> foobar -ov
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
index 4c037f1d8e..fd91e4162d 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
@@ -164,10 +164,10 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
var bw bodyWrapper
- // if request body is nil we don't want to mutate the body as it will affect
- // the identity of it in an unforeseeable way because we assert ReadCloser
- // fulfills a certain interface and it is indeed nil.
- if r.Body != nil {
+ // if request body is nil or NoBody, we don't want to mutate the body as it
+ // will affect the identity of it in an unforeseeable way because we assert
+ // ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
+ if r.Body != nil && r.Body != http.NoBody {
bw.ReadCloser = r.Body
bw.record = readRecordFunc
r.Body = &bw
@@ -180,7 +180,13 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
- rww := &respWriterWrapper{ResponseWriter: w, record: writeRecordFunc, ctx: ctx, props: h.propagators}
+ rww := &respWriterWrapper{
+ ResponseWriter: w,
+ record: writeRecordFunc,
+ ctx: ctx,
+ props: h.propagators,
+ statusCode: 200, // default status code in case the Handler doesn't write anything
+ }
// Wrap w to use our ResponseWriter methods while also exposing
// other interfaces that w may implement (http.CloseNotifier,
@@ -230,10 +236,9 @@ func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int,
if wrote > 0 {
attributes = append(attributes, WroteBytesKey.Int64(wrote))
}
- if statusCode > 0 {
- attributes = append(attributes, semconv.HTTPAttributesFromHTTPStatusCode(statusCode)...)
- span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindServer))
- }
+ attributes = append(attributes, semconv.HTTPAttributesFromHTTPStatusCode(statusCode)...)
+ span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindServer))
+
if werr != nil && werr != io.EOF {
attributes = append(attributes, WriteErrorKey.String(werr.Error()))
}
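Two behavioral changes land here: request bodies equal to `http.NoBody` are no longer wrapped, and the response writer wrapper starts from status 200 so handlers that never call `WriteHeader` still produce status attributes. A minimal sketch, assuming `httptest.NewRequest` yields a bodyless request:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// The wrapped handler writes nothing, so the default 200 recorded by the
	// response writer wrapper is what ends up on the span attributes.
	h := otelhttp.NewHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}), "demo")

	req := httptest.NewRequest(http.MethodGet, "/", nil) // bodyless request
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)
	fmt.Println(rec.Code) // 200
}
```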
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
index 6ef983af4c..822491db06 100644
--- a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -16,7 +16,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http
// Version is the current release version of the otelhttp instrumentation.
func Version() string {
- return "0.36.4"
+ return "0.37.0"
// This string is updated by the pre_release.sh script during release
}
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
index 253e3b35b5..0f099f5759 100644
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -9,7 +9,6 @@ linters:
disable-all: true
# Specifically enable linters we want to use.
enable:
- - deadcode
- depguard
- errcheck
- godot
@@ -21,10 +20,8 @@ linters:
- misspell
- revive
- staticcheck
- - structcheck
- typecheck
- unused
- - varcheck
issues:
# Maximum issues count per one linter.
@@ -114,8 +111,9 @@ linters-settings:
- name: constant-logical-expr
disabled: false
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument
+ # TODO (#3372) reenable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280
- name: context-as-argument
- disabled: false
+ disabled: true
arguments:
allowTypesBefore: "*testing.T"
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index faae85f292..9f130b8be1 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -8,6 +8,66 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
+## [1.11.2/0.34.0] 2022-12-05
+
+### Added
+
+- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package.
+ This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387)
+- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter.
+  This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
+- OTLP exporters now recognize: (#3363)
+ - `OTEL_EXPORTER_OTLP_INSECURE`
+ - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
+ - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
+ - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
+ - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
+ - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
+ - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
+- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`.
+ These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`.
+ These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
+- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
+ Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
+ The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
+- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
+- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
+- Remove comparable requirement for `Reader`s. (#3387)
+- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
+- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
+- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
+- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
+- Reenabled Attribute Filters in the Metric SDK. (#3396)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
+- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
+- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
+- Prevent duplicate Prometheus description, unit, and type. (#3469)
+- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
+ Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
+
## [1.11.1/0.33.0] 2022-10-19
### Added
@@ -2027,7 +2087,8 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.11.1...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.11.2...HEAD
+[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2
[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1
[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0
[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index 07e31965c3..68cdfef7d9 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -17,7 +17,7 @@ TOOLS_MOD_DIR := ./internal/tools
ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS))
-ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
+ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
GO = go
TIMEOUT = 60
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
index 80a37bd6ff..34a4e548dd 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/value.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -142,6 +142,13 @@ func (v Value) AsBool() bool {
// AsBoolSlice returns the []bool value. Make sure that the Value's type is
// BOOLSLICE.
func (v Value) AsBoolSlice() []bool {
+ if v.vtype != BOOLSLICE {
+ return nil
+ }
+ return v.asBoolSlice()
+}
+
+func (v Value) asBoolSlice() []bool {
return attribute.AsSlice[bool](v.slice)
}
@@ -154,6 +161,13 @@ func (v Value) AsInt64() int64 {
// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
// INT64SLICE.
func (v Value) AsInt64Slice() []int64 {
+ if v.vtype != INT64SLICE {
+ return nil
+ }
+ return v.asInt64Slice()
+}
+
+func (v Value) asInt64Slice() []int64 {
return attribute.AsSlice[int64](v.slice)
}
@@ -166,6 +180,13 @@ func (v Value) AsFloat64() float64 {
// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
// FLOAT64SLICE.
func (v Value) AsFloat64Slice() []float64 {
+ if v.vtype != FLOAT64SLICE {
+ return nil
+ }
+ return v.asFloat64Slice()
+}
+
+func (v Value) asFloat64Slice() []float64 {
return attribute.AsSlice[float64](v.slice)
}
@@ -178,6 +199,13 @@ func (v Value) AsString() string {
// AsStringSlice returns the []string value. Make sure that the Value's type is
// STRINGSLICE.
func (v Value) AsStringSlice() []string {
+ if v.vtype != STRINGSLICE {
+ return nil
+ }
+ return v.asStringSlice()
+}
+
+func (v Value) asStringSlice() []string {
return attribute.AsSlice[string](v.slice)
}
@@ -189,19 +217,19 @@ func (v Value) AsInterface() interface{} {
case BOOL:
return v.AsBool()
case BOOLSLICE:
- return v.AsBoolSlice()
+ return v.asBoolSlice()
case INT64:
return v.AsInt64()
case INT64SLICE:
- return v.AsInt64Slice()
+ return v.asInt64Slice()
case FLOAT64:
return v.AsFloat64()
case FLOAT64SLICE:
- return v.AsFloat64Slice()
+ return v.asFloat64Slice()
case STRING:
return v.stringly
case STRINGSLICE:
- return v.AsStringSlice()
+ return v.asStringSlice()
}
return unknownValueType{}
}
@@ -210,19 +238,19 @@ func (v Value) AsInterface() interface{} {
func (v Value) Emit() string {
switch v.Type() {
case BOOLSLICE:
- return fmt.Sprint(v.AsBoolSlice())
+ return fmt.Sprint(v.asBoolSlice())
case BOOL:
return strconv.FormatBool(v.AsBool())
case INT64SLICE:
- return fmt.Sprint(v.AsInt64Slice())
+ return fmt.Sprint(v.asInt64Slice())
case INT64:
return strconv.FormatInt(v.AsInt64(), 10)
case FLOAT64SLICE:
- return fmt.Sprint(v.AsFloat64Slice())
+ return fmt.Sprint(v.asFloat64Slice())
case FLOAT64:
return fmt.Sprint(v.AsFloat64())
case STRINGSLICE:
- return fmt.Sprint(v.AsStringSlice())
+ return fmt.Sprint(v.asStringSlice())
case STRING:
return v.stringly
default:
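The exported `As*Slice` accessors now check the value type and return nil on a mismatch (the panic fix referenced as #3489 in the changelog above), while internal callers switch to the unexported fast paths. A minimal sketch of the new behavior:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	v := attribute.Int64Slice("ids", []int64{1, 2, 3}).Value

	fmt.Println(v.AsInt64Slice())  // [1 2 3]: type matches
	fmt.Println(v.AsStringSlice()) // []: mismatched type now yields nil instead of panicking
}
```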
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
index 064a9279fd..587ebae4e3 100644
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -23,10 +23,20 @@ import (
const (
// Unset is the default status code.
Unset Code = 0
+
// Error indicates the operation contains an error.
+ //
+ // NOTE: The error code in OTLP is 2.
+ // The value of this enum is only relevant to the internals
+ // of the Go SDK.
Error Code = 1
+
// Ok indicates operation has been validated by an Application developers
// or Operator to have completed successfully, or contain no error.
+ //
+ // NOTE: The Ok code in OTLP is 1.
+ // The value of this enum is only relevant to the internals
+ // of the Go SDK.
Ok Code = 2
maxCode = 3
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go
index 5c8260ceb6..330a14c2f6 100644
--- a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go
@@ -22,6 +22,8 @@ import (
)
// InstrumentProvider provides access to individual instruments.
+//
+// Warning: methods may be added to this interface in minor releases.
type InstrumentProvider interface {
// Counter creates an instrument for recording increasing values.
Counter(name string, opts ...instrument.Option) (Counter, error)
@@ -34,9 +36,11 @@ type InstrumentProvider interface {
}
// Counter is an instrument that records increasing values.
+//
+// Warning: methods may be added to this interface in minor releases.
type Counter interface {
- // Observe records the state of the instrument to be x. The value of x is
- // assumed to be the exact Counter value to record.
+ // Observe records the state of the instrument to be x. Implementations
+ // will assume x to be the cumulative sum of the count.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
@@ -47,9 +51,11 @@ type Counter interface {
}
// UpDownCounter is an instrument that records increasing or decreasing values.
+//
+// Warning: methods may be added to this interface in minor releases.
type UpDownCounter interface {
- // Observe records the state of the instrument to be x. The value of x is
- // assumed to be the exact UpDownCounter value to record.
+ // Observe records the state of the instrument to be x. Implementations
+ // will assume x to be the cumulative sum of the count.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
@@ -60,6 +66,8 @@ type UpDownCounter interface {
}
// Gauge is an instrument that records independent readings.
+//
+// Warning: methods may be added to this interface in minor releases.
type Gauge interface {
// Observe records the state of the instrument to be x.
//
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go
index b07409c793..4fce9963c3 100644
--- a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go
@@ -22,6 +22,8 @@ import (
)
// InstrumentProvider provides access to individual instruments.
+//
+// Warning: methods may be added to this interface in minor releases.
type InstrumentProvider interface {
// Counter creates an instrument for recording increasing values.
Counter(name string, opts ...instrument.Option) (Counter, error)
@@ -34,9 +36,11 @@ type InstrumentProvider interface {
}
// Counter is an instrument that records increasing values.
+//
+// Warning: methods may be added to this interface in minor releases.
type Counter interface {
- // Observe records the state of the instrument to be x. The value of x is
- // assumed to be the exact Counter value to record.
+ // Observe records the state of the instrument to be x. Implementations
+ // will assume x to be the cumulative sum of the count.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
@@ -47,9 +51,11 @@ type Counter interface {
}
// UpDownCounter is an instrument that records increasing or decreasing values.
+//
+// Warning: methods may be added to this interface in minor releases.
type UpDownCounter interface {
- // Observe records the state of the instrument to be x. The value of x is
- // assumed to be the exact UpDownCounter value to record.
+ // Observe records the state of the instrument to be x. Implementations
+ // will assume x to be the cumulative sum of the count.
//
// It is only valid to call this within a callback. If called outside of the
// registered callback it should have no effect on the instrument, and an
@@ -60,6 +66,8 @@ type UpDownCounter interface {
}
// Gauge is an instrument that records independent readings.
+//
+// Warning: methods may be added to this interface in minor releases.
type Gauge interface {
// Observe records the state of the instrument to be x.
//
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go
index 435db1127b..2ec192f70d 100644
--- a/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go
@@ -22,6 +22,8 @@ import (
)
// InstrumentProvider provides access to individual instruments.
+//
+// Warning: methods may be added to this interface in minor releases.
type InstrumentProvider interface {
// Counter creates an instrument for recording increasing values.
Counter(name string, opts ...instrument.Option) (Counter, error)
@@ -32,6 +34,8 @@ type InstrumentProvider interface {
}
// Counter is an instrument that records increasing values.
+//
+// Warning: methods may be added to this interface in minor releases.
type Counter interface {
// Add records a change to the counter.
Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
@@ -40,6 +44,8 @@ type Counter interface {
}
// UpDownCounter is an instrument that records increasing or decreasing values.
+//
+// Warning: methods may be added to this interface in minor releases.
type UpDownCounter interface {
// Add records a change to the counter.
Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
@@ -48,6 +54,8 @@ type UpDownCounter interface {
}
// Histogram is an instrument that records a distribution of values.
+//
+// Warning: methods may be added to this interface in minor releases.
type Histogram interface {
// Record adds an additional value to the distribution.
Record(ctx context.Context, incr float64, attrs ...attribute.KeyValue)
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go
index c77a467286..03b5d53e63 100644
--- a/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go
@@ -22,6 +22,8 @@ import (
)
// InstrumentProvider provides access to individual instruments.
+//
+// Warning: methods may be added to this interface in minor releases.
type InstrumentProvider interface {
// Counter creates an instrument for recording increasing values.
Counter(name string, opts ...instrument.Option) (Counter, error)
@@ -32,6 +34,8 @@ type InstrumentProvider interface {
}
// Counter is an instrument that records increasing values.
+//
+// Warning: methods may be added to this interface in minor releases.
type Counter interface {
// Add records a change to the counter.
Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
@@ -40,6 +44,8 @@ type Counter interface {
}
// UpDownCounter is an instrument that records increasing or decreasing values.
+//
+// Warning: methods may be added to this interface in minor releases.
type UpDownCounter interface {
// Add records a change to the counter.
Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
@@ -48,6 +54,8 @@ type UpDownCounter interface {
}
// Histogram is an instrument that records a distribution of values.
+//
+// Warning: methods may be added to this interface in minor releases.
type Histogram interface {
// Record adds an additional value to the distribution.
Record(ctx context.Context, incr int64, attrs ...attribute.KeyValue)
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
index 21fc1c499f..23e6853afb 100644
--- a/vendor/go.opentelemetry.io/otel/metric/meter.go
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -26,6 +26,8 @@ import (
// MeterProvider provides access to named Meter instances, for instrumenting
// an application or library.
+//
+// Warning: methods may be added to this interface in minor releases.
type MeterProvider interface {
// Meter creates an instance of a `Meter` interface. The instrumentationName
// must be the name of the library providing instrumentation. This name may
@@ -36,6 +38,8 @@ type MeterProvider interface {
}
// Meter provides access to instrument instances for recording metrics.
+//
+// Warning: methods may be added to this interface in minor releases.
type Meter interface {
// AsyncInt64 is the namespace for the Asynchronous Integer instruments.
//
diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go
index 391417718f..ab0346f966 100644
--- a/vendor/go.opentelemetry.io/otel/trace/doc.go
+++ b/vendor/go.opentelemetry.io/otel/trace/doc.go
@@ -17,7 +17,7 @@ Package trace provides an implementation of the tracing part of the
OpenTelemetry API.
To participate in distributed traces a Span needs to be created for the
-operation being performed as part of a traced workflow. It its simplest form:
+operation being performed as part of a traced workflow. In its simplest form:
var tracer trace.Tracer
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index 942e484f84..00b79bcc25 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.11.1"
+ return "1.11.2"
}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index a2905787a5..611879def4 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -14,7 +14,7 @@
module-sets:
stable-v1:
- version: v1.11.1
+ version: v1.11.2
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opentracing
@@ -34,7 +34,7 @@ module-sets:
- go.opentelemetry.io/otel/trace
- go.opentelemetry.io/otel/sdk
experimental-metrics:
- version: v0.33.0
+ version: v0.34.0
modules:
- go.opentelemetry.io/otel/example/opencensus
- go.opentelemetry.io/otel/example/prometheus
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
index 6b7928ef75..01bed6c681 100644
--- a/vendor/golang.org/x/exp/slices/slices.go
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -128,6 +128,12 @@ func Contains[E comparable](s []E, v E) bool {
return Index(s, v) >= 0
}
+// ContainsFunc reports whether at least one
+// element e of s satisfies f(e).
+func ContainsFunc[E any](s []E, f func(E) bool) bool {
+ return IndexFunc(s, f) >= 0
+}
+
// Insert inserts the values v... into s at index i,
// returning the modified slice.
// In the returned slice r, r[i] == v[0].
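`ContainsFunc` is a thin convenience wrapper over `IndexFunc`. A minimal usage sketch with an arbitrary predicate:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/exp/slices"
)

func main() {
	names := []string{"alpha", "Beta", "gamma"}

	// True if at least one element satisfies the predicate, i.e.
	// IndexFunc(names, f) >= 0.
	hasUpper := slices.ContainsFunc(names, func(s string) bool {
		return s != strings.ToLower(s)
	})
	fmt.Println(hasUpper) // true
}
```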
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
index 6886dc163c..46219da2b0 100644
--- a/vendor/golang.org/x/net/http2/hpack/encode.go
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -116,6 +116,11 @@ func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
e.dynTab.setMaxSize(v)
}
+// MaxDynamicTableSize returns the current dynamic header table size.
+func (e *Encoder) MaxDynamicTableSize() (v uint32) {
+ return e.dynTab.maxSize
+}
+
// SetMaxDynamicTableSizeLimit changes the maximum value that can be
// specified in SetMaxDynamicTableSize to v. By default, it is set to
// 4096, which is the same size of the default dynamic header table
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index d8a17aa9b0..e35a76c07b 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -98,6 +98,19 @@ type Server struct {
// the HTTP/2 spec's recommendations.
MaxConcurrentStreams uint32
+ // MaxDecoderHeaderTableSize optionally specifies the http2
+ // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It
+ // informs the remote endpoint of the maximum size of the header compression
+ // table used to decode header blocks, in octets. If zero, the default value
+ // of 4096 is used.
+ MaxDecoderHeaderTableSize uint32
+
+ // MaxEncoderHeaderTableSize optionally specifies an upper limit for the
+ // header compression table used for encoding request headers. Received
+ // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero,
+ // the default value of 4096 is used.
+ MaxEncoderHeaderTableSize uint32
+
// MaxReadFrameSize optionally specifies the largest frame
// this server is willing to read. A valid value is between
// 16k and 16M, inclusive. If zero or otherwise invalid, a
@@ -170,6 +183,20 @@ func (s *Server) maxConcurrentStreams() uint32 {
return defaultMaxStreams
}
+func (s *Server) maxDecoderHeaderTableSize() uint32 {
+ if v := s.MaxDecoderHeaderTableSize; v > 0 {
+ return v
+ }
+ return initialHeaderTableSize
+}
+
+func (s *Server) maxEncoderHeaderTableSize() uint32 {
+ if v := s.MaxEncoderHeaderTableSize; v > 0 {
+ return v
+ }
+ return initialHeaderTableSize
+}
+
// maxQueuedControlFrames is the maximum number of control frames like
// SETTINGS, PING and RST_STREAM that will be queued for writing before
// the connection is closed to prevent memory exhaustion attacks.
@@ -394,7 +421,6 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
advMaxStreams: s.maxConcurrentStreams(),
initialStreamSendWindowSize: initialWindowSize,
maxFrameSize: initialMaxFrameSize,
- headerTableSize: initialHeaderTableSize,
serveG: newGoroutineLock(),
pushEnabled: true,
sawClientPreface: opts.SawClientPreface,
@@ -424,12 +450,13 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
sc.flow.add(initialWindowSize)
sc.inflow.add(initialWindowSize)
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
+ sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
fr := NewFramer(sc.bw, c)
if s.CountError != nil {
fr.countError = s.CountError
}
- fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+ fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
fr.MaxHeaderListSize = sc.maxHeaderListSize()
fr.SetMaxReadFrameSize(s.maxReadFrameSize())
sc.framer = fr
@@ -559,7 +586,6 @@ type serverConn struct {
streams map[uint32]*stream
initialStreamSendWindowSize int32
maxFrameSize int32
- headerTableSize uint32
peerMaxHeaderListSize uint32 // zero means unknown (default)
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
writingFrame bool // started writing a frame (on serve goroutine or separate)
@@ -864,6 +890,7 @@ func (sc *serverConn) serve() {
{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
{SettingMaxConcurrentStreams, sc.advMaxStreams},
{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+ {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
},
})
@@ -1661,7 +1688,6 @@ func (sc *serverConn) processSetting(s Setting) error {
}
switch s.ID {
case SettingHeaderTableSize:
- sc.headerTableSize = s.Val
sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
case SettingEnablePush:
sc.pushEnabled = s.Val != 0
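The server gains separate knobs for the HPACK decoder and encoder table sizes: the decoder size is advertised in the initial SETTINGS frame, and the encoder size caps whatever the peer later requests. A minimal configuration sketch; the table sizes and certificate paths are placeholders:

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}

	// MaxDecoderHeaderTableSize is sent to peers as SETTINGS_HEADER_TABLE_SIZE;
	// MaxEncoderHeaderTableSize bounds the table the server's own HPACK
	// encoder may grow to.
	if err := http2.ConfigureServer(srv, &http2.Server{
		MaxDecoderHeaderTableSize: 64 << 10,
		MaxEncoderHeaderTableSize: 64 << 10,
	}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
```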
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 46dda4dc31..30f706e6cb 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -118,6 +118,28 @@ type Transport struct {
// to mean no limit.
MaxHeaderListSize uint32
+ // MaxReadFrameSize is the http2 SETTINGS_MAX_FRAME_SIZE to send in the
+ // initial settings frame. It is the size in bytes of the largest frame
+ // payload that the sender is willing to receive. If 0, no setting is
+ // sent, and the value is provided by the peer, which should be 16384
+ // according to the spec:
+ // https://datatracker.ietf.org/doc/html/rfc7540#section-6.5.2.
+ // Values are bounded in the range 16k to 16M.
+ MaxReadFrameSize uint32
+
+ // MaxDecoderHeaderTableSize optionally specifies the http2
+ // SETTINGS_HEADER_TABLE_SIZE to send in the initial settings frame. It
+ // informs the remote endpoint of the maximum size of the header compression
+ // table used to decode header blocks, in octets. If zero, the default value
+ // of 4096 is used.
+ MaxDecoderHeaderTableSize uint32
+
+ // MaxEncoderHeaderTableSize optionally specifies an upper limit for the
+ // header compression table used for encoding request headers. Received
+ // SETTINGS_HEADER_TABLE_SIZE settings are capped at this limit. If zero,
+ // the default value of 4096 is used.
+ MaxEncoderHeaderTableSize uint32
+
// StrictMaxConcurrentStreams controls whether the server's
// SETTINGS_MAX_CONCURRENT_STREAMS should be respected
// globally. If false, new TCP connections are created to the
@@ -171,6 +193,19 @@ func (t *Transport) maxHeaderListSize() uint32 {
return t.MaxHeaderListSize
}
+func (t *Transport) maxFrameReadSize() uint32 {
+ if t.MaxReadFrameSize == 0 {
+ return 0 // use the default provided by the peer
+ }
+ if t.MaxReadFrameSize < minMaxFrameSize {
+ return minMaxFrameSize
+ }
+ if t.MaxReadFrameSize > maxFrameSize {
+ return maxFrameSize
+ }
+ return t.MaxReadFrameSize
+}
+
func (t *Transport) disableCompression() bool {
return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
}
@@ -293,10 +328,11 @@ type ClientConn struct {
lastActive time.Time
lastIdle time.Time // time last idle
// Settings from peer: (also guarded by wmu)
- maxFrameSize uint32
- maxConcurrentStreams uint32
- peerMaxHeaderListSize uint64
- initialWindowSize uint32
+ maxFrameSize uint32
+ maxConcurrentStreams uint32
+ peerMaxHeaderListSize uint64
+ peerMaxHeaderTableSize uint32
+ initialWindowSize uint32
// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
// Write to reqHeaderMu to lock it, read from it to unlock.
@@ -681,6 +717,20 @@ func (t *Transport) expectContinueTimeout() time.Duration {
return t.t1.ExpectContinueTimeout
}
+func (t *Transport) maxDecoderHeaderTableSize() uint32 {
+ if v := t.MaxDecoderHeaderTableSize; v > 0 {
+ return v
+ }
+ return initialHeaderTableSize
+}
+
+func (t *Transport) maxEncoderHeaderTableSize() uint32 {
+ if v := t.MaxEncoderHeaderTableSize; v > 0 {
+ return v
+ }
+ return initialHeaderTableSize
+}
+
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
return t.newClientConn(c, t.disableKeepAlives())
}
@@ -721,15 +771,19 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
})
cc.br = bufio.NewReader(c)
cc.fr = NewFramer(cc.bw, cc.br)
+ if t.maxFrameReadSize() != 0 {
+ cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize())
+ }
if t.CountError != nil {
cc.fr.countError = t.CountError
}
- cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+ maxHeaderTableSize := t.maxDecoderHeaderTableSize()
+ cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil)
cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
- // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
- // henc in response to SETTINGS frames?
cc.henc = hpack.NewEncoder(&cc.hbuf)
+ cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize())
+ cc.peerMaxHeaderTableSize = initialHeaderTableSize
if t.AllowHTTP {
cc.nextStreamID = 3
@@ -744,9 +798,15 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
{ID: SettingEnablePush, Val: 0},
{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
}
+ if max := t.maxFrameReadSize(); max != 0 {
+ initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max})
+ }
if max := t.maxHeaderListSize(); max != 0 {
initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
}
+ if maxHeaderTableSize != initialHeaderTableSize {
+ initialSettings = append(initialSettings, Setting{ID: SettingHeaderTableSize, Val: maxHeaderTableSize})
+ }
cc.bw.Write(clientPreface)
cc.fr.WriteSettings(initialSettings...)
@@ -2773,8 +2833,10 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
cc.cond.Broadcast()
cc.initialWindowSize = s.Val
+ case SettingHeaderTableSize:
+ cc.henc.SetMaxDynamicTableSize(s.Val)
+ cc.peerMaxHeaderTableSize = s.Val
default:
- // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
cc.vlogf("Unhandled Setting: %v", s)
}
return nil
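The client side mirrors the server changes and additionally lets callers advertise `SETTINGS_MAX_FRAME_SIZE` through `MaxReadFrameSize` (clamped to the 16k to 16M range), while a peer's `SETTINGS_HEADER_TABLE_SIZE` now updates the HPACK encoder. A minimal configuration sketch; the sizes and URL are placeholders:

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	tr := &http2.Transport{
		MaxReadFrameSize:          1 << 20,  // sent as SETTINGS_MAX_FRAME_SIZE
		MaxDecoderHeaderTableSize: 64 << 10, // sent as SETTINGS_HEADER_TABLE_SIZE
		MaxEncoderHeaderTableSize: 64 << 10, // upper bound for the client's HPACK encoder table
	}
	client := &http.Client{Transport: tr}

	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Proto, resp.Status)
}
```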
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 7a6ba43a7e..a49853e9d3 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -367,6 +367,7 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys IsWindowUnicode(hwnd HWND) (isUnicode bool) = user32.IsWindowUnicode
//sys IsWindowVisible(hwnd HWND) (isVisible bool) = user32.IsWindowVisible
//sys GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) = user32.GetGUIThreadInfo
+//sys GetLargePageMinimum() (size uintptr)
// Volume Management Functions
//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 96ba8559c3..ac60052e44 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -252,6 +252,7 @@ var (
procGetFileType = modkernel32.NewProc("GetFileType")
procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW")
procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW")
+ procGetLargePageMinimum = modkernel32.NewProc("GetLargePageMinimum")
procGetLastError = modkernel32.NewProc("GetLastError")
procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW")
procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives")
@@ -2180,6 +2181,12 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (
return
}
+func GetLargePageMinimum() (size uintptr) {
+ r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0)
+ size = uintptr(r0)
+ return
+}
+
func GetLastError() (lasterr error) {
r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0)
if r0 != 0 {
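`GetLargePageMinimum` is generated as a plain wrapper over the kernel32 export and has no error return. A minimal sketch:

```go
//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Reports the minimum large-page size in bytes, or 0 if the processor
	// does not support large pages.
	fmt.Println("large page minimum:", windows.GetLargePageMinimum())
}
```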
diff --git a/vendor/golang.org/x/text/unicode/bidi/trieval.go b/vendor/golang.org/x/text/unicode/bidi/trieval.go
index 4c459c4b72..6a796e2214 100644
--- a/vendor/golang.org/x/text/unicode/bidi/trieval.go
+++ b/vendor/golang.org/x/text/unicode/bidi/trieval.go
@@ -37,18 +37,6 @@ const (
unknownClass = ^Class(0)
)
-var controlToClass = map[rune]Class{
- 0x202D: LRO, // LeftToRightOverride,
- 0x202E: RLO, // RightToLeftOverride,
- 0x202A: LRE, // LeftToRightEmbedding,
- 0x202B: RLE, // RightToLeftEmbedding,
- 0x202C: PDF, // PopDirectionalFormat,
- 0x2066: LRI, // LeftToRightIsolate,
- 0x2067: RLI, // RightToLeftIsolate,
- 0x2068: FSI, // FirstStrongIsolate,
- 0x2069: PDI, // PopDirectionalIsolate,
-}
-
// A trie entry has the following bits:
// 7..5 XOR mask for brackets
// 4 1: Bracket open, 0: Bracket close
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index 8f7c29f156..f0e0cf3cb1 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -83,7 +83,7 @@ func (lim *Limiter) Burst() int {
// TokensAt returns the number of tokens available at time t.
func (lim *Limiter) TokensAt(t time.Time) float64 {
lim.mu.Lock()
- _, _, tokens := lim.advance(t) // does not mutute lim
+ _, tokens := lim.advance(t) // does not mutate lim
lim.mu.Unlock()
return tokens
}
@@ -183,7 +183,7 @@ func (r *Reservation) CancelAt(t time.Time) {
return
}
// advance time to now
- t, _, tokens := r.lim.advance(t)
+ t, tokens := r.lim.advance(t)
// calculate new number of tokens
tokens += restoreTokens
if burst := float64(r.lim.burst); tokens > burst {
@@ -304,7 +304,7 @@ func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) {
lim.mu.Lock()
defer lim.mu.Unlock()
- t, _, tokens := lim.advance(t)
+ t, tokens := lim.advance(t)
lim.last = t
lim.tokens = tokens
@@ -321,7 +321,7 @@ func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) {
lim.mu.Lock()
defer lim.mu.Unlock()
- t, _, tokens := lim.advance(t)
+ t, tokens := lim.advance(t)
lim.last = t
lim.tokens = tokens
@@ -356,7 +356,7 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
}
}
- t, last, tokens := lim.advance(t)
+ t, tokens := lim.advance(t)
// Calculate the remaining number of tokens resulting from the request.
tokens -= float64(n)
@@ -379,15 +379,11 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
if ok {
r.tokens = n
r.timeToAct = t.Add(waitDuration)
- }
- // Update state
- if ok {
+ // Update state
lim.last = t
lim.tokens = tokens
lim.lastEvent = r.timeToAct
- } else {
- lim.last = last
}
return r
@@ -396,7 +392,7 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
// advance calculates and returns an updated state for lim resulting from the passage of time.
// lim is not changed.
// advance requires that lim.mu is held.
-func (lim *Limiter) advance(t time.Time) (newT time.Time, newLast time.Time, newTokens float64) {
+func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) {
last := lim.last
if t.Before(last) {
last = t
@@ -409,7 +405,7 @@ func (lim *Limiter) advance(t time.Time) (newT time.Time, newLast time.Time, new
if burst := float64(lim.burst); tokens > burst {
tokens = burst
}
- return t, last, tokens
+ return t, tokens
}
// durationFromTokens is a unit conversion function from the number of tokens to the duration
diff --git a/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go b/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go
new file mode 100644
index 0000000000..9fb745926a
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/iam/v1/alias.go
@@ -0,0 +1,208 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by aliasgen. DO NOT EDIT.
+
+// Package iam aliases all exported identifiers in package
+// "cloud.google.com/go/iam/apiv1/iampb".
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb.
+// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md
+// for more details.
+package iam
+
+import (
+ src "cloud.google.com/go/iam/apiv1/iampb"
+ grpc "google.golang.org/grpc"
+)
+
+// Deprecated: Please use consts in: cloud.google.com/go/iam/apiv1/iampb
+const (
+ AuditConfigDelta_ACTION_UNSPECIFIED = src.AuditConfigDelta_ACTION_UNSPECIFIED
+ AuditConfigDelta_ADD = src.AuditConfigDelta_ADD
+ AuditConfigDelta_REMOVE = src.AuditConfigDelta_REMOVE
+ AuditLogConfig_ADMIN_READ = src.AuditLogConfig_ADMIN_READ
+ AuditLogConfig_DATA_READ = src.AuditLogConfig_DATA_READ
+ AuditLogConfig_DATA_WRITE = src.AuditLogConfig_DATA_WRITE
+ AuditLogConfig_LOG_TYPE_UNSPECIFIED = src.AuditLogConfig_LOG_TYPE_UNSPECIFIED
+ BindingDelta_ACTION_UNSPECIFIED = src.BindingDelta_ACTION_UNSPECIFIED
+ BindingDelta_ADD = src.BindingDelta_ADD
+ BindingDelta_REMOVE = src.BindingDelta_REMOVE
+)
+
+// Deprecated: Please use vars in: cloud.google.com/go/iam/apiv1/iampb
+var (
+ AuditConfigDelta_Action_name = src.AuditConfigDelta_Action_name
+ AuditConfigDelta_Action_value = src.AuditConfigDelta_Action_value
+ AuditLogConfig_LogType_name = src.AuditLogConfig_LogType_name
+ AuditLogConfig_LogType_value = src.AuditLogConfig_LogType_value
+ BindingDelta_Action_name = src.BindingDelta_Action_name
+ BindingDelta_Action_value = src.BindingDelta_Action_value
+ File_google_iam_v1_iam_policy_proto = src.File_google_iam_v1_iam_policy_proto
+ File_google_iam_v1_options_proto = src.File_google_iam_v1_options_proto
+ File_google_iam_v1_policy_proto = src.File_google_iam_v1_policy_proto
+)
+
+// Specifies the audit configuration for a service. The configuration
+// determines which permission types are logged, and what identities, if any,
+// are exempted from logging. An AuditConfig must have one or more
+// AuditLogConfigs. If there are AuditConfigs for both `allServices` and a
+// specific service, the union of the two AuditConfigs is used for that
+// service: the log_types specified in each AuditConfig are enabled, and the
+// exempted_members in each AuditLogConfig are exempted. Example Policy with
+// multiple AuditConfigs: { "audit_configs": [ { "service": "allServices",
+// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
+// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" }, { "log_type":
+// "ADMIN_READ" } ] }, { "service": "sampleservice.googleapis.com",
+// "audit_log_configs": [ { "log_type": "DATA_READ" }, { "log_type":
+// "DATA_WRITE", "exempted_members": [ "user:aliya@example.com" ] } ] } ] } For
+// sampleservice, this policy enables DATA_READ, DATA_WRITE and ADMIN_READ
+// logging. It also exempts jose@example.com from DATA_READ logging, and
+// aliya@example.com from DATA_WRITE logging.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type AuditConfig = src.AuditConfig
+
+// One delta entry for AuditConfig. Each individual change (only one
+// exempted_member in each entry) to a AuditConfig will be a separate entry.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type AuditConfigDelta = src.AuditConfigDelta
+
+// The type of action performed on an audit configuration in a policy.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type AuditConfigDelta_Action = src.AuditConfigDelta_Action
+
+// Provides the configuration for logging a type of permissions. Example: {
+// "audit_log_configs": [ { "log_type": "DATA_READ", "exempted_members": [
+// "user:jose@example.com" ] }, { "log_type": "DATA_WRITE" } ] } This enables
+// 'DATA_READ' and 'DATA_WRITE' logging, while exempting jose@example.com from
+// DATA_READ logging.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type AuditLogConfig = src.AuditLogConfig
+
+// The list of valid permission types for which logging can be configured.
+// Admin writes are always logged, and are not configurable.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type AuditLogConfig_LogType = src.AuditLogConfig_LogType
+
+// Associates `members`, or principals, with a `role`.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type Binding = src.Binding
+
+// One delta entry for Binding. Each individual change (only one member in
+// each entry) to a binding will be a separate entry.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type BindingDelta = src.BindingDelta
+
+// The type of action performed on a Binding in a policy.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type BindingDelta_Action = src.BindingDelta_Action
+
+// Request message for `GetIamPolicy` method.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type GetIamPolicyRequest = src.GetIamPolicyRequest
+
+// Encapsulates settings provided to GetIamPolicy.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type GetPolicyOptions = src.GetPolicyOptions
+
+// IAMPolicyClient is the client API for IAMPolicy service. For semantics
+// around ctx use and closing/ending streaming RPCs, please refer to
+// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type IAMPolicyClient = src.IAMPolicyClient
+
+// IAMPolicyServer is the server API for IAMPolicy service.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type IAMPolicyServer = src.IAMPolicyServer
+
+// An Identity and Access Management (IAM) policy, which specifies access
+// controls for Google Cloud resources. A `Policy` is a collection of
+// `bindings`. A `binding` binds one or more `members`, or principals, to a
+// single `role`. Principals can be user accounts, service accounts, Google
+// groups, and domains (such as G Suite). A `role` is a named list of
+// permissions; each `role` can be an IAM predefined role or a user-created
+// custom role. For some types of Google Cloud resources, a `binding` can also
+// specify a `condition`, which is a logical expression that allows access to a
+// resource only if the expression evaluates to `true`. A condition can add
+// constraints based on attributes of the request, the resource, or both. To
+// learn which resources support conditions in their IAM policies, see the [IAM
+// documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+// **JSON example:** { "bindings": [ { "role":
+// "roles/resourcemanager.organizationAdmin", "members": [
+// "user:mike@example.com", "group:admins@example.com", "domain:google.com",
+// "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role":
+// "roles/resourcemanager.organizationViewer", "members": [
+// "user:eve@example.com" ], "condition": { "title": "expirable access",
+// "description": "Does not grant access after Sep 2020", "expression":
+// "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
+// "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: -
+// user:mike@example.com - group:admins@example.com - domain:google.com -
+// serviceAccount:my-project-id@appspot.gserviceaccount.com role:
+// roles/resourcemanager.organizationAdmin - members: - user:eve@example.com
+// role: roles/resourcemanager.organizationViewer condition: title: expirable
+// access description: Does not grant access after Sep 2020 expression:
+// request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
+// version: 3 For a description of IAM and its features, see the [IAM
+// documentation](https://cloud.google.com/iam/docs/).
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type Policy = src.Policy
+
+// The difference delta between two policies.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type PolicyDelta = src.PolicyDelta
+
+// Request message for `SetIamPolicy` method.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type SetIamPolicyRequest = src.SetIamPolicyRequest
+
+// Request message for `TestIamPermissions` method.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type TestIamPermissionsRequest = src.TestIamPermissionsRequest
+
+// Response message for `TestIamPermissions` method.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type TestIamPermissionsResponse = src.TestIamPermissionsResponse
+
+// UnimplementedIAMPolicyServer can be embedded to have forward compatible
+// implementations.
+//
+// Deprecated: Please use types in: cloud.google.com/go/iam/apiv1/iampb
+type UnimplementedIAMPolicyServer = src.UnimplementedIAMPolicyServer
+
+// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb
+func NewIAMPolicyClient(cc grpc.ClientConnInterface) IAMPolicyClient {
+ return src.NewIAMPolicyClient(cc)
+}
+
+// Deprecated: Please use funcs in: cloud.google.com/go/iam/apiv1/iampb
+func RegisterIAMPolicyServer(s *grpc.Server, srv IAMPolicyServer) {
+ src.RegisterIAMPolicyServer(s, srv)
+}
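The aliases above exist only so that code still importing google.golang.org/genproto/googleapis/iam/v1 keeps compiling; the deprecation notices point new code at cloud.google.com/go/iam/apiv1/iampb directly. A minimal sketch of what a caller looks like after switching imports; the gRPC endpoint and resource name are placeholder assumptions, not part of this change:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/iam/apiv1/iampb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Dial a hypothetical service that exposes the IAMPolicy mixin (placeholder address).
	conn, err := grpc.Dial("localhost:8080", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// Constructor, request and response types now come from iampb rather than
	// the deprecated google.golang.org/genproto/googleapis/iam/v1 package.
	client := iampb.NewIAMPolicyClient(conn)
	policy, err := client.GetIamPolicy(context.Background(), &iampb.GetIamPolicyRequest{
		Resource: "projects/my-project/buckets/my-bucket", // placeholder resource name
	})
	if err != nil {
		log.Fatalf("GetIamPolicy: %v", err)
	}
	for _, b := range policy.GetBindings() {
		log.Printf("role=%s members=%v", b.GetRole(), b.GetMembers())
	}
}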
diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go
deleted file mode 100644
index d10ad66533..0000000000
--- a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package field_mask aliases all exported identifiers in
-// package "google.golang.org/protobuf/types/known/fieldmaskpb".
-package field_mask
-
-import "google.golang.org/protobuf/types/known/fieldmaskpb"
-
-type FieldMask = fieldmaskpb.FieldMask
-
-var File_google_protobuf_field_mask_proto = fieldmaskpb.File_google_protobuf_field_mask_proto
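With the genproto field_mask alias package dropped from the vendor tree, any remaining callers are expected to use google.golang.org/protobuf/types/known/fieldmaskpb directly. A minimal sketch under that assumption; the mask paths are illustrative placeholders:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	// field_mask.FieldMask was only an alias for this type; constructing it
	// directly removes the extra genproto dependency.
	mask := &fieldmaskpb.FieldMask{
		Paths: []string{"metadata.labels", "spec.replicas"}, // placeholder field paths
	}
	fmt.Println(mask.GetPaths())
}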
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 6938b6c223..f82248394d 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -4,16 +4,17 @@ cloud.google.com/go/internal
cloud.google.com/go/internal/optional
cloud.google.com/go/internal/trace
cloud.google.com/go/internal/version
-# cloud.google.com/go/compute v1.12.1
+# cloud.google.com/go/compute v1.14.0
## explicit; go 1.19
cloud.google.com/go/compute/internal
-# cloud.google.com/go/compute/metadata v0.2.1
+# cloud.google.com/go/compute/metadata v0.2.2
## explicit; go 1.19
cloud.google.com/go/compute/metadata
-# cloud.google.com/go/iam v0.7.0
+# cloud.google.com/go/iam v0.8.0
## explicit; go 1.19
cloud.google.com/go/iam
-# cloud.google.com/go/storage v1.28.0
+cloud.google.com/go/iam/apiv1/iampb
+# cloud.google.com/go/storage v1.28.1
## explicit; go 1.19
cloud.google.com/go/storage
cloud.google.com/go/storage/internal
@@ -69,7 +70,7 @@ github.com/VictoriaMetrics/fasthttp/stackless
# github.com/VictoriaMetrics/metrics v1.23.0
## explicit; go 1.15
github.com/VictoriaMetrics/metrics
-# github.com/VictoriaMetrics/metricsql v0.49.1
+# github.com/VictoriaMetrics/metricsql v0.50.0
## explicit; go 1.13
github.com/VictoriaMetrics/metricsql
github.com/VictoriaMetrics/metricsql/binaryop
@@ -79,7 +80,7 @@ github.com/VividCortex/ewma
# github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
## explicit; go 1.15
github.com/alecthomas/units
-# github.com/aws/aws-sdk-go v1.44.149
+# github.com/aws/aws-sdk-go v1.44.153
## explicit; go 1.11
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/awserr
@@ -121,7 +122,7 @@ github.com/aws/aws-sdk-go/service/sso
github.com/aws/aws-sdk-go/service/sso/ssoiface
github.com/aws/aws-sdk-go/service/sts
github.com/aws/aws-sdk-go/service/sts/stsiface
-# github.com/aws/aws-sdk-go-v2 v1.17.1
+# github.com/aws/aws-sdk-go-v2 v1.17.2
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2
github.com/aws/aws-sdk-go-v2/aws
@@ -143,14 +144,14 @@ github.com/aws/aws-sdk-go-v2/internal/sdkio
github.com/aws/aws-sdk-go-v2/internal/strings
github.com/aws/aws-sdk-go-v2/internal/sync/singleflight
github.com/aws/aws-sdk-go-v2/internal/timeconv
-# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.9
+# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi
-# github.com/aws/aws-sdk-go-v2/config v1.18.3
+# github.com/aws/aws-sdk-go-v2/config v1.18.4
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/config
-# github.com/aws/aws-sdk-go-v2/credentials v1.13.3
+# github.com/aws/aws-sdk-go-v2/credentials v1.13.4
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/credentials
github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds
@@ -159,64 +160,64 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client
github.com/aws/aws-sdk-go-v2/credentials/processcreds
github.com/aws/aws-sdk-go-v2/credentials/ssocreds
github.com/aws/aws-sdk-go-v2/credentials/stscreds
-# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19
+# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/feature/ec2/imds
github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config
-# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.42
+# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.43
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/feature/s3/manager
-# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25
+# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/internal/configsources
-# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19
+# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2
-# github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26
+# github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/internal/ini
-# github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.16
+# github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.17
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/internal/v4a
github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto
github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4
-# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.10
+# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding
-# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.20
+# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.21
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/service/internal/checksum
-# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19
+# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url
-# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.19
+# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.20
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/service/internal/s3shared
github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn
github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config
-# github.com/aws/aws-sdk-go-v2/service/s3 v1.29.4
+# github.com/aws/aws-sdk-go-v2/service/s3 v1.29.5
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/service/s3
github.com/aws/aws-sdk-go-v2/service/s3/internal/arn
github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations
github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints
github.com/aws/aws-sdk-go-v2/service/s3/types
-# github.com/aws/aws-sdk-go-v2/service/sso v1.11.25
+# github.com/aws/aws-sdk-go-v2/service/sso v1.11.26
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/service/sso
github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints
github.com/aws/aws-sdk-go-v2/service/sso/types
-# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8
+# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/service/ssooidc
github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints
github.com/aws/aws-sdk-go-v2/service/ssooidc/types
-# github.com/aws/aws-sdk-go-v2/service/sts v1.17.5
+# github.com/aws/aws-sdk-go-v2/service/sts v1.17.6
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/service/sts
github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints
github.com/aws/aws-sdk-go-v2/service/sts/types
-# github.com/aws/smithy-go v1.13.4
+# github.com/aws/smithy-go v1.13.5
## explicit; go 1.15
github.com/aws/smithy-go
github.com/aws/smithy-go/auth/bearer
@@ -240,7 +241,7 @@ github.com/aws/smithy-go/waiter
# github.com/beorn7/perks v1.0.1
## explicit; go 1.11
github.com/beorn7/perks/quantile
-# github.com/cespare/xxhash/v2 v2.1.2
+# github.com/cespare/xxhash/v2 v2.2.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
# github.com/cheggaaa/pb/v3 v3.1.0
@@ -296,7 +297,6 @@ github.com/golang/protobuf/proto
github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
-github.com/golang/protobuf/ptypes/empty
github.com/golang/protobuf/ptypes/timestamp
# github.com/golang/snappy v0.0.4
## explicit
@@ -398,7 +398,7 @@ github.com/prometheus/common/sigv4
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
-# github.com/prometheus/prometheus v0.40.4
+# github.com/prometheus/prometheus v0.40.5
## explicit; go 1.18
github.com/prometheus/prometheus/config
github.com/prometheus/prometheus/discovery
@@ -443,7 +443,7 @@ github.com/russross/blackfriday/v2
## explicit; go 1.13
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
-# github.com/urfave/cli/v2 v2.23.5
+# github.com/urfave/cli/v2 v2.23.6
## explicit; go 1.18
github.com/urfave/cli/v2
# github.com/valyala/bytebufferpool v1.0.0
@@ -490,10 +490,10 @@ go.opencensus.io/trace
go.opencensus.io/trace/internal
go.opencensus.io/trace/propagation
go.opencensus.io/trace/tracestate
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.36.4
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0
## explicit; go 1.18
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
-# go.opentelemetry.io/otel v1.11.1
+# go.opentelemetry.io/otel v1.11.2
## explicit; go 1.18
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
@@ -506,7 +506,7 @@ go.opentelemetry.io/otel/internal/global
go.opentelemetry.io/otel/propagation
go.opentelemetry.io/otel/semconv/internal
go.opentelemetry.io/otel/semconv/v1.12.0
-# go.opentelemetry.io/otel/metric v0.33.0
+# go.opentelemetry.io/otel/metric v0.34.0
## explicit; go 1.18
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/global
@@ -517,7 +517,7 @@ go.opentelemetry.io/otel/metric/instrument/syncfloat64
go.opentelemetry.io/otel/metric/instrument/syncint64
go.opentelemetry.io/otel/metric/internal/global
go.opentelemetry.io/otel/metric/unit
-# go.opentelemetry.io/otel/trace v1.11.1
+# go.opentelemetry.io/otel/trace v1.11.2
## explicit; go 1.18
go.opentelemetry.io/otel/trace
# go.uber.org/atomic v1.10.0
@@ -527,11 +527,11 @@ go.uber.org/atomic
## explicit; go 1.18
go.uber.org/goleak
go.uber.org/goleak/internal/stack
-# golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9
+# golang.org/x/exp v0.0.0-20221205204356-47842c84f3db
## explicit; go 1.18
golang.org/x/exp/constraints
golang.org/x/exp/slices
-# golang.org/x/net v0.2.0
+# golang.org/x/net v0.3.0
## explicit; go 1.17
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
@@ -556,18 +556,18 @@ golang.org/x/oauth2/jwt
# golang.org/x/sync v0.1.0
## explicit
golang.org/x/sync/errgroup
-# golang.org/x/sys v0.2.0
+# golang.org/x/sys v0.3.0
## explicit; go 1.17
golang.org/x/sys/internal/unsafeheader
golang.org/x/sys/unix
golang.org/x/sys/windows
-# golang.org/x/text v0.4.0
+# golang.org/x/text v0.5.0
## explicit; go 1.17
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/time v0.2.0
+# golang.org/x/time v0.3.0
## explicit
golang.org/x/time/rate
# golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2
@@ -607,7 +607,7 @@ google.golang.org/appengine/internal/socket
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/socket
google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6
+# google.golang.org/genproto v0.0.0-20221205194025-8222ab48f5fc
## explicit; go 1.19
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
@@ -617,7 +617,6 @@ google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
google.golang.org/genproto/googleapis/type/date
google.golang.org/genproto/googleapis/type/expr
-google.golang.org/genproto/protobuf/field_mask
# google.golang.org/grpc v1.51.0
## explicit; go 1.17
google.golang.org/grpc