Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2025-02-19 15:30:17 +00:00)

Commit 64e2c017ff: Merge branch 'master' into get-vllogs-tenants

313 changed files with 4222 additions and 1803 deletions
Changed directories and files:

app/
  vlinsert/insertutils, vlogscli, vmselect/prometheus
  vmui/packages/vmui/src/
    api, components (Chart/BarHitsChart, Configurators/GraphSettings, LogsConfigurators/GroupLogsConfigurators, Main, Table, Views/GraphView),
    hooks, pages (CustomPanel, ExploreLogs, PredefinedPanels/PredefinedPanel, QueryAnalyzer/QueryAnalyzerView), state/graph, utils/uplot
dashboards/
  victorialogs.json, victoriametrics-cluster.json, victoriametrics.json, vmagent.json, vmalert.json, vmauth.json
  vm/ (victorialogs.json, victoriametrics-cluster.json, victoriametrics.json, vmagent.json, vmalert.json, vmauth.json)
deployment/docker/
  docker-compose-cluster.yml, docker-compose-victorialogs.yml, docker-compose.yml, victorialogs/, vmanomaly/vmanomaly-integration/
docs/
  Articles.md, BestPractices.md, LTS-releases.md, Quick-Start.md, VictoriaLogs/, anomaly-detection/, changelog/, enterprise.md,
  guides/grafana-vmgateway-openid-configuration, scrape_config_examples.md, victoriametrics-cloud/, victoriametrics-datasource.md, vmauth.md
go.mod, go.sum
lib/
  bufferedwriter, cgroup, filestream, fs, logstorage, storage
vendor/github.com/Azure/azure-sdk-for-go/sdk/
  azidentity/ (BREAKING_CHANGES.md, CHANGELOG.md, README.md, TOKEN_CACHING.MD, TROUBLESHOOTING.md, azidentity.go, azure_cli_credential.go,
    azure_developer_cli_credential.go, chained_token_credential.go, ci.yml, confidential_client.go, default_azure_credential.go,
    device_code_credential.go, interactive_browser_credential.go, managed_identity_client.go, public_client.go, test-resources-post.ps1, version.go)
  storage/azblob/
@@ -199,8 +199,8 @@ func (lmp *logMessageProcessor) AddRow(timestamp int64, fields, streamFields []l
     lmp.bytesIngestedTotal.Add(n)

     if len(fields) > *MaxFieldsPerLine {
-        rf := logstorage.RowFormatter(fields)
-        logger.Warnf("dropping log line with %d fields; it exceeds -insert.maxFieldsPerLine=%d; %s", len(fields), *MaxFieldsPerLine, rf)
+        line := logstorage.MarshalFieldsToJSON(nil, fields)
+        logger.Warnf("dropping log line with %d fields; it exceeds -insert.maxFieldsPerLine=%d; %s", len(fields), *MaxFieldsPerLine, line)
         rowsDroppedTotalTooManyFields.Inc()
         return
     }
@@ -8,8 +8,10 @@ import (

 var (
     // MaxLineSizeBytes is the maximum length of a single line for /insert/* handlers
-    MaxLineSizeBytes = flagutil.NewBytes("insert.maxLineSizeBytes", 256*1024, "The maximum size of a single line, which can be read by /insert/* handlers")
+    MaxLineSizeBytes = flagutil.NewBytes("insert.maxLineSizeBytes", 256*1024, "The maximum size of a single line, which can be read by /insert/* handlers; "+
+        "see https://docs.victoriametrics.com/victorialogs/faq/#what-length-a-log-record-is-expected-to-have")

     // MaxFieldsPerLine is the maximum number of fields per line for /insert/* handlers
-    MaxFieldsPerLine = flag.Int("insert.maxFieldsPerLine", 1000, "The maximum number of log fields per line, which can be read by /insert/* handlers")
+    MaxFieldsPerLine = flag.Int("insert.maxFieldsPerLine", 1000, "The maximum number of log fields per line, which can be read by /insert/* handlers; "+
+        "see https://docs.victoriametrics.com/victorialogs/faq/#how-many-fields-a-single-log-entry-may-contain")
 )
@@ -270,7 +270,7 @@ func printCommandsHelp(w io.Writer) {
 \h - show this help
 \s - singleline json output mode
 \m - multiline json output mode
-\c - compact output
+\c - compact output mode
 \logfmt - logfmt output mode
 \wrap_long_lines - toggles wrapping long lines
 \tail <query> - live tail <query> results
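For illustration only, a minimal sketch of how the output-mode commands listed above might be used in an interactive vlogscli session; the query is an arbitrary assumption, not taken from this commit:

```sh
# inside an interactive vlogscli session (illustrative)
\c                     # switch to compact output mode
\logfmt                # switch to logfmt output mode
\tail _time:5m error   # live tail of logs matching "error" over the last 5 minutes
```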
@@ -29,6 +29,7 @@ import (
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
     "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 )
@@ -142,10 +143,13 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request
         WriteFederate(bb, rs)
         return sw.maybeFlushBuffer(bb)
     })
-    if err != nil {
+    if err == nil {
+        err = sw.flush()
+    }
+    if err != nil && !netutil.IsTrivialNetworkError(err) {
         return fmt.Errorf("error during sending data to remote client: %w", err)
     }
-    return sw.flush()
+    return nil
 }

 var federateDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/federate"}`)
@@ -226,10 +230,13 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
         }()
     }
     err = <-doneCh
-    if err != nil {
+    if err == nil {
+        err = sw.flush()
+    }
+    if err != nil && !netutil.IsTrivialNetworkError(err) {
         return fmt.Errorf("error during sending the exported csv data to remote client: %w", err)
     }
-    return sw.flush()
+    return nil
 }

 var exportCSVDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export/csv"}`)
@@ -281,10 +288,13 @@ func ExportNativeHandler(startTime time.Time, w http.ResponseWriter, r *http.Req
         bb.B = dst
         return sw.maybeFlushBuffer(bb)
     })
-    if err != nil {
+    if err == nil {
+        err = sw.flush()
+    }
+    if err != nil && !netutil.IsTrivialNetworkError(err) {
         return fmt.Errorf("error during sending native data to remote client: %w", err)
     }
-    return sw.flush()
+    return nil
 }

 var exportNativeDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export/native"}`)
@@ -441,16 +451,19 @@ func exportHandler(qt *querytracer.Tracer, w http.ResponseWriter, cp *commonPara
         }()
     }
     err := <-doneCh
-    if err != nil {
+    if err == nil {
+        err = sw.flush()
+    }
+    if err == nil {
+        if format == "promapi" {
+            WriteExportPromAPIFooter(bw, qt)
+        }
+        err = bw.Flush()
+    }
+    if err != nil && !netutil.IsTrivialNetworkError(err) {
         return fmt.Errorf("cannot send data to remote client: %w", err)
     }
-    if err := sw.flush(); err != nil {
-        return fmt.Errorf("cannot send data to remote client: %w", err)
-    }
-    if format == "promapi" {
-        WriteExportPromAPIFooter(bw, qt)
-    }
-    return bw.Flush()
+    return nil
 }

 type exportBlock struct {
@@ -46,7 +46,7 @@ export interface Logs {
 export interface LogHits {
   timestamps: string[];
   values: number[];
-  total?: number;
+  total: number;
   fields: { [key: string]: string; };
   _isOther: boolean;
 }
@@ -122,7 +122,7 @@ const BarHitsTooltip: FC<Props> = ({ data, focusDataIdx, uPlotInst }) => {
       </div>
     )}
     <div className="vm-chart-tooltip-header">
-      <div className="vm-chart-tooltip-header__title">
+      <div className="vm-chart-tooltip-header__title vm-bar-hits-tooltip__date">
         {tooltipData.timestamp}
       </div>
     </div>
@@ -24,4 +24,8 @@
     white-space: nowrap;
   }
-}
+
+  &__date {
+    white-space: nowrap;
+  }
+}
@@ -76,7 +76,7 @@ const useBarHitsOptions = ({
       width: strokeWidth[graphOptions.graphStyle],
       spanGaps: true,
       stroke: color,
-      fill: graphOptions.fill ? color + "80" : "",
+      fill: graphOptions.fill ? color + (target?._isOther ? "" : "80") : "",
       paths: getSeriesPaths(graphOptions.graphStyle),
     };
   });
@ -36,35 +36,40 @@ const AxesLimitsConfigurator: FC<AxesLimitsConfiguratorProps> = ({ yaxis, setYax
|
|||
"vm-axes-limits_mobile": isMobile
|
||||
})}
|
||||
>
|
||||
<Switch
|
||||
value={yaxis.limits.enable}
|
||||
onChange={toggleEnableLimits}
|
||||
label="Fix the limits for y-axis"
|
||||
fullWidth={isMobile}
|
||||
/>
|
||||
<div className="vm-axes-limits-list">
|
||||
{axes.map(axis => (
|
||||
<div
|
||||
className="vm-axes-limits-list__inputs"
|
||||
key={axis}
|
||||
>
|
||||
<TextField
|
||||
label={`Min ${axis}`}
|
||||
type="number"
|
||||
disabled={!yaxis.limits.enable}
|
||||
value={yaxis.limits.range[axis][0]}
|
||||
onChange={createHandlerOnchangeAxis(axis, 0)}
|
||||
/>
|
||||
<TextField
|
||||
label={`Max ${axis}`}
|
||||
type="number"
|
||||
disabled={!yaxis.limits.enable}
|
||||
value={yaxis.limits.range[axis][1]}
|
||||
onChange={createHandlerOnchangeAxis(axis, 1)}
|
||||
/>
|
||||
</div>
|
||||
))}
|
||||
<div className="vm-graph-settings-row">
|
||||
<span className="vm-graph-settings-row__label">Fixed Y-axis limits</span>
|
||||
<Switch
|
||||
value={yaxis.limits.enable}
|
||||
onChange={toggleEnableLimits}
|
||||
label={`${yaxis.limits.enable ? "Fixed" : "Auto"} limits`}
|
||||
fullWidth={isMobile}
|
||||
/>
|
||||
</div>
|
||||
{yaxis.limits.enable && (
|
||||
<div className="vm-axes-limits-list">
|
||||
{axes.map(axis => (
|
||||
<div
|
||||
className="vm-axes-limits-list__inputs"
|
||||
key={axis}
|
||||
>
|
||||
<TextField
|
||||
label={`Min ${axis}`}
|
||||
type="number"
|
||||
disabled={!yaxis.limits.enable}
|
||||
value={yaxis.limits.range[axis][0]}
|
||||
onChange={createHandlerOnchangeAxis(axis, 0)}
|
||||
/>
|
||||
<TextField
|
||||
label={`Max ${axis}`}
|
||||
type="number"
|
||||
disabled={!yaxis.limits.enable}
|
||||
value={yaxis.limits.range[axis][1]}
|
||||
onChange={createHandlerOnchangeAxis(axis, 1)}
|
||||
/>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
</div>;
|
||||
};
|
||||
|
||||
|
|
|
@@ -8,10 +8,14 @@ import "./style.scss";
 import Tooltip from "../../Main/Tooltip/Tooltip";
 import useBoolean from "../../../hooks/useBoolean";
 import LinesConfigurator from "./LinesConfigurator/LinesConfigurator";
+import GraphTypeSwitcher from "./GraphTypeSwitcher/GraphTypeSwitcher";
+import { MetricResult } from "../../../api/types";
+import { isHistogramData } from "../../../utils/metric";

 const title = "Graph settings";

 interface GraphSettingsProps {
+  data: MetricResult[],
   yaxis: YaxisState,
   setYaxisLimits: (limits: AxisRange) => void,
   toggleEnableLimits: () => void,
@@ -19,11 +23,13 @@ interface GraphSettingsProps {
     value: boolean,
     onChange: (value: boolean) => void,
   },
+  isHistogram?: boolean,
 }

-const GraphSettings: FC<GraphSettingsProps> = ({ yaxis, setYaxisLimits, toggleEnableLimits, spanGaps }) => {
+const GraphSettings: FC<GraphSettingsProps> = ({ data, yaxis, setYaxisLimits, toggleEnableLimits, spanGaps }) => {
   const popperRef = useRef<HTMLDivElement>(null);
   const buttonRef = useRef<HTMLDivElement>(null);
+  const displayHistogramMode = isHistogramData(data);

   const {
     value: openPopper,
@@ -64,6 +70,7 @@ const GraphSettings: FC<GraphSettingsProps> = ({ yaxis, setYaxisLimits, toggleEn
           spanGaps={spanGaps.value}
           onChange={spanGaps.onChange}
         />
+        {displayHistogramMode && <GraphTypeSwitcher onChange={handleClose}/>}
       </div>
     </div>
   </Popper>
@@ -0,0 +1,36 @@
+import React, { FC } from "preact/compat";
+import Switch from "../../../Main/Switch/Switch";
+import useDeviceDetect from "../../../../hooks/useDeviceDetect";
+import { useSearchParams } from "react-router-dom";
+import { useChangeDisplayMode } from "./useChangeDisplayMode";
+
+type Props = {
+  onChange: () => void;
+}
+
+const GraphTypeSwitcher: FC<Props> = ({ onChange }) => {
+  const { isMobile } = useDeviceDetect();
+
+  const { handleChange } = useChangeDisplayMode();
+  const [searchParams] = useSearchParams();
+
+  const value = !searchParams.get("display_mode");
+
+  const handleChangeMode = (val: boolean) => {
+    handleChange(val, onChange);
+  };
+
+  return (
+    <div className="vm-graph-settings-row">
+      <span className="vm-graph-settings-row__label">Histogram mode</span>
+      <Switch
+        value={value}
+        onChange={handleChangeMode}
+        label={value ? "Enabled" : "Disabled"}
+        fullWidth={isMobile}
+      />
+    </div>
+  );
+};
+
+export default GraphTypeSwitcher;
@@ -0,0 +1,16 @@
+import { useTimeDispatch } from "../../../../state/time/TimeStateContext";
+import { useSearchParams } from "react-router-dom";
+
+export const useChangeDisplayMode = () => {
+  const [searchParams, setSearchParams] = useSearchParams();
+  const dispatch = useTimeDispatch();
+
+  const handleChange = (val: boolean, callback?: () => void) => {
+    val ? searchParams.delete("display_mode") : searchParams.set("display_mode", "lines");
+    setSearchParams(searchParams);
+    dispatch({ type: "RUN_QUERY" });
+    callback && callback();
+  };
+
+  return { handleChange };
+};
@@ -10,14 +10,17 @@ interface Props {
 const LinesConfigurator: FC<Props> = ({ spanGaps, onChange }) => {
   const { isMobile } = useDeviceDetect();

-  return <div>
-    <Switch
-      value={spanGaps}
-      onChange={onChange}
-      label="Connect null values"
-      fullWidth={isMobile}
-    />
-  </div>;
+  return (
+    <div className="vm-graph-settings-row">
+      <span className="vm-graph-settings-row__label">Connect null values</span>
+      <Switch
+        value={spanGaps}
+        onChange={onChange}
+        label={spanGaps ? "Enabled" : "Disabled"}
+        fullWidth={isMobile}
+      />
+    </div>
+  );
 };

 export default LinesConfigurator;
@ -1,15 +1,31 @@
|
|||
@use "src/styles/variables" as *;
|
||||
|
||||
.vm-graph-settings {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: $padding-small;
|
||||
|
||||
&-popper {
|
||||
display: grid;
|
||||
gap: $padding-global;
|
||||
padding: 0 0 $padding-global;
|
||||
padding: $padding-small $padding-large $padding-large;
|
||||
min-width: 300px;
|
||||
|
||||
&__body {
|
||||
display: grid;
|
||||
gap: $padding-large;
|
||||
padding: 0 $padding-global;
|
||||
}
|
||||
}
|
||||
|
||||
&-row {
|
||||
display: grid;
|
||||
gap: $padding-small;
|
||||
grid-template-columns: minmax(150px, max-content) 1fr;
|
||||
|
||||
&__label {
|
||||
&:after{
|
||||
content: ":";
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -41,13 +41,13 @@ const GroupLogsConfigurators: FC<Props> = ({ logs }) => {
   const noWrapLines = searchParams.get(NO_WRAP_LINES) === "true";
   const compactGroupHeader = searchParams.get(COMPACT_GROUP_HEADER) === "true";
   const displayFieldsString = searchParams.get(DISPLAY_FIELDS) || "";
-  const displayFields = displayFieldsString ? displayFieldsString.split(",") : [];
+  const displayFields = displayFieldsString ? displayFieldsString.split(",") : [LOGS_DISPLAY_FIELDS];

   const [dateFormat, setDateFormat] = useState(searchParams.get(DATE_FORMAT) || LOGS_DATE_FORMAT);
   const [errorFormat, setErrorFormat] = useState("");

   const isGroupChanged = groupBy !== LOGS_GROUP_BY;
-  const isDisplayFieldsChanged = displayFields.length > 0;
+  const isDisplayFieldsChanged = displayFields.length !== 1 || displayFields[0] !== LOGS_DISPLAY_FIELDS;
   const isTimeChanged = searchParams.get(DATE_FORMAT) !== LOGS_DATE_FORMAT;
   const hasChanges = [
     isGroupChanged,
@@ -58,9 +58,7 @@ const GroupLogsConfigurators: FC<Props> = ({ logs }) => {
   ].some(Boolean);

   const logsKeys = useMemo(() => {
-    const excludeKeys = ["_msg", "_time"];
-    const uniqKeys = Array.from(new Set(logs.map(l => Object.keys(l)).flat()));
-    return uniqKeys.filter(k => !excludeKeys.includes(k));
+    return Array.from(new Set(logs.map(l => Object.keys(l)).flat()));
   }, [logs]);

   const {
@@ -4,13 +4,16 @@ import { useState } from "react";
 import Tooltip from "../Tooltip/Tooltip";
 import Button from "../Button/Button";
 import { CopyIcon } from "../Icons";
+import useCopyToClipboard from "../../../hooks/useCopyToClipboard";

 enum CopyState { copy = "Copy", copied = "Copied" }

 const CodeExample: FC<{code: string}> = ({ code }) => {
+  const copyToClipboard = useCopyToClipboard();
+
   const [tooltip, setTooltip] = useState(CopyState.copy);
-  const handlerCopy = () => {
-    navigator.clipboard.writeText(code);
+  const handlerCopy = async () => {
+    await copyToClipboard(code);
     setTooltip(CopyState.copied);
   };

@@ -11,7 +11,7 @@
   border-radius: $border-radius-small;

   &_open {
-    z-index: 100;
+    z-index: 101;
     opacity: 1;
     transform-origin: top center;
     animation: vm-slider 150ms cubic-bezier(0.280, 0.840, 0.420, 1.1);
@@ -5,6 +5,7 @@ import { getComparator, stableSort } from "./helpers";
 import Tooltip from "../Main/Tooltip/Tooltip";
 import Button from "../Main/Button/Button";
 import { useEffect } from "preact/compat";
+import useCopyToClipboard from "../../hooks/useCopyToClipboard";

 type OrderDir = "asc" | "desc"

@@ -22,6 +23,8 @@ interface TableProps<T> {
 }

 const Table = <T extends object>({ rows, columns, defaultOrderBy, defaultOrderDir, copyToClipboard, paginationOffset }: TableProps<T>) => {
+  const handleCopyToClipboard = useCopyToClipboard();
+
   const [orderBy, setOrderBy] = useState<keyof T>(defaultOrderBy);
   const [orderDir, setOrderDir] = useState<OrderDir>(defaultOrderDir || "desc");
   const [copied, setCopied] = useState<number | null>(null);
@@ -42,7 +45,7 @@ const Table = <T extends object>({ rows, columns, defaultOrderBy, defaultOrderDi
   const createCopyHandler = (copyValue: string | number, rowIndex: number) => async () => {
     if (copied === rowIndex) return;
     try {
-      await navigator.clipboard.writeText(String(copyValue));
+      await handleCopyToClipboard(String(copyValue));
       setCopied(rowIndex);
     } catch (e) {
       console.error(e);
@ -26,6 +26,7 @@ import useElementSize from "../../../hooks/useElementSize";
|
|||
import { ChartTooltipProps } from "../../Chart/ChartTooltip/ChartTooltip";
|
||||
import LegendAnomaly from "../../Chart/Line/LegendAnomaly/LegendAnomaly";
|
||||
import { groupByMultipleKeys } from "../../../utils/array";
|
||||
import { useGraphDispatch } from "../../../state/graph/GraphStateContext";
|
||||
|
||||
export interface GraphViewProps {
|
||||
data?: MetricResult[];
|
||||
|
@ -62,6 +63,8 @@ const GraphView: FC<GraphViewProps> = ({
|
|||
isAnomalyView,
|
||||
spanGaps
|
||||
}) => {
|
||||
const graphDispatch = useGraphDispatch();
|
||||
|
||||
const { isMobile } = useDeviceDetect();
|
||||
const { timezone } = useTimeState();
|
||||
const currentStep = useMemo(() => customStep || period.step || "1s", [period.step, customStep]);
|
||||
|
@ -196,6 +199,26 @@ const GraphView: FC<GraphViewProps> = ({
|
|||
|
||||
const [containerRef, containerSize] = useElementSize();
|
||||
|
||||
const hasTimeData = dataChart[0]?.length > 0;
|
||||
|
||||
useEffect(() => {
|
||||
const checkEmptyHistogram = () => {
|
||||
if (!isHistogram || !data[1]) {
|
||||
return false;
|
||||
}
|
||||
|
||||
try {
|
||||
const values = (dataChart?.[1]?.[2] || []) as (number | null)[];
|
||||
return values.every(v => v === null);
|
||||
} catch (e) {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
const isEmpty = checkEmptyHistogram();
|
||||
graphDispatch({ type: "SET_IS_EMPTY_HISTOGRAM", payload: isEmpty });
|
||||
}, [dataChart, isHistogram]);
|
||||
|
||||
return (
|
||||
<div
|
||||
className={classNames({
|
||||
|
@ -205,7 +228,7 @@ const GraphView: FC<GraphViewProps> = ({
|
|||
})}
|
||||
ref={containerRef}
|
||||
>
|
||||
{!isHistogram && (
|
||||
{!isHistogram && hasTimeData && (
|
||||
<LineChart
|
||||
data={dataChart}
|
||||
series={series}
|
||||
|
|
|
@@ -13,6 +13,7 @@ import { isHistogramData } from "../utils/metric";
 import { useGraphState } from "../state/graph/GraphStateContext";
 import { getStepFromDuration } from "../utils/time";
 import { AppType } from "../types/appType";
+import { getQueryStringValue } from "../utils/query-string";

 interface FetchQueryParams {
   predefinedQuery?: string[]
@@ -132,7 +133,8 @@ export const useFetchQuery = ({
         tempTraces.push(trace);
       }

-      isHistogramResult = !isAnomalyUI && isDisplayChart && isHistogramData(resp.data.result);
+      const preventChangeType = !!getQueryStringValue("display_mode", null);
+      isHistogramResult = !isAnomalyUI && isDisplayChart && !preventChangeType && isHistogramData(resp.data.result);
       seriesLimit = isHistogramResult ? Infinity : defaultLimit;
       const freeTempSize = seriesLimit - tempData.length;
       resp.data.result.slice(0, freeTempSize).forEach((d: MetricBase) => {
@@ -47,7 +47,9 @@ const GraphTab: FC<Props> = ({ isHistogram, graphData, controlsRef, isAnomalyVie
       <div className="vm-custom-panel-body-header__graph-controls">
         <GraphTips/>
         <GraphSettings
+          data={graphData}
           yaxis={yaxis}
+          isHistogram={isHistogram}
           setYaxisLimits={setYaxisLimits}
           toggleEnableLimits={toggleEnableLimits}
           spanGaps={{ value: spanGaps, onChange: setSpanGaps }}
@ -0,0 +1,37 @@
|
|||
import React, { FC } from "preact/compat";
|
||||
import Alert from "../../../components/Main/Alert/Alert";
|
||||
import { useGraphState } from "../../../state/graph/GraphStateContext";
|
||||
import {
|
||||
useChangeDisplayMode
|
||||
} from "../../../components/Configurators/GraphSettings/GraphTypeSwitcher/useChangeDisplayMode";
|
||||
import Button from "../../../components/Main/Button/Button";
|
||||
import "./style.scss";
|
||||
|
||||
const WarningHeatmapToLine:FC = () => {
|
||||
const { isEmptyHistogram } = useGraphState();
|
||||
const { handleChange } = useChangeDisplayMode();
|
||||
|
||||
if (!isEmptyHistogram) return null;
|
||||
|
||||
return (
|
||||
<Alert variant="warning">
|
||||
<div className="vm-warning-heatmap-to-line">
|
||||
<p className="vm-warning-heatmap-to-line__text">
|
||||
The expression cannot be displayed as a heatmap.
|
||||
To make the graph work, disable the heatmap in the "Graph settings" or modify the expression.
|
||||
</p>
|
||||
|
||||
<Button
|
||||
size="small"
|
||||
color="primary"
|
||||
variant="text"
|
||||
onClick={() => handleChange(false)}
|
||||
>
|
||||
Switch to line chart
|
||||
</Button>
|
||||
</div>
|
||||
</Alert>
|
||||
);
|
||||
};
|
||||
|
||||
export default WarningHeatmapToLine;
|
|
@ -0,0 +1,7 @@
|
|||
@use "src/styles/variables" as *;
|
||||
|
||||
.vm-warning-heatmap-to-line {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
}
|
|
@@ -18,6 +18,7 @@ import WarningLimitSeries from "./WarningLimitSeries/WarningLimitSeries";
 import CustomPanelTabs from "./CustomPanelTabs";
 import { DisplayType } from "../../types";
 import DownloadReport from "./DownloadReport/DownloadReport";
+import WarningHeatmapToLine from "./WarningHeatmapToLine/WarningHeatmapToLine";

 const CustomPanel: FC = () => {
   useSetQueryParams();
@@ -93,6 +94,7 @@ const CustomPanel: FC = () => {
       />
       {showError && <Alert variant="error">{error}</Alert>}
       {showInstantQueryTip && <Alert variant="info"><InstantQueryTip/></Alert>}
+      <WarningHeatmapToLine/>
       {warning && (
         <WarningLimitSeries
           warning={warning}
@@ -40,7 +40,7 @@ const GroupLogsItem: FC<Props> = ({ log, displayFields = ["_msg"] }) => {
     return marked(log._msg.replace(/```/g, "\n```\n")) as string;
   }, [log._msg, markdownParsing]);

-  const fields = useMemo(() => Object.entries(log).filter(([key]) => key !== "_msg"), [log]);
+  const fields = useMemo(() => Object.entries(log), [log]);
   const hasFields = fields.length > 0;

   const displayMessage = useMemo(() => {
@@ -66,7 +66,7 @@ export const useFetchLogHits = (server: string, query: string) => {
         setError(error);
       }

-      setLogHits(hits.map(hit => ({ ...hit, _isOther: isEmptyObject(hit.fields) })));
+      setLogHits(hits.map(markIsOther).sort(sortHits));
     } catch (e) {
       if (e instanceof Error && e.name !== "AbortError") {
         setError(String(e));
@@ -85,3 +85,18 @@ export const useFetchLogHits = (server: string, query: string) => {
     abortController: abortControllerRef.current
   };
 };
+
+
+// Helper function to check if a hit is "other"
+const markIsOther = (hit: LogHits) => ({
+  ...hit,
+  _isOther: isEmptyObject(hit.fields)
+});
+
+// Comparison function for sorting hits
+const sortHits = (a: LogHits, b: LogHits) => {
+  if (a._isOther !== b._isOther) {
+    return a._isOther ? -1 : 1; // "Other" hits first to avoid graph overlap
+  }
+  return b.total - a.total; // Sort remaining by total for better visibility
+};
@@ -119,6 +119,7 @@ const PredefinedPanel: FC<PredefinedPanelsProps> = ({
           {title || ""}
         </h3>
         <GraphSettings
+          data={graphData || []}
           yaxis={yaxis}
           setYaxisLimits={setYaxisLimits}
           toggleEnableLimits={toggleEnableLimits}
@ -20,6 +20,8 @@ import TableSettings from "../../../components/Table/TableSettings/TableSettings
|
|||
import { getColumns } from "../../../hooks/useSortedCategories";
|
||||
import { useCustomPanelDispatch, useCustomPanelState } from "../../../state/customPanel/CustomPanelStateContext";
|
||||
import TableView from "../../../components/Views/TableView/TableView";
|
||||
import { useSearchParams } from "react-router-dom";
|
||||
import WarningHeatmapToLine from "../../CustomPanel/WarningHeatmapToLine/WarningHeatmapToLine";
|
||||
|
||||
type Props = {
|
||||
data: DataAnalyzerType[];
|
||||
|
@ -28,6 +30,8 @@ type Props = {
|
|||
|
||||
const QueryAnalyzerView: FC<Props> = ({ data, period }) => {
|
||||
const { isMobile } = useDeviceDetect();
|
||||
const [searchParams, setSearchParams] = useSearchParams();
|
||||
|
||||
const { tableCompact } = useCustomPanelState();
|
||||
const customPanelDispatch = useCustomPanelDispatch();
|
||||
|
||||
|
@ -101,11 +105,16 @@ const QueryAnalyzerView: FC<Props> = ({ data, period }) => {
|
|||
setQueries(tempQueries);
|
||||
setGraphData(tempGraphData);
|
||||
setLiveData(tempLiveData);
|
||||
|
||||
// reset display mode
|
||||
searchParams.delete("display_mode");
|
||||
setSearchParams(searchParams);
|
||||
}, [data]);
|
||||
|
||||
useEffect(() => {
|
||||
setIsHistogram(!!graphData && isHistogramData(graphData));
|
||||
}, [graphData]);
|
||||
const noSpecificDisplayMode = !searchParams.get("display_mode");
|
||||
setIsHistogram(!!graphData && noSpecificDisplayMode && isHistogramData(graphData));
|
||||
}, [graphData, searchParams]);
|
||||
|
||||
return (
|
||||
<div
|
||||
|
@ -120,6 +129,7 @@ const QueryAnalyzerView: FC<Props> = ({ data, period }) => {
|
|||
onDeleteClick={handleTraceDelete}
|
||||
/>
|
||||
)}
|
||||
<WarningHeatmapToLine/>
|
||||
<div
|
||||
className={classNames({
|
||||
"vm-block": true,
|
||||
|
@ -138,7 +148,9 @@ const QueryAnalyzerView: FC<Props> = ({ data, period }) => {
|
|||
{displayType === "chart" && <GraphTips/>}
|
||||
{displayType === "chart" && (
|
||||
<GraphSettings
|
||||
data={graphData || []}
|
||||
yaxis={yaxis}
|
||||
isHistogram={isHistogram}
|
||||
setYaxisLimits={setYaxisLimits}
|
||||
toggleEnableLimits={toggleEnableLimits}
|
||||
spanGaps={{ value: spanGaps, onChange: setSpanGaps }}
|
||||
|
|
|
@@ -15,6 +15,7 @@ export interface GraphState {
   customStep: string
   yaxis: YaxisState
   isHistogram: boolean
+  isEmptyHistogram: boolean
   /** when true, null data values will not cause line breaks */
   spanGaps: boolean
 }
@@ -24,6 +25,7 @@ export type GraphAction =
   | { type: "SET_YAXIS_LIMITS", payload: AxisRange }
   | { type: "SET_CUSTOM_STEP", payload: string}
   | { type: "SET_IS_HISTOGRAM", payload: boolean }
+  | { type: "SET_IS_EMPTY_HISTOGRAM", payload: boolean }
   | { type: "SET_SPAN_GAPS", payload: boolean }

 export const initialGraphState: GraphState = {
@@ -32,6 +34,7 @@ export const initialGraphState: GraphState = {
     limits: { enable: false, range: { "1": [0, 0] } }
   },
   isHistogram: false,
+  isEmptyHistogram: false,
   spanGaps: false,
 };

@@ -69,6 +72,11 @@ export function reducer(state: GraphState, action: GraphAction): GraphState {
         ...state,
         isHistogram: action.payload
       };
+    case "SET_IS_EMPTY_HISTOGRAM":
+      return {
+        ...state,
+        isEmptyHistogram: action.payload
+      };
     case "SET_SPAN_GAPS":
       return {
         ...state,
@@ -153,7 +153,10 @@ export const normalizeData = (buckets: MetricResult[], isHistogram?: boolean): M
   const totalHitsPerTimestamp: { [timestamp: number]: number } = {};
   vmBuckets.forEach(bucket =>
     bucket.values.forEach(([timestamp, value]) => {
-      totalHitsPerTimestamp[timestamp] = (totalHitsPerTimestamp[timestamp] || 0) + +value;
+      const valueNum = Number(value);
+      const number = isNaN(valueNum) ? 0 : valueNum;
+      const prevTotal = totalHitsPerTimestamp[timestamp] || 0;
+      totalHitsPerTimestamp[timestamp] = prevTotal + number;
     })
   );
@ -1686,7 +1686,7 @@
|
|||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_cpu_cores{job=~\"$job\", instance=~\"$instance\"}\n) by(instance)",
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job\", instance=~\"$instance\"}\n) by(instance)",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
|
|
|
@ -1933,7 +1933,7 @@
|
|||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": true,
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_cpu_cores{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
|
|
|
@ -1981,7 +1981,7 @@
|
|||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_cpu_cores{job=~\"$job\", instance=~\"$instance\"}\n) by(instance)",
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job\", instance=~\"$instance\"}\n) by(instance)",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
|
|
|
@ -1687,7 +1687,7 @@
|
|||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_cpu_cores{job=~\"$job\", instance=~\"$instance\"}\n) by(instance)",
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job\", instance=~\"$instance\"}\n) by(instance)",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
|
|
|
@ -1934,7 +1934,7 @@
|
|||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": true,
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_cpu_cores{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
|
|
|
@ -1982,7 +1982,7 @@
|
|||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_cpu_cores{job=~\"$job\", instance=~\"$instance\"}\n) by(instance)",
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job\", instance=~\"$instance\"}\n) by(instance)",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
|
|
|
@ -1666,7 +1666,7 @@
|
|||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": true,
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_cpu_cores{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
|
|
|
@ -1473,7 +1473,7 @@
|
|||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]) \n / \n vm_available_cpu_cores{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]) \n / \n process_cpu_cores_available{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
|
|
|
@ -1361,7 +1361,7 @@
|
|||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_cpu_cores{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
|
|
|
@ -1665,7 +1665,7 @@
|
|||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": true,
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_cpu_cores{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
|
|
|
@ -1472,7 +1472,7 @@
|
|||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]) \n / \n vm_available_cpu_cores{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval]) \n / \n process_cpu_cores_available{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
|
|
|
@ -1360,7 +1360,7 @@
|
|||
},
|
||||
"editorMode": "code",
|
||||
"exemplar": false,
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n vm_available_cpu_cores{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"expr": "max(\n rate(process_cpu_seconds_total{job=~\"$job\", instance=~\"$instance\"}[$__rate_interval])\n /\n process_cpu_cores_available{job=~\"$job\", instance=~\"$instance\"}\n) by(job)",
|
||||
"format": "time_series",
|
||||
"interval": "",
|
||||
"intervalFactor": 1,
|
||||
|
|
|
@ -4,7 +4,7 @@ services:
|
|||
# And forward them to --remoteWrite.url
|
||||
vmagent:
|
||||
container_name: vmagent
|
||||
image: victoriametrics/vmagent:v1.109.1
|
||||
image: victoriametrics/vmagent:v1.110.0
|
||||
depends_on:
|
||||
- "vminsert"
|
||||
ports:
|
||||
|
@ -39,7 +39,7 @@ services:
|
|||
# where N is number of vmstorages (2 in this case).
|
||||
vmstorage-1:
|
||||
container_name: vmstorage-1
|
||||
image: victoriametrics/vmstorage:v1.109.1-cluster
|
||||
image: victoriametrics/vmstorage:v1.110.0-cluster
|
||||
ports:
|
||||
- 8482
|
||||
- 8400
|
||||
|
@ -51,7 +51,7 @@ services:
|
|||
restart: always
|
||||
vmstorage-2:
|
||||
container_name: vmstorage-2
|
||||
image: victoriametrics/vmstorage:v1.109.1-cluster
|
||||
image: victoriametrics/vmstorage:v1.110.0-cluster
|
||||
ports:
|
||||
- 8482
|
||||
- 8400
|
||||
|
@ -66,7 +66,7 @@ services:
|
|||
# pre-process them and distributes across configured vmstorage shards.
|
||||
vminsert:
|
||||
container_name: vminsert
|
||||
image: victoriametrics/vminsert:v1.109.1-cluster
|
||||
image: victoriametrics/vminsert:v1.110.0-cluster
|
||||
depends_on:
|
||||
- "vmstorage-1"
|
||||
- "vmstorage-2"
|
||||
|
@ -81,7 +81,7 @@ services:
|
|||
# vmselect collects results from configured `--storageNode` shards.
|
||||
vmselect-1:
|
||||
container_name: vmselect-1
|
||||
image: victoriametrics/vmselect:v1.109.1-cluster
|
||||
image: victoriametrics/vmselect:v1.110.0-cluster
|
||||
depends_on:
|
||||
- "vmstorage-1"
|
||||
- "vmstorage-2"
|
||||
|
@ -94,7 +94,7 @@ services:
|
|||
restart: always
|
||||
vmselect-2:
|
||||
container_name: vmselect-2
|
||||
image: victoriametrics/vmselect:v1.109.1-cluster
|
||||
image: victoriametrics/vmselect:v1.110.0-cluster
|
||||
depends_on:
|
||||
- "vmstorage-1"
|
||||
- "vmstorage-2"
|
||||
|
@ -112,7 +112,7 @@ services:
|
|||
# It can be used as an authentication proxy.
|
||||
vmauth:
|
||||
container_name: vmauth
|
||||
image: victoriametrics/vmauth:v1.109.1
|
||||
image: victoriametrics/vmauth:v1.110.0
|
||||
depends_on:
|
||||
- "vmselect-1"
|
||||
- "vmselect-2"
|
||||
|
@ -127,7 +127,7 @@ services:
|
|||
# vmalert executes alerting and recording rules
|
||||
vmalert:
|
||||
container_name: vmalert
|
||||
image: victoriametrics/vmalert:v1.109.1
|
||||
image: victoriametrics/vmalert:v1.110.0
|
||||
depends_on:
|
||||
- "vmauth"
|
||||
ports:
|
||||
|
|
|
@ -60,7 +60,7 @@ services:
|
|||
# scraping, storing metrics and serve read requests.
|
||||
victoriametrics:
|
||||
container_name: victoriametrics
|
||||
image: victoriametrics/victoria-metrics:v1.109.1
|
||||
image: victoriametrics/victoria-metrics:v1.110.0
|
||||
ports:
|
||||
- 8428:8428
|
||||
volumes:
|
||||
|
@ -79,7 +79,7 @@ services:
|
|||
# depending on the requested path.
|
||||
vmauth:
|
||||
container_name: vmauth
|
||||
image: victoriametrics/vmauth:v1.109.1
|
||||
image: victoriametrics/vmauth:v1.110.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
- "victorialogs"
|
||||
|
@ -96,7 +96,7 @@ services:
|
|||
# vmalert executes alerting and recording rules according to given rule type.
|
||||
vmalert:
|
||||
container_name: vmalert
|
||||
image: victoriametrics/vmalert:v1.109.1
|
||||
image: victoriametrics/vmalert:v1.110.0
|
||||
depends_on:
|
||||
- "vmauth"
|
||||
- "alertmanager"
|
||||
|
|
|
@ -4,7 +4,7 @@ services:
|
|||
# And forward them to --remoteWrite.url
|
||||
vmagent:
|
||||
container_name: vmagent
|
||||
image: victoriametrics/vmagent:v1.109.1
|
||||
image: victoriametrics/vmagent:v1.110.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
|
@ -22,7 +22,7 @@ services:
|
|||
# storing metrics and serve read requests.
|
||||
victoriametrics:
|
||||
container_name: victoriametrics
|
||||
image: victoriametrics/victoria-metrics:v1.109.1
|
||||
image: victoriametrics/victoria-metrics:v1.110.0
|
||||
ports:
|
||||
- 8428:8428
|
||||
- 8089:8089
|
||||
|
@ -65,7 +65,7 @@ services:
|
|||
# vmalert executes alerting and recording rules
|
||||
vmalert:
|
||||
container_name: vmalert
|
||||
image: victoriametrics/vmalert:v1.109.1
|
||||
image: victoriametrics/vmalert:v1.110.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
- "alertmanager"
|
||||
|
|
|
@ -19,7 +19,7 @@ services:
|
|||
retries: 10
|
||||
|
||||
dd-proxy:
|
||||
image: docker.io/victoriametrics/vmauth:v1.109.1
|
||||
image: docker.io/victoriametrics/vmauth:v1.110.0
|
||||
restart: on-failure
|
||||
volumes:
|
||||
- ./:/etc/vmauth
|
||||
|
@ -45,7 +45,7 @@ services:
|
|||
replicas: 0
|
||||
|
||||
victoriametrics:
|
||||
image: victoriametrics/victoria-metrics:v1.109.1
|
||||
image: victoriametrics/victoria-metrics:v1.110.0
|
||||
ports:
|
||||
- '8428:8428'
|
||||
command:
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
services:
|
||||
vmagent:
|
||||
container_name: vmagent
|
||||
image: victoriametrics/vmagent:v1.109.1
|
||||
image: victoriametrics/vmagent:v1.110.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
|
@ -18,7 +18,7 @@ services:
|
|||
|
||||
victoriametrics:
|
||||
container_name: victoriametrics
|
||||
image: victoriametrics/victoria-metrics:v1.109.1
|
||||
image: victoriametrics/victoria-metrics:v1.110.0
|
||||
ports:
|
||||
- 8428:8428
|
||||
volumes:
|
||||
|
@ -50,7 +50,7 @@ services:
|
|||
|
||||
vmalert:
|
||||
container_name: vmalert
|
||||
image: victoriametrics/vmalert:v1.109.1
|
||||
image: victoriametrics/vmalert:v1.110.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
|
@ -72,7 +72,7 @@ services:
|
|||
restart: always
|
||||
vmanomaly:
|
||||
container_name: vmanomaly
|
||||
image: victoriametrics/vmanomaly:v1.19.1
|
||||
image: victoriametrics/vmanomaly:v1.19.2
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
|
|
|
@@ -86,6 +86,7 @@ See also [case studies](https://docs.victoriametrics.com/casestudies/).
 * [Persistent Data Structures in VictoriaMetrics (Part 2): vmselect](https://medium.com/@jiekun/persistent-data-structures-in-victoriametrics-part-2-vmselect-9e3de39a4d20)
 * [Migrating to VictoriaMetrics (by Zomato): A Complete Overhaul for Enhanced Observability](https://blog.zomato.com/migrating-to-victoriametrics-a-complete-overhaul-for-enhanced-observability)
 * [Harness the Power of VictoriaMetrics and Grafana Operators for Metrics Management](https://blog.ogenki.io/post/series/observability/metrics/)
+* [Reducing Inter-AZ traffic in VictoriaMetrics with Zonekeeper](https://tanmay-bhat.medium.com/reducing-inter-az-traffic-in-victoriametrics-with-zonekeeper-3bd7e1526796)

 ## Our articles
@@ -48,6 +48,21 @@ VictoriaMetrics can run also on MacOS for testing and development purposes.
 * **MacOS**: amd64, arm64 (for testing and development purposes)
 * **Windows**: amd64

+## Kubernetes
+
+VictoriaMetrics natively supports deployment in Kubernetes via [helm charts](https://docs.victoriametrics.com/helm/)
+and [kubernetes operator](https://docs.victoriametrics.com/operator/). See how to [start using k8s operator](https://docs.victoriametrics.com/guides/getting-started-with-vm-operator/).
+
+Common recommendations:
+1. Prefer setting [requests equal to limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits)
+   for stateful components like [vmstorage](https://docs.victoriametrics.com/cluster-victoriametrics/#architecture-overview) to avoid unnecessary
+   component restarts.
+
+1. Avoid using [fractional CPU units](https://kubernetes.io/docs/tasks/configure-pod-container/assign-cpu-resource/#cpu-units)
+   when setting resources for optimal performance. VictoriaMetrics is written in Go and its runtime requires specifying
+   an [integer number](https://pkg.go.dev/runtime#GOMAXPROCS) of concurrently running threads.
+   When a fractional CPU unit is specified, VictoriaMetrics will automatically round it down.
+
 ## Upgrade procedure

 It is safe to upgrade VictoriaMetrics to new versions unless the [release notes](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest) say otherwise.
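As a hedged illustration of the Kubernetes recommendations above (the component and all values are assumptions, not part of this commit), a container resources block with requests equal to limits and whole CPU units could look like this:

```yaml
# vmstorage container: requests == limits, integer CPU units (example values only)
resources:
  requests:
    cpu: "2"
    memory: 8Gi
  limits:
    cpu: "2"
    memory: 8Gi
```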
@@ -22,5 +22,5 @@ to [the latest available releases](https://docs.victoriametrics.com/changelog/).

 ## Currently supported LTS release lines

-- v1.102.x - the latest one is [v1.102.10 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.102.10)
-- v1.97.x - the latest one is [v1.97.15 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.97.15)
+- v1.102.x - the latest one is [v1.102.11 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.102.11)
+- v1.97.x - the latest one is [v1.97.16 LTS release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.97.16)
@@ -55,8 +55,8 @@ under the current directory:


 ```sh
-docker pull victoriametrics/victoria-metrics:v1.109.1
-docker run -it --rm -v `pwd`/victoria-metrics-data:/victoria-metrics-data -p 8428:8428 victoriametrics/victoria-metrics:v1.109.1
+docker pull victoriametrics/victoria-metrics:v1.110.0
+docker run -it --rm -v `pwd`/victoria-metrics-data:/victoria-metrics-data -p 8428:8428 victoriametrics/victoria-metrics:v1.110.0
 ```

@@ -18,6 +18,11 @@ according to [these docs](https://docs.victoriametrics.com/victorialogs/quicksta

 * FEATURE: [`block_stats` pipe](https://docs.victoriametrics.com/victorialogs/logsql/#block_stats-pipe): return the path to the part where every data block is stored. The path to the part is returned in the `part_path` field. This allows investigating the distribution of data blocks among parts.
 * FEATURE: reduce VictoriaLogs startup time by multiple times when it opens a large datastore with big [retention](https://docs.victoriametrics.com/victorialogs/#retention).
+* FEATURE: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): add the `_msg` field to the list of fields for the group view, allowing users to select multiple fields, including `_msg`, for log display.
+
+* BUGFIX: [data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/): drop log entries with too long field names and log the dropped log entries with the `ignoring log entry with too long field name` message, so human operators could notice and fix the ingestion of incorrect logs ASAP. Previously too long field names were silently truncated to shorter values. This isn't what most users expect. See [why VictoriaLogs has a limit on the field name length](https://docs.victoriametrics.com/victorialogs/faq/#what-is-the-maximum-supported-field-name-length).
+* BUGFIX: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): fix transparency for bars in the hits bar chart to improve visibility. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8152).
+* BUGFIX: [web UI](https://docs.victoriametrics.com/victorialogs/querying/#web-ui): fix `Group by field` dropdown menu not displaying any options in Group View settings. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8153).

 ## [v1.8.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.8.0-victorialogs)

@@ -184,16 +184,36 @@ VictoriaLogs works optimally with log records of up to `10KB`. It works OK with
 log records of up to `100KB`. It works not so optimally with log records exceeding
 `100KB`.

-The max size of a log record VictoriaLogs can handle is `2MB`. This is
-because VictoriaLogs stores log records in blocks and `2MB` is the max size of a
-block. Blocks of this size fit the L2 cache of a typical CPU, which gives an
-optimal processing performance.
+The max size of a log record VictoriaLogs can accept during [data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/)
+is `2MB`, because log records are stored in blocks of up to `2MB` size.
+Blocks of this size fit the L2 cache of a typical CPU, which gives an
+optimal performance during data ingestion and querying.

-However, log records whose size is close to `2MB` aren't handled efficiently by
+Note that log records with sizes close to `2MB` aren't handled efficiently by
 VictoriaLogs because per-block overhead translates to a single log record, and
 this overhead is big.

-The `2MB` limit is hardcoded and is unlikely to change.
+The `2MB` limit is hardcoded and is unlikely to increase.
+
+The limit can be set to a lower value during [data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/)
+via the `-insert.maxLineSizeBytes` command-line flag.
+
+## What is the maximum supported field name length
+
+VictoriaLogs limits [log field](https://docs.victoriametrics.com/victorialogs/keyconcepts/#data-model) name length to 128 bytes -
+log entries with longer field names are ignored during [data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/).
+
+The maximum length of a field name is hardcoded and is unlikely to increase, since this may increase RAM and CPU usage.
+
+## How many fields a single log entry may contain
+
+A single log entry may contain up to 2000 fields. This fits well the majority of use cases for structured logs and
+for [wide events](https://jeremymorrell.dev/blog/a-practitioners-guide-to-wide-events/).
+
+The maximum number of fields per log entry is hardcoded and is unlikely to increase, since this may increase RAM and CPU usage.
+
+The limit can be set to a lower value during [data ingestion](https://docs.victoriametrics.com/victorialogs/data-ingestion/)
+via the `-insert.maxFieldsPerLine` command-line flag.

 ## How to determine which log fields occupy the most of disk space?

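A hedged sketch of lowering both ingestion limits discussed above at startup; only the flag names come from the text, while the binary invocation and values are assumptions:

```sh
# accept log lines of up to 512KB with at most 200 fields (example values)
./victoria-logs \
  -insert.maxLineSizeBytes=524288 \
  -insert.maxFieldsPerLine=200
```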
@@ -249,6 +249,37 @@ VictoriaLogs has very low overhead for per-tenant management, so it is OK to hav

 VictoriaLogs doesn't perform per-tenant authorization. Use [vmauth](https://docs.victoriametrics.com/vmauth/) or similar tools for per-tenant authorization.

+### Multitenancy access control
+
+Enforce access control for tenants by using [vmauth](https://docs.victoriametrics.com/vmauth/). Access control can be configured for each tenant by setting up the following rules:
+
+```yaml
+users:
+- username: "foo"
+  password: "bar"
+  url_map:
+  - src_paths:
+    - "/select/.*"
+    - "/insert/.*"
+    headers:
+    - "AccountID: 1"
+    - "ProjectID: 0"
+    url_prefix:
+    - "http://localhost:9428/"
+
+- username: "baz"
+  password: "bar"
+  url_map:
+  - src_paths: ["/select/.*"]
+    headers:
+    - "AccountID: 2"
+    - "ProjectID: 0"
+    url_prefix:
+    - "http://localhost:9428/"
+```
+
+This configuration allows `foo` to use the `/select/.*` and `/insert/.*` endpoints with `AccountID: 1` and `ProjectID: 0`, while `baz` can only use the `/select/.*` endpoint with `AccountID: 2` and `ProjectID: 0`.
+
 ## Security

 It is expected that VictoriaLogs runs in a protected environment, which is unreachable from the Internet without proper authorization.
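As a usage sketch (not part of the commit), a query sent through vmauth with the `foo` credentials would be routed to VictoriaLogs with the `AccountID: 1` and `ProjectID: 0` headers attached; assuming vmauth listens on its default port 8427:

```sh
# query tenant (1:0) through vmauth using the "foo" account
curl -u foo:bar 'http://localhost:8427/select/logsql/query' -d 'query=error'
```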
@@ -80,7 +80,7 @@ Please find the example of provisioning Grafana instance with VictoriaLogs datas
   grafana:
     image: grafana/grafana:11.0.0
     environment:
-      - GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.13.1/victoriametrics-logs-datasource-v0.13.1.zip;victoriametrics-logs-datasource
+      - GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.14.2/victoriametrics-logs-datasource-v0.14.2.zip;victoriametrics-logs-datasource
       - GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-logs-datasource
     ports:
       - 3000:3000/tcp
@@ -108,7 +108,7 @@ Option 1. Using Grafana provisioning:

 ``` yaml
 env:
-  GF_INSTALL_PLUGINS: "https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.13.1/victoriametrics-logs-datasource-v0.13.1.zip;victoriametrics-logs-datasource"
+  GF_INSTALL_PLUGINS: "https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.14.2/victoriametrics-logs-datasource-v0.14.2.zip;victoriametrics-logs-datasource"
   GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS: "victoriametrics-logs-datasource"
 ```

@@ -116,7 +116,7 @@ Option 2. Using Grafana plugins section in `values.yaml`:

 ``` yaml
 plugins:
-  - https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.13.1/victoriametrics-logs-datasource-v0.13.1.zip;victoriametrics-logs-datasource
+  - https://github.com/VictoriaMetrics/victorialogs-datasource/releases/download/v0.14.2/victoriametrics-logs-datasource-v0.14.2.zip;victoriametrics-logs-datasource
 ```

 Option 3. Using init container:
@ -11,14 +11,29 @@ aliases:
|
|||
---
|
||||
Please find the changelog for VictoriaMetrics Anomaly Detection below.
|
||||
|
||||
## v1.19.2
|
||||
Released: 2025-01-27
|
||||
|
||||
- IMPROVEMENT: Added the `complete` option to the `--splitBy` argument in `config_splitter.py` [util](https://docs.victoriametrics.com/anomaly-detection/faq/index.html#splitting-the-config). This allows splitting a parent configuration into the smallest possible sub-configurations, each containing exactly one scheduler, one model, and either one or multiple queries (depending on whether the model is [multivariate](https://docs.victoriametrics.com/anomaly-detection/components/models/#multivariate-models) or not).
|
||||
|
||||
- FIX: Resolved an issue where duplicate log messages were generated during sub-config validation of the parent configuration.
|
||||
|
||||
- FIX: Corrected usage of `AccountID` and `ProjectID` extracted from `tenant_id`, which are appended as labels `vm_account_id` and `vm_project_id`, respectively (previously swapped) by `VmReader` when using the per-query `tenant_id` feature. **This issue affected versions [v1.19.0](#v1190) and [v1.19.1](#v1191).**
|
||||
|
||||
- FIX: Resolved an issue with the `VmReader` instance string representation that caused errors when `vmanomaly` was run with `--loggerLevel DEBUG`.
|
||||
|
||||
## v1.19.1
|
||||
Released: 2025-01-21
|
||||
|
||||
> **Note**: There is a known bug in [v1.19.0](#v1190) - the `AccountID` and `ProjectID` are swapped when they are extracted from the `tenant_id` argument in `VMReader`. This can cause correctly read results being written to the wrong tenant when using the per-query `tenant_id` feature with `AccountID` != `ProjectID`. Please update to patch [v1.19.2](#v1192), which resolves this issue.
|
||||
|
||||
- FIX: Resolved writer warnings for configurations where `reader.tenant_id` equals `writer.tenant_id` and **is not** `multitenant`, as this is a valid setup. Enhanced tenant_id-related log messages across config validation, reader, and writer for improved clarity.
|
||||
|
||||
## v1.19.0
|
||||
Released: 2025-01-20
|
||||
|
||||
> **Note**: There is a known bug in [v1.19.0](#v1190) - the `AccountID` and `ProjectID` are swapped when they are extracted from the `tenant_id` argument in `VMReader`. This can cause correctly read results being written to the wrong tenant when using the per-query `tenant_id` feature with `AccountID` != `ProjectID`. Please update to patch [v1.19.2](#v1192), which resolves this issue.
|
||||
|
||||
- FEATURE: Added support for per-query `tenant_id` in the [`VmReader`](https://docs.victoriametrics.com/anomaly-detection/components/reader/#vm-reader). This allows overriding the reader-level `tenant_id` within a single global `vmanomaly` configuration on a *per-query* basis, enabling isolation of data for different tenants in separate queries when querying the [VictoriaMetrics cluster version](https://docs.victoriametrics.com/cluster-victoriametrics/). For details, see the [documentation](https://docs.victoriametrics.com/anomaly-detection/components/reader/?highlight=tenant_id#per-query-parameters).
|
||||
- IMPROVEMENT: Speed up the model inference stage on multicore systems.
- IMPROVEMENT: Speed up the model fitting stage by 1.25–3x, depending on configuration complexity.
@ -158,7 +158,7 @@ services:
# ...
vmanomaly:
container_name: vmanomaly
image: victoriametrics/vmanomaly:v1.19.1
image: victoriametrics/vmanomaly:v1.19.2
# ...
ports:
- "8490:8490"
@ -337,10 +337,10 @@ For **horizontal** scalability, `vmanomaly` can be deployed as multiple independ
|
|||
|
||||
### Splitting the config
|
||||
|
||||
CLI utility named `config_splitter` is available in `vmanomaly` {{% available_from "v1.18.5" anomaly %}}. The config splitter tool enables splitting a parent vmanomaly YAML configuration file into multiple sub-configurations based on logical entities such as `schedulers`, `queries`, `models`, `extra_filters`. The resulting sub-configurations are fully validated, functional, account for many-to-many relationships between models and their associated queries, and the schedulers they are linked to. These sub-configurations can then be saved to a specified directory for further use:
|
||||
A CLI utility named `config_splitter` is available in `vmanomaly` {{% available_from "v1.18.5" anomaly %}}. The config splitter tool enables splitting a parent vmanomaly YAML configuration file into multiple sub-configurations based on logical entities such as `schedulers`, `queries`, `models`, `extra_filters` and `complete` {{% available_from "v1.19.2" anomaly %}}. The resulting sub-configurations are fully validated and functional, and they account for the many-to-many relationships between models, their associated queries, and the schedulers they are linked to. These sub-configurations can then be saved to a specified directory for further use:
|
||||
|
||||
```shellhelp
|
||||
usage: config_splitter.py [-h] --splitBy {schedulers,models,queries,extra_filters} --outputDir OUTPUT_DIR [--fileNameFormat {raw,hash,int}] [--loggerLevel {WARNING,INFO,ERROR,FATAL,DEBUG}]
|
||||
usage: config_splitter.py [-h] --splitBy {schedulers,models,queries,extra_filters,complete} --outputDir OUTPUT_DIR [--fileNameFormat {raw,hash,int}] [--loggerLevel {WARNING,ERROR,FATAL,INFO,DEBUG}]
|
||||
config [config ...]
|
||||
|
||||
Splits the configuration of VictoriaMetrics Anomaly Detection service by a logical entity.
|
||||
|
@ -350,21 +350,22 @@ positional arguments:
|
|||
|
||||
options:
|
||||
-h show this help message and exit
|
||||
--splitBy {schedulers,models,queries,extra_filters}
|
||||
The logical entity to split by. Choices: ['schedulers', 'models', 'queries', 'extra_filters'].
|
||||
--splitBy {schedulers,models,queries,extra_filters,complete}
|
||||
The logical entity to split by. Choices: ['schedulers', 'models', 'queries', 'extra_filters', 'complete']. `complete` produces configurations based on combinations of
|
||||
(scheduler, model, queries). Default: complete.
|
||||
--outputDir output_dir
|
||||
Directory where the split configuration files will be saved.
|
||||
--fileNameFormat {raw,hash,int}
|
||||
The naming format for the output configuration files. Choices: raw (use the entity alias), hash (use hashed alias), int (use a sequential integer from 0 to N for N
|
||||
produced sub-configs). Default: raw.
|
||||
--loggerLevel {WARNING,INFO,ERROR,FATAL,DEBUG}
|
||||
The naming format for the output configuration files. Choices: raw (use the entity alias), hash (use hashed alias), int (use a sequential integer from 0 to N for N produced
|
||||
sub-configs). Default: raw.
|
||||
--loggerLevel {WARNING,ERROR,FATAL,INFO,DEBUG}
|
||||
Minimum level to log. Default: INFO
|
||||
```
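For illustration, a minimal sketch of invoking the splitter with the new `complete` mode, assuming the image has been tagged as `vmanomaly` (as shown in the example below) and that `config_splitter.py` is accepted as the container command argument; the entrypoint argument and the mounted paths are assumptions, not taken from this diff:

```sh
# Split /workdir/parent_config.yml into one sub-config per (scheduler, model, queries) combination.
# The `config_splitter.py` command argument and the /workdir paths are hypothetical;
# the flags come from the usage text above.
docker run -it --rm -v "$(pwd)":/workdir vmanomaly \
  config_splitter.py \
  --splitBy complete \
  --outputDir /workdir/split_configs \
  --fileNameFormat int \
  /workdir/parent_config.yml
```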
|
||||
|
||||
Here’s an example of using the config splitter to divide configurations based on the `extra_filters` argument from the reader section:
|
||||
|
||||
```sh
|
||||
docker pull victoriametrics/vmanomaly:v1.19.1 && docker image tag victoriametrics/vmanomaly:v1.19.1 vmanomaly
|
||||
docker pull victoriametrics/vmanomaly:v1.19.2 && docker image tag victoriametrics/vmanomaly:v1.19.2 vmanomaly
|
||||
```
|
||||
|
||||
```sh
@ -101,13 +101,13 @@ Below are the steps to get `vmanomaly` up and running inside a Docker container:
1. Pull Docker image:

```sh
docker pull victoriametrics/vmanomaly:v1.19.1
docker pull victoriametrics/vmanomaly:v1.19.2
```

2. (Optional step) tag the `vmanomaly` Docker image:

```sh
docker image tag victoriametrics/vmanomaly:v1.19.1 vmanomaly
docker image tag victoriametrics/vmanomaly:v1.19.2 vmanomaly
```

3. To start the `vmanomaly` Docker container with a *license file*, use the command below.
@ -141,7 +141,7 @@ docker run -it --user 1000:1000 \
services:
# ...
vmanomaly:
image: victoriametrics/vmanomaly:v1.19.1
image: victoriametrics/vmanomaly:v1.19.2
volumes:
$YOUR_LICENSE_FILE_PATH:/license
$YOUR_CONFIG_FILE_PATH:/config.yml
@ -994,7 +994,7 @@ monitoring:
Let's pull the docker image for `vmanomaly`:

```sh
docker pull victoriametrics/vmanomaly:v1.19.1
docker pull victoriametrics/vmanomaly:v1.19.2
```

Now we can run the Docker container, mounting both the config and the custom model file as volumes:
@ -1008,7 +1008,7 @@ docker run -it \
-v $(PWD)/license:/license \
-v $(PWD)/custom_model.py:/vmanomaly/model/custom.py \
-v $(PWD)/custom.yaml:/config.yaml \
victoriametrics/vmanomaly:v1.19.1 /config.yaml \
victoriametrics/vmanomaly:v1.19.2 /config.yaml \
--licenseFile=/license
```
@ -2,9 +2,9 @@
|
|||
|
||||
- To use *vmanomaly*, part of the enterprise package, a license key is required. Obtain your key [here](https://victoriametrics.com/products/enterprise/trial/) for this tutorial or for enterprise use.
|
||||
- In the tutorial, we'll be using the following VictoriaMetrics components:
|
||||
- [VictoriaMetrics Single-Node](https://docs.victoriametrics.com/single-server-victoriametrics) (v1.109.1)
|
||||
- [vmalert](https://docs.victoriametrics.com/vmalert/) (v1.109.1)
|
||||
- [vmagent](https://docs.victoriametrics.com/vmagent/) (v1.109.1)
|
||||
- [VictoriaMetrics Single-Node](https://docs.victoriametrics.com/single-server-victoriametrics) (v1.110.0)
|
||||
- [vmalert](https://docs.victoriametrics.com/vmalert/) (v1.110.0)
|
||||
- [vmagent](https://docs.victoriametrics.com/vmagent/) (v1.110.0)
|
||||
- [Grafana](https://grafana.com/) (v.10.2.1)
|
||||
- [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/)
|
||||
- [Node exporter](https://github.com/prometheus/node_exporter#node-exporter) (v1.7.0) and [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) (v0.27.0)
|
||||
|
@ -315,7 +315,7 @@ Let's wrap it all up together into the `docker-compose.yml` file.
|
|||
services:
|
||||
vmagent:
|
||||
container_name: vmagent
|
||||
image: victoriametrics/vmagent:v1.109.1
|
||||
image: victoriametrics/vmagent:v1.110.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
@ -332,7 +332,7 @@ services:
|
|||
|
||||
victoriametrics:
|
||||
container_name: victoriametrics
|
||||
image: victoriametrics/victoria-metrics:v1.109.1
|
||||
image: victoriametrics/victoria-metrics:v1.110.0
|
||||
ports:
|
||||
- 8428:8428
|
||||
volumes:
@ -365,7 +365,7 @@ services:
|
|||
|
||||
vmalert:
|
||||
container_name: vmalert
|
||||
image: victoriametrics/vmalert:v1.109.1
|
||||
image: victoriametrics/vmalert:v1.110.0
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
@ -387,7 +387,7 @@ services:
|
|||
restart: always
|
||||
vmanomaly:
|
||||
container_name: vmanomaly
|
||||
image: victoriametrics/vmanomaly:v1.19.1
|
||||
image: victoriametrics/vmanomaly:v1.19.2
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
@ -18,20 +18,80 @@ See also [LTS releases](https://docs.victoriametrics.com/lts-releases/).
|
|||
|
||||
## tip
|
||||
|
||||
* FEATURE: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) and [vmstorage](https://docs.victoriametrics.com/cluster-victoriametrics/): improve startup times when opening a storage with the [retention](https://docs.victoriametrics.com/#retention) exceeding a few months.
|
||||
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add the ability to switch the heatmap to a line chart. Now vmui suggests switching to the line graph display if the heatmap can't be rendered properly. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8057).
|
||||
|
||||
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): improve clipboard error handling in tables and code snippets by showing detailed messages with possible reasons. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/7778).
|
||||
|
||||
## [v1.102.12](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.102.12)
|
||||
|
||||
Released at 2025-01-28
|
||||
|
||||
**v1.102.x is a line of [LTS releases](https://docs.victoriametrics.com/lts-releases/). It contains important up-to-date bugfixes for [VictoriaMetrics enterprise](https://docs.victoriametrics.com/enterprise.html).
|
||||
All these fixes are also included in [the latest community release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
|
||||
The v1.102.x line will be supported for at least 12 months since [v1.102.0](https://docs.victoriametrics.com/changelog/#v11020) release**
|
||||
|
||||
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/), `vminsert` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/) and [vmagent](https://docs.victoriametrics.com/vmagent/): log metric names for signals with unsupported delta temporality on ingestion via [OpenTelemetry protocol for metrics](https://docs.victoriametrics.com/#sending-data-via-opentelemetry). Thanks to @chenlujjj for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8018).
|
||||
* BUGFIX: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) and [vmselect](https://docs.victoriametrics.com/cluster-victoriametrics/): respect staleness detection in increase, increase_pure and delta functions when time series has gaps and `-search.maxStalenessInterval` is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8072) for details.
|
||||
|
||||
## [v1.97.17](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.97.17)
|
||||
|
||||
Released at 2025-01-28
|
||||
|
||||
**v1.97.x is a line of [LTS releases](https://docs.victoriametrics.com/lts-releases/). It contains important up-to-date bugfixes for [VictoriaMetrics enterprise](https://docs.victoriametrics.com/enterprise.html).
|
||||
All these fixes are also included in [the latest community release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
|
||||
The v1.97.x line will be supported for at least 12 months since [v1.97.0](https://docs.victoriametrics.com/CHANGELOG.html#v1970) release**
|
||||
|
||||
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/), `vminsert` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/) and [vmagent](https://docs.victoriametrics.com/vmagent/): log metric names for signals with unsupported delta temporality on ingestion via [OpenTelemetry protocol for metrics](https://docs.victoriametrics.com/#sending-data-via-opentelemetry). Thanks to @chenlujjj for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8018).
|
||||
* BUGFIX: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) and [vmselect](https://docs.victoriametrics.com/cluster-victoriametrics/): respect staleness detection in increase, increase_pure and delta functions when time series has gaps and `-search.maxStalenessInterval` is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8072) for details.
|
||||
|
||||
## [v1.110.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.110.0)
|
||||
|
||||
Released at 2025-01-24
|
||||
|
||||
* SECURITY: upgrade Go builder from Go1.23.4 to Go1.23.5. See the list of issues addressed in [Go1.23.5](https://github.com/golang/go/issues?q=milestone%3AGo1.23.5+label%3ACherryPickApproved).
|
||||
|
||||
* FEATURE: [MetricsQL](https://docs.victoriametrics.com/metricsql/): allow executing queries with `$__interval` and `$__rate_interval` - these placeholders are automatically replaced with `1i` (e.g. `step` arg value at [`/api/v1/query_range`](https://docs.victoriametrics.com/keyconcepts/#range-query)) during query execution. This simplifies copying queries from Grafana dashboards.
|
||||
* FEATURE: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/) and `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): add command-line flag `-search.maxDeleteDuration` (default 5m) to limit the duration of the `/api/v1/admin/tsdb/delete_series` call. Previously, the call was limited by `-search.maxQueryDuration`.
|
||||
* FEATURE: [dashboards](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/dashboards): all dashboards that use [VictoriaMetrics Grafana datasource](https://github.com/VictoriaMetrics/victoriametrics-datasource) were updated to use a [new datasource ID](https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/tag/v0.12.0).
|
||||
* FEATURE: [dashboards](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/dashboards): all dashboards that use [VictoriaMetrics Grafana datasource](https://github.com/VictoriaMetrics/victoriametrics-datasource) were updated to use a [new datasource ID](https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/tag/v0.12.0).
|
||||
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): reflect column settings for the table view in URL, so the table view can be shared via link. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7662).
|
||||
|
||||
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/), `vminsert` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/) and [vmagent](https://docs.victoriametrics.com/vmagent/): allow ingesting histograms with missing `_sum` metric via [OpenTelemetry ingestion protocol](https://docs.victoriametrics.com/#sending-data-via-opentelemetry) in the same way as Prometheus does.
|
||||
* BUGFIX: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) and [vmselect](https://docs.victoriametrics.com/cluster-victoriametrics/): respect staleness detection in increase, increase_pure and delta functions when time series has gaps and `-search.maxStalenessInterval` is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8072) for details.
|
||||
* BUGFIX: all VictoriaMetrics [enterprise](https://docs.victoriametrics.com/enterprise/) components: properly trim whitespaces at the end of license provided via `-license` and `-licenseFile` command-line flags. Previously, the trailing whitespaces could cause the license verification to fail.
|
||||
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth/): fix possible runtime panic during requests processing under heavy load. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8051) for details.
|
||||
* BUGFIX: [vmselect](https://docs.victoriametrics.com/cluster-victoriametrics/): fix panic when trying to delete series by using [multitenant read](https://docs.victoriametrics.com/cluster-victoriametrics/#multitenancy-via-labels) endpoint. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8126) for the details.
|
||||
* BUGFIX: [vmselect](https://docs.victoriametrics.com/cluster-victoriametrics/): prevent panic when `vmselect` receives an error response from `vmstorage` during the query execution and request processing for other `vmstorage` nodes is still in progress. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8114) for the details.
|
||||
* BUGFIX: all VictoriaMetrics [enterprise](https://docs.victoriametrics.com/enterprise/) components: properly trim whitespaces at the end of license provided via `-license` and `-licenseFile` command-line flags. Previously, the trailing whitespaces could cause the license verification to fail.
|
||||
* BUGFIX: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) and [vmselect](https://docs.victoriametrics.com/cluster-victoriametrics/): respect staleness detection in increase, increase_pure and delta functions when time series has gaps and `-search.maxStalenessInterval` is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8072) for details.
|
||||
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/), `vminsert` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/) and [vmagent](https://docs.victoriametrics.com/vmagent/): allow ingesting histograms with missing `_sum` metric via [OpenTelemetry ingestion protocol](https://docs.victoriametrics.com/#sending-data-via-opentelemetry) in the same way as Prometheus does.
|
||||
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix an issue where pressing the "Enter" key in the query editor did not execute the query. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8058).
|
||||
* BUGFIX: [export API](https://docs.victoriametrics.com/#how-to-export-time-series): cancel the export process on client connection close. Previously, client connection close was ignored and VictoriaMetrics started to hog CPU by exporting metrics to nowhere until it exported all of them.
|
||||
|
||||
## [v1.102.11](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.102.11)
|
||||
|
||||
Released at 2025-01-24
|
||||
|
||||
**v1.102.x is a line of [LTS releases](https://docs.victoriametrics.com/lts-releases/). It contains important up-to-date bugfixes for [VictoriaMetrics enterprise](https://docs.victoriametrics.com/enterprise.html).
|
||||
All these fixes are also included in [the latest community release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
|
||||
The v1.102.x line will be supported for at least 12 months since [v1.102.0](https://docs.victoriametrics.com/changelog/#v11020) release**
|
||||
|
||||
* SECURITY: upgrade Go builder from Go1.23.4 to Go1.23.5. See the list of issues addressed in [Go1.23.5](https://github.com/golang/go/issues?q=milestone%3AGo1.23.5+label%3ACherryPickApproved).
|
||||
|
||||
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth/): fix possible runtime panic during requests processing under heavy load. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8051) for details.
|
||||
* BUGFIX: all VictoriaMetrics [enterprise](https://docs.victoriametrics.com/enterprise/) components: properly trim whitespaces at the end of license provided via `-license` and `-licenseFile` command-line flags. Previously, the trailing whitespaces could cause the license verification to fail.
|
||||
* BUGFIX: all VictoriaMetrics [enterprise](https://docs.victoriametrics.com/enterprise/) components: remove unnecessary delay before failing if all online verification attempts have failed. This should reduce the time required for the component to proceed if all online verification attempts have failed.
|
||||
* BUGFIX: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) and [vmselect](https://docs.victoriametrics.com/cluster-victoriametrics/): don't take into account the last raw sample before the lookbehind window if the sample exceeds the staleness interval. This affects the correctness of the increase, increase_pure and delta functions when performing calculations on time series with gaps. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8002) for details.
|
||||
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/), `vminsert` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/) and [vmagent](https://docs.victoriametrics.com/vmagent/): allow ingesting histograms with missing `_sum` metric via [OpenTelemetry ingestion protocol](https://docs.victoriametrics.com/#sending-data-via-opentelemetry) in the same way as Prometheus does.
|
||||
|
||||
## [v1.97.16](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.97.16)
|
||||
|
||||
Released at 2025-01-24
|
||||
|
||||
**v1.97.x is a line of [LTS releases](https://docs.victoriametrics.com/lts-releases/). It contains important up-to-date bugfixes for [VictoriaMetrics enterprise](https://docs.victoriametrics.com/enterprise.html).
|
||||
All these fixes are also included in [the latest community release](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
|
||||
The v1.97.x line will be supported for at least 12 months since [v1.97.0](https://docs.victoriametrics.com/CHANGELOG.html#v1970) release**
|
||||
|
||||
* BUGFIX: all VictoriaMetrics [enterprise](https://docs.victoriametrics.com/enterprise/) components: remove unnecessary delay before failing if all online verification attempts have failed. This should reduce the time required for the component to proceed if all online verification attempts have failed.
|
||||
* BUGFIX: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) and [vmselect](https://docs.victoriametrics.com/cluster-victoriametrics/): don't take into account the last raw sample before the lookbehind window if the sample exceeds the staleness interval. This affects the correctness of the increase, increase_pure and delta functions when performing calculations on time series with gaps. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8002) for details.
|
||||
* BUGFIX: [vmsingle](https://docs.victoriametrics.com/single-server-victoriametrics/), `vminsert` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/) and [vmagent](https://docs.victoriametrics.com/vmagent/): allow ingesting histograms with missing `_sum` metric via [OpenTelemetry ingestion protocol](https://docs.victoriametrics.com/#sending-data-via-opentelemetry) in the same way as Prometheus does.
|
||||
|
||||
## [v1.109.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.109.1)
@ -82,7 +82,7 @@ VictoriaMetrics Enterprise components are available in the following forms:
|
|||
It is allowed to run VictoriaMetrics Enterprise components in [cases listed here](#valid-cases-for-victoriametrics-enterprise).
|
||||
|
||||
Binary releases of VictoriaMetrics Enterprise are available [at the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest).
|
||||
Enterprise binaries and packages have `enterprise` suffix in their names. For example, `victoria-metrics-linux-amd64-v1.109.1-enterprise.tar.gz`.
|
||||
Enterprise binaries and packages have `enterprise` suffix in their names. For example, `victoria-metrics-linux-amd64-v1.110.0-enterprise.tar.gz`.
|
||||
|
||||
In order to run a binary release of a VictoriaMetrics Enterprise component, please download the `*-enterprise.tar.gz` archive for your OS and architecture
from the [releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/latest) and unpack it. Then run the unpacked binary.
|
||||
|
@ -100,8 +100,8 @@ For example, the following command runs VictoriaMetrics Enterprise binary with t
|
|||
obtained at [this page](https://victoriametrics.com/products/enterprise/trial/):
|
||||
|
||||
```sh
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.109.1/victoria-metrics-linux-amd64-v1.109.1-enterprise.tar.gz
|
||||
tar -xzf victoria-metrics-linux-amd64-v1.109.1-enterprise.tar.gz
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.110.0/victoria-metrics-linux-amd64-v1.110.0-enterprise.tar.gz
|
||||
tar -xzf victoria-metrics-linux-amd64-v1.110.0-enterprise.tar.gz
|
||||
./victoria-metrics-prod -license=BASE64_ENCODED_LICENSE_KEY
|
||||
```
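The license can also be read from a file, as noted below; a minimal sketch of the same binary started with `-licenseFile` (the license path is hypothetical):

```sh
# Same unpacked binary as above, but reading the license key from a file
# instead of passing it inline; /path/to/vm-license is a hypothetical path.
./victoria-metrics-prod -licenseFile=/path/to/vm-license
```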
@ -116,7 +116,7 @@ Alternatively, VictoriaMetrics Enterprise license can be stored in the file and
|
|||
It is allowed to run VictoriaMetrics Enterprise components in [cases listed here](#valid-cases-for-victoriametrics-enterprise).
|
||||
|
||||
Docker images for VictoriaMetrics Enterprise are available [at VictoriaMetrics DockerHub](https://hub.docker.com/u/victoriametrics).
|
||||
Enterprise docker images have `enterprise` suffix in their names. For example, `victoriametrics/victoria-metrics:v1.109.1-enterprise`.
|
||||
Enterprise docker images have `enterprise` suffix in their names. For example, `victoriametrics/victoria-metrics:v1.110.0-enterprise`.
|
||||
|
||||
In order to run a Docker image of a VictoriaMetrics Enterprise component, it is required to provide the license key via a command-line
flag as described [here](#binary-releases).
@ -126,13 +126,13 @@ Enterprise license key can be obtained at [this page](https://victoriametrics.co
|
|||
For example, the following command runs VictoriaMetrics Enterprise Docker image with the specified license key:
|
||||
|
||||
```sh
|
||||
docker run --name=victoria-metrics victoriametrics/victoria-metrics:v1.109.1-enterprise -license=BASE64_ENCODED_LICENSE_KEY
|
||||
docker run --name=victoria-metrics victoriametrics/victoria-metrics:v1.110.0-enterprise -license=BASE64_ENCODED_LICENSE_KEY
|
||||
```
|
||||
|
||||
Alternatively, the license code can be stored in the file and then referred via `-licenseFile` command-line flag:
|
||||
|
||||
```sh
|
||||
docker run --name=victoria-metrics -v /vm-license:/vm-license victoriametrics/victoria-metrics:v1.109.1-enterprise -licenseFile=/path/to/vm-license
|
||||
docker run --name=victoria-metrics -v /vm-license:/vm-license victoriametrics/victoria-metrics:v1.110.0-enterprise -licenseFile=/path/to/vm-license
|
||||
```
|
||||
|
||||
Example docker-compose configuration:
@ -141,7 +141,7 @@ version: "3.5"
|
|||
services:
|
||||
victoriametrics:
|
||||
container_name: victoriametrics
|
||||
image: victoriametrics/victoria-metrics:v1.109.1
|
||||
image: victoriametrics/victoria-metrics:v1.110.0
|
||||
ports:
|
||||
- 8428:8428
|
||||
volumes:
|
||||
|
@ -173,7 +173,7 @@ is used to provide key in plain-text:
|
|||
```yaml
|
||||
server:
|
||||
image:
|
||||
tag: v1.109.1-enterprise
|
||||
tag: v1.110.0-enterprise
|
||||
|
||||
license:
|
||||
key: {BASE64_ENCODED_LICENSE_KEY}
|
||||
|
@ -184,7 +184,7 @@ In order to provide key via existing secret, the following values file is used:
|
|||
```yaml
|
||||
server:
|
||||
image:
|
||||
tag: v1.109.1-enterprise
|
||||
tag: v1.110.0-enterprise
|
||||
|
||||
license:
|
||||
secret:
|
||||
|
@ -233,7 +233,7 @@ spec:
|
|||
license:
|
||||
key: {BASE64_ENCODED_LICENSE_KEY}
|
||||
image:
|
||||
tag: v1.109.1-enterprise
|
||||
tag: v1.110.0-enterprise
|
||||
```
|
||||
|
||||
In order to provide key via existing secret, the following custom resource is used:
@ -250,7 +250,7 @@ spec:
|
|||
name: vm-license
|
||||
key: license
|
||||
image:
|
||||
tag: v1.109.1-enterprise
|
||||
tag: v1.110.0-enterprise
|
||||
```
|
||||
|
||||
Example secret with license key:
@ -236,27 +236,27 @@ services:
|
|||
- grafana_data:/var/lib/grafana/
|
||||
|
||||
vmsingle:
|
||||
image: victoriametrics/victoria-metrics:v1.109.1
|
||||
image: victoriametrics/victoria-metrics:v1.110.0
|
||||
command:
|
||||
- -httpListenAddr=0.0.0.0:8429
|
||||
|
||||
vmstorage:
|
||||
image: victoriametrics/vmstorage:v1.109.1-cluster
|
||||
image: victoriametrics/vmstorage:v1.110.0-cluster
|
||||
|
||||
vminsert:
|
||||
image: victoriametrics/vminsert:v1.109.1-cluster
|
||||
image: victoriametrics/vminsert:v1.110.0-cluster
|
||||
command:
|
||||
- -storageNode=vmstorage:8400
|
||||
- -httpListenAddr=0.0.0.0:8480
|
||||
|
||||
vmselect:
|
||||
image: victoriametrics/vmselect:v1.109.1-cluster
|
||||
image: victoriametrics/vmselect:v1.110.0-cluster
|
||||
command:
|
||||
- -storageNode=vmstorage:8401
|
||||
- -httpListenAddr=0.0.0.0:8481
|
||||
|
||||
vmagent:
|
||||
image: victoriametrics/vmagent:v1.109.1
|
||||
image: victoriametrics/vmagent:v1.110.0
|
||||
volumes:
|
||||
- ./scrape.yaml:/etc/vmagent/config.yaml
|
||||
command:
|
||||
|
@ -265,7 +265,7 @@ services:
|
|||
- -remoteWrite.url=http://vmsingle:8429/api/v1/write
|
||||
|
||||
vmgateway-cluster:
|
||||
image: victoriametrics/vmgateway:v1.109.1-enterprise
|
||||
image: victoriametrics/vmgateway:v1.110.0-enterprise
|
||||
ports:
|
||||
- 8431:8431
|
||||
volumes:
@ -281,7 +281,7 @@ services:
|
|||
- -auth.oidcDiscoveryEndpoints=http://keycloak:8080/realms/master/.well-known/openid-configuration
|
||||
|
||||
vmgateway-single:
|
||||
image: victoriametrics/vmgateway:v1.109.1-enterprise
|
||||
image: victoriametrics/vmgateway:v1.110.0-enterprise
|
||||
ports:
|
||||
- 8432:8431
|
||||
volumes:
@ -393,7 +393,7 @@ Once iDP configuration is done, vmagent configuration needs to be updated to use
|
|||
|
||||
```yaml
|
||||
vmagent:
|
||||
image: victoriametrics/vmagent:v1.109.1
|
||||
image: victoriametrics/vmagent:v1.110.0
|
||||
volumes:
|
||||
- ./scrape.yaml:/etc/vmagent/config.yaml
|
||||
- ./vmagent-client-secret:/etc/vmagent/oauth2-client-secret
|
@ -30,8 +30,8 @@ scrape_configs:
|
|||
After you have created the `scrape.yaml` file, download and unpack [single-node VictoriaMetrics](https://docs.victoriametrics.com/) to the same directory:
|
||||
|
||||
```
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.109.1/victoria-metrics-linux-amd64-v1.109.1.tar.gz
|
||||
tar xzf victoria-metrics-linux-amd64-v1.109.1.tar.gz
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.110.0/victoria-metrics-linux-amd64-v1.110.0.tar.gz
|
||||
tar xzf victoria-metrics-linux-amd64-v1.110.0.tar.gz
|
||||
```
|
||||
|
||||
Then start VictoriaMetrics and instruct it to scrape targets defined in `scrape.yaml` and save scraped metrics
|
||||
|
@ -146,8 +146,8 @@ Then start [single-node VictoriaMetrics](https://docs.victoriametrics.com/) acco
|
|||
|
||||
```yaml
|
||||
# Download and unpack single-node VictoriaMetrics
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.109.1/victoria-metrics-linux-amd64-v1.109.1.tar.gz
|
||||
tar xzf victoria-metrics-linux-amd64-v1.109.1.tar.gz
|
||||
wget https://github.com/VictoriaMetrics/VictoriaMetrics/releases/download/v1.110.0/victoria-metrics-linux-amd64-v1.110.0.tar.gz
|
||||
tar xzf victoria-metrics-linux-amd64-v1.110.0.tar.gz
|
||||
|
||||
# Run single-node VictoriaMetrics with the given scrape.yaml
|
||||
./victoria-metrics-prod -promscrape.config=scrape.yaml
|
@ -24,10 +24,10 @@ For a detailed explanation of each parameter, visit the guide on [Understanding
|
|||
|
||||
## Flag Parameters Configuration
|
||||
|
||||
| **Flag** | **Default Value** | **Description** |
|
||||
|-----------------------------------|---------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| **Max Label Value Length** | `<= 1kb` (Default: `4kb`) | Maximum length of label values. Longer values are truncated. Large label values can lead to high RAM consumption. This can be adjusted via [support](mailto:support-cloud@victoriametrics.com). |
|
||||
| **Max Labels per Time Series** | `<= 30` | Maximum number of labels per time series. Excess labels are dropped. Higher values can increase [cardinality](https://docs.victoriametrics.com/keyconcepts/#cardinality) and resource usage. This can be configured in [deployment settings](https://docs.victoriametrics.com/victoriametrics-cloud/quickstart/#modifying-an-existing-deployment). |
|
||||
| **Flag** | **Default Value** | **Description** |
|
||||
|-----------------------------------|---------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| **Max Label Value Length** | `<= 1kb` (Default: `4kb`) | Maximum length of label values. Time series with longer values are dropped. Large label values can lead to high RAM consumption. This can be adjusted via [support](mailto:support-cloud@victoriametrics.com). |
|
||||
| **Max Labels per Time Series** | `<= 30` | Maximum number of labels per time series. Time series with excess labels are dropped. Higher values can increase [cardinality](https://docs.victoriametrics.com/keyconcepts/#cardinality) and resource usage. This can be configured in [deployment settings](https://docs.victoriametrics.com/victoriametrics-cloud/quickstart/#modifying-an-existing-deployment). |
|
||||
|
||||
|
||||
## Terms and definitions:
@ -46,7 +46,7 @@ Installing VictoriaMetrics Grafana datasource [requires](https://grafana.com/doc
|
|||
|
||||
``` ini
|
||||
[plugins]
|
||||
allow_loading_unsigned_plugins = victoriametrics-datasource
|
||||
allow_loading_unsigned_plugins = victoriametrics-metrics-datasource
|
||||
```
|
||||
|
||||
For `grafana-operator` users, please adjust the `config:` section in your `kind=Grafana` resource as shown below
|
||||
|
@ -54,7 +54,7 @@ For `grafana-operator` users, please adjust `config:` section in your `kind=Graf
|
|||
```
|
||||
config:
|
||||
plugins:
|
||||
allow_loading_unsigned_plugins: "victoriametrics-datasource"
|
||||
allow_loading_unsigned_plugins: "victoriametrics-metrics-datasource"
|
||||
```
|
||||
|
||||
See [why VictoriaMetrics datasource is unsigned](#why-victoriaMetrics-datasource-is-unsigned).
|
||||
|
@ -83,7 +83,7 @@ datasources:
|
|||
# displayed in Grafana panels and queries.
|
||||
- name: VictoriaMetrics
|
||||
# <string, required> Sets the data source type.
|
||||
type: victoriametrics-datasource
|
||||
type: victoriametrics-metrics-datasource
|
||||
# <string, required> Sets the access mode, either
|
||||
# proxy or direct (Server or Browser in the UI).
|
||||
# Some data sources are incompatible with any setting
@ -99,7 +99,7 @@ datasources:
|
|||
# displayed in Grafana panels and queries.
|
||||
- name: VictoriaMetrics - cluster
|
||||
# <string, required> Sets the data source type.
|
||||
type: victoriametrics-datasource
|
||||
type: victoriametrics-metrics-datasource
|
||||
# <string, required> Sets the access mode, either
|
||||
# proxy or direct (Server or Browser in the UI).
|
||||
# Some data sources are incompatible with any setting
@ -124,8 +124,8 @@ Please find the example of provisioning Grafana instance with VictoriaMetrics da
|
|||
grafana:
|
||||
image: grafana/grafana:11.0.0
|
||||
environment:
|
||||
- GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.10.3/victoriametrics-datasource-v0.10.3.zip;victoriametrics-datasource
|
||||
- GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-datasource
|
||||
- GF_INSTALL_PLUGINS=https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.13.0/victoriametrics-metrics-datasource-v0.13.0.zip;victoriametrics-metrics-datasource
|
||||
- GF_PLUGINS_ALLOW_LOADING_UNSIGNED_PLUGINS=victoriametrics-metrics-datasource
|
||||
ports:
|
||||
- 3000:3000/tcp
|
||||
volumes:
@ -152,14 +152,14 @@ Option 1. Using Grafana provisioning:
|
|||
|
||||
``` yaml
|
||||
env:
|
||||
GF_INSTALL_PLUGINS: "https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.10.3/victoriametrics-datasource-v0.10.3.zip;victoriametrics-datasource"
|
||||
GF_INSTALL_PLUGINS: "https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.13.0/victoriametrics-metrics-datasource-v0.13.0.zip;victoriametrics-metrics-datasource"
|
||||
```
|
||||
|
||||
Option 2. Using Grafana plugins section in `values.yaml`:
|
||||
|
||||
``` yaml
|
||||
plugins:
|
||||
- https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.10.3/victoriametrics-datasource-v0.10.3.zip;victoriametrics-datasource
|
||||
- https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/v0.13.0/victoriametrics-metrics-datasource-v0.13.0.zip;victoriametrics-metrics-datasource
|
||||
```
|
||||
|
||||
Option 3. Using init container:
|
||||
|
@ -179,7 +179,7 @@ extraInitContainers:
|
|||
set -ex
|
||||
mkdir -p /var/lib/grafana/plugins/
|
||||
ver=$(curl -s -L https://api.github.com/repos/VictoriaMetrics/victoriametrics-datasource/releases/latest | grep -oE 'v[0-9]+\.[0-9]+\.[0-9]+' | head -1)
|
||||
curl -L https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/$ver/victoriametrics-datasource-$ver.tar.gz -o /var/lib/grafana/plugins/vm-plugin.tar.gz
|
||||
curl -L https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/$ver/victoriametrics-metrics-datasource-$ver.tar.gz -o /var/lib/grafana/plugins/vm-plugin.tar.gz
|
||||
tar -xf /var/lib/grafana/plugins/vm-plugin.tar.gz -C /var/lib/grafana/plugins/
|
||||
rm /var/lib/grafana/plugins/vm-plugin.tar.gz
|
||||
volumeMounts:
@ -239,7 +239,7 @@ spec:
|
|||
set -ex
|
||||
mkdir -p /var/lib/grafana/plugins/
|
||||
ver=$(curl -s https://api.github.com/repos/VictoriaMetrics/victoriametrics-datasource/releases/latest | grep -oE 'v[0-9]+\.[0-9]+\.[0-9]+' | head -1)
|
||||
curl -L https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/$ver/victoriametrics-datasource-$ver.tar.gz -o /var/lib/grafana/plugins/vm-plugin.tar.gz
|
||||
curl -L https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/$ver/victoriametrics-metrics-datasource-$ver.tar.gz -o /var/lib/grafana/plugins/vm-plugin.tar.gz
|
||||
tar -xf /var/lib/grafana/plugins/vm-plugin.tar.gz -C /var/lib/grafana/plugins/
|
||||
rm /var/lib/grafana/plugins/vm-plugin.tar.gz
|
||||
volumeMounts:
@ -247,7 +247,7 @@ spec:
|
|||
mountPath: /var/lib/grafana
|
||||
config:
|
||||
plugins:
|
||||
allow_loading_unsigned_plugins: victoriametrics-datasource
|
||||
allow_loading_unsigned_plugins: victoriametrics-metrics-datasource
|
||||
```
|
||||
|
||||
See [Grafana operator reference](https://grafana-operator.github.io/grafana-operator/docs/grafana/) to find more about Grafana operator.
@ -259,7 +259,7 @@ This example uses init container to download and install plugin.
|
|||
|
||||
```sh
|
||||
ver=$(curl -s https://api.github.com/repos/VictoriaMetrics/victoriametrics-datasource/releases/latest | grep -oE 'v[0-9]+\.[0-9]+\.[0-9]+' | head -1)
|
||||
curl -L https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/$ver/victoriametrics-datasource-$ver.tar.gz -o /var/lib/grafana/plugins/vm-plugin.tar.gz
|
||||
curl -L https://github.com/VictoriaMetrics/victoriametrics-datasource/releases/download/$ver/victoriametrics-metrics-datasource-$ver.tar.gz -o /var/lib/grafana/plugins/vm-plugin.tar.gz
|
||||
tar -xf /var/lib/grafana/plugins/vm-plugin.tar.gz -C /var/lib/grafana/plugins/
|
||||
rm /var/lib/grafana/plugins/vm-plugin.tar.gz
|
||||
```
|
||||
|
@ -279,7 +279,7 @@ plugins = {{path to directory with plugin}}
|
|||
|
||||
``` ini
|
||||
[plugins]
|
||||
allow_loading_unsigned_plugins = victoriametrics-datasource
|
||||
allow_loading_unsigned_plugins = victoriametrics-metrics-datasource
|
||||
```
|
||||
|
||||
### 2. Run the plugin
@ -661,6 +661,19 @@ unauthorized_user:
|
|||
headers:
|
||||
- "TenantID: foobar"
|
||||
- "X-Forwarded-For:"
|
||||
|
||||
users:
|
||||
- username: "foo"
|
||||
password: "bar"
|
||||
dump_request_on_errors: true
|
||||
url_map:
|
||||
- src_paths: ["/select/.*"]
|
||||
headers:
|
||||
- "AccountID: 1"
|
||||
- "ProjectID: 0"
|
||||
url_prefix:
|
||||
- "http://backend:9428/"
|
||||
|
||||
```
|
||||
|
||||
`vmauth` also supports the ability to set and remove HTTP response headers before returning the response from the backend to the client.
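As an illustration, a minimal sketch of such a configuration, assuming the option is named `response_headers` by analogy with the request `headers` option shown above; the option name, the header values, and the target file path are assumptions, not taken from this diff:

```sh
# Write a standalone hypothetical vmauth config that sets X-Server on responses
# and removes the Server header before the response reaches the client.
# `response_headers` is an assumed option name; the backend URL mirrors the example above.
cat > /tmp/vmauth-response-headers.yml <<'EOF'
unauthorized_user:
  response_headers:
    - "X-Server: vmauth"
    - "Server:"
  url_prefix:
    - "http://backend:9428/"
EOF
```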
87 go.mod
@ -1,9 +1,6 @@
|
|||
module github.com/VictoriaMetrics/VictoriaMetrics
|
||||
|
||||
go 1.23.3
|
||||
|
||||
// See // See https://github.com/googleapis/google-cloud-go/issues/11283#issuecomment-2558515586
|
||||
exclude google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a
|
||||
go 1.23.5
|
||||
|
||||
// This is needed in order to avoid vmbackup and vmrestore binary size increase by 20MB
|
||||
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/8008
|
||||
|
@ -13,19 +10,19 @@ replace cloud.google.com/go/storage => cloud.google.com/go/storage v1.43.0
|
|||
require (
|
||||
cloud.google.com/go/storage v1.50.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0
|
||||
github.com/VictoriaMetrics/easyproto v0.1.4
|
||||
github.com/VictoriaMetrics/fastcache v1.12.2
|
||||
github.com/VictoriaMetrics/metrics v1.35.1
|
||||
github.com/VictoriaMetrics/metricsql v0.82.0
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.8
|
||||
github.com/aws/aws-sdk-go-v2/config v1.28.10
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.48
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2
|
||||
github.com/bmatcuk/doublestar/v4 v4.7.1
|
||||
github.com/aws/aws-sdk-go-v2 v1.34.0
|
||||
github.com/aws/aws-sdk-go-v2/config v1.29.2
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.54
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.74.1
|
||||
github.com/bmatcuk/doublestar/v4 v4.8.1
|
||||
github.com/cespare/xxhash/v2 v2.3.0
|
||||
github.com/cheggaaa/pb/v3 v3.1.5
|
||||
github.com/cheggaaa/pb/v3 v3.1.6
|
||||
github.com/ergochat/readline v0.1.3
|
||||
github.com/gogo/protobuf v1.3.2
|
||||
github.com/golang/snappy v0.0.4
|
||||
|
@ -45,7 +42,7 @@ require (
|
|||
golang.org/x/net v0.34.0
|
||||
golang.org/x/oauth2 v0.25.0
|
||||
golang.org/x/sys v0.29.0
|
||||
google.golang.org/api v0.216.0
|
||||
google.golang.org/api v0.218.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
||||
|
@ -59,22 +56,22 @@ require (
|
|||
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 // indirect
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
|
||||
github.com/aws/aws-sdk-go v1.55.5 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.51 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.27 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.6 // indirect
|
||||
github.com/aws/smithy-go v1.22.1 // indirect
|
||||
github.com/aws/aws-sdk-go v1.55.6 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.17.55 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.25 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.29 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.29 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.29 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.3 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.10 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.24.12 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11 // indirect
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.33.10 // indirect
|
||||
github.com/aws/smithy-go v1.22.2 // indirect
|
||||
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3 // indirect
|
||||
|
@ -107,7 +104,7 @@ require (
|
|||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/prometheus/client_golang v1.20.5 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.61.0 // indirect
|
||||
github.com/prometheus/common v0.62.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/prometheus/sigv4 v0.1.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
|
@ -116,15 +113,15 @@ require (
|
|||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/collector/pdata v1.23.0 // indirect
|
||||
go.opentelemetry.io/collector/semconv v0.117.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
|
||||
go.opentelemetry.io/otel v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.33.0 // indirect
|
||||
go.opentelemetry.io/collector/pdata v1.24.0 // indirect
|
||||
go.opentelemetry.io/collector/semconv v0.118.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.59.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
|
||||
go.opentelemetry.io/otel v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.34.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.34.0 // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/goleak v1.3.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
|
@ -133,14 +130,14 @@ require (
|
|||
golang.org/x/sync v0.10.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250106144421-5f5ef82da422 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422 // indirect
|
||||
google.golang.org/grpc v1.69.2 // indirect
|
||||
google.golang.org/protobuf v1.36.2 // indirect
|
||||
google.golang.org/genproto v0.0.0-20250127172529-29210b9bc287 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250127172529-29210b9bc287 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287 // indirect
|
||||
google.golang.org/grpc v1.70.0 // indirect
|
||||
google.golang.org/protobuf v1.36.4 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apimachinery v0.32.0 // indirect
|
||||
k8s.io/client-go v0.32.0 // indirect
|
||||
k8s.io/apimachinery v0.32.1 // indirect
|
||||
k8s.io/client-go v0.32.1 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect
|
||||
)
|
176 go.sum
@ -14,10 +14,10 @@ cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyX
|
|||
cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0 h1:g0EZJwz7xkXQiZAI5xi9f3WWFYBlX1CPTrR+NDToRkQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.0/go.mod h1:XCW7KnZet0Opnr7HccfUw1PLc4CjHqpcaxW8DHklNkQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 h1:B/dfvscEQtew9dVuoxqxrUKKv8Ih2f55PydknDamU+g=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0/go.mod h1:fiPSssYvltE08HJchL04dOy+RD4hgrjph0cwGGMntdI=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0 h1:+m0M/LFxN43KvULkDNfdXOgrjtg6UYJPFBJyuEcRCAw=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.0/go.mod h1:PwOyop78lveYMRs6oCxjiVyBdyCgIYH6XHIVZO9/SFQ=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1 h1:1mvYtZfWQAnwNah/C+Z+Jb9rQH95LPE2vlmMuWAHJk8=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.1/go.mod h1:75I/mXtme1JyWFtz8GocPHVFyH421IBoZErnO16dd0k=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1 h1:Bk5uOhSAenHyR5P61D/NzeQCv+4fEVV8mOkJ82NqpWw=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.1/go.mod h1:QZ4pw3or1WPmRBxf0cHd1tknzrT54WPBOQoGutCPvSU=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
|
||||
|
@ -26,8 +26,8 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3
|
|||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0 h1:PiSrjRPpkQNjrM8H0WwKMnZUdu1RGMtd/LdGKUrOo+c=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.6.0/go.mod h1:oDrbWx4ewMylP7xHivfgixbfGBT6APAwsSoHRKotnIc=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0 h1:mlmW46Q0B79I+Aj4azKC6xDMFN9a9SyZWESlGWYXbFs=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0/go.mod h1:PXe2h+LKcWTX9afWdZoHyODqR4fBa5boUM/8uJfZ0Jo=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 h1:UXT0o77lXQrikd1kgwIPQOUect7EoR/+sbP4wQKdzxM=
|
||||
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0/go.mod h1:cTvi54pg19DoT07ekoeMgE/taAwNtCShVeZqA+Iv2xI=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
|
||||
github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
|
||||
github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ=
|
||||
|
@ -53,57 +53,57 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah
|
|||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v1.32.8 h1:cZV+NUS/eGxKXMtmyhtYPJ7Z4YLoI/V8bkTdRZfYhGo=
github.com/aws/aws-sdk-go-v2 v1.32.8/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc=
github.com/aws/aws-sdk-go-v2/config v1.28.10 h1:fKODZHfqQu06pCzR69KJ3GuttraRJkhlC8g80RZ0Dfg=
github.com/aws/aws-sdk-go-v2/config v1.28.10/go.mod h1:PvdxRYZ5Um9QMq9PQ0zHHNdtKK+he2NHtFCUFMXWXeg=
github.com/aws/aws-sdk-go-v2/credentials v1.17.51 h1:F/9Sm6Y6k4LqDesZDPJCLxQGXNNHd/ZtJiWd0lCZKRk=
github.com/aws/aws-sdk-go-v2/credentials v1.17.51/go.mod h1:TKbzCHm43AoPyA+iLGGcruXd4AFhF8tOmLex2R9jWNQ=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23 h1:IBAoD/1d8A8/1aA8g4MBVtTRHhXRiNAgwdbo/xRM2DI=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.23/go.mod h1:vfENuCM7dofkgKpYzuzf1VT1UKkA/YL3qanfBn7HCaA=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.48 h1:XnXVe2zRyPf0+fAW5L05esmngvBpC6DQZK7oZB/z/Co=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.48/go.mod h1:S3wey90OrS4f7kYxH6PT175YyEcHTORY07++HurMaRM=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27 h1:jSJjSBzw8VDIbWv+mmvBSP8ezsztMYJGH+eKqi9AmNs=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.27/go.mod h1:/DAhLbFRgwhmvJdOfSm+WwikZrCuUJiA4WgJG0fTNSw=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27 h1:l+X4K77Dui85pIj5foXDhPlnqcNRG2QUyvca300lXh8=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.27/go.mod h1:KvZXSFEXm6x84yE8qffKvT3x8J5clWnVFXphpohhzJ8=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.27 h1:AmB5QxnD+fBFrg9LcqzkgF/CaYvMyU/BTlejG4t1S7Q=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.27/go.mod h1:Sai7P3xTiyv9ZUYO3IFxMnmiIP759/67iQbU4kdmkyU=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.8 h1:iwYS40JnrBeA9e9aI5S6KKN4EB2zR4iUVYN0nwVivz4=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.8/go.mod h1:Fm9Mi+ApqmFiknZtGpohVcBGvpTu542VC4XO9YudRi0=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8 h1:cWno7lefSH6Pp+mSznagKCgfDGeZRin66UvYUqAkyeA=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.8/go.mod h1:tPD+VjU3ABTBoEJ3nctu5Nyg4P4yjqSH5bJGGkY4+XE=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.8 h1:/Mn7gTedG86nbpjT4QEKsN1D/fThiYe1qvq7WsBGNHg=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.8/go.mod h1:Ae3va9LPmvjj231ukHB6UeT8nS7wTPfC3tMZSZMwNYg=
github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2 h1:a7aQ3RW+ug4IbhoQp29NZdc7vqrzKZZfWZSaQAXOZvQ=
github.com/aws/aws-sdk-go-v2/service/s3 v1.72.2/go.mod h1:xMekrnhmJ5aqmyxtmALs7mlvXw5xRh+eYjOjvrIIFJ4=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.9 h1:YqtxripbjWb2QLyzRK9pByfEDvgg95gpC2AyDq4hFE8=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.9/go.mod h1:lV8iQpg6OLOfBnqbGMBKYjilBlf633qwHnBEiMSPoHY=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8 h1:6dBT1Lz8fK11m22R+AqfRsFn8320K0T5DTGxxOQBSMw=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.8/go.mod h1:/kiBvRQXBc6xeJTYzhSdGvJ5vm1tjaDEjH+MSeRJnlY=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.6 h1:VwhTrsTuVn52an4mXx29PqRzs2Dvu921NpGk7y43tAM=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.6/go.mod h1:+8h7PZb3yY5ftmVLD7ocEoE98hdc8PoKS0H3wfx1dlc=
github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro=
github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk=
github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v1.34.0 h1:9iyL+cjifckRGEVpRKZP3eIxVlL06Qk1Tk13vreaVQU=
github.com/aws/aws-sdk-go-v2 v1.34.0/go.mod h1:JgstGg0JjWU1KpVJjD5H0y0yyAIpSdKEq556EI6yOOM=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg=
github.com/aws/aws-sdk-go-v2/config v1.29.2 h1:JuIxOEPcSKpMB0J+khMjznG9LIhIBdmqNiEcPclnwqc=
github.com/aws/aws-sdk-go-v2/config v1.29.2/go.mod h1:HktTHregOZwNSM/e7WTfVSu9RCX+3eOv+6ij27PtaYs=
github.com/aws/aws-sdk-go-v2/credentials v1.17.55 h1:CDhKnDEaGkLA5ZszV/qw5uwN5M8rbv9Cl0JRN+PRsaM=
github.com/aws/aws-sdk-go-v2/credentials v1.17.55/go.mod h1:kPD/vj+RB5MREDUky376+zdnjZpR+WgdBBvwrmnlmKE=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.25 h1:kU7tmXNaJ07LsyN3BUgGqAmVmQtq0w6duVIHAKfp0/w=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.25/go.mod h1:OiC8+OiqrURb1wrwmr/UbOVLFSWEGxjinj5C299VQdo=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.54 h1:6BWOAho3Cgdy4cmNJ4HWY8VZgqODEU7Gw78XXireNZI=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.54/go.mod h1:n+t/oyYErOV3jf/GxNTVlizSM9RMV1yH7jvcIvld3Do=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.29 h1:Ej0Rf3GMv50Qh4G4852j2djtoDb7AzQ7MuQeFHa3D70=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.29/go.mod h1:oeNTC7PwJNoM5AznVr23wxhLnuJv0ZDe5v7w0wqIs9M=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.29 h1:6e8a71X+9GfghragVevC5bZqvATtc3mAMgxpSNbgzF0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.29/go.mod h1:c4jkZiQ+BWpNqq7VtrxjwISrLrt/VvPq3XiopkUIolI=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2 h1:Pg9URiobXy85kgFev3og2CuOZ8JZUBENF+dcgWBaYNk=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.2/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.29 h1:g9OUETuxA8i/Www5Cby0R3WSTe7ppFTZXHVLNskNS4w=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.29/go.mod h1:CQk+koLR1QeY1+vm7lqNfFii07DEderKq6T3F1L2pyc=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2 h1:D4oz8/CzT9bAEYtVhSBmFj2dNOtaHOtMKc2vHBwYizA=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.2/go.mod h1:Za3IHqTQ+yNcRHxu1OFucBh0ACZT4j4VQFF0BqpZcLY=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.3 h1:EP1ITDgYVPM2dL1bBBntJ7AW5yTjuWGz9XO+CZwpALU=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.5.3/go.mod h1:5lWNWeAgWenJ/BZ/CP9k9DjLbC0pjnM045WjXRPPi14=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.10 h1:hN4yJBGswmFTOVYqmbz1GBs9ZMtQe8SrYxPwrkrlRv8=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.10/go.mod h1:TsxON4fEZXyrKY+D+3d2gSTyJkGORexIYab9PTf56DA=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.10 h1:fXoWC2gi7tdJYNTPnnlSGzEVwewUchOi8xVq/dkg8Qs=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.10/go.mod h1:cvzBApD5dVazHU8C2rbBQzzzsKc8m5+wNJ9mCRZLKPc=
github.com/aws/aws-sdk-go-v2/service/s3 v1.74.1 h1:9LawY3cDJ3HE+v2GMd5SOkNLDwgN4K7TsCjyVBYu/L4=
github.com/aws/aws-sdk-go-v2/service/s3 v1.74.1/go.mod h1:hHnELVnIHltd8EOF3YzahVX6F6y2C6dNqpRj1IMkS5I=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.12 h1:kznaW4f81mNMlREkU9w3jUuJvU5g/KsqDV43ab7Rp6s=
github.com/aws/aws-sdk-go-v2/service/sso v1.24.12/go.mod h1:bZy9r8e0/s0P7BSDHgMLXK2KvdyRRBIQ2blKlvLt0IU=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11 h1:mUwIpAvILeKFnRx4h1dEgGEFGuV8KJ3pEScZWVFYuZA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.11/go.mod h1:JDJtD+b8HNVv71axz8+S5492KM8wTzHRFpMKQbPlYxw=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.10 h1:g9d+TOsu3ac7SgmY2dUf1qMgu/uJVTlQ4VCbH6hRxSw=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.10/go.mod h1:WZfNmntu92HO44MVZAubQaz3qCuIdeOdog2sADfU6hU=
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q=
github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/bmatcuk/doublestar/v4 v4.8.1 h1:54Bopc5c2cAvhLRAzqOGCYHYyhcDHsFF4wWIR5wKP38=
github.com/bmatcuk/doublestar/v4 v4.8.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb/v3 v3.1.5 h1:QuuUzeM2WsAqG2gMqtzaWithDJv0i+i6UlnwSCI4QLk=
github.com/cheggaaa/pb/v3 v3.1.5/go.mod h1:CrxkeghYTXi1lQBEI7jSn+3svI3cuc19haAj6jM60XI=
github.com/cheggaaa/pb/v3 v3.1.6 h1:h0x+vd7EiUohAJ29DJtJy+SNAc55t/elW3jCD086EXk=
github.com/cheggaaa/pb/v3 v3.1.6/go.mod h1:urxmfVtaxT+9aWk92DbsvXFZtNSWQSO5TRAp+MJ3l1s=
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3 h1:boJj011Hh+874zpIySeApCX4GeOjPl9qhRF3QuIZq+Q=
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
@ -298,16 +298,16 @@ github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/prometheus v0.301.0 h1:0z8dgegmILivNomCd79RKvVkIols8vBGPKmcIBc7OyY=
github.com/prometheus/prometheus v0.301.0/go.mod h1:BJLjWCKNfRfjp7Q48DrAjARnCi7GhfUVvUFEAWTssZM=
github.com/prometheus/sigv4 v0.1.1 h1:UJxjOqVcXctZlwDjpUpZ2OiMWJdFijgSofwLzO1Xk0Q=
github.com/prometheus/sigv4 v0.1.1/go.mod h1:RAmWVKqx0bwi0Qm4lrKMXFM0nhpesBcenfCtz9qRyH8=
github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4=
github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA=
github.com/redis/go-redis/v9 v9.7.0 h1:HhLSs+B6O021gwzl+locl0zEDnyNkxMtf/Z3NNBMa9E=
github.com/redis/go-redis/v9 v9.7.0/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@ -359,26 +359,26 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/collector/pdata v1.23.0 h1:tEk0dkfB8RdSukoOMfEa8duB938gfZowdfRkrJxGDrw=
go.opentelemetry.io/collector/pdata v1.23.0/go.mod h1:I2jggpBMiO8A+7TXhzNpcJZkJtvi1cU0iVNIi+6bc+o=
go.opentelemetry.io/collector/semconv v0.117.0 h1:SavOvSbHPVD/QdAnXlI/cMca+yxCNyXStY1mQzerHs4=
go.opentelemetry.io/collector/semconv v0.117.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0 h1:xwH3QJv6zL4u+gkPUu59NeT1Gyw9nScWT8FQpKLUJJI=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0/go.mod h1:uosvgpqTcTXtcPQORTbEkZNDQTCDOgTz1fe6aLSyqrQ=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg=
go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc=
go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w=
go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.59.0 h1:iQZYNQ7WwIcYXzOPR46FQv9O0dS1PW16RjvR0TjDOe8=
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.59.0/go.mod h1:54CaSNqYEXvpzDh8KPjiMVoWm60t5R0dZRt0leEPgAs=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I=
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU=
go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q=
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@ -437,18 +437,18 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.216.0 h1:xnEHy+xWFrtYInWPy8OdGFsyIfWJjtVnO39g7pz2BFY=
google.golang.org/api v0.216.0/go.mod h1:K9wzQMvWi47Z9IU7OgdOofvZuw75Ge3PPITImZR/UyI=
google.golang.org/genproto v0.0.0-20250106144421-5f5ef82da422 h1:6GUHKGv2huWOHKmDXLMNE94q3fBDlEHI+oTRIZSebK0=
google.golang.org/genproto v0.0.0-20250106144421-5f5ef82da422/go.mod h1:1NPAxoesyw/SgLPqaUp9u1f9PWCLAk/jVmhx7gJZStg=
google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24=
google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422 h1:3UsHvIr4Wc2aW4brOaSCmcxh9ksica6fHEr8P1XhkYw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU=
google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/api v0.218.0 h1:x6JCjEWeZ9PFCRe9z0FBrNwj7pB7DOAqT35N+IPnAUA=
google.golang.org/api v0.218.0/go.mod h1:5VGHBAkxrA/8EFjLVEYmMUJ8/8+gWWQ3s4cFH0FxG2M=
google.golang.org/genproto v0.0.0-20250127172529-29210b9bc287 h1:WoUI1G0DQ648FKvSl756SKxHQR/bI+y4HyyIQfxMWI8=
google.golang.org/genproto v0.0.0-20250127172529-29210b9bc287/go.mod h1:wkQ2Aj/xvshAUDtO/JHvu9y+AaN9cqs28QuSVSHtZSY=
google.golang.org/genproto/googleapis/api v0.0.0-20250127172529-29210b9bc287 h1:A2ni10G3UlplFrWdCDJTl7D7mJ7GSRm37S+PDimaKRw=
google.golang.org/genproto/googleapis/api v0.0.0-20250127172529-29210b9bc287/go.mod h1:iYONQfRdizDB8JJBybql13nArx91jcUk7zCXEsOofM4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287 h1:J1H9f+LEdWAfHcez/4cvaVBox7cOYT+IU6rgqj5x++8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk=
google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM=
google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
@ -464,12 +464,12 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc=
k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k=
k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs=
k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU=
k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
@ -64,21 +64,30 @@ func (bw *Writer) Write(p []byte) (int, error) {
return 0, bw.err
}
n, err := bw.bw.Write(p)
if err != nil && !netutil.IsTrivialNetworkError(err) {
if err != nil {
bw.err = fmt.Errorf("cannot send %d bytes to client: %w", len(p), err)
}
return n, bw.err
}

// Flush flushes bw to the underlying writer.
//
// Connection close errors are ignored to not trigger on them and to not write to logs, but Write method doesn't ignore
// them since it may lead to an unexpected behaviour (see https://github.com/VictoriaMetrics/VictoriaMetrics/pull/8157)
func (bw *Writer) Flush() error {
bw.lock.Lock()
defer bw.lock.Unlock()
if bw.err != nil {
if netutil.IsTrivialNetworkError(bw.err) {
return nil
}
return bw.err
}
if err := bw.bw.Flush(); err != nil && !netutil.IsTrivialNetworkError(err) {
if err := bw.bw.Flush(); err != nil {
bw.err = fmt.Errorf("cannot flush data to client: %w", err)
if netutil.IsTrivialNetworkError(err) {
return nil
}
}
return bw.err
}
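The hunk above changes where "the client went away" errors are swallowed: Flush ignores them so they do not spam logs, while Write keeps recording them so callers stop producing output for a dead connection. A minimal self-contained sketch of that split is shown below; `isTrivialNetworkError` and `responseWriter` are hypothetical stand-ins, not the netutil/bufferedwriter APIs from this diff.

```go
package sketch

import (
	"bufio"
	"errors"
	"fmt"
	"net"
	"syscall"
)

// isTrivialNetworkError is an assumed helper: it reports errors that only mean
// the peer closed the connection.
func isTrivialNetworkError(err error) bool {
	return errors.Is(err, net.ErrClosed) || errors.Is(err, syscall.EPIPE) || errors.Is(err, syscall.ECONNRESET)
}

type responseWriter struct {
	bw  *bufio.Writer
	err error // sticky error, like the patched Writer above
}

// Write records every error so later calls fail fast.
func (w *responseWriter) Write(p []byte) (int, error) {
	if w.err != nil {
		return 0, w.err
	}
	n, err := w.bw.Write(p)
	if err != nil {
		w.err = fmt.Errorf("cannot send %d bytes to client: %w", len(p), err)
	}
	return n, w.err
}

// Flush treats a closed client connection as a non-event and returns nil for it.
func (w *responseWriter) Flush() error {
	if w.err != nil {
		if isTrivialNetworkError(w.err) {
			return nil
		}
		return w.err
	}
	if err := w.bw.Flush(); err != nil {
		w.err = fmt.Errorf("cannot flush data to client: %w", err)
		if isTrivialNetworkError(err) {
			return nil
		}
	}
	return w.err
}
```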
@ -8,6 +8,8 @@ import (
"strings"

"github.com/VictoriaMetrics/metrics"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

// AvailableCPUs returns the number of available CPU cores for the app.
@ -44,6 +46,9 @@ func updateGOMAXPROCSToCPUQuota(cpuQuota float64) {
if gomaxprocs <= 0 {
gomaxprocs = 1
}
if cpuQuota > float64(gomaxprocs) {
logger.Warnf("rounding CPU quota %.1f to %d CPUs for performance reasons - see https://docs.victoriametrics.com/bestpractices/#kubernetes", cpuQuota, gomaxprocs)
}

numCPU := runtime.NumCPU()
if gomaxprocs > numCPU {
```
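For readers unfamiliar with the quota-to-GOMAXPROCS mapping this hunk adjusts, here is one hedged way such a derivation can look: round a fractional container CPU quota to whole CPUs and never exceed the host core count. This is only an illustrative helper, not the lib/cgroup implementation.

```go
package sketch

import (
	"math"
	"runtime"
)

// gomaxprocsForQuota is an assumed helper: it turns a fractional CPU quota
// (e.g. 1.5 from a Kubernetes limit) into a sensible GOMAXPROCS value.
func gomaxprocsForQuota(cpuQuota float64) int {
	gomaxprocs := int(math.Ceil(cpuQuota)) // round partial CPUs up to a whole core
	if gomaxprocs <= 0 {
		gomaxprocs = 1
	}
	if numCPU := runtime.NumCPU(); gomaxprocs > numCPU {
		gomaxprocs = numCPU // never exceed the physically available cores
	}
	return gomaxprocs
}
```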
@ -9,7 +9,7 @@ import (
"sync"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/envutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs/fsutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
"github.com/VictoriaMetrics/metrics"
@ -242,9 +242,9 @@ func (w *Writer) MustClose() {
putBufioWriter(w.bw)
w.bw = nil

if !envutil.IsFsyncDisabled() {
if !fsutil.IsFsyncDisabled() {
if err := w.f.Sync(); err != nil {
logger.Panicf("FATAL: cannot sync file %q: %d", w.f.Name(), err)
logger.Panicf("FATAL: cannot sync file %q: %s", w.f.Name(), err)
}
}
if err := w.st.close(); err != nil {
@ -4,7 +4,7 @@ import (
"fmt"
"os"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/envutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs/fsutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"golang.org/x/sys/unix"
)
@ -31,7 +31,7 @@ func mustSyncPath(path string) {
if err != nil {
logger.Panicf("FATAL: cannot open file for fsync: %s", err)
}
if !envutil.IsFsyncDisabled() {
if !fsutil.IsFsyncDisabled() {
if err := d.Sync(); err != nil {
_ = d.Close()
logger.Panicf("FATAL: cannot flush %q to storage: %s", path, err)
@ -6,7 +6,7 @@ import (
"fmt"
"os"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/envutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs/fsutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"golang.org/x/sys/unix"
)
@ -24,7 +24,7 @@ func mustSyncPath(path string) {
if err != nil {
logger.Panicf("FATAL: cannot open file for fsync: %s", err)
}
if !envutil.IsFsyncDisabled() {
if !fsutil.IsFsyncDisabled() {
if err := d.Sync(); err != nil {
_ = d.Close()
logger.Panicf("FATAL: cannot flush %q to storage: %s", path, err)
@ -1,4 +1,4 @@
package envutil
package fsutil

import (
"os"
@ -14,6 +14,12 @@ import (
// The fsync is enabled for ordinary programs. It can be disabled by setting DISABLE_FSYNC_FOR_TESTING
// environment variable to true.
func IsFsyncDisabled() bool {
return isFsyncDisabled
}

var isFsyncDisabled = isFsyncDisabledInternal()

func isFsyncDisabledInternal() bool {
s := os.Getenv("DISABLE_FSYNC_FOR_TESTING")
if s == "" {
return testing.Testing()
@ -1,18 +1,18 @@
package envutil
package fsutil

import (
"os"
"testing"
)

func TestIsFsyncDisabled(t *testing.T) {
func TestIsFsyncDisabledInternal(t *testing.T) {
f := func(envVarValue string, resultExpected bool) {
t.Helper()

os.Setenv("DISABLE_FSYNC_FOR_TESTING", envVarValue)
defer os.Unsetenv("DISABLE_FSYNC_FOR_TESTING")

result := IsFsyncDisabled()
result := isFsyncDisabledInternal()
if result != resultExpected {
t.Errorf("unexpected value for DISABLE_FSYNC_FOR_TESTING=%q; got %v; want %v", envVarValue, result, resultExpected)
}
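The fsutil hunks above describe an environment toggle: fsync is skipped when DISABLE_FSYNC_FOR_TESTING=true, and it defaults to "disabled" inside Go tests. A short hedged sketch of how call sites typically consume such a toggle follows; `mustWriteDurably` is a hypothetical helper, not part of the diff.

```go
package sketch

import "os"

// mustWriteDurably writes data and then fsyncs, unless durability has been
// explicitly relaxed for tests via DISABLE_FSYNC_FOR_TESTING=true.
func mustWriteDurably(f *os.File, data []byte) error {
	if _, err := f.Write(data); err != nil {
		return err
	}
	if os.Getenv("DISABLE_FSYNC_FOR_TESTING") == "true" {
		return nil // skip the expensive fsync in tests and benchmarks
	}
	return f.Sync()
}
```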
@ -29,10 +29,10 @@ const maxRowsPerBlock = 8 * 1024 * 1024
// in excess memory usage during data ingestion and significant slowdown during query execution.
const maxColumnsPerBlock = 2_000

// MaxFieldNameSize is the maximum size in bytes for field name.
// maxFieldNameSize is the maximum size in bytes for field name.
//
// Longer field names are truncated during data ingestion to MaxFieldNameSize length.
const MaxFieldNameSize = 128
// Log entries with longer field names are rejected during data ingestion.
const maxFieldNameSize = 128

// maxConstColumnValueSize is the maximum size in bytes for const column value.
//
@ -65,15 +65,6 @@ func (sf *sortedFields) Swap(i, j int) {
a[i], a[j] = a[j], a[i]
}

// RowFormatter implementes fmt.Stringer for []Field aka a single log row
type RowFormatter []Field

// String returns user-readable representation for rf
func (rf *RowFormatter) String() string {
result := MarshalFieldsToJSON(nil, *rf)
return string(result)
}

// Reset resets lr with all its settings.
//
// Call ResetKeepSettings() for resetting lr without resetting its settings.
@ -142,23 +133,33 @@ func (lr *LogRows) NeedFlush() bool {
// It is OK to modify the args after returning from the function,
// since lr copies all the args to internal data.
//
// Field names longer than MaxFieldNameSize are automatically truncated to MaxFieldNameSize length.
//
// Log entries with too big number of fields are ignored.
// Loo long log entries are ignored.
// Log entries are dropped with the warning message in the following cases:
// - if there are too many log fields
// - if there are too long log field names
// - if the total length of log entries is too long
func (lr *LogRows) MustAdd(tenantID TenantID, timestamp int64, fields, streamFields []Field) {
// Verify that the log entry doesn't exceed limits.
if len(fields) > maxColumnsPerBlock {
fieldNames := make([]string, len(fields))
for i, f := range fields {
fieldNames[i] = f.Name
}
logger.Infof("ignoring log entry with too big number of fields, which exceeds %d; fieldNames=%q", maxColumnsPerBlock, fieldNames)
line := MarshalFieldsToJSON(nil, fields)
logger.Warnf("ignoring log entry with too big number of fields %d, since it exceeds the limit %d; "+
"see https://docs.victoriametrics.com/victorialogs/faq/#how-many-fields-a-single-log-entry-may-contain ; log entry: %s", len(fields), maxColumnsPerBlock, line)
return
}
for i := range fields {
fieldName := fields[i].Name
if len(fieldName) > maxFieldNameSize {
line := MarshalFieldsToJSON(nil, fields)
logger.Warnf("ignoring log entry with too long field name %q, since its length (%d) exceeds the limit %d bytes; "+
"see https://docs.victoriametrics.com/victorialogs/faq/#what-is-the-maximum-supported-field-name-length ; log entry: %s",
fieldName, len(fieldName), maxFieldNameSize, line)
return
}
}
rowLen := uncompressedRowSizeBytes(fields)
if rowLen > maxUncompressedBlockSize {
logger.Infof("ignoring too long log record with the estimated size %d bytes, since it exceeds the limit %d; "+
"see https://docs.victoriametrics.com/victorialogs/faq/#what-length-a-log-record-is-expected-to-have", rowLen, maxUncompressedBlockSize)
line := MarshalFieldsToJSON(nil, fields)
logger.Warnf("ignoring too long log entry with the estimated length of %d bytes, since it exceeds the limit %d bytes; "+
"see https://docs.victoriametrics.com/victorialogs/faq/#what-length-a-log-record-is-expected-to-have ; log entry: %s", rowLen, maxUncompressedBlockSize, line)
return
}
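The MustAdd hunk above drops an entry for three reasons: too many fields, a field name over the limit, or a too-large entry overall. A hedged, standalone sketch of the same pre-ingestion checks is below; the constants and the plain byte estimate are illustrative only and do not come from the logstorage package.

```go
package sketch

import "fmt"

// Field is a name/value pair of a log entry.
type Field struct {
	Name, Value string
}

// Illustrative limits only; the real values live in the logstorage package.
const (
	maxFieldsPerEntry = 2000
	maxFieldNameLen   = 128
	maxEntrySizeBytes = 2 * 1024 * 1024
)

// validateEntry mirrors the three drop conditions from the hunk above.
func validateEntry(fields []Field) error {
	if len(fields) > maxFieldsPerEntry {
		return fmt.Errorf("too many fields: %d exceeds the limit %d", len(fields), maxFieldsPerEntry)
	}
	size := 0
	for _, f := range fields {
		if len(f.Name) > maxFieldNameLen {
			return fmt.Errorf("field name %q is %d bytes; the limit is %d", f.Name, len(f.Name), maxFieldNameLen)
		}
		size += len(f.Name) + len(f.Value)
	}
	if size > maxEntrySizeBytes {
		return fmt.Errorf("entry size %d bytes exceeds the limit %d", size, maxEntrySizeBytes)
	}
	return nil
}
```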
@ -248,9 +249,6 @@ func (lr *LogRows) addFieldsInternal(fields []Field, ignoreFields map[string]str
dstField := &fb[len(fb)-1]

fieldName := f.Name
if len(fieldName) > MaxFieldNameSize {
fieldName = fieldName[:MaxFieldNameSize]
}
if fieldName == "_msg" {
fieldName = ""
hasMsgField = true
@ -267,20 +265,21 @@ func (lr *LogRows) GetRowString(idx int) string {
func (lr *LogRows) GetRowString(idx int) string {
tf := TimeFormatter(lr.timestamps[idx])
streamTags := getStreamTagsString(lr.streamTagsCanonicals[idx])
var rf RowFormatter
rf = append(rf[:0], lr.rows[idx]...)
rf = append(rf, Field{
var fields []Field
fields = append(fields[:0], lr.rows[idx]...)
fields = append(fields, Field{
Name: "_time",
Value: tf.String(),
})
rf = append(rf, Field{
fields = append(fields, Field{
Name: "_stream",
Value: streamTags,
})
sort.Slice(rf, func(i, j int) bool {
return rf[i].Name < rf[j].Name
sort.Slice(fields, func(i, j int) bool {
return fields[i].Name < fields[j].Name
})
return rf.String()
line := MarshalFieldsToJSON(nil, fields)
return string(line)
}

// GetLogRows returns LogRows from the pool for the given streamFields.
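The hunks above replace the RowFormatter stringer with direct calls to MarshalFieldsToJSON when a row has to be printed. The sketch below is a hedged, standard-library approximation of "render a row as one JSON object per line"; it is not the actual MarshalFieldsToJSON implementation from this diff.

```go
package sketch

import "encoding/json"

// Field is a name/value pair of a log entry.
type Field struct {
	Name, Value string
}

// fieldsToJSONLine renders a row as a single JSON object, roughly what the
// warning messages in this diff print for a dropped or logged entry.
// encoding/json emits map keys in sorted order, so the output is stable.
func fieldsToJSONLine(fields []Field) (string, error) {
	m := make(map[string]string, len(fields))
	for _, f := range fields {
		m[f.Name] = f.Value
	}
	b, err := json.Marshal(m)
	if err != nil {
		return "", err
	}
	return string(b), nil
}
```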
@ -147,8 +147,8 @@ func (pt *partition) mustAddRows(lr *LogRows) {

func (pt *partition) logNewStream(streamTagsCanonical []byte, fields []Field) {
streamTags := getStreamTagsString(streamTagsCanonical)
rf := RowFormatter(fields)
logger.Infof("partition %s: new stream %s for log entry %s", pt.path, streamTags, &rf)
line := MarshalFieldsToJSON(nil, fields)
logger.Infof("partition %s: new stream %s for log entry %s", pt.path, streamTags, line)
}

func (pt *partition) logIngestedRows(lr *LogRows) {
@ -9,6 +9,7 @@ import (
"sync/atomic"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"
@ -268,12 +269,17 @@ func MustOpenStorage(path string, cfg *StorageConfig) *Storage {
// Open partitions in parallel. This should improve VictoriaLogs initializiation duration
// when it opens many partitions.
var wg sync.WaitGroup
concurrencyLimiterCh := make(chan struct{}, cgroup.AvailableCPUs())
for i, de := range des {
fname := de.Name()

wg.Add(1)
concurrencyLimiterCh <- struct{}{}
go func(idx int) {
defer wg.Done()
defer func() {
<-concurrencyLimiterCh
wg.Done()
}()

t, err := time.Parse(partitionNameFormat, fname)
if err != nil {
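The buffered-channel semaphore added above (acquire a slot before starting a goroutine, release it in a deferred block, wait on a WaitGroup) is a common Go pattern for bounding concurrency. Here is a generic, hedged sketch of the same idea, independent of the storage types in this diff.

```go
package sketch

import (
	"runtime"
	"sync"
)

// openAll runs open() for every name with at most runtime.NumCPU() calls in
// flight, using the same WaitGroup plus buffered-channel pattern as the hunk above.
func openAll(names []string, open func(name string)) {
	var wg sync.WaitGroup
	limiter := make(chan struct{}, runtime.NumCPU()) // channel capacity bounds concurrency
	for _, name := range names {
		wg.Add(1)
		limiter <- struct{}{} // acquire a slot; blocks once the limit is reached
		go func(name string) {
			defer func() {
				<-limiter // release the slot
				wg.Done()
			}()
			open(name)
		}(name)
	}
	wg.Wait()
}
```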
@ -538,22 +544,22 @@ func (s *Storage) MustAddRows(lr *LogRows) {
for i, ts := range lr.timestamps {
day := ts / nsecsPerDay
if day < minAllowedDay {
rf := RowFormatter(lr.rows[i])
line := MarshalFieldsToJSON(nil, lr.rows[i])
tsf := TimeFormatter(ts)
minAllowedTsf := TimeFormatter(minAllowedDay * nsecsPerDay)
tooSmallTimestampLogger.Warnf("skipping log entry with too small timestamp=%s; it must be bigger than %s according "+
"to the configured -retentionPeriod=%dd. See https://docs.victoriametrics.com/victorialogs/#retention ; "+
"log entry: %s", &tsf, &minAllowedTsf, durationToDays(s.retention), &rf)
"log entry: %s", &tsf, &minAllowedTsf, durationToDays(s.retention), line)
s.rowsDroppedTooSmallTimestamp.Add(1)
continue
}
if day > maxAllowedDay {
rf := RowFormatter(lr.rows[i])
line := MarshalFieldsToJSON(nil, lr.rows[i])
tsf := TimeFormatter(ts)
maxAllowedTsf := TimeFormatter(maxAllowedDay * nsecsPerDay)
tooBigTimestampLogger.Warnf("skipping log entry with too big timestamp=%s; it must be smaller than %s according "+
"to the configured -futureRetention=%dd; see https://docs.victoriametrics.com/victorialogs/#retention ; "+
"log entry: %s", &tsf, &maxAllowedTsf, durationToDays(s.futureRetention), &rf)
"log entry: %s", &tsf, &maxAllowedTsf, durationToDays(s.futureRetention), line)
s.rowsDroppedTooBigTimestamp.Add(1)
continue
}
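The hunk above drops entries whose timestamps fall outside the configured retention window, comparing at day granularity. A hedged sketch of that bounds check is shown below; the helper and constant names are illustrative, not the storage package's own.

```go
package sketch

import "time"

const nsecsPerDay int64 = 24 * 3600 * 1000000000

// timestampInRetention reports whether tsNsecs (a Unix timestamp in nanoseconds)
// lies between now-retention and now+futureRetention, using the same
// day-granularity comparison as the hunk above.
func timestampInRetention(tsNsecs int64, retention, futureRetention time.Duration) bool {
	now := time.Now().UnixNano()
	minAllowedDay := (now - retention.Nanoseconds()) / nsecsPerDay
	maxAllowedDay := (now + futureRetention.Nanoseconds()) / nsecsPerDay
	day := tsNsecs / nsecsPerDay
	return day >= minAllowedDay && day <= maxAllowedDay
}
```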
@ -8,6 +8,7 @@ import (
"sync/atomic"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
@ -523,12 +524,31 @@ func mustOpenPartitions(smallPartitionsPath, bigPartitionsPath string, s *Storag
mustPopulatePartitionNames(smallPartitionsPath, ptNames)
mustPopulatePartitionNames(bigPartitionsPath, ptNames)
var pts []*partition
var ptsLock sync.Mutex

// Open partitions in parallel. This should reduce the time needed for opening multiple partitions.
var wg sync.WaitGroup
concurrencyLimiterCh := make(chan struct{}, cgroup.AvailableCPUs())
for ptName := range ptNames {
smallPartsPath := filepath.Join(smallPartitionsPath, ptName)
bigPartsPath := filepath.Join(bigPartitionsPath, ptName)
pt := mustOpenPartition(smallPartsPath, bigPartsPath, s)
pts = append(pts, pt)
wg.Add(1)
concurrencyLimiterCh <- struct{}{}
go func(ptName string) {
defer func() {
<-concurrencyLimiterCh
wg.Done()
}()

smallPartsPath := filepath.Join(smallPartitionsPath, ptName)
bigPartsPath := filepath.Join(bigPartitionsPath, ptName)
pt := mustOpenPartition(smallPartsPath, bigPartsPath, s)

ptsLock.Lock()
pts = append(pts, pt)
ptsLock.Unlock()
}(ptName)
}
wg.Wait()

return pts
}
10 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md (generated, vendored)
@ -1,5 +1,15 @@
# Breaking Changes

## v1.8.0

### New errors from `NewManagedIdentityCredential` in some environments

`NewManagedIdentityCredential` now returns an error when `ManagedIdentityCredentialOptions.ID` is set in a hosting environment whose managed identity API doesn't support user-assigned identities. `ManagedIdentityCredential.GetToken()` formerly logged a warning in these cases. Returning an error instead prevents the credential authenticating an unexpected identity. The affected hosting environments are:
* Azure Arc
* Azure ML (when a resource or object ID is specified; client IDs are supported)
* Cloud Shell
* Service Fabric

## v1.6.0

### Behavioral change to `DefaultAzureCredential` in IMDS managed identity scenarios
14 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md (generated, vendored)
@ -1,5 +1,19 @@
# Release History

## 1.8.1 (2025-01-15)

### Bugs Fixed
* User credential types inconsistently log access token scopes
* `DefaultAzureCredential` skips managed identity in Azure Container Instances
* Credentials having optional tenant IDs such as `AzureCLICredential` and
`InteractiveBrowserCredential` require setting `AdditionallyAllowedTenants`
when used with some clients

### Other Changes
* `ChainedTokenCredential` and `DefaultAzureCredential` continue to their next
credential after `ManagedIdentityCredential` receives an unexpected response
from IMDS, indicating the response is from something else such as a proxy

## 1.8.0 (2024-10-08)

### Other Changes
24 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md (generated, vendored)
@ -54,17 +54,7 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID.

### DefaultAzureCredential

`DefaultAzureCredential` simplifies authentication while developing applications that deploy to Azure by combining credentials used in Azure hosting environments and credentials used in local development. In production, it's better to use a specific credential type so authentication is more predictable and easier to debug. `DefaultAzureCredential` attempts to authenticate via the following mechanisms in this order, stopping when one succeeds:

1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate.
1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity.
1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it.
1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity.
1. **Azure Developer CLI** - If the developer has authenticated via the Azure Developer CLI `azd auth login` command, the `DefaultAzureCredential` will authenticate with that account.

> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types.
`DefaultAzureCredential` simplifies authentication while developing apps that deploy to Azure by combining credentials used in Azure hosting environments with credentials used in local development. For more information, see [DefaultAzureCredential overview][dac_overview].

## Managed Identity

@ -128,10 +118,10 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)

### Credential chains

|Credential|Usage
|-|-
|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps
|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials
|Credential|Usage|Reference
|-|-|-
|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps|[DefaultAzureCredential overview][dac_overview]|
|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials|[ChainedTokenCredential overview][ctc_overview]|

### Authenticating Azure-Hosted Applications

@ -260,4 +250,8 @@ For more information, see the
or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any
additional questions or comments.

<!-- LINKS -->
[ctc_overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview
[dac_overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview
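The README section above describes the public API used below. As a hedged, minimal usage example, this constructs a DefaultAzureCredential and requests a token; the ARM scope is only an illustrative choice, not something mandated by the diff.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// DefaultAzureCredential walks the chain described above: environment,
	// workload identity, managed identity, Azure CLI, Azure Developer CLI.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Illustrative scope; pick the one your target service requires.
	tok, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token expires at", tok.ExpiresOn)
}
```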
14 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD (generated, vendored)
@ -22,13 +22,13 @@ Some credential types support opt-in persistent token caching (see [the below ta

Persistent caches are encrypted at rest using a mechanism that depends on the operating system:

| Operating system | Encryption facility |
|------------------|---------------------------------------|
| Linux | kernel key retention service (keyctl) |
| macOS | Keychain |
| Windows | Data Protection API (DPAPI) |
| Operating system | Encryption facility |
| ---------------- | ---------------------------------------------- |
| Linux | kernel key retention service (keyctl) |
| macOS | Keychain (requires cgo and native build tools) |
| Windows | Data Protection API (DPAPI) |

Persistent caching requires encryption. When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the [package documentation][example] for example code showing how to configure persistent caching and access cached data.
Persistent caching requires encryption. When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the package documentation for examples showing how to configure persistent caching and access cached data for [users][user_example] and [service principals][sp_example].

### Credentials supporting token caching

@ -37,7 +37,7 @@ The following table indicates the state of in-memory and persistent caching in e
**Note:** in-memory caching is enabled by default for every type supporting it. Persistent token caching must be enabled explicitly. See the [package documentation][user_example] for an example showing how to do this for credential types authenticating users. For types that authenticate service principals, set the `Cache` field on the constructor's options as shown in [this example][sp_example].

| Credential | In-memory token caching | Persistent token caching |
|--------------------------------|---------------------------------------------------------------------|--------------------------|
| ------------------------------ | ------------------------------------------------------------------- | ------------------------ |
| `AzureCLICredential` | Not Supported | Not Supported |
| `AzureDeveloperCLICredential` | Not Supported | Not Supported |
| `AzurePipelinesCredential` | Supported | Supported |
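Following the TOKEN_CACHING document above, which says persistent caching is enabled by constructing a cache and setting the `Cache` field on a credential's options, here is a heavily hedged sketch. The exact shape of `cache.New` and the options field is assumed from that description and from current azidentity module versions; treat it as an illustration, not authoritative API documentation.

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache"
)

func main() {
	// Per the doc above, the persistent cache constructor fails when the OS
	// encryption facility is unavailable; the app can still authenticate,
	// it just cannot persist tokens between runs.
	c, err := cache.New(nil) // assumed signature: New(*cache.Options) (azidentity.Cache, error)
	if err != nil {
		log.Printf("persistent caching unavailable: %v", err)
	}
	cred, err := azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
		Cache: c, // opt in to persistent token caching (field name per the doc above)
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = cred
}
```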
24 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md (generated, vendored)
@ -8,6 +8,7 @@ This troubleshooting guide covers failure investigation techniques, common error
- [Permission issues](#permission-issues)
- [Find relevant information in errors](#find-relevant-information-in-errors)
- [Enable and configure logging](#enable-and-configure-logging)
- [Troubleshoot persistent token caching issues](#troubleshoot-persistent-token-caching-issues)
- [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues)
- [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues)
- [Troubleshoot AzurePipelinesCredential authentication issues](#troubleshoot-azurepipelinescredential-authentication-issues)
@ -236,6 +237,29 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul
| No service connection found with identifier |The `serviceConnectionID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the service connection ID. This parameter refers to the `resourceId` of the Azure Service Connection. It can also be found in the query string of the service connection's configuration in Azure DevOps. [Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) has more information about service connections.|
|401 (Unauthorized) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).|

## Troubleshoot persistent token caching issues

### macOS

[azidentity/cache](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache) encrypts persistent caches with the system Keychain on macOS. You may see build and runtime errors there because calling the Keychain API requires cgo and macOS prohibits Keychain access in some scenarios.

#### Build errors

Build errors about undefined `accessor` symbols indicate that cgo wasn't enabled. For example:
```
$ GOOS=darwin go build
# github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache
../../go/pkg/mod/github.com/!azure/azure-sdk-for-go/sdk/azidentity/cache@v0.3.0/darwin.go:18:19: undefined: accessor.New
../../go/pkg/mod/github.com/!azure/azure-sdk-for-go/sdk/azidentity/cache@v0.3.0/darwin.go:18:38: undefined: accessor.WithAccount
```

Try `go build` again with `CGO_ENABLED=1`. You may need to install native build tools.

#### Runtime errors

macOS prohibits Keychain access from environments without a GUI such as SSH sessions. If your application calls the persistent cache constructor ([cache.New](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache#New)) from an SSH session on a macOS host, you'll see an error like
`persistent storage isn't available due to error "User interaction is not allowed. (-25308)"`. This doesn't mean authentication is impossible, only that credentials can't persist data and the application must reauthenticate the next time it runs.

## Get additional help

Additional information on ways to reach out for support can be found in [SUPPORT.md](https://github.com/Azure/azure-sdk-for-go/blob/main/SUPPORT.md).
24 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go (generated, vendored)
@ -42,6 +42,8 @@ const (
developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46"
defaultSuffix = "/.default"

scopeLogFmt = "%s.GetToken() acquired a token for scope %q"

traceNamespace = "Microsoft.Entra"
traceOpGetToken = "GetToken"
traceOpAuthenticate = "Authenticate"
@ -103,7 +105,16 @@ func resolveAdditionalTenants(tenants []string) []string {
return cp
}

// resolveTenant returns the correct tenant for a token request
// resolveTenant returns the correct tenant for a token request, or "" when the calling credential doesn't
// have an explicitly configured tenant and the caller didn't specify a tenant for the token request.
//
// - defaultTenant: tenant set when constructing the credential, if any. "" is valid for credentials
// having an optional or implicit tenant such as dev tool and interactive user credentials. Those
// default to the tool's configured tenant or the user's home tenant, respectively.
// - specified: tenant specified for this token request i.e., TokenRequestOptions.TenantID. May be "".
// - credName: name of the calling credential type; for error messages
// - additionalTenants: optional allow list of tenants the credential may acquire tokens from in
// addition to defaultTenant i.e., the credential's AdditionallyAllowedTenants option
func resolveTenant(defaultTenant, specified, credName string, additionalTenants []string) (string, error) {
if specified == "" || specified == defaultTenant {
return defaultTenant, nil
@ -119,6 +130,17 @@ func resolveTenant(defaultTenant, specified, credName string, additionalTenants
return specified, nil
}
}
if len(additionalTenants) == 0 {
switch defaultTenant {
case "", organizationsTenantID:
// The application didn't specify a tenant or allow list when constructing the credential. Allow the
// tenant specified for this token request because we have nothing to compare it to (i.e., it vacuously
// satisfies the credential's configuration); don't know whether the application is multitenant; and
// don't want to return an error in the common case that the specified tenant matches the credential's
// default tenant determined elsewhere e.g., in some dev tool's configuration.
return specified, nil
}
}
return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified)
}
6 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go (generated, vendored)
@ -30,9 +30,9 @@ type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscrip

// AzureCLICredentialOptions contains optional parameters for AzureCLICredential.
type AzureCLICredentialOptions struct {
// AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition
// to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the
// logged in account can access.
// AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
// TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
// any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
AdditionallyAllowedTenants []string

// Subscription is the name or ID of a subscription. Set this to acquire tokens for an account other
@ -30,9 +30,9 @@ type azdTokenProvider func(ctx context.Context, scopes []string, tenant string)

// AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential.
type AzureDeveloperCLICredentialOptions struct {
// AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition
// to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the
// logged in account can access.
// AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
// TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
// any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
AdditionallyAllowedTenants []string

// TenantID identifies the tenant the credential should authenticate in. Defaults to the azd environment,
8 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go (generated, vendored)
@ -27,7 +27,10 @@ type ChainedTokenCredentialOptions struct {
}

// ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default,
// it tries all the credentials until one authenticates, after which it always uses that credential.
// it tries all the credentials until one authenticates, after which it always uses that credential. For more information,
// see [ChainedTokenCredential overview].
//
// [ChainedTokenCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview
type ChainedTokenCredential struct {
cond *sync.Cond
iterating bool
@ -46,6 +49,9 @@ func NewChainedTokenCredential(sources []azcore.TokenCredential, options *Chaine
if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil
return nil, errors.New("sources cannot contain nil")
}
if mc, ok := source.(*ManagedIdentityCredential); ok {
mc.mic.chained = true
}
}
cp := make([]azcore.TokenCredential, len(sources))
copy(cp, sources)
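As a hedged companion to the ChainedTokenCredential documentation above, this example composes a custom chain from the public azidentity constructors: managed identity first, then an Azure CLI login as a local fallback. The choice of credentials is illustrative.

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	managed, err := azidentity.NewManagedIdentityCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	cli, err := azidentity.NewAzureCLICredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// The chain tries each credential in order and keeps using the first one
	// that authenticates, as described in the doc comment above.
	chain, err := azidentity.NewChainedTokenCredential([]azcore.TokenCredential{managed, cli}, nil)
	if err != nil {
		log.Fatal(err)
	}
	_ = chain // pass the chain to any Azure SDK client as its TokenCredential
}
```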
15 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml (generated, vendored)
@ -26,27 +26,16 @@ extends:
parameters:
CloudConfig:
Public:
ServiceConnection: azure-sdk-tests
SubscriptionConfigurationFilePaths:
- eng/common/TestResources/sub-config/AzurePublicMsft.json
SubscriptionConfigurations:
- $(sub-config-azure-cloud-test-resources)
- $(sub-config-identity-test-resources)
EnableRaceDetector: true
Location: westus2
RunLiveTests: true
ServiceDirectory: azidentity
UsePipelineProxy: false

${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}:
PreSteps:
- task: AzureCLI@2
displayName: Set OIDC token
inputs:
addSpnToEnvironment: true
azureSubscription: azure-sdk-tests
inlineScript: Write-Host "##vso[task.setvariable variable=OIDC_TOKEN;]$($env:idToken)"
scriptLocation: inlineScript
scriptType: pscore
PersistOidcToken: true
MatrixConfigs:
- Name: managed_identity_matrix
GenerateVMJobs: true
2 vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go (generated, vendored)
@ -115,7 +115,7 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque
err = newAuthenticationFailedErrorFromMSAL(c.name, err)
}
} else {
msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", c.name, strings.Join(ar.GrantedScopes, ", "))
msg := fmt.Sprintf(scopeLogFmt, c.name, strings.Join(ar.GrantedScopes, ", "))
log.Write(EventAuthentication, msg)
}
return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
14
vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
generated
vendored
14
vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
generated
vendored
|
@@ -23,15 +23,19 @@ type DefaultAzureCredentialOptions struct {
     // to credential types that authenticate via external tools such as the Azure CLI.
     azcore.ClientOptions

-    // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. Add
-    // the wildcard value "*" to allow the credential to acquire tokens for any tenant. This value can also be
-    // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS.
+    // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
+    // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
+    // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
+    // This value can also be set as a semicolon delimited list of tenants in the environment variable
+    // AZURE_ADDITIONALLY_ALLOWED_TENANTS.
     AdditionallyAllowedTenants []string

     // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
     // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
     // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
     // the application responsible for ensuring the configured authority is valid and trustworthy.
     DisableInstanceDiscovery bool

     // TenantID sets the default tenant for authentication via the Azure CLI and workload identity.
     TenantID string
 }

@@ -39,7 +43,7 @@ type DefaultAzureCredentialOptions struct {
 // DefaultAzureCredential simplifies authentication while developing applications that deploy to Azure by
 // combining credentials used in Azure hosting environments and credentials used in local development. In
 // production, it's better to use a specific credential type so authentication is more predictable and easier
-// to debug.
+// to debug. For more information, see [DefaultAzureCredential overview].
 //
 // DefaultAzureCredential attempts to authenticate with each of these credential types, in the following order,
 // stopping when one provides a token:

@@ -55,6 +59,8 @@ type DefaultAzureCredentialOptions struct {
 // Consult the documentation for these credential types for more information on how they authenticate.
 // Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for
 // every subsequent authentication.
+//
+// [DefaultAzureCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview
 type DefaultAzureCredential struct {
     chain *ChainedTokenCredential
 }
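A minimal sketch of the options documented above; the tenant values and the storage scope are placeholders, not values from this change:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// DefaultAzureCredential wraps the chain described in the doc comment above
	// (environment, workload identity, managed identity, Azure CLI, ...).
	cred, err := azidentity.NewDefaultAzureCredential(&azidentity.DefaultAzureCredentialOptions{
		// Placeholder tenant values for illustration only.
		TenantID:                   "00000000-0000-0000-0000-000000000000",
		AdditionallyAllowedTenants: []string{"*"},
	})
	if err != nil {
		log.Fatal(err)
	}
	tok, err := cred.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://storage.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = tok
}
```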
vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go (generated, vendored, 5 changed lines)
@@ -21,8 +21,9 @@ const credNameDeviceCode = "DeviceCodeCredential"
 type DeviceCodeCredentialOptions struct {
     azcore.ClientOptions

-    // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire
-    // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant.
+    // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
+    // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
+    // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
     AdditionallyAllowedTenants []string

     // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option
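A minimal sketch of setting the option documented above on DeviceCodeCredentialOptions; the tenant names are placeholders and the UserPrompt callback simply prints the device-code message:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
		// Placeholder tenants for illustration only.
		TenantID:                   "contoso.onmicrosoft.com",
		AdditionallyAllowedTenants: []string{"fabrikam.onmicrosoft.com"},
		UserPrompt: func(ctx context.Context, dc azidentity.DeviceCodeMessage) error {
			fmt.Println(dc.Message) // tell the user where to enter the code
			return nil
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = cred
}
```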
vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go (generated, vendored)
@@ -20,8 +20,9 @@ const credNameBrowser = "InteractiveBrowserCredential"
 type InteractiveBrowserCredentialOptions struct {
     azcore.ClientOptions

-    // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire
-    // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant.
+    // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
+    // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
+    // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
     AdditionallyAllowedTenants []string

     // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option
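The same option applies to InteractiveBrowserCredentialOptions; a short sketch, again with placeholder tenant names:

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewInteractiveBrowserCredential(&azidentity.InteractiveBrowserCredentialOptions{
		// Placeholder tenants for illustration only.
		TenantID:                   "contoso.onmicrosoft.com",
		AdditionallyAllowedTenants: []string{"fabrikam.onmicrosoft.com"},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = cred
}
```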
vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go (generated, vendored, 39 changed lines)
@@ -65,6 +65,9 @@ type managedIdentityClient struct {
     id        ManagedIDKind
     msiType   msiType
     probeIMDS bool
+    // chained indicates whether the client is part of a credential chain. If true, the client will return
+    // a credentialUnavailableError instead of an AuthenticationFailedError for an unexpected IMDS response.
+    chained bool
 }

 // arcKeyDirectory returns the directory expected to contain Azure Arc keys

@@ -144,7 +147,7 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag
     if _, ok := os.LookupEnv(identityHeader); ok {
         if _, ok := os.LookupEnv(identityServerThumbprint); ok {
             if options.ID != nil {
-                return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned managed identity at runtime")
+                return nil, errors.New("the Service Fabric API doesn't support specifying a user-assigned identity at runtime. The identity is determined by cluster resource configuration. See https://aka.ms/servicefabricmi")
             }
             env = "Service Fabric"
             c.endpoint = endpoint

@@ -215,6 +218,7 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
     // no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client,
     // and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block
     if c.probeIMDS {
         // send a malformed request (no Metadata header) to IMDS to determine whether the endpoint is available
         cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout)
         defer cancel()
         cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1})

@@ -222,24 +226,14 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
         if err != nil {
             return azcore.AccessToken{}, fmt.Errorf("failed to create IMDS probe request: %s", err)
         }
-        res, err := c.azClient.Pipeline().Do(req)
-        if err != nil {
+        if _, err = c.azClient.Pipeline().Do(req); err != nil {
             msg := err.Error()
             if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
                 msg = "managed identity timed out. See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information"
             }
             return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, msg)
         }
-        // because IMDS always responds with JSON, assume a non-JSON response is from something else, such
-        // as a proxy, and return credentialUnavailableError so DefaultAzureCredential continues iterating
-        b, err := azruntime.Payload(res)
-        if err != nil {
-            return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("failed to read IMDS probe response: %s", err))
-        }
-        if !json.Valid(b) {
-            return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, "unexpected response to IMDS probe")
-        }
-        // send normal token requests from now on because IMDS responded
+        // send normal token requests from now on because something responded
         c.probeIMDS = false
     }

@@ -254,13 +248,21 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
     }

     if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) {
-        return c.createAccessToken(resp)
+        tk, err := c.createAccessToken(resp)
+        if err != nil && c.chained && c.msiType == msiTypeIMDS {
+            // failure to unmarshal a 2xx implies the response is from something other than IMDS such as a proxy listening at
+            // the same address. Return a credentialUnavailableError so credential chains continue to their next credential
+            err = newCredentialUnavailableError(credNameManagedIdentity, err.Error())
+        }
+        return tk, err
     }

     if c.msiType == msiTypeIMDS {
         switch resp.StatusCode {
         case http.StatusBadRequest:
             if id != nil {
+                // return authenticationFailedError, halting any encompassing credential chain,
+                // because the explicit user-assigned identity implies the developer expected this to work
                 return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp)
             }
             msg := "failed to authenticate a system assigned identity"

@@ -276,6 +278,13 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
                 return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, fmt.Sprintf("unexpected response %q", string(body)))
             }
         }
+        if c.chained {
+            // the response may be from something other than IMDS, for example a proxy returning
+            // 404. Return credentialUnavailableError so credential chains continue to their
+            // next credential, include the response in the error message to help debugging
+            err = newAuthenticationFailedError(credNameManagedIdentity, "", resp)
+            return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, err.Error())
+        }
     }

     return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "", resp)

@@ -290,7 +299,7 @@ func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.Ac
         ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string
     }{}
     if err := azruntime.UnmarshalAsJSON(res, &value); err != nil {
-        return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err)
+        return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "Unexpected response content", res)
     }
     if value.ExpiresIn != "" {
         expiresIn, err := json.Number(value.ExpiresIn).Int64()
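The status handling above distinguishes an explicitly requested user-assigned identity (authentication failure, which halts a chain) from other unexpected IMDS responses (credential unavailable, which lets a chain continue). A minimal sketch of requesting a user-assigned identity by client ID; the ID and the scope are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Request a specific user-assigned identity by client ID (placeholder value).
	cred, err := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
		ID: azidentity.ClientID("00000000-0000-0000-0000-000000000000"),
	})
	if err != nil {
		log.Fatal(err)
	}
	// Per the hunk above, if this identity isn't assigned to the resource the
	// IMDS path reports an authentication failure rather than silently falling
	// through a credential chain.
	_, err = cred.GetToken(context.TODO(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Println(err)
	}
}
```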
vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go (generated, vendored, 9 changed lines)
@@ -154,12 +154,7 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti
     if p.opts.DisableAutomaticAuthentication {
         return azcore.AccessToken{}, newAuthenticationRequiredError(p.name, tro)
     }
-    at, err := p.reqToken(ctx, client, tro)
-    if err == nil {
-        msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", p.name, strings.Join(ar.GrantedScopes, ", "))
-        log.Write(EventAuthentication, msg)
-    }
-    return at, err
+    return p.reqToken(ctx, client, tro)
 }

 // reqToken requests a token from the MSAL public client. It's separate from GetToken() to enable Authenticate() to bypass the cache.

@@ -242,6 +237,8 @@ func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) {

 func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) {
     if err == nil {
+        msg := fmt.Sprintf(scopeLogFmt, p.name, strings.Join(ar.GrantedScopes, ", "))
+        log.Write(EventAuthentication, msg)
         p.record, err = newAuthenticationRecord(ar)
     } else {
         err = newAuthenticationFailedErrorFromMSAL(p.name, err)
vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 (generated, vendored, 16 changed lines)
@@ -7,6 +7,10 @@ param (
     [hashtable] $AdditionalParameters = @{},
     [hashtable] $DeploymentOutputs,

+    [Parameter(Mandatory = $true)]
+    [ValidateNotNullOrEmpty()]
+    [string] $SubscriptionId,
+
     [Parameter(ParameterSetName = 'Provisioner', Mandatory = $true)]
     [ValidateNotNullOrEmpty()]
     [string] $TenantId,

@@ -15,6 +19,10 @@ param (
     [ValidatePattern('^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$')]
     [string] $TestApplicationId,

+    [Parameter(Mandatory = $true)]
+    [ValidateNotNullOrEmpty()]
+    [string] $Environment,
+
     # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors).
     [Parameter(ValueFromRemainingArguments = $true)]
     $RemainingArguments

@@ -28,8 +36,9 @@ if ($CI) {
         Write-Host "Skipping post-provisioning script because resources weren't deployed"
         return
     }
-    az login --federated-token $env:OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId
-    az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID']
+    az cloud set -n $Environment
+    az login --federated-token $env:ARM_OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId
+    az account set --subscription $SubscriptionId
 }

 Write-Host "Building container"

@@ -62,6 +71,9 @@ $aciName = "azidentity-test"
 az container create -g $rg -n $aciName --image $image `
     --acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
     --assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
     --cpu 1 `
     --memory 1.0 `
     --os-type Linux `
     --role "Storage Blob Data Reader" `
     --scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) `
     -e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) `
vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go (generated, vendored, 2 changed lines)
@@ -14,5 +14,5 @@ const (
     module = "github.com/Azure/azure-sdk-for-go/sdk/" + component

     // Version is the semantic version (see http://semver.org) of this module.
-    version = "v1.8.0"
+    version = "v1.8.1"
 )
vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md (generated, vendored, 15 changed lines)
@@ -1,10 +1,23 @@
 # Release History

+## 1.6.0 (2025-01-23)
+
+### Features Added
+* Upgraded service version to `2025-01-05`.
+
+## 1.6.0-beta.1 (2025-01-13)
+
+### Features Added
+* Added permissions & resourcetype parameters in listblob response.
+* Added BlobProperties field in BlobPrefix definition in listblob response.
+
+### Bugs Fixed
+* Fix FilterBlob API if Query contains a space character. Fixes [#23546](https://github.com/Azure/azure-sdk-for-go/issues/23546)
+
 ## 1.5.0 (2024-11-13)

 ### Features Added
 * Fix compareHeaders custom sorting algorithm for String To Sign.
 * Added permissions & resourcetype parameters in listblob response.

 ## 1.5.0-beta.1 (2024-10-22)
Some files were not shown because too many files have changed in this diff.