vendor: make vendor-update

Aliaksandr Valialkin 2023-04-06 16:30:47 -07:00
commit e0cef082f4
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
174 changed files with 4870 additions and 4618 deletions


@ -17,7 +17,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@main
with:
go-version: 1.20.2
go-version: 1.20.3
id: go
- name: Code checkout
uses: actions/checkout@master


@ -57,7 +57,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: 1.20.2
go-version: 1.20.3
check-latest: true
cache: true
if: ${{ matrix.language == 'go' }}


@ -32,7 +32,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: 1.20.2
go-version: 1.20.3
check-latest: true
cache: true
@ -56,7 +56,7 @@ jobs:
- name: Setup Go
uses: actions/setup-go@v4
with:
go-version: 1.20.2
go-version: 1.20.3
check-latest: true
cache: true
@ -81,7 +81,7 @@ jobs:
id: go
uses: actions/setup-go@v4
with:
go-version: 1.20.2
go-version: 1.20.3
check-latest: true
cache: true


@ -916,7 +916,7 @@ The shortlist of configuration flags is the following:
-evaluationInterval duration
How often to evaluate the rules (default 1m0s)
-external.alert.source string
External Alert Source allows overriding the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service. Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . For example, link to Grafana: -external.alert.source='explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr":{{$expr|jsonEscape|queryEscape}} },{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]'. Link to VMUI: -external.alert.source='vmui/#/?g0.expr={{.Expr|queryEscape}}'. If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used.
External Alert Source allows overriding the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service. Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . For example, link to Grafana: -external.alert.source='explore?orgId=1&left={"datasource":"VictoriaMetrics","queries":[{"expr":{{$expr|jsonEscape|queryEscape}},"refId":"A"}],"range":{"from":"now-1h","to":"now"}}'. Link to VMUI: -external.alert.source='vmui/#/?g0.expr={{.Expr|queryEscape}}'. If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used.
-external.label array
Optional label in the form 'Name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets.
Supports an array of values separated by comma or specified via multiple flags.
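To make the templating concrete, here is a minimal, self-contained sketch of how such a source link is rendered, using Go's `text/template` with standard-library stand-ins for vmalert's `queryEscape` and `jsonEscape` template functions (vmalert ships its own implementations; see the templating docs linked above):

```go
package main

import (
	"encoding/json"
	"net/url"
	"os"
	"text/template"
)

func main() {
	// Stand-ins for vmalert's template functions; illustrative only.
	funcs := template.FuncMap{
		"queryEscape": url.QueryEscape,
		"jsonEscape": func(s string) (string, error) {
			b, err := json.Marshal(s)
			return string(b), err
		},
	}
	// The VMUI variant of -external.alert.source from the docs above.
	tpl := template.Must(template.New("source").Funcs(funcs).Parse(
		`vmui/#/?g0.expr={{.Expr|queryEscape}}`))
	alert := struct{ Expr string }{Expr: `up == 0`}
	if err := tpl.Execute(os.Stdout, alert); err != nil {
		panic(err)
	}
	// Prints: vmui/#/?g0.expr=up+%3D%3D+0
}
```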


@ -73,7 +73,7 @@ absolute path to all .tpl files in root.`)
externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows overriding the Source link for alerts sent to AlertManager `+
`for cases where you want to build a custom link to Grafana, Prometheus or any other service. `+
`Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . `+
`For example, link to Grafana: -external.alert.source='explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr":{{$expr|jsonEscape|queryEscape}} },{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]'. `+
`For example, link to Grafana: -external.alert.source='explore?orgId=1&left={"datasource":"VictoriaMetrics","queries":[{"expr":{{$expr|jsonEscape|queryEscape}},"refId":"A"}],"range":{"from":"now-1h","to":"now"}}'. `+
`Link to VMUI: -external.alert.source='vmui/#/?g0.expr={{.Expr|queryEscape}}'. `+
`If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used.`)
externalLabels = flagutil.NewArrayString("external.label", "Optional label in the form 'Name=value' to add to all generated recording rules and alerts. "+


@ -14,6 +14,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/cheggaaa/pb/v3"
)
@ -232,6 +233,13 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
// any error breaks the import
for s := range metrics {
match, err := buildMatchWithFilter(p.filter.Match, s)
if err != nil {
logger.Errorf("failed to build export filters: %s", err)
continue
}
for _, times := range ranges {
select {
case <-ctx.Done():
@ -239,7 +247,7 @@ func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string,
case infErr := <-errCh:
return fmt.Errorf("native error: %s", infErr)
case filterCh <- native.Filter{
Match: fmt.Sprintf("{%s=%q}", nameLabel, s),
Match: match,
TimeStart: times[0].Format(time.RFC3339),
TimeEnd: times[1].Format(time.RFC3339),
}:
@ -303,3 +311,13 @@ func byteCountSI(b int64) string {
return fmt.Sprintf("%.1f %cB",
float64(b)/float64(div), "kMGTPE"[exp])
}
func buildMatchWithFilter(filter string, metricName string) (string, error) {
labels, err := promutils.NewLabelsFromString(filter)
if err != nil {
return "", err
}
labels.Set("__name__", metricName)
return labels.String(), nil
}
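For context, here is a minimal sketch of what the new `buildMatchWithFilter` helper does with a typical `--vm-native-filter-match` value (the input values are hypothetical; the behavior matches the accompanying tests in the next file):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

func main() {
	// Parse the user-supplied series filter...
	labels, err := promutils.NewLabelsFromString(`{cluster="kube1"}`)
	if err != nil {
		panic(err)
	}
	// ...then pin the metric name discovered during export,
	// exactly as buildMatchWithFilter does above.
	labels.Set("__name__", "http_request_count_total")
	fmt.Println(labels.String())
	// Prints: {cluster="kube1",__name__="http_request_count_total"}
}
```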


@ -294,3 +294,68 @@ func deleteSeries(name, value string) (int, error) {
}
return vmstorage.DeleteSeries(nil, []*storage.TagFilters{tfs})
}
func Test_buildMatchWithFilter(t *testing.T) {
tests := []struct {
name string
filter string
metricName string
want string
wantErr bool
}{
{
name: "parsed metric with label",
filter: `{__name__="http_request_count_total",cluster="kube1"}`,
metricName: "http_request_count_total",
want: `{__name__="http_request_count_total",cluster="kube1"}`,
wantErr: false,
},
{
name: "metric name with label",
filter: `http_request_count_total{cluster="kube1"}`,
metricName: "http_request_count_total",
want: `{__name__="http_request_count_total",cluster="kube1"}`,
wantErr: false,
},
{
name: "parsed metric with regexp value",
filter: `{__name__="http_request_count_total",cluster~="kube.*"}`,
metricName: "http_request_count_total",
want: `{__name__="http_request_count_total",cluster~="kube.*"}`,
wantErr: false,
},
{
name: "only label with regexp",
filter: `{cluster~=".*"}`,
metricName: "http_request_count_total",
want: `{cluster~=".*",__name__="http_request_count_total"}`,
wantErr: false,
},
{
name: "many labels in filter with regexp",
filter: `{cluster~=".*",job!=""}`,
metricName: "http_request_count_total",
want: `{cluster~=".*",job!="",__name__="http_request_count_total"}`,
wantErr: false,
},
{
name: "match with error",
filter: `{cluster=~".*"}`,
metricName: "http_request_count_total",
want: ``,
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := buildMatchWithFilter(tt.filter, tt.metricName)
if (err != nil) != tt.wantErr {
t.Errorf("buildMatchWithFilter() error = %v, wantErr %v", err, tt.wantErr)
return
}
if got != tt.want {
t.Errorf("buildMatchWithFilter() got = %v, want %v", got, tt.want)
}
})
}
}


@ -1,14 +1,14 @@
{
"files": {
"main.css": "./static/css/main.ebde9e58.css",
"main.js": "./static/js/main.ee50e2ce.js",
"main.css": "./static/css/main.0d9f8101.css",
"main.js": "./static/js/main.ba695a31.js",
"static/js/27.c1ccfd29.chunk.js": "./static/js/27.c1ccfd29.chunk.js",
"static/media/Lato-Regular.ttf": "./static/media/Lato-Regular.d714fec1633b69a9c2e9.ttf",
"static/media/Lato-Bold.ttf": "./static/media/Lato-Bold.32360ba4b57802daa4d6.ttf",
"index.html": "./index.html"
},
"entrypoints": [
"static/css/main.ebde9e58.css",
"static/js/main.ee50e2ce.js"
"static/css/main.0d9f8101.css",
"static/js/main.ba695a31.js"
]
}


@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=no"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.ee50e2ce.js"></script><link href="./static/css/main.ebde9e58.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=no"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.ba695a31.js"></script><link href="./static/css/main.0d9f8101.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@ -1,4 +1,4 @@
FROM golang:1.20.2 as build-web-stage
FROM golang:1.20.3 as build-web-stage
COPY build /build
WORKDIR /build


@ -51,15 +51,16 @@ const legendTips = [
</>
},
{
title: "Copy key-value pairs",
title: "Copy label key-value pairs",
description: <>
<code>click</code> on a key-value pair to save it to the clipboard.
<code>click</code> on a label key-value pair to save it to the clipboard.
</>
},
{
title: "Collapse/Expand the legend group",
description: <>
<code>click</code> on the group name (e.g. <b>Query 1: &#123;name!=&#34;&#34;&#125;</b>) to collapse or expand the legend.
<code>click</code> on the group name (e.g. <b>Query 1: &#123;__name__!=&#34;&#34;&#125;</b>)
to collapse or expand the legend.
</>
},
];


@ -1,18 +1,17 @@
import React, { FC, useEffect, useMemo, useRef, useState } from "preact/compat";
import uPlot from "uplot";
import ReactDOM from "react-dom";
import Button from "../../Main/Button/Button";
import { CloseIcon, DragIcon } from "../../Main/Icons";
import Button from "../../../Main/Button/Button";
import { CloseIcon, DragIcon } from "../../../Main/Icons";
import classNames from "classnames";
import { MouseEvent as ReactMouseEvent } from "react";
import "../ChartTooltip/style.scss";
import "../../Line/ChartTooltip/style.scss";
export interface TooltipHeatmapProps {
cursor: {left: number, top: number}
startDate: string,
endDate: string,
metricName: string,
fields: string[],
bucket: string,
value: number,
valueFormat: string
}
@ -36,8 +35,7 @@ const ChartTooltipHeatmap: FC<ChartTooltipHeatmapProps> = ({
onClose,
startDate,
endDate,
metricName,
fields,
bucket,
valueFormat,
value
}) => {
@ -141,18 +139,12 @@ const ChartTooltipHeatmap: FC<ChartTooltipHeatmapProps> = ({
</div>
<div className="vm-chart-tooltip-data">
<p>
{metricName}:
<b className="vm-chart-tooltip-data__value">{valueFormat}</b>
{unit}
value: <b className="vm-chart-tooltip-data__value">{valueFormat}</b>{unit}
</p>
</div>
{!!fields.length && (
<div className="vm-chart-tooltip-info">
{fields.map((f, i) => (
<div key={`${f}_${i}`}>{f}</div>
))}
</div>
)}
<div className="vm-chart-tooltip-info">
{bucket}
</div>
</div>
), targetPortal);
};


@ -4,21 +4,21 @@ import uPlot, {
Options as uPlotOptions,
Range
} from "uplot";
import { defaultOptions, sizeAxis } from "../../../utils/uplot/helpers";
import { dragChart } from "../../../utils/uplot/events";
import { getAxes } from "../../../utils/uplot/axes";
import { MetricResult } from "../../../api/types";
import { dateFromSeconds, formatDateForNativeInput, limitsDurations } from "../../../utils/time";
import { defaultOptions, sizeAxis } from "../../../../utils/uplot/helpers";
import { dragChart } from "../../../../utils/uplot/events";
import { getAxes } from "../../../../utils/uplot/axes";
import { MetricResult } from "../../../../api/types";
import { dateFromSeconds, formatDateForNativeInput, limitsDurations } from "../../../../utils/time";
import throttle from "lodash.throttle";
import useResize from "../../../hooks/useResize";
import { TimeParams } from "../../../types";
import { YaxisState } from "../../../state/graph/reducer";
import useResize from "../../../../hooks/useResize";
import { TimeParams } from "../../../../types";
import { YaxisState } from "../../../../state/graph/reducer";
import "uplot/dist/uPlot.min.css";
import classNames from "classnames";
import dayjs from "dayjs";
import { useAppState } from "../../../state/common/StateContext";
import { heatmapPaths } from "../../../utils/uplot/heatmap";
import { DATE_FULL_TIMEZONE_FORMAT } from "../../../constants/date";
import { useAppState } from "../../../../state/common/StateContext";
import { heatmapPaths } from "../../../../utils/uplot/heatmap";
import { DATE_FULL_TIMEZONE_FORMAT } from "../../../../constants/date";
import ChartTooltipHeatmap, {
ChartTooltipHeatmapProps,
TooltipHeatmapProps
@ -33,7 +33,7 @@ export interface HeatmapChartProps {
setPeriod: ({ from, to }: {from: Date, to: Date}) => void;
container: HTMLDivElement | null;
height?: number;
onChangeLegend: (val: number) => void;
onChangeLegend: (val: TooltipHeatmapProps) => void;
}
enum typeChartUpdate {xRange = "xRange", yRange = "yRange"}
@ -62,7 +62,7 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
const [tooltipOffset, setTooltipOffset] = useState({ left: 0, top: 0 });
const [stickyTooltips, setStickyToolTips] = useState<ChartTooltipHeatmapProps[]>([]);
const tooltipId = useMemo(() => {
return `${tooltipProps?.fields.join(",")}_${tooltipProps?.startDate}`;
return `${tooltipProps?.bucket}_${tooltipProps?.startDate}`;
}, [tooltipProps]);
const setScale = ({ min, max }: { min: number, max: number }): void => {
@ -135,7 +135,7 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
const handleClick = () => {
if (!tooltipProps) return;
const id = `${tooltipProps?.fields.join(",")}_${tooltipProps?.startDate}`;
const id = `${tooltipProps?.bucket}_${tooltipProps?.startDate}`;
const props = {
id,
unit,
@ -171,12 +171,6 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
return;
}
const metric = result?.metric;
const metricName = metric["__name__"] || "value";
const labelNames = Object.keys(metric).filter(x => x != "__name__");
const fields = labelNames.map(key => `${key}=${JSON.stringify(metric[key])}`);
const [endTime = 0, value = ""] = result.values.find(v => v[0] === second) || [];
const valueFormat = `${+value}%`;
const startTime = xArr[xIdx];
@ -187,8 +181,7 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
cursor: { left, top },
startDate,
endDate,
metricName,
fields,
bucket: result?.metric?.vmrange || "",
value: +value,
valueFormat: valueFormat,
});
@ -228,7 +221,7 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
font: axes[0].font,
size: sizeAxis,
splits: metrics.map((m, i) => i),
values: metrics.map(m => Object.entries(m.metric).map(e => `${e[0]}=${JSON.stringify(e[1])}`)[0]),
values: metrics.map(m => m.metric.vmrange),
}
],
scales: {
@ -339,7 +332,7 @@ const HeatmapChart: FC<HeatmapChartProps> = ({
}, [tooltipProps, stickyTooltips]);
useEffect(() => {
onChangeLegend(tooltipProps?.value || 0);
if (tooltipProps) onChangeLegend(tooltipProps);
}, [tooltipProps]);
return (


@ -0,0 +1,68 @@
import React, { FC, useEffect, useState } from "preact/compat";
import { gradMetal16 } from "../../../../utils/uplot/heatmap";
import "./style.scss";
import { TooltipHeatmapProps } from "../ChartTooltipHeatmap/ChartTooltipHeatmap";
import { SeriesItem } from "../../../../utils/uplot/series";
import LegendItem from "../../Line/Legend/LegendItem/LegendItem";
import { LegendItemType } from "../../../../utils/uplot/types";
interface LegendHeatmapProps {
min: number
max: number
legendValue: TooltipHeatmapProps | null,
series: SeriesItem[]
}
const LegendHeatmap: FC<LegendHeatmapProps> = (
{
min,
max,
legendValue,
series,
}
) => {
const [percent, setPercent] = useState(0);
const [valueFormat, setValueFormat] = useState("");
const [minFormat, setMinFormat] = useState("");
const [maxFormat, setMaxFormat] = useState("");
useEffect(() => {
const value = legendValue?.value || 0;
setPercent(value ? (value - min) / (max - min) * 100 : 0);
setValueFormat(value ? `${value}%` : "");
setMinFormat(`${min}%`);
setMaxFormat(`${max}%`);
}, [legendValue, min, max]);
return (
<div className="vm-legend-heatmap__wrapper">
<div className="vm-legend-heatmap">
<div
className="vm-legend-heatmap-gradient"
style={{ background: `linear-gradient(to right, ${gradMetal16.join(", ")})` }}
>
{!!legendValue?.value && (
<div
className="vm-legend-heatmap-gradient__value"
style={{ left: `${percent}%` }}
>
<span>{valueFormat}</span>
</div>
)}
</div>
<div className="vm-legend-heatmap__value">{minFormat}</div>
<div className="vm-legend-heatmap__value">{maxFormat}</div>
</div>
{series[1] && (
<LegendItem
key={series[1]?.label}
legend={series[1] as LegendItemType}
isHeatmap
/>
)}
</div>
);
};
export default LegendHeatmap;


@ -7,6 +7,14 @@
justify-content: space-between;
gap: 4px;
&__wrapper {
display: flex;
align-items: flex-start;
justify-content: space-between;
gap: $padding-global;
flex-wrap: wrap;
}
&__value {
color: $color-text;
font-size: $font-size-small;
@ -52,4 +60,8 @@
}
}
}
&__labels {
word-break: break-all;
}
}


@ -1,46 +0,0 @@
import React, { FC, useEffect, useState } from "preact/compat";
import { gradMetal16 } from "../../../utils/uplot/heatmap";
import "./style.scss";
interface LegendHeatmapProps {
min: number
max: number
value?: number
}
const LegendHeatmap: FC<LegendHeatmapProps> = ({ min, max, value }) => {
const [percent, setPercent] = useState(0);
const [valueFormat, setValueFormat] = useState("");
const [minFormat, setMinFormat] = useState("");
const [maxFormat, setMaxFormat] = useState("");
useEffect(() => {
setPercent(value ? (value - min) / (max - min) * 100 : 0);
setValueFormat(value ? `${value}%` : "");
setMinFormat(`${min}%`);
setMaxFormat(`${max}%`);
}, [value, min, max]);
return (
<div className="vm-legend-heatmap">
<div
className="vm-legend-heatmap-gradient"
style={{ background: `linear-gradient(to right, ${gradMetal16.join(", ")})` }}
>
{!!value && (
<div
className="vm-legend-heatmap-gradient__value"
style={{ left: `${percent}%` }}
>
<span>{valueFormat}</span>
</div>
)}
</div>
<div className="vm-legend-heatmap__value">{minFormat}</div>
<div className="vm-legend-heatmap__value">{maxFormat}</div>
</div>
);
};
export default LegendHeatmap;


@ -1,17 +1,17 @@
import React, { FC, useEffect, useMemo, useRef, useState } from "preact/compat";
import uPlot from "uplot";
import { MetricResult } from "../../../api/types";
import { formatPrettyNumber } from "../../../utils/uplot/helpers";
import { MetricResult } from "../../../../api/types";
import { formatPrettyNumber } from "../../../../utils/uplot/helpers";
import dayjs from "dayjs";
import { DATE_FULL_TIMEZONE_FORMAT } from "../../../constants/date";
import { DATE_FULL_TIMEZONE_FORMAT } from "../../../../constants/date";
import ReactDOM from "react-dom";
import get from "lodash.get";
import Button from "../../Main/Button/Button";
import { CloseIcon, DragIcon } from "../../Main/Icons";
import Button from "../../../Main/Button/Button";
import { CloseIcon, DragIcon } from "../../../Main/Icons";
import classNames from "classnames";
import { MouseEvent as ReactMouseEvent } from "react";
import "./style.scss";
import { SeriesItem } from "../../../utils/uplot/series";
import { SeriesItem } from "../../../../utils/uplot/series";
export interface ChartTooltipProps {
id: string,


@ -78,5 +78,6 @@ $chart-tooltip-y: -1 * ($padding-small + $chart-tooltip-half-icon);
display: grid;
grid-gap: 4px;
word-break: break-all;
white-space: pre-wrap;
}
}


@ -1,7 +1,7 @@
import React, { FC, useMemo } from "preact/compat";
import { LegendItemType } from "../../../utils/uplot/types";
import { LegendItemType } from "../../../../utils/uplot/types";
import LegendItem from "./LegendItem/LegendItem";
import Accordion from "../../Main/Accordion/Accordion";
import Accordion from "../../../Main/Accordion/Accordion";
import "./style.scss";
interface LegendProps {


@ -1,19 +1,23 @@
import React, { FC, useState, useMemo } from "preact/compat";
import { MouseEvent } from "react";
import { LegendItemType } from "../../../../utils/uplot/types";
import { LegendItemType } from "../../../../../utils/uplot/types";
import "./style.scss";
import classNames from "classnames";
import Tooltip from "../../../Main/Tooltip/Tooltip";
import Tooltip from "../../../../Main/Tooltip/Tooltip";
import { getFreeFields } from "./helpers";
interface LegendItemProps {
legend: LegendItemType;
onChange: (item: LegendItemType, metaKey: boolean) => void;
onChange?: (item: LegendItemType, metaKey: boolean) => void;
isHeatmap?: boolean;
}
const LegendItem: FC<LegendItemProps> = ({ legend, onChange }) => {
const LegendItem: FC<LegendItemProps> = ({ legend, onChange, isHeatmap }) => {
const [copiedValue, setCopiedValue] = useState("");
const freeFormFields = useMemo(() => getFreeFields(legend), [legend]);
const freeFormFields = useMemo(() => {
const result = getFreeFields(legend);
return isHeatmap ? result.filter(f => f.key !== "vmrange") : result;
}, [legend, isHeatmap]);
const calculations = legend.calculations;
const showCalculations = Object.values(calculations).some(v => v);
@ -24,7 +28,7 @@ const LegendItem: FC<LegendItemProps> = ({ legend, onChange }) => {
};
const createHandlerClick = (legend: LegendItemType) => (e: MouseEvent<HTMLDivElement>) => {
onChange(legend, e.ctrlKey || e.metaKey);
onChange && onChange(legend, e.ctrlKey || e.metaKey);
};
const createHandlerCopy = (freeField: string, id: string) => (e: MouseEvent<HTMLDivElement>) => {
@ -37,18 +41,21 @@ const LegendItem: FC<LegendItemProps> = ({ legend, onChange }) => {
className={classNames({
"vm-legend-item": true,
"vm-legend-row": true,
"vm-legend-item_hide": !legend.checked,
"vm-legend-item_hide": !legend.checked && !isHeatmap,
"vm-legend-item_static": isHeatmap,
})}
onClick={createHandlerClick(legend)}
>
<div
className="vm-legend-item__marker"
style={{ backgroundColor: legend.color }}
/>
{!isHeatmap && (
<div
className="vm-legend-item__marker"
style={{ backgroundColor: legend.color }}
/>
)}
<div className="vm-legend-item-info">
<span className="vm-legend-item-info__label">
{legend.freeFormFields["__name__"]}
&#123;
{!!freeFormFields.length && <>&#123;</>}
{freeFormFields.map((f, i) => (
<Tooltip
key={f.id}
@ -66,10 +73,10 @@ const LegendItem: FC<LegendItemProps> = ({ legend, onChange }) => {
</span>
</Tooltip>
))}
&#125;
{!!freeFormFields.length && <>&#125;</>}
</span>
</div>
{showCalculations && (
{!isHeatmap && showCalculations && (
<div className="vm-legend-item-values">
median:{calculations.median}, min:{calculations.min}, max:{calculations.max}, last:{calculations.last}
</div>


@ -1,4 +1,4 @@
import { LegendItemType } from "../../../../utils/uplot/types";
import { LegendItemType } from "../../../../../utils/uplot/types";
export const getFreeFields = (legend: LegendItemType) => {
const keys = Object.keys(legend.freeFormFields).filter(f => f !== "__name__");


@ -21,6 +21,17 @@
opacity: 0.5;
}
&_static {
grid-template-columns: 1fr;
margin: 0;
padding: 0;
cursor: default;
&:hover {
background-color: $color-background-block;
}
}
&__marker {
width: 14px;
height: 14px;


@ -7,22 +7,22 @@ import uPlot, {
Scales,
Scale,
} from "uplot";
import { defaultOptions } from "../../../utils/uplot/helpers";
import { dragChart } from "../../../utils/uplot/events";
import { getAxes, getMinMaxBuffer } from "../../../utils/uplot/axes";
import { MetricResult } from "../../../api/types";
import { dateFromSeconds, formatDateForNativeInput, limitsDurations } from "../../../utils/time";
import { defaultOptions } from "../../../../utils/uplot/helpers";
import { dragChart } from "../../../../utils/uplot/events";
import { getAxes, getMinMaxBuffer } from "../../../../utils/uplot/axes";
import { MetricResult } from "../../../../api/types";
import { dateFromSeconds, formatDateForNativeInput, limitsDurations } from "../../../../utils/time";
import throttle from "lodash.throttle";
import useResize from "../../../hooks/useResize";
import { TimeParams } from "../../../types";
import { YaxisState } from "../../../state/graph/reducer";
import useResize from "../../../../hooks/useResize";
import { TimeParams } from "../../../../types";
import { YaxisState } from "../../../../state/graph/reducer";
import "uplot/dist/uPlot.min.css";
import "./style.scss";
import classNames from "classnames";
import ChartTooltip, { ChartTooltipProps } from "../ChartTooltip/ChartTooltip";
import dayjs from "dayjs";
import { useAppState } from "../../../state/common/StateContext";
import { SeriesItem } from "../../../utils/uplot/series";
import { useAppState } from "../../../../state/common/StateContext";
import { SeriesItem } from "../../../../utils/uplot/series";
export interface LineChartProps {
metrics: MetricResult[];


@ -1,77 +1,18 @@
import React, { FC, useState } from "preact/compat";
import { isMacOs } from "../../../utils/detect-device";
import React, { FC, useEffect, useState } from "preact/compat";
import { getAppModeEnable } from "../../../utils/app-mode";
import Button from "../Button/Button";
import { KeyboardIcon } from "../Icons";
import Modal from "../Modal/Modal";
import "./style.scss";
import Tooltip from "../Tooltip/Tooltip";
const ctrlMeta = isMacOs() ? "Cmd" : "Ctrl";
const keyList = [
{
title: "Query",
list: [
{
keys: ["Enter"],
description: "Run"
},
{
keys: ["Shift", "Enter"],
description: "Multi-line queries"
},
{
keys: [ctrlMeta, "Arrow Up"],
description: "Previous command from the Query history"
},
{
keys: [ctrlMeta, "Arrow Down"],
description: "Next command from the Query history"
},
{
keys: [ctrlMeta, "Click by 'Eye'"],
description: "Toggle multiple queries"
}
]
},
{
title: "Graph",
list: [
{
keys: [ctrlMeta, "Scroll Up"],
alt: ["+"],
description: "Zoom in"
},
{
keys: [ctrlMeta, "Scroll Down"],
alt: ["-"],
description: "Zoom out"
},
{
keys: [ctrlMeta, "Click and Drag"],
description: "Move the graph left/right"
},
]
},
{
title: "Legend",
list: [
{
keys: ["Mouse Click"],
description: "Select series"
},
{
keys: [ctrlMeta, "Mouse Click"],
description: "Toggle multiple series"
}
]
}
];
import keyList from "./constants/keyList";
import { isMacOs } from "../../../utils/detect-device";
const title = "Shortcut keys";
const isMac = isMacOs();
const keyOpenHelp = isMac ? "Cmd + /" : "F1";
const ShortcutKeys: FC<{showTitle?: boolean}> = ({ showTitle }) => {
const ShortcutKeys: FC<{ showTitle?: boolean }> = ({ showTitle }) => {
const [openList, setOpenList] = useState(false);
const appModeEnable = getAppModeEnable();
@ -83,10 +24,26 @@ const ShortcutKeys: FC<{showTitle?: boolean}> = ({ showTitle }) => {
setOpenList(false);
};
const handleKeyDown = (e: KeyboardEvent) => {
const openOnMac = isMac && e.key === "/" && e.metaKey;
const openOnOther = !isMac && e.key === "F1" && !e.metaKey;
if (openOnMac || openOnOther) {
handleOpen();
}
};
useEffect(() => {
window.addEventListener("keydown", handleKeyDown);
return () => {
window.removeEventListener("keydown", handleKeyDown);
};
}, []);
return <>
<Tooltip
open={showTitle === true ? false : undefined}
title={title}
title={`${title} (${keyOpenHelp})`}
placement="bottom-center"
>
<Button
@ -111,33 +68,20 @@ const ShortcutKeys: FC<{showTitle?: boolean}> = ({ showTitle }) => {
className="vm-shortcuts-section"
key={section.title}
>
{section.readMore && (
<div className="vm-shortcuts-section__read-more">{section.readMore}</div>
)}
<h3 className="vm-shortcuts-section__title">
{section.title}
</h3>
<div className="vm-shortcuts-section-list">
{section.list.map(l => (
{section.list.map((l, i) => (
<div
className="vm-shortcuts-section-list-item"
key={l.keys.join("+")}
key={`${section.title}_${i}`}
>
<div className="vm-shortcuts-section-list-item__key">
{l.keys.map((k, i) => (
<>
<code key={k}>
{k}
</code>
{i !== l.keys.length - 1 ? "+" : ""}
</>
))}
{l.alt && l.alt.map((alt, i) => (
<>
or
<code key={alt}>
{alt}
</code>
{i !== l.alt.length - 1 ? "+" : ""}
</>
))}
{l.keys}
</div>
<p className="vm-shortcuts-section-list-item__description">
{l.description}


@ -0,0 +1,62 @@
import React from "preact/compat";
import { isMacOs } from "../../../../utils/detect-device";
import { VisibilityIcon } from "../../Icons";
import GraphTips from "../../../Chart/GraphTips/GraphTips";
const ctrlMeta = <code>{isMacOs() ? "Cmd" : "Ctrl"}</code>;
const keyList = [
{
title: "Query",
list: [
{
keys: <code>Enter</code>,
description: "Run"
},
{
keys: <><code>Shift</code> + <code>Enter</code></>,
description: "Multi-line queries"
},
{
keys: <>{ctrlMeta} + <code>Arrow Up</code></>,
description: "Previous command from the Query history"
},
{
keys: <>{ctrlMeta} + <code>Arrow Down</code></>,
description: "Next command from the Query history"
},
{
keys: <>{ctrlMeta} + <code>click</code> by <VisibilityIcon/></>,
description: "Toggle multiple queries"
}
]
},
{
title: "Graph",
readMore: <GraphTips/>,
list: [
{
keys: <>{ctrlMeta} + <code>scroll Up</code> or <code>+</code></>,
description: "Zoom in"
},
{
keys: <>{ctrlMeta} + <code>scroll Down</code> or <code>-</code></>,
description: "Zoom out"
},
{
keys: <>{ctrlMeta} + <code>drag</code></>,
description: "Move the graph left/right"
},
{
keys: <><code>click</code></>,
description: "Select the series in the legend"
},
{
keys: <>{ctrlMeta} + <code>click</code></>,
description: "Toggle multiple series in the legend"
}
]
},
];
export default keyList;


@ -8,13 +8,20 @@
}
&-section {
margin-bottom: $padding-medium;
position: relative;
margin-bottom: $padding-global;
padding-bottom: $padding-global;
border-bottom: $border-divider;
&__title {
font-weight: bold;
padding: $padding-small 0;
margin-bottom: $padding-global;
border-bottom: $border-divider;
}
&__read-more {
position: absolute;
top: -8px;
right: 0;
}
&-list {
@ -40,6 +47,7 @@
align-items: center;
gap: 4px;
svg,
code {
display: inline-block;
padding: 2px $padding-small 0;
@ -52,6 +60,11 @@
border: $border-divider;
border-radius: 4px;
}
svg {
width: 24px;
padding: 4px;
}
}
&__description {


@ -1,10 +1,10 @@
import React, { FC, useCallback, useEffect, useMemo, useRef, useState } from "preact/compat";
import { MetricResult } from "../../../api/types";
import LineChart from "../../Chart/LineChart/LineChart";
import LineChart from "../../Chart/Line/LineChart/LineChart";
import { AlignedData as uPlotData, Series as uPlotSeries } from "uplot";
import Legend from "../../Chart/Legend/Legend";
import LegendHeatmap from "../../Chart/LegendHeatmap/LegendHeatmap";
import { getHideSeries, getLegendItem, getSeriesItemContext } from "../../../utils/uplot/series";
import Legend from "../../Chart/Line/Legend/Legend";
import LegendHeatmap from "../../Chart/Heatmap/LegendHeatmap/LegendHeatmap";
import { getHideSeries, getLegendItem, getSeriesItemContext, SeriesItem } from "../../../utils/uplot/series";
import { getLimitsYAxis, getMinMaxBuffer, getTimeSeries } from "../../../utils/uplot/axes";
import { LegendItemType } from "../../../utils/uplot/types";
import { TimeParams } from "../../../types";
@ -12,11 +12,12 @@ import { AxisRange, YaxisState } from "../../../state/graph/reducer";
import { getAvgFromArray, getMaxFromArray, getMinFromArray } from "../../../utils/math";
import classNames from "classnames";
import { useTimeState } from "../../../state/time/TimeStateContext";
import HeatmapChart from "../../Chart/HeatmapChart/HeatmapChart";
import HeatmapChart from "../../Chart/Heatmap/HeatmapChart/HeatmapChart";
import "./style.scss";
import { promValueToNumber } from "../../../utils/metric";
import { normalizeData } from "../../../utils/uplot/heatmap";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import { TooltipHeatmapProps } from "../../Chart/Heatmap/ChartTooltipHeatmap/ChartTooltipHeatmap";
export interface GraphViewProps {
data?: MetricResult[];
@ -60,7 +61,7 @@ const GraphView: FC<GraphViewProps> = ({
const [series, setSeries] = useState<uPlotSeries[]>([]);
const [legend, setLegend] = useState<LegendItemType[]>([]);
const [hideSeries, setHideSeries] = useState<string[]>([]);
const [legendValue, setLegendValue] = useState(0);
const [legendValue, setLegendValue] = useState<TooltipHeatmapProps | null>(null);
const setLimitsYaxis = (values: {[key: string]: number[]}) => {
const limits = getLimitsYAxis(values, !isHistogram);
@ -71,7 +72,7 @@ const GraphView: FC<GraphViewProps> = ({
setHideSeries(getHideSeries({ hideSeries, legend, metaKey, series }));
};
const handleChangeLegend = (val: number) => {
const handleChangeLegend = (val: TooltipHeatmapProps) => {
setLegendValue(val);
};
@ -209,9 +210,10 @@ const GraphView: FC<GraphViewProps> = ({
)}
{isHistogram && showLegend && (
<LegendHeatmap
series={series as SeriesItem[]}
min={yaxis.limits.range[1][0] || 0}
max={yaxis.limits.range[1][1] || 0}
value={legendValue}
legendValue={legendValue}
/>
)}
</div>


@ -79,7 +79,7 @@ export const useFetchQuery = ({
setFetchQueue([...fetchQueue, controller]);
try {
const isDisplayChart = displayType === "chart";
const seriesLimit = showAllSeries ? Infinity : (+stateSeriesLimits[displayType] || Infinity);
let seriesLimit = showAllSeries ? Infinity : (+stateSeriesLimits[displayType] || Infinity);
const tempData: MetricBase[] = [];
const tempTraces: Trace[] = [];
let counter = 1;
@ -89,6 +89,7 @@ export const useFetchQuery = ({
const isHideQuery = hideQuery?.includes(counter - 1);
if (isHideQuery) {
setQueryErrors(prev => [...prev, ""]);
counter++;
continue;
}
@ -104,6 +105,9 @@ export const useFetchQuery = ({
tempTraces.push(trace);
}
const isHistogramResult = isDisplayChart && isHistogramData(resp.data.result);
if (resp.data.result.length) setIsHistogram(isHistogramResult);
if (isHistogramResult) seriesLimit = Infinity;
const freeTempSize = seriesLimit - tempData.length;
resp.data.result.slice(0, freeTempSize).forEach((d: MetricBase) => {
d.group = counter;
@ -120,8 +124,6 @@ export const useFetchQuery = ({
const limitText = `Showing ${seriesLimit} series out of ${totalLength} series due to performance reasons. Please narrow down the query, so it returns less series`;
setWarning(totalLength > seriesLimit ? limitText : "");
setIsHistogram(isDisplayChart && isHistogramData(tempData));
isDisplayChart ? setGraphData(tempData as MetricResult[]) : setLiveData(tempData as InstantMetricResult[]);
setTraces(tempTraces);
} catch (e) {


@ -31,14 +31,13 @@ export const promValueToNumber = (s: string): number => {
export const isHistogramData = (result: MetricBase[]) => {
if (result.length < 2) return false;
const histogramNames = ["le", "vmrange"];
const histogramLabels = ["le", "vmrange"];
return result.every(r => {
const keys = Object.keys(r.metric);
const labels = Object.keys(r.metric).filter(n => !histogramNames.includes(n));
const byName = keys.length > labels.length;
const byLabels = labels.every(l => r.metric[l] === result[0].metric[l]);
return byName && byLabels;
const firstLabels = Object.keys(result[0].metric).filter(n => !histogramLabels.includes(n));
const isHistogram = result.every(r => {
const labels = Object.keys(r.metric).filter(n => !histogramLabels.includes(n));
return firstLabels.length === labels.length && labels.every(l => r.metric[l] === result[0].metric[l]);
});
return isHistogram && result.every(r => histogramLabels.some(l => l in r.metric));
};
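To restate the refactored detection rule: a result set counts as histogram data only when there are at least two series, every series shares identical non-bucket labels, and every series carries an `le` or `vmrange` label. Below is a small self-contained Go sketch of the same rule, for illustration (the `Labels` type is a simplified stand-in for the `metric` field of the UI's `MetricBase`):

```go
package main

import "fmt"

// Labels is a simplified stand-in for the `metric` field of the UI's MetricBase.
type Labels map[string]string

func isBucketLabel(name string) bool {
	return name == "le" || name == "vmrange"
}

// isHistogramData mirrors the refactored check: at least two series, all series
// share identical non-bucket labels, and every series carries `le` or `vmrange`.
func isHistogramData(result []Labels) bool {
	if len(result) < 2 {
		return false
	}
	first := result[0]
	firstCount := 0
	for k := range first {
		if !isBucketLabel(k) {
			firstCount++
		}
	}
	for _, m := range result {
		count, hasBucket := 0, false
		for k, v := range m {
			if isBucketLabel(k) {
				hasBucket = true
				continue
			}
			count++
			if first[k] != v {
				return false
			}
		}
		if count != firstCount || !hasBucket {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isHistogramData([]Labels{
		{"__name__": "request_duration_bucket", "vmrange": "0...1"},
		{"__name__": "request_duration_bucket", "vmrange": "1...2"},
	})) // true
}
```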


@ -1,6 +1,7 @@
import uPlot from "uplot";
import { generateGradient } from "../color";
import { MetricResult } from "../../api/types";
import { promValueToNumber } from "../metric";
// 16-color gradient from "rgb(246, 226, 219)" to "rgb(127, 39, 4)"
export const gradMetal16 = generateGradient([246, 226, 219], [127, 39, 4], 16);
@ -115,11 +116,11 @@ export const convertPrometheusToVictoriaMetrics = (buckets: MetricResult[]): Met
const sortedBuckets = buckets.sort((a,b) => parseFloat(a.metric.le) - parseFloat(b.metric.le));
const group = buckets[0]?.group || 1;
let prevBucket: MetricResult = { metric: { le: "0" }, values: [], group };
let prevBucket: MetricResult = { metric: { le: "" }, values: [], group };
const result: MetricResult[] = [];
for (const bucket of sortedBuckets) {
const vmrange = `${prevBucket.metric.le}..${bucket.metric.le}`;
const vmrange = [prevBucket.metric.le, bucket.metric.le].filter(n => n).join("...");
const values: [number, string][] = [];
for (const [timestamp, value] of bucket.values) {
@ -135,17 +136,30 @@ export const convertPrometheusToVictoriaMetrics = (buckets: MetricResult[]): Met
return result;
};
const getUpperBound = (bucket: MetricResult) => {
const values = (bucket.metric.vmrange || bucket.metric.le).split("...");
return promValueToNumber(values[values.length - 1]);
};
const sortBucketsByValues = (a: MetricResult, b: MetricResult) => getUpperBound(a) - getUpperBound(b);
export const normalizeData = (buckets: MetricResult[], isHistogram?: boolean): MetricResult[] => {
if (!isHistogram) return buckets;
const vmBuckets = convertPrometheusToVictoriaMetrics(buckets);
const sortedBuckets = buckets.sort(sortBucketsByValues);
const vmBuckets = convertPrometheusToVictoriaMetrics(sortedBuckets);
const allValues = vmBuckets.map(b => b.values).flat();
return vmBuckets.map(bucket => {
const result = vmBuckets.map(bucket => {
const values = bucket.values.map((v) => {
const totalHits = allValues.filter(av => av[0] === v[0]).reduce((bucketSum, v) => bucketSum + +v[1], 0);
const totalHits = allValues
.filter(av => av[0] === v[0])
.reduce((bucketSum, v) => bucketSum + +v[1], 0);
return [v[0], `${Math.round((+v[1] / totalHits) * 100)}`];
});
return { ...bucket, values };
}) as MetricResult[];
return result.filter(r => !r.values.every(v => v[1] === "0"));
};
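The conversion above turns cumulative Prometheus `le` buckets into VictoriaMetrics-style `vmrange` buckets. Here is a minimal Go sketch of the idea, assuming the elided loop body derives per-range counts by subtracting the previous cumulative bucket value (the standard cumulative-to-delta conversion):

```go
package main

import "fmt"

// bucket holds a cumulative Prometheus histogram bucket: `count` samples <= `le`.
type bucket struct {
	le    string
	count float64
}

// toVMRanges converts cumulative buckets into per-range counts keyed by vmrange.
func toVMRanges(cumulative []bucket) map[string]float64 {
	out := make(map[string]float64)
	prevLe, prevCount := "", 0.0
	for _, b := range cumulative {
		vmrange := b.le
		if prevLe != "" {
			vmrange = prevLe + "..." + b.le
		}
		out[vmrange] = b.count - prevCount // delta between adjacent cumulative buckets
		prevLe, prevCount = b.le, b.count
	}
	return out
}

func main() {
	// Cumulative Prometheus buckets: 3 samples <= 1, 5 samples <= 2.
	fmt.Println(toVMRanges([]bucket{{"1", 3}, {"2", 5}}))
	// Prints: map[1:3 1...2:2]
}
```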


@ -4,7 +4,7 @@ DOCKER_NAMESPACE := victoriametrics
ROOT_IMAGE ?= alpine:3.17.3
CERTS_IMAGE := alpine:3.17.3
GO_BUILDER_IMAGE := golang:1.20.2-alpine
GO_BUILDER_IMAGE := golang:1.20.3-alpine
BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
BASE_IMAGE := local/base:1.1.4-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)


@ -101,7 +101,7 @@ services:
# display source of alerts in grafana
- '-external.url=http://127.0.0.1:3000' #grafana outside container
# when copy-pasting the line below, be aware of the '$$' escaping in '$expr'
- '--external.alert.source=explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr":{{$$expr|jsonEscape|queryEscape}} },{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]'
- '--external.alert.source=explore?orgId=1&left={"datasource":"VictoriaMetrics","queries":[{"expr":{{$$expr|jsonEscape|queryEscape}},"refId":"A"}],"range":{"from":"now-1h","to":"now"}}'
restart: always
alertmanager:


@ -76,7 +76,7 @@ services:
# display source of alerts in grafana
- "--external.url=http://127.0.0.1:3000" #grafana outside container
# when copy-pasting the line, be aware of the '$$' escaping in '$expr'
- '--external.alert.source=explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr":{{$$expr|jsonEscape|queryEscape}} },{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]'
- '--external.alert.source=explore?orgId=1&left={"datasource":"VictoriaMetrics","queries":[{"expr":{{$$expr|jsonEscape|queryEscape}},"refId":"A"}],"range":{"from":"now-1h","to":"now"}}'
networks:
- vm_net
restart: always


@ -15,14 +15,18 @@ The following tip changes can be tested by building VictoriaMetrics components f
## tip
## [v1.90.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.90.0)
Released at 2023-04-06
**Update note: this release contains a backwards-incompatible change in the storage data format,
so previous versions of VictoriaMetrics will exit with the `unexpected number of substrings in the part name` error when trying to run them on data
created by v1.90.0 or newer versions. The solution is to upgrade to v1.90.0 or a newer release**
* SECURITY: upgrade base docker image (alpine) from 3.17.2 to 3.17.3. See [alpine 3.17.3 release notes](https://alpinelinux.org/posts/Alpine-3.17.3-released.html).
* SECURITY: upgrade Go builder from Go1.20.2 to Go1.20.3. See [the list of issues addressed in Go1.20.3](https://github.com/golang/go/issues?q=milestone%3AGo1.20.3+label%3ACherryPickApproved).
* FEATURE: open source the [Graphite Render API](https://docs.victoriametrics.com/#graphite-render-api-usage). This API allows using VictoriaMetrics as a drop-in replacement for Graphite on both the data ingestion and querying sides, reducing infrastructure costs by up to 10x compared to Graphite. See [this case study](https://docs.victoriametrics.com/CaseStudies.html#grammarly) as an example.
* FEATURE: release Windows binaries for [single-node VictoriaMetrics](https://docs.victoriametrics.com/), [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html), [vmbackup](https://docs.victoriametrics.com/vmbackup.html) and [vmrestore](https://docs.victoriametrics.com/vmrestore.html). See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3236), [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3821) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/70) issues.
* FEATURE: release Windows binaries for [single-node VictoriaMetrics](https://docs.victoriametrics.com/), [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html), [vmbackup](https://docs.victoriametrics.com/vmbackup.html) and [vmrestore](https://docs.victoriametrics.com/vmrestore.html). See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3236), [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3821) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/70) issues. This release of VictoriaMetrics for Windows cannot delete [snapshots](https://docs.victoriametrics.com/#how-to-work-with-snapshots) due to Windows constraints. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/70#issuecomment-1491529183) for details. This issue should be resolved in future releases.
* FEATURE: log metrics with truncated labels if the length of a label value in an ingested metric exceeds `-maxLabelValueLen`. This should simplify debugging for this case.
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): show target URL when debugging [target relabeling](https://docs.victoriametrics.com/vmagent.html#relabel-debug). This should simplify target relabel debugging a bit. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3882).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for [VictoriaMetrics remote write protocol](https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol) when [sending / receiving data to / from Kafka](https://docs.victoriametrics.com/vmagent.html#kafka-integration). This protocol allows saving egress network bandwidth costs when sending data from `vmagent` to `Kafka` located in another datacenter or availability zone. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1225).
@ -46,6 +50,7 @@ created by v1.90.0 or newer versions. The solution is to upgrade to v1.90.0 or n
* BUGFIX: [vmauth](https://docs.victoriametrics.com/vmauth.html): suppress [proxy protocol](https://www.haproxy.org/download/2.3/doc/proxy-protocol.txt) parsing errors in case of `EOF`. Usually, the error is caused by health checks and is not a sign of an actual error.
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix displaying errors for each query. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3987).
* BUGFIX: [vmbackup](https://docs.victoriametrics.com/vmbackup.html): fix snapshot not being deleted in case of error during backup. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2055).
* BUGFIX: [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html): suppress `series after dedup` error message in logs when `-remoteWrite.streamAggr.dedupInterval` command-line flag is set at [vmagent](https://docs.victoriametrics.com/vmagent.html) or when `-streamAggr.dedupInterval` command-line flag is set at [single-node VictoriaMetrics](https://docs.victoriametrics.com/).
* BUGFIX: allow using dashes and dots in environment variables names referred in config files via `%{ENV-VAR.SYNTAX}`. See [these docs](https://docs.victoriametrics.com/#environment-variables) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3999).
* BUGFIX: return back query performance scalability on hosts with big number of CPU cores. The scalability has been reduced in [v1.86.0](https://docs.victoriametrics.com/CHANGELOG.html#v1860). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3966).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly convert [VictoriaMetrics histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350) to Prometheus histogram buckets when a VictoriaMetrics histogram contains zero buckets. Previously these buckets were ignored, and this could lead to missing Prometheus histogram buckets after the conversion. Thanks to @zklapow for [the fix](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4021).
@ -54,7 +59,7 @@ created by v1.90.0 or newer versions. The solution is to upgrade to v1.90.0 or n
* BUGFIX: properly support comma-separated filters inside [retention filters](https://docs.victoriametrics.com/#retention-filters). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3915).
* BUGFIX: verify response code when fetching configuration files via HTTP. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4034).
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): replace empty labels with "" instead of "<no value>" during templating, as Prometheus does. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4012).
* BUGFIX: [vmctl](https://docs.victoriametrics.com/vmctl.html): properly pass multiple filters from the `--vm-native-filter-match` command-line flag to the data source. Previously, filters from `--vm-native-filter-match` were used only to discover metric names, so only the `__name__="metric_name"` part was taken into account, while the remaining label filters were ignored. For example, `--vm-native-filter-match={foo="bar",baz="abc"}` could find `metric_name{foo="bar",baz="abc"}`, but the filter was then treated as `--vm-native-filter-match={__name__="metric_name"}`, i.e. the `foo="bar",baz="abc"` label filters were ignored. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4062).
## [v1.89.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.89.1)
@ -147,6 +152,22 @@ Released at 2023-02-24
* BUGFIX: properly parse timestamps in milliseconds when [ingesting data via OpenTSDB telnet put protocol](https://docs.victoriametrics.com/#sending-data-via-telnet-put-protocol). Previously timestamps in milliseconds were mistakenly multiplied by 1000. Thanks to @Droxenator for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3810).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): do not add extrapolated points outside the real points when using [interpolate()](https://docs.victoriametrics.com/MetricsQL.html#interpolate) function. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3816).
## [v1.87.5](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.87.5)
Released at 2023-04-06
**v1.87.x is a line of LTS releases (i.e. long-term support). It contains important up-to-date bugfixes.
The v1.87.x line will be supported for at least 12 months after the [v1.87.0](https://docs.victoriametrics.com/CHANGELOG.html#v1870) release**
* SECURITY: upgrade base docker image (alpine) from 3.17.2 to 3.17.3. See [alpine 3.17.3 release notes](https://alpinelinux.org/posts/Alpine-3.17.3-released.html).
* SECURITY: upgrade Go builder from Go1.20.2 to Go1.20.3. See [the list of issues addressed in Go1.20.3](https://github.com/golang/go/issues?q=milestone%3AGo1.20.3+label%3ACherryPickApproved).
* BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly convert [VictoriaMetrics histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350) to Prometheus histogram buckets when a VictoriaMetrics histogram contains zero buckets. Previously these buckets were ignored, and this could lead to missing Prometheus histogram buckets after the conversion. Thanks to @zklapow for [the fix](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4021).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): fix CPU and memory usage spikes when files pointed by [file_sd_config](https://docs.victoriametrics.com/sd_configs.html#file_sd_configs) cannot be re-read. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3989).
* BUGFIX: prevent unexpected merges on start-up when `-storage.minFreeDiskSpaceBytes` is set. See [the issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4023).
* BUGFIX: properly support comma-separated filters inside [retention filters](https://docs.victoriametrics.com/#retention-filters). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3915).
* BUGFIX: verify response code when fetching configuration files via HTTP. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4034).
## [v1.87.4](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.87.4)
Released at 2023-03-25
@ -702,6 +723,20 @@ Released at 2022-08-08
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): properly show date picker at `Table` tab. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2874).
* BUGFIX: properly generate http redirects if `-http.pathPrefix` command-line flag is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2918).
## [v1.79.12](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.12)
Released at 2023-04-06
**v1.79.x is a line of LTS releases (i.e. long-term support). It contains important up-to-date bugfixes.
The v1.79.x line will be supported for at least 12 months after the [v1.79.0](https://docs.victoriametrics.com/CHANGELOG.html#v1790) release**
* SECURITY: upgrade base docker image (alpine) from 3.17.2 to 3.17.3. See [alpine 3.17.3 release notes](https://alpinelinux.org/posts/Alpine-3.17.3-released.html).
* SECURITY: upgrade Go builder from Go1.20.2 to Go1.20.3. See [the list of issues addressed in Go1.20.3](https://github.com/golang/go/issues?q=milestone%3AGo1.20.3+label%3ACherryPickApproved).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): fix CPU and memory usage spikes when files pointed by [file_sd_config](https://docs.victoriametrics.com/sd_configs.html#file_sd_configs) cannot be re-read. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3989).
* BUGFIX: prevent unexpected merges on start-up when `-storage.minFreeDiskSpaceBytes` is set. See [the issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4023).
* BUGFIX: verify response code when fetching configuration files via HTTP. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4034).
## [v1.79.11](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.79.11)
Released at 2023-03-12


@ -6,6 +6,7 @@ sort: 23
This document contains troubleshooting guides for the most common issues encountered when working with VictoriaMetrics:
- [General troubleshooting checklist](#general-troubleshooting-checklist)
- [Unexpected query results](#unexpected-query-results)
- [Slow data ingestion](#slow-data-ingestion)
- [Slow queries](#slow-queries)
@ -13,12 +14,90 @@ This document contains troubleshooting guides for most common issues when workin
- [Cluster instability](#cluster-instability)
- [Monitoring](#monitoring)
## General troubleshooting checklist
If you hit an issue or have a question about VictoriaMetrics components,
then please follow the steps below in order to quickly find the solution:
1. Check the version of the VictoriaMetrics component that needs troubleshooting and compare
it to [the latest available version](https://docs.victoriametrics.com/CHANGELOG.html).
If the used version is older than the latest available one, then chances are high
that the issue has already been resolved in newer versions. Carefully read [the changelog](https://docs.victoriametrics.com/CHANGELOG.html)
between your version and the latest version and check whether the issue is already fixed there.
If the issue is already fixed in newer versions, then upgrade to the newer version and verify whether the issue is gone:
- [How to upgrade single-node VictoriaMetrics](https://docs.victoriametrics.com/#how-to-upgrade-victoriametrics)
- [How to upgrade VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#updating--reconfiguring-cluster-nodes)
The upgrade procedure for other VictoriaMetrics components is as simple as gracefully stopping the component
by sending the `SIGINT` signal to it and starting the new version of the component.
In rare cases there may be breaking changes between different versions of VictoriaMetrics components.
Such cases are documented in [the changelog](https://docs.victoriametrics.com/CHANGELOG.html),
so please read the changelog before the upgrade.
1. Check the logs of VictoriaMetrics components. They may contain useful information about the issue.
If a log message doesn't contain enough useful information for troubleshooting the issue,
then search for the log message in Google. Chances are high that the issue has already been reported
somewhere (docs, StackOverflow, Github issues, etc.) and indexed by Google, with the solution documented there.
1. If VictoriaMetrics logs contain no relevant information, then try searching for the issue in Google
via multiple keywords and phrases specific to the issue. Chances are high that the issue
and its solution are already documented somewhere.
1. Try searching for the issue at [VictoriaMetrics Github](https://github.com/VictoriaMetrics/VictoriaMetrics/issues).
The signal/noise ratio of search results there is much lower than in Google, but sometimes it may help
to find relevant information about the issue when Google fails.
If you have located the relevant Github issue, but it lacks information on how to diagnose or troubleshoot
the problem, then please provide this information in the comments to the issue. This increases the chances
that the issue will be resolved soon.
1. Try searching for information about the issue in the [VictoriaMetrics source code](https://github.com/search?q=repo%3AVictoriaMetrics%2FVictoriaMetrics&type=code).
Github code search may not work well in some cases, so it is recommended to [check out the VictoriaMetrics source code](https://github.com/VictoriaMetrics/VictoriaMetrics/)
and perform a local search in the checked-out code.
Note that the source code for VictoriaMetrics cluster is located in [the cluster](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster) branch.
1. Try searching for information about the issue in the history of the [VictoriaMetrics Slack chat](https://victoriametrics.slack.com).
There is a non-zero chance that somebody has already hit the same issue and documented the solution in Slack.
1. If the steps above didn't help you find the solution, then please [file a new issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/new/choose)
providing as many details as possible on how to reproduce the issue.
After that you can post the link to the issue in the [VictoriaMetrics Slack chat](https://victoriametrics.slack.com),
so the VictoriaMetrics community can help find the solution. It is better to file the issue at VictoriaMetrics Github
before posting your question to the VictoriaMetrics Slack chat, since Github issues are indexed by Google,
while Slack messages aren't. This simplifies searching for the solution for future VictoriaMetrics users.
1. Pro tip 1: if you see that the [VictoriaMetrics docs](https://docs.victoriametrics.com/) contain incomplete or incorrect information,
then please create a pull request with the relevant changes. This will help the VictoriaMetrics community.
All the docs published at `https://docs.victoriametrics.com` are located in the [docs](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/docs)
folder inside the VictoriaMetrics repository.
1. Pro tip 2: please provide links to existing docs / Github issues / StackOverflow questions
instead of copy-n-pasting information from these sources when asking or answering questions
from the VictoriaMetrics community. If the linked resources don't have enough information,
then it is better to post the missing information in the web resource first and only then provide links
to it in the Slack chat. This will simplify searching for this information in the future
for VictoriaMetrics users via Google and ChatGPT :)
1. Pro tip 3: if you are answering somebody's question about VictoriaMetrics components
at Github issues / Slack chat / StackOverflow, then the best answer is a direct link to the information
regarding the question, or a concise message with multiple links to the relevant information.
The worst answer is a message with misleading or completely wrong information.
1. Pro tip 4: if you can fix the issue yourself, then please do it and provide the corresponding pull request!
We are glad to receive pull requests from the VictoriaMetrics community.
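For reference, the `SIGINT`-based graceful shutdown mentioned in the upgrade step above follows the standard Go signal-handling pattern. A minimal sketch (illustrative only; the components implement this internally):
```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// Illustrates the graceful-shutdown contract from the upgrade step above:
// the process exits cleanly once it receives SIGINT.
func main() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGINT)
	fmt.Println("running; send SIGINT (Ctrl+C) to stop")
	<-ch
	fmt.Println("SIGINT received, shutting down gracefully")
}
```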
## Unexpected query results
If you see unexpected or unreliable query results from VictoriaMetrics, then try the following steps:
1. Check whether simplified queries return unexpected results. For example, if the query looks like
`sum(rate(http_requests_total[5m])) by (job)`, then check whether the following queries return
expected results (see the query sketch after this list):
- Remove the outer `sum` and execute `rate(http_requests_total[5m])`,
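When bisecting a query this way, it can be convenient to script the simplified variants. A minimal Go sketch, assuming a single-node VictoriaMetrics listening on `localhost:8428` (adjust the address and query for your setup):
```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

// Runs one simplified variant of the query against /api/v1/query and prints
// the raw JSON response, so each stage of the original query can be checked
// in isolation.
func main() {
	// Assumption: a single-node VictoriaMetrics listens on localhost:8428.
	q := url.Values{}
	q.Set("query", `rate(http_requests_total[5m])`)

	resp, err := http.Get("http://localhost:8428/api/v1/query?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("status=%d\n%s\n", resp.StatusCode, body)
}
```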

View file

@ -89,6 +89,7 @@ To trigger [forced merge](https://docs.victoriametrics.com/Single-server-Victori
```console
curl -v -X POST http://vmstorage:8482/internal/force_merge
```
</div>
After the merge is complete, the data will be permanently deleted from the disk.
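The same call can be issued programmatically; a minimal Go sketch, assuming the same `vmstorage:8482` address as in the curl example above:
```go
package main

import (
	"fmt"
	"net/http"
)

// Triggers a forced merge on a vmstorage node, mirroring the curl command above.
func main() {
	// Assumption: vmstorage is reachable at vmstorage:8482.
	resp, err := http.Post("http://vmstorage:8482/internal/force_merge", "", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("force merge response:", resp.Status)
}
```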

View file

@ -171,7 +171,7 @@ VMAlertmanagerSpec is a specification of the desired behavior of the VMAlertmana
| ----- | ----------- | ------ | -------- |
| podMetadata | PodMetadata configures Labels and Annotations which are propagated to the alertmanager pods. | *[EmbeddedObjectMetadata](#embeddedobjectmetadata) | false |
| image | Image - docker image settings for VMAlertmanager if no specified operator uses default config version | [Image](#image) | false |
| imagePullSecrets | ImagePullSecrets An optional list of references to secrets in the same namespace to use for pulling images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod | [][v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#localobjectreference-v1-core) | false |
| imagePullSecrets | ImagePullSecrets An optional list of references to secrets in the same namespace to use for pulling images from registries see https://kubernetes.io/docs/concepts/containers/images/#referring-to-an-imagepullsecrets-on-a-pod | [][v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#localobjectreference-v1-core) | false |
| secrets | Secrets is a list of Secrets in the same namespace as the VMAlertmanager object, which shall be mounted into the VMAlertmanager Pods. The Secrets are mounted into /etc/vm/secrets/&lt;secret-name&gt; | []string | false |
| configMaps | ConfigMaps is a list of ConfigMaps in the same namespace as the VMAlertmanager object, which shall be mounted into the VMAlertmanager Pods. The ConfigMaps are mounted into /etc/vm/configs/&lt;configmap-name&gt;. | []string | false |
| templates | Templates is a list of ConfigMap key references for ConfigMaps in the same namespace as the VMAlertmanager object, which shall be mounted into the VMAlertmanager Pods. The Templates are mounted into /etc/vm/templates/&lt;configmap-name&gt;/&lt;configmap-key&gt;. | [][ConfigMapKeyReference](#configmapkeyreference) | false |
@ -208,7 +208,7 @@ VMAlertmanagerSpec is a specification of the desired behavior of the VMAlertmana
| clusterAdvertiseAddress | ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918 | string | false |
| portName | PortName used for the pods and governing service. This defaults to web | string | false |
| serviceSpec | ServiceSpec that will be added to vmalertmanager service spec | *[ServiceSpec](#servicespec) | false |
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmalertmanager VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
| livenessProbe | LivenessProbe that will be added CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
| readinessProbe | ReadinessProbe that will be added CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
@ -723,7 +723,7 @@ VMAgentSpec defines the desired state of VMAgent
| ----- | ----------- | ------ | -------- |
| podMetadata | PodMetadata configures Labels and Annotations which are propagated to the vmagent pods. | *[EmbeddedObjectMetadata](#embeddedobjectmetadata) | false |
| image | Image - docker image settings for VMAgent if no specified operator uses default config version | [Image](#image) | false |
| imagePullSecrets | ImagePullSecrets An optional list of references to secrets in the same namespace to use for pulling images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod | [][v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#localobjectreference-v1-core) | false |
| imagePullSecrets | ImagePullSecrets An optional list of references to secrets in the same namespace to use for pulling images from registries see https://kubernetes.io/docs/concepts/containers/images/#referring-to-an-imagepullsecrets-on-a-pod | [][v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#localobjectreference-v1-core) | false |
| secrets | Secrets is a list of Secrets in the same namespace as the vmagent object, which shall be mounted into the vmagent Pods. will be mounted at path /etc/vm/secrets | []string | false |
| configMaps | ConfigMaps is a list of ConfigMaps in the same namespace as the vmagent object, which shall be mounted into the vmagent Pods. will be mounted at path /etc/vm/configs | []string | false |
| logLevel | LogLevel for VMAgent to be configured with. INFO, WARN, ERROR, FATAL, PANIC | string | false |
@ -778,7 +778,7 @@ VMAgentSpec defines the desired state of VMAgent
| extraArgs | ExtraArgs that will be passed to VMAgent pod for example remoteWrite.tmpDataPath: /tmp it would be converted to flag --remoteWrite.tmpDataPath=/tmp | map[string]string | false |
| extraEnvs | ExtraEnvs that will be added to VMAgent pod | [][v1.EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#envvar-v1-core) | false |
| serviceSpec | ServiceSpec that will be added to vmagent service spec | *[ServiceSpec](#servicespec) | false |
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmagent VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
| shardCount | ShardCount - numbers of shards of VMAgent in this case operator will use 1 deployment/sts per shard with replicas count according to spec.replicas https://victoriametrics.github.io/vmagent.html#scraping-big-number-of-targets | *int | false |
| updateStrategy | UpdateStrategy - overrides default update strategy. works only for deployments, statefulset always use OnDelete. | *[appsv1.DeploymentStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#deploymentstrategy-v1-apps) | false |
| rollingUpdate | RollingUpdate - overrides deployment update params. | *[appsv1.RollingUpdateDeployment](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#rollingupdatedeployment-v1-apps) | false |
@ -1091,7 +1091,7 @@ VMAlertSpec defines the desired state of VMAlert
| ----- | ----------- | ------ | -------- |
| podMetadata | PodMetadata configures Labels and Annotations which are propagated to the VMAlert pods. | *[EmbeddedObjectMetadata](#embeddedobjectmetadata) | false |
| image | Image - docker image settings for VMAlert if no specified operator uses default config version | [Image](#image) | false |
| imagePullSecrets | ImagePullSecrets An optional list of references to secrets in the same namespace to use for pulling images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod | [][v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#localobjectreference-v1-core) | false |
| imagePullSecrets | ImagePullSecrets An optional list of references to secrets in the same namespace to use for pulling images from registries see https://kubernetes.io/docs/concepts/containers/images/#referring-to-an-imagepullsecrets-on-a-pod | [][v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#localobjectreference-v1-core) | false |
| secrets | Secrets is a list of Secrets in the same namespace as the VMAlert object, which shall be mounted into the VMAlert Pods. The Secrets are mounted into /etc/vm/secrets/&lt;secret-name&gt;. | []string | false |
| configMaps | ConfigMaps is a list of ConfigMaps in the same namespace as the VMAlert object, which shall be mounted into the VMAlert Pods. The ConfigMaps are mounted into /etc/vm/configs/&lt;configmap-name&gt;. | []string | false |
| logFormat | LogFormat for VMAlert to be configured with. default or json | string | false |
@ -1130,7 +1130,7 @@ VMAlertSpec defines the desired state of VMAlert
| extraEnvs | ExtraEnvs that will be added to VMAlert pod | [][v1.EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#envvar-v1-core) | false |
| externalLabels | ExternalLabels in the form &#39;name: value&#39; to add to all generated recording rules and alerts. | map[string]string | false |
| serviceSpec | ServiceSpec that will be added to vmalert service spec | *[ServiceSpec](#servicespec) | false |
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmalert VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
| updateStrategy | UpdateStrategy - overrides default update strategy. | *[appsv1.DeploymentStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#deploymentstrategy-v1-apps) | false |
| rollingUpdate | RollingUpdate - overrides deployment update params. | *[appsv1.RollingUpdateDeployment](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#rollingupdatedeployment-v1-apps) | false |
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
@ -1188,7 +1188,7 @@ VMSingleSpec defines the desired state of VMSingle
| ----- | ----------- | ------ | -------- |
| podMetadata | PodMetadata configures Labels and Annotations which are propagated to the VMSingle pods. | *[EmbeddedObjectMetadata](#embeddedobjectmetadata) | false |
| image | Image - docker image settings for VMSingle if no specified operator uses default config version | [Image](#image) | false |
| imagePullSecrets | ImagePullSecrets An optional list of references to secrets in the same namespace to use for pulling images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod | [][v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#localobjectreference-v1-core) | false |
| imagePullSecrets | ImagePullSecrets An optional list of references to secrets in the same namespace to use for pulling images from registries see https://kubernetes.io/docs/concepts/containers/images/#referring-to-an-imagepullsecrets-on-a-pod | [][v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#localobjectreference-v1-core) | false |
| secrets | Secrets is a list of Secrets in the same namespace as the VMSingle object, which shall be mounted into the VMSingle Pods. | []string | false |
| configMaps | ConfigMaps is a list of ConfigMaps in the same namespace as the VMSingle object, which shall be mounted into the VMSingle Pods. | []string | false |
| logLevel | LogLevel for victoria metrics single to be configured with. | string | false |
@ -1223,7 +1223,7 @@ VMSingleSpec defines the desired state of VMSingle
| extraArgs | ExtraArgs that will be passed to VMSingle pod for example remoteWrite.tmpDataPath: /tmp | map[string]string | false |
| extraEnvs | ExtraEnvs that will be added to VMSingle pod | [][v1.EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#envvar-v1-core) | false |
| serviceSpec | ServiceSpec that will be added to vmsingle service spec | *[ServiceSpec](#servicespec) | false |
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmsingle VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
| livenessProbe | LivenessProbe that will be added CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
| readinessProbe | ReadinessProbe that will be added CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
| startupProbe | StartupProbe that will be added to CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
@ -1701,7 +1701,7 @@ VMClusterSpec defines the desired state of VMCluster
| podSecurityPolicyName | PodSecurityPolicyName - defines name for podSecurityPolicy in case of empty value, prefixedName will be used. | string | false |
| serviceAccountName | ServiceAccountName is the name of the ServiceAccount to use to run the VMSelect, VMStorage and VMInsert Pods. | string | false |
| clusterVersion | ClusterVersion defines default images tag for all components. it can be overwritten with component specific image.tag value. | string | false |
| imagePullSecrets | ImagePullSecrets An optional list of references to secrets in the same namespace to use for pulling images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod | [][v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#localobjectreference-v1-core) | false |
| imagePullSecrets | ImagePullSecrets An optional list of references to secrets in the same namespace to use for pulling images from registries see https://kubernetes.io/docs/concepts/containers/images/#referring-to-an-imagepullsecrets-on-a-pod | [][v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#localobjectreference-v1-core) | false |
| vmselect | | *[VMSelect](#vmselect) | false |
| vminsert | | *[VMInsert](#vminsert) | false |
| vmstorage | | *[VMStorage](#vmstorage) | false |
@ -1755,7 +1755,7 @@ VMClusterStatus defines the observed state of VMCluster
| runtimeClassName | RuntimeClassName - defines runtime class for kubernetes pod. https://kubernetes.io/docs/concepts/containers/runtime-class/ | *string | false |
| extraEnvs | ExtraEnvs that will be added to VMSelect pod | [][v1.EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#envvar-v1-core) | false |
| serviceSpec | ServiceSpec that will be added to vminsert service spec | *[ServiceSpec](#servicespec) | false |
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vminsert VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
| updateStrategy | UpdateStrategy - overrides default update strategy. | *[appsv1.DeploymentStrategyType](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#deploymentstrategy-v1-apps) | false |
| rollingUpdate | RollingUpdate - overrides deployment update params. | *[appsv1.RollingUpdateDeployment](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#rollingupdatedeployment-v1-apps) | false |
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
@ -1878,7 +1878,7 @@ VMClusterStatus defines the observed state of VMCluster
| extraArgs | | map[string]string | false |
| extraEnvs | ExtraEnvs that will be added to VMSelect pod | [][v1.EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#envvar-v1-core) | false |
| serviceSpec | ServiceSpec that will be create additional service for vmstorage | *[ServiceSpec](#servicespec) | false |
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmstorage VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
| livenessProbe | LivenessProbe that will be added CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
| readinessProbe | ReadinessProbe that will be added CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |
@ -2073,7 +2073,7 @@ VMAuthSpec defines the desired state of VMAuth
| ----- | ----------- | ------ | -------- |
| podMetadata | PodMetadata configures Labels and Annotations which are propagated to the VMAuth pods. | *[EmbeddedObjectMetadata](#embeddedobjectmetadata) | false |
| image | Image - docker image settings for VMAuth if no specified operator uses default config version | [Image](#image) | false |
| imagePullSecrets | ImagePullSecrets An optional list of references to secrets in the same namespace to use for pulling images from registries see http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod | [][v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#localobjectreference-v1-core) | false |
| imagePullSecrets | ImagePullSecrets An optional list of references to secrets in the same namespace to use for pulling images from registries see https://kubernetes.io/docs/concepts/containers/images/#referring-to-an-imagepullsecrets-on-a-pod | [][v1.LocalObjectReference](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#localobjectreference-v1-core) | false |
| secrets | Secrets is a list of Secrets in the same namespace as the VMAuth object, which shall be mounted into the VMAuth Pods. | []string | false |
| configMaps | ConfigMaps is a list of ConfigMaps in the same namespace as the VMAuth object, which shall be mounted into the VMAuth Pods. | []string | false |
| logLevel | LogLevel for victoria metrics single to be configured with. | string | false |
@ -2103,8 +2103,8 @@ VMAuthSpec defines the desired state of VMAuth
| userNamespaceSelector | UserNamespaceSelector Namespaces to be selected for VMAuth discovery. Works in combination with Selector. NamespaceSelector nil - only objects at VMAuth namespace. Selector nil - only objects at NamespaceSelector namespaces. If both nil - behaviour controlled by selectAllByDefault | *[metav1.LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#labelselector-v1-meta) | false |
| extraArgs | ExtraArgs that will be passed to VMAuth pod for example remoteWrite.tmpDataPath: /tmp | map[string]string | false |
| extraEnvs | ExtraEnvs that will be added to VMAuth pod | [][v1.EnvVar](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#envvar-v1-core) | false |
| serviceSpec | ServiceSpec that will be added to vmsingle service spec | *[ServiceSpec](#servicespec) | false |
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmselect VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
| serviceSpec | ServiceSpec that will be added to vmauth service spec | *[ServiceSpec](#servicespec) | false |
| serviceScrapeSpec | ServiceScrapeSpec that will be added to vmauth VMServiceScrape spec | *[VMServiceScrapeSpec](#vmservicescrapespec) | false |
| podDisruptionBudget | PodDisruptionBudget created by operator | *[EmbeddedPodDisruptionBudgetSpec](#embeddedpoddisruptionbudgetspec) | false |
| ingress | Ingress enables ingress configuration for VMAuth. | *[EmbeddedIngress](#embeddedingress) | false |
| livenessProbe | LivenessProbe that will be added CRD pod | *[v1.Probe](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#probe-v1-core) | false |

View file

@ -226,6 +226,7 @@ Single-node VictoriaMetrics:
```console
curl -X POST http://localhost:8428/api/v1/import/native -T filename.bin
```
</div>
Cluster version of VictoriaMetrics:
<div class="with-copy" markdown="1">
@ -252,6 +253,8 @@ Single-node VictoriaMetrics:
curl -d 'metric_name{foo="bar"} 123' -X POST http://localhost:8428/api/v1/import/prometheus
```
</div>
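The same import can be done programmatically; a minimal Go sketch, assuming a single-node VictoriaMetrics at `localhost:8428` as in the curl example above:
```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// Imports a single sample in Prometheus text exposition format,
// mirroring the curl command above.
func main() {
	// Assumption: a single-node VictoriaMetrics listens on localhost:8428.
	data := `metric_name{foo="bar"} 123`
	resp, err := http.Post("http://localhost:8428/api/v1/import/prometheus",
		"text/plain", strings.NewReader(data))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("import response:", resp.Status)
}
```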
Cluster version of VictoriaMetrics:
<div class="with-copy" markdown="1">

View file

@ -920,7 +920,7 @@ The shortlist of configuration flags is the following:
-evaluationInterval duration
How often to evaluate the rules (default 1m0s)
-external.alert.source string
External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service. Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . For example, link to Grafana: -external.alert.source='explore?orgId=1&left=["now-1h","now","VictoriaMetrics",{"expr":{{$expr|jsonEscape|queryEscape}} },{"mode":"Metrics"},{"ui":[true,true,true,"none"]}]'. Link to VMUI: -external.alert.source='vmui/#/?g0.expr={{.Expr|queryEscape}}'. If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used.
External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service. Supports templating - see https://docs.victoriametrics.com/vmalert.html#templating . For example, link to Grafana: -external.alert.source='explore?orgId=1&left={"datasource":"VictoriaMetrics","queries":[{"expr":{{$expr|jsonEscape|queryEscape}},"refId":"A"}],"range":{"from":"now-1h","to":"now"}}'. Link to VMUI: -external.alert.source='vmui/#/?g0.expr={{.Expr|queryEscape}}'. If empty 'vmalert/alert?group_id={{.GroupID}}&alert_id={{.AlertID}}' is used.
-external.label array
Optional label in the form 'Name=value' to add to all generated recording rules and alerts. Pass multiple -label flags in order to add multiple label sets.
Supports an array of values separated by comma or specified via multiple flags.
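A rough sketch of how such a template expands, using Go's `text/template` with simplified stand-ins for vmalert's `queryEscape` and `jsonEscape` functions (the stand-ins are assumptions for illustration; vmalert ships its own implementations):
```go
package main

import (
	"fmt"
	"net/url"
	"strings"
	"text/template"
)

// Expands an -external.alert.source style template for one alert.
func main() {
	funcs := template.FuncMap{
		"queryEscape": url.QueryEscape,
		"jsonEscape": func(s string) string {
			// Minimal JSON string escaping, for illustration only.
			return `"` + strings.NewReplacer(`\`, `\\`, `"`, `\"`).Replace(s) + `"`
		},
	}
	tpl := template.Must(template.New("source").Funcs(funcs).
		Parse(`vmui/#/?g0.expr={{.Expr|queryEscape}}`))

	var sb strings.Builder
	if err := tpl.Execute(&sb, struct{ Expr string }{Expr: `up == 0`}); err != nil {
		panic(err)
	}
	fmt.Println(sb.String()) // vmui/#/?g0.expr=up+%3D%3D+0
}
```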

View file

@ -279,7 +279,8 @@ If restore mark doesn't exist at `storageDataPath`(restore wasn't requested) `vm
```yaml
vmbackup:
restore:
onStart: "true"
onStart:
enabled: "true"
```
See operator `VMStorage` schema [here](https://docs.victoriametrics.com/operator/api.html#vmstorage) and `VMSingle` [here](https://docs.victoriametrics.com/operator/api.html#vmsinglespec).
2. Enter container running `vmbackupmanager`
@ -309,7 +310,8 @@ Clusters here are referred to as `source` and `destination`.
```yaml
vmbackup:
restore:
onStart: "true"
onStart:
enabled: "true"
```
Note: it is safe to leave this section in the cluster configuration, since it will be ignored if restore mark doesn't exist.
> Important! Use different `-dst` for *destination* cluster to avoid overwriting backup data of the *source* cluster.
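To make the new nested shape explicit, the sketch below parses the section above with `gopkg.in/yaml.v2` (already in this repo's dependency set); the struct names are illustrative, not the operator's actual Go types:
```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// config mirrors the restore section shown above: onStart is now a mapping
// with an enabled field rather than a bare string.
type config struct {
	VMBackup struct {
		Restore struct {
			OnStart struct {
				Enabled string `yaml:"enabled"`
			} `yaml:"onStart"`
		} `yaml:"restore"`
	} `yaml:"vmbackup"`
}

func main() {
	doc := []byte(`
vmbackup:
  restore:
    onStart:
      enabled: "true"
`)
	var cfg config
	if err := yaml.Unmarshal(doc, &cfg); err != nil {
		panic(err)
	}
	fmt.Println("restore on start enabled:", cfg.VMBackup.Restore.OnStart.Enabled) // "true"
}
```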

go.mod
View file

@ -26,7 +26,7 @@ require (
github.com/golang/snappy v0.0.4
github.com/googleapis/gax-go/v2 v2.8.0
github.com/influxdata/influxdb v1.11.0
github.com/klauspost/compress v1.16.3
github.com/klauspost/compress v1.16.4
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/oklog/ulid v1.3.1
@ -36,13 +36,13 @@ require (
github.com/valyala/fastjson v1.6.4
github.com/valyala/fastrand v1.1.0
github.com/valyala/fasttemplate v1.2.2
github.com/valyala/gozstd v1.18.0
github.com/valyala/gozstd v1.19.0
github.com/valyala/histogram v1.2.0
github.com/valyala/quicktemplate v1.7.0
golang.org/x/net v0.8.0
golang.org/x/net v0.9.0
golang.org/x/oauth2 v0.6.0
golang.org/x/sys v0.6.0
google.golang.org/api v0.114.0
golang.org/x/sys v0.7.0
google.golang.org/api v0.116.0
gopkg.in/yaml.v2 v2.4.0
)
@ -50,11 +50,11 @@ require (
cloud.google.com/go v0.110.0 // indirect
cloud.google.com/go/compute v1.19.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v0.13.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 // indirect
cloud.google.com/go/iam v1.0.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
github.com/aws/aws-sdk-go v1.44.234 // indirect
github.com/aws/aws-sdk-go v1.44.238 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.18 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.1 // indirect
@ -109,11 +109,11 @@ require (
go.uber.org/goleak v1.2.1 // indirect
golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/text v0.9.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633 // indirect
google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd // indirect
google.golang.org/grpc v1.54.0 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

go.sum
View file

@ -27,8 +27,8 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k=
cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
cloud.google.com/go/iam v1.0.0 h1:hlQJMovyJJwYjZcTohUH4o1L8Z8kYz+E+W/zktiLCBc=
cloud.google.com/go/iam v1.0.0/go.mod h1:ikbQ4f1r91wTmBmmOtBCOtuEOei6taatNXytzB7Cxew=
cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
@ -46,8 +46,8 @@ github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9Eb
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.1.0 h1:QkAcEIAKbNL4KoFr4SathZPhDhF4mVwpBMFlYjyAqy8=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 h1:sXr+ck84g/ZlZUOZiNELInmMgOsuGwdjjVkEIde0OtY=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
@ -86,8 +86,8 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.234 h1:8YbQ5AhpgV/cC7jYX8qS34Am/vcn2ZoIFJ1qIgwOL+0=
github.com/aws/aws-sdk-go v1.44.234/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.238 h1:qSWVXr/y/SsYyuvwVHYQpzcMKa2UzOjKgqPp7BTGfbo=
github.com/aws/aws-sdk-go v1.44.238/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v1.17.7 h1:CLSjnhJSTSogvqUGhIC6LqFKATMRexcxLZ0i/Nzk9Eg=
github.com/aws/aws-sdk-go-v2 v1.17.7/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
@ -315,8 +315,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.16.4 h1:91KN02FnsOYhuunwU4ssRe8lc2JosWmizWa91B5v1PU=
github.com/klauspost/compress v1.16.4/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -427,8 +427,8 @@ github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G
github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/gozstd v1.18.0 h1:f4BskcUZBnDrEJ2F+lVbNCMGOFBoGHEw71RBkCNR4IM=
github.com/valyala/gozstd v1.18.0/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
github.com/valyala/gozstd v1.19.0 h1:BS0M7sH3dcuyw2SQBrTLprAdGuNxfiH0c4IAM8kX07c=
github.com/valyala/gozstd v1.19.0/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ=
github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY=
github.com/valyala/quicktemplate v1.7.0 h1:LUPTJmlVcb46OOUY3IeD9DojFpAVbsG+5WFTcjMJzCM=
@ -539,8 +539,8 @@ golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -605,12 +605,13 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw=
golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -619,8 +620,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -692,8 +693,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE=
google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
google.golang.org/api v0.116.0 h1:09tOPVufPwfm5W4aA8EizGHJ7BcoRDsIareM2a15gO4=
google.golang.org/api v0.116.0/go.mod h1:9cD4/t6uvd9naoEJFA+M96d0IuB6BqFuyhpw68+mRGg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -731,8 +732,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633 h1:0BOZf6qNozI3pkN3fJLwNubheHJYHhMh91GRFOWWK08=
google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd h1:sLpv7bNL1AsX3fdnWh9WVh7ejIzXdOc1RRHGeAmeStU=
google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=

View file

@ -21,7 +21,7 @@ func TestMarshalUnmarshalInt64Array(t *testing.T) {
v += int64(r.NormFloat64() * 1e6)
va = append(va, v)
}
for precisionBits := uint8(1); precisionBits < 17; precisionBits++ {
for precisionBits := uint8(1); precisionBits < 14; precisionBits++ {
testMarshalUnmarshalInt64Array(t, va, precisionBits, MarshalTypeZSTDNearestDelta)
}
for precisionBits := uint8(23); precisionBits < 65; precisionBits++ {

View file

@ -188,6 +188,22 @@ func (x *Labels) Get(name string) string {
return ""
}
// Set sets the value for the label with the given name.
// If the label with the given name doesn't exist, it is added as a new label.
func (x *Labels) Set(name, value string) {
if name == "" || value == "" {
return
}
labels := x.GetLabels()
for i, label := range labels {
if label.Name == name {
labels[i].Value = value
return
}
}
x.Add(name, value)
}
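// Usage sketch (illustrative; mirrors TestLabels_Set below):
//
//	labels := MustNewLabelsFromString(`http_request_total{a="b"}`)
//	labels.Set("a", "c")          // existing label: value is overwritten
//	labels.Set("ip", "127.0.0.1") // missing label: appended via Add
//	// labels.String() == `{__name__="http_request_total",a="c",ip="127.0.0.1"}`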
// InternStrings interns all the strings used in x labels.
func (x *Labels) InternStrings() {
labels := x.GetLabels()
@ -306,7 +322,7 @@ func MustNewLabelsFromString(metricWithLabels string) *Labels {
// NewLabelsFromString creates labels from s, which can have the form `metric{labels}`.
//
// This function must be used only in tests
// This function must be used only in non-performance-critical code, since it allocates a lot of memory
func NewLabelsFromString(metricWithLabels string) (*Labels, error) {
stripDummyMetric := false
if strings.HasPrefix(metricWithLabels, "{") {

View file

@ -175,3 +175,22 @@ func TestLabelsRemoveLabelsWithDoubleUnderscorePrefix(t *testing.T) {
f(`{__meta_foo="bar",a="b",__name__="foo",__vm_filepath="aa"}`, `{a="b"}`)
f(`{__meta_foo="bdffr",foo="bar",__meta_xxx="basd"}`, `{foo="bar"}`)
}
func TestLabels_Set(t *testing.T) {
f := func(metric, name, value, resultExpected string) {
t.Helper()
labels := MustNewLabelsFromString(metric)
labels.Set(name, value)
result := labels.String()
if result != resultExpected {
t.Fatalf("unexpected result of RemoveLabelsWithDoubleUnderscorePrefix;\ngot\n%s\nwant\n%s", result, resultExpected)
}
}
f(`{}`, ``, ``, `{}`)
f(`{foo="bar"}`, `bar`, `baz`, `{foo="bar",bar="baz"}`)
f(`{__meta_foo="bar",a="b",__name__="foo",__vm_filepath="aa"}`, `__name__`, `bar`, `{__meta_foo="bar",a="b",__name__="bar",__vm_filepath="aa"}`)
f(`{__meta_foo="bdffr",foo="bar",__meta_xxx="basd"}`, `__name__`, `baz`, `{__meta_foo="bdffr",foo="bar",__meta_xxx="basd",__name__="baz"}`)
f(`http_request_total{a="b"}`, `__name__`, `metric`, `{__name__="metric",a="b"}`)
f(`http_request_total{a="b"}`, `a`, `c`, `{__name__="http_request_total",a="c"}`)
f(`http_request_total{a="b"}`, `ip`, `127.0.0.1`, `{__name__="http_request_total",a="b",ip="127.0.0.1"}`)
}

View file

@ -1,4 +1,4 @@
GO_VERSION ?=1.20.2
GO_VERSION ?=1.20.3
SNAP_BUILDER_IMAGE := local/snap-builder:2.0.0-$(shell echo $(GO_VERSION) | tr :/ __)

View file

@ -1,5 +1,13 @@
# Changes
## [1.0.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.13.0...iam/v1.0.0) (2023-04-04)
### Features
* **iam:** Promote to GA ([#7627](https://github.com/googleapis/google-cloud-go/issues/7627)) ([b351906](https://github.com/googleapis/google-cloud-go/commit/b351906a10e17a02d7f7e2551bc1585fd9dc3742))
## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.12.0...iam/v0.13.0) (2023-03-15)

View file

@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.21.9
// protoc-gen-go v1.28.1
// protoc v3.21.12
// source: google/iam/v1/iam_policy.proto
package iampb
@ -363,16 +363,16 @@ var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{
0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73,
0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d,
0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x86, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e,
0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69,
0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43,
0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56,
0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x7f, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x49,
0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f,
0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69,
0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13,
0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d,
0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f,
0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var (

View file

@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.21.9
// protoc-gen-go v1.28.1
// protoc v3.21.12
// source: google/iam/v1/options.proto
package iampb
@ -111,16 +111,16 @@ var file_google_iam_v1_options_proto_rawDesc = []byte{
0x12, 0x38, 0x0a, 0x18, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x6f,
0x6c, 0x69, 0x63, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
0x28, 0x05, 0x52, 0x16, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, 0x50, 0x6f, 0x6c,
0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x84, 0x01, 0x0a, 0x11, 0x63,
0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31,
0x42, 0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69,
0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43,
0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56,
0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
0x69, 0x63, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x7d, 0x0a, 0x11, 0x63, 0x6f,
0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42,
0x0c, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f,
0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69,
0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13,
0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d,
0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f,
0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
}
var (

View file

@ -14,8 +14,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
// protoc v3.21.9
// protoc-gen-go v1.28.1
// protoc v3.21.12
// source: google/iam/v1/policy.proto
package iampb
@ -214,7 +214,8 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) {
// only if the expression evaluates to `true`. A condition can add constraints
// based on attributes of the request, the resource, or both. To learn which
// resources support conditions in their IAM policies, see the
// [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
// [IAM
// documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
//
// **JSON example:**
//
@ -237,7 +238,8 @@ func (AuditConfigDelta_Action) EnumDescriptor() ([]byte, []int) {
// "condition": {
// "title": "expirable access",
// "description": "Does not grant access after Sep 2020",
// "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')",
// "expression": "request.time <
// timestamp('2020-10-01T00:00:00.000Z')",
// }
// }
// ],
@ -279,11 +281,11 @@ type Policy struct {
// Any operation that affects conditional role bindings must specify version
// `3`. This requirement applies to the following operations:
//
// - Getting a policy that includes a conditional role binding
// - Adding a conditional role binding to a policy
// - Changing a conditional role binding in a policy
// - Removing any role binding, with or without a condition, from a policy
// that includes conditions
// * Getting a policy that includes a conditional role binding
// * Adding a conditional role binding to a policy
// * Changing a conditional role binding in a policy
// * Removing any role binding, with or without a condition, from a policy
// that includes conditions
//
// **Important:** If you use IAM Conditions, you must include the `etag` field
// whenever you call `setIamPolicy`. If you omit this field, then IAM allows
@ -294,7 +296,8 @@ type Policy struct {
// specify any valid version or leave the field unset.
//
// To learn which resources support conditions in their IAM policies, see the
// [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
// [IAM
// documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
Version int32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
// Associates a list of `members`, or principals, with a `role`. Optionally,
// may specify a `condition` that determines how and when the `bindings` are
@ -396,43 +399,47 @@ type Binding struct {
// Specifies the principals requesting access for a Cloud Platform resource.
// `members` can have the following values:
//
// - `allUsers`: A special identifier that represents anyone who is
// on the internet; with or without a Google account.
// * `allUsers`: A special identifier that represents anyone who is
// on the internet; with or without a Google account.
//
// - `allAuthenticatedUsers`: A special identifier that represents anyone
// who is authenticated with a Google account or a service account.
// * `allAuthenticatedUsers`: A special identifier that represents anyone
// who is authenticated with a Google account or a service account.
//
// - `user:{emailid}`: An email address that represents a specific Google
// account. For example, `alice@example.com` .
// * `user:{emailid}`: An email address that represents a specific Google
// account. For example, `alice@example.com` .
//
// - `serviceAccount:{emailid}`: An email address that represents a service
// account. For example, `my-other-app@appspot.gserviceaccount.com`.
//
// - `group:{emailid}`: An email address that represents a Google group.
// For example, `admins@example.com`.
// * `serviceAccount:{emailid}`: An email address that represents a service
// account. For example, `my-other-app@appspot.gserviceaccount.com`.
//
// - `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
// identifier) representing a user that has been recently deleted. For
// example, `alice@example.com?uid=123456789012345678901`. If the user is
// recovered, this value reverts to `user:{emailid}` and the recovered user
// retains the role in the binding.
// * `group:{emailid}`: An email address that represents a Google group.
// For example, `admins@example.com`.
//
// - `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
// unique identifier) representing a service account that has been recently
// deleted. For example,
// `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
// If the service account is undeleted, this value reverts to
// `serviceAccount:{emailid}` and the undeleted service account retains the
// role in the binding.
// * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique
// identifier) representing a user that has been recently deleted. For
// example, `alice@example.com?uid=123456789012345678901`. If the user is
// recovered, this value reverts to `user:{emailid}` and the recovered user
// retains the role in the binding.
//
// * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus
// unique identifier) representing a service account that has been recently
// deleted. For example,
// `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`.
// If the service account is undeleted, this value reverts to
// `serviceAccount:{emailid}` and the undeleted service account retains the
// role in the binding.
//
// * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
// identifier) representing a Google group that has been recently
// deleted. For example, `admins@example.com?uid=123456789012345678901`. If
// the group is recovered, this value reverts to `group:{emailid}` and the
// recovered group retains the role in the binding.
//
//
// * `domain:{domain}`: The G Suite domain (primary) that represents all the
// users of that domain. For example, `google.com` or `example.com`.
//
// - `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique
// identifier) representing a Google group that has been recently
// deleted. For example, `admins@example.com?uid=123456789012345678901`. If
// the group is recovered, this value reverts to `group:{emailid}` and the
// recovered group retains the role in the binding.
//
// - `domain:{domain}`: The G Suite domain (primary) that represents all the
// users of that domain. For example, `google.com` or `example.com`.
Members []string `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
// The condition that is associated with this binding.
//
@ -640,7 +647,8 @@ type AuditLogConfig struct {
LogType AuditLogConfig_LogType `protobuf:"varint,1,opt,name=log_type,json=logType,proto3,enum=google.iam.v1.AuditLogConfig_LogType" json:"log_type,omitempty"`
// Specifies the identities that do not cause logging for this type of
// permission.
// Follows the same format of [Binding.members][google.iam.v1.Binding.members].
// Follows the same format of
// [Binding.members][google.iam.v1.Binding.members].
ExemptedMembers []string `protobuf:"bytes,2,rep,name=exempted_members,json=exemptedMembers,proto3" json:"exempted_members,omitempty"`
}
@ -999,16 +1007,15 @@ var file_google_iam_v1_policy_proto_rawDesc = []byte{
0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x12, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e,
0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07,
0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x56,
0x45, 0x10, 0x02, 0x42, 0x83, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63,
0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f,
0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x61, 0x6d, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13,
0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d,
0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f,
0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x33,
0x45, 0x10, 0x02, 0x42, 0x7c, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d,
0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d,
0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43,
0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56,
0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (

View file

@ -1846,6 +1846,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -3263,9 +3266,15 @@ var awsPartition = partition{
},
"arc-zonal-shift": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-1",
}: endpoint{},
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@ -3290,6 +3299,12 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "eu-west-3",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@ -5140,6 +5155,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -5155,12 +5173,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -5605,6 +5629,9 @@ var awsPartition = partition{
},
"codepipeline": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "af-south-1",
}: endpoint{},
endpointKey{
Region: "ap-east-1",
}: endpoint{},
@ -5635,6 +5662,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
@ -5695,6 +5725,9 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "me-south-1",
}: endpoint{},
endpointKey{
Region: "sa-east-1",
}: endpoint{},
@ -12402,12 +12435,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -12977,6 +13016,9 @@ var awsPartition = partition{
},
},
Endpoints: serviceEndpoints{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@ -14980,6 +15022,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -14995,12 +15040,18 @@ var awsPartition = partition{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{
Region: "eu-north-1",
}: endpoint{},
endpointKey{
Region: "eu-south-1",
}: endpoint{},
endpointKey{
Region: "eu-south-2",
}: endpoint{},
endpointKey{
Region: "eu-west-1",
}: endpoint{},
@ -19601,6 +19652,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-northeast-2",
}: endpoint{},
endpointKey{
Region: "ap-northeast-3",
}: endpoint{},
endpointKey{
Region: "ap-south-1",
}: endpoint{},
@ -22049,6 +22103,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-south-1",
}: endpoint{},
endpointKey{
Region: "ap-south-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-1",
}: endpoint{},
@ -32164,6 +32221,24 @@ var awsusgovPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-east-1",
}: endpoint{},
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com",
},
endpointKey{
Region: "us-gov-east-1-fips",
}: endpoint{
Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
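
The partition tables above are consumed through the SDK's endpoint resolver. A minimal sketch of resolving one of the newly added entries, assuming the aws-sdk-go v1 endpoints API (service and region strings are illustrative):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Resolve codepipeline in eu-central-2, one of the regions added above,
	// against the default partition tables.
	resolved, err := endpoints.DefaultResolver().EndpointFor("codepipeline", "eu-central-2")
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Println(resolved.URL) // e.g. https://codepipeline.eu-central-2.amazonaws.com
}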

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.44.234"
const SDKVersion = "1.44.238"

View file

@ -615,6 +615,8 @@ Here are other packages of good quality and pure Go (no cgo wrappers or autoconv
* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
# license

View file

@ -169,6 +169,10 @@ func (w *GzipResponseWriter) Write(b []byte) (int, error) {
return len(b), nil
}
func (w *GzipResponseWriter) Unwrap() http.ResponseWriter {
return w.ResponseWriter
}
var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
// startGzip initializes a GZIP writer and writes the buffer.
@ -919,6 +923,10 @@ func atoi(s string) (int, bool) {
return int(i64), err == nil
}
type unwrapper interface {
Unwrap() http.ResponseWriter
}
// newNoGzipResponseWriter will return a response writer that
// cleans up compression artifacts.
// Depending on whether http.Hijacker is supported the returned will as well.
@ -929,10 +937,12 @@ func newNoGzipResponseWriter(w http.ResponseWriter) http.ResponseWriter {
http.ResponseWriter
http.Hijacker
http.Flusher
unwrapper
}{
ResponseWriter: n,
Hijacker: hj,
Flusher: n,
unwrapper: n,
}
return x
}
@ -982,3 +992,7 @@ func (n *NoGzipResponseWriter) WriteHeader(statusCode int) {
}
n.ResponseWriter.WriteHeader(statusCode)
}
func (n *NoGzipResponseWriter) Unwrap() http.ResponseWriter {
return n.ResponseWriter
}
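
The Unwrap methods added above exist so Go 1.20's http.ResponseController can reach the underlying writer through the gzip wrapper. A minimal sketch using gzhttp's GzipHandler (the handler body is illustrative):

package main

import (
	"net/http"

	"github.com/klauspost/compress/gzhttp"
)

func main() {
	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// ResponseController follows Unwrap() chains, so Flush reaches
		// the real connection even when w is the gzip wrapper.
		rc := http.NewResponseController(w)
		w.Write([]byte("chunk 1\n"))
		rc.Flush()
		w.Write([]byte("chunk 2\n"))
	})
	http.ListenAndServe(":8080", gzhttp.GzipHandler(h))
}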

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

1055 vendor/github.com/klauspost/compress/s2/reader.go generated vendored Normal file

File diff suppressed because it is too large

1020 vendor/github.com/klauspost/compress/s2/writer.go generated vendored Normal file

File diff suppressed because it is too large

View file

@ -473,7 +473,7 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
return b.encodeLits(b.literals, rawAllLits)
}
// We want some difference to at least account for the headers.
saved := b.size - len(b.literals) - (b.size >> 5)
saved := b.size - len(b.literals) - (b.size >> 6)
if saved < 16 {
if org == nil {
return errIncompressible
@ -779,10 +779,13 @@ func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error {
}
b.output = wr.out
// Maybe even add a bigger margin.
if len(b.output)-3-bhOffset >= b.size {
// Maybe even add a bigger margin.
// Discard and encode as raw block.
b.output = b.encodeRawTo(b.output[:bhOffset], org)
b.popOffsets()
b.litEnc.Reuse = huff0.ReusePolicyNone
return errIncompressible
return nil
}
// Size is output minus block header.
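
The two hunks above tighten the decision of when a block is still worth entropy-coding: the sequences must now save the literal bytes plus 1/64 of the block size (previously 1/32), with at least 16 bytes to spare, and an incompressible block is emitted raw instead of surfacing errIncompressible to callers. A worked sketch of that arithmetic, under those assumptions:

package main

import "fmt"

// worthCompressing mirrors the heuristic above: encoding must save
// len(literals) plus blockSize/64, with a margin of at least 16 bytes.
func worthCompressing(blockSize, literalBytes int) bool {
	saved := blockSize - literalBytes - blockSize>>6
	return saved >= 16
}

func main() {
	// A full 128 KiB block (131072 bytes) holding 100000 literal bytes:
	// saved = 131072 - 100000 - 2048 = 29024, so it is worth encoding.
	fmt.Println(worthCompressing(131072, 100000)) // true
}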

View file

@ -455,12 +455,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
}
if len(next.b) > 0 {
n, err := d.current.crc.Write(next.b)
if err == nil {
if n != len(next.b) {
d.current.err = io.ErrShortWrite
}
}
d.current.crc.Write(next.b)
}
if next.err == nil && next.d != nil && next.d.hasCRC {
got := uint32(d.current.crc.Sum64())

View file

@ -34,7 +34,7 @@ type match struct {
est int32
}
const highScore = 25000
const highScore = maxMatchLen * 8
// estBits will estimate output bits from predefined tables.
func (m *match) estBits(bitsPerByte int32) {
@ -159,7 +159,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
cv := load6432(src, s)
// Relative offsets
offset1 := int32(blk.recentOffsets[0])
@ -173,7 +172,6 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
_ = addLiterals
if debugEncoder {
println("recent offsets:", blk.recentOffsets)
@ -188,7 +186,9 @@ encodeLoop:
panic("offset0 was 0")
}
const goodEnough = 100
const goodEnough = 250
cv := load6432(src, s)
nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
@ -201,11 +201,45 @@ encodeLoop:
return
}
if debugAsserts {
if offset <= 0 {
panic(offset)
}
if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
}
}
cand := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
// Try to quick reject if we already have a long match.
if m.length > 16 {
left := len(src) - int(m.s+m.length)
// If we are too close to the end, keep as is.
if left <= 0 {
return
}
checkLen := m.length - (s - m.s) - 8
if left > 2 && checkLen > 4 {
// Check 4 bytes, 4 bytes from the end of the current match.
a := load3232(src, offset+checkLen)
b := load3232(src, s+checkLen)
if a != b {
return
}
}
}
l := 4 + e.matchlen(s+4, offset+4, src)
if rep < 0 {
// Extend candidate match backwards as far as possible.
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
s--
offset--
l++
}
}
cand := match{offset: offset, s: s, length: l, rep: rep}
cand.estBits(bitsPerByte)
if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
*m = cand
@ -219,17 +253,29 @@ encodeLoop:
improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)
if canRepeat && best.length < goodEnough {
cv32 := uint32(cv >> 8)
spp := s + 1
improve(&best, spp-offset1, spp, cv32, 1)
improve(&best, spp-offset2, spp, cv32, 2)
improve(&best, spp-offset3, spp, cv32, 3)
if best.length > 0 {
cv32 = uint32(cv >> 24)
spp += 2
if s == nextEmit {
// Check repeats straight after a match.
improve(&best, s-offset2, s, uint32(cv), 1|4)
improve(&best, s-offset3, s, uint32(cv), 2|4)
if offset1 > 1 {
improve(&best, s-(offset1-1), s, uint32(cv), 3|4)
}
}
// If either no match or a non-repeat match, check at + 1
if best.rep <= 0 {
cv32 := uint32(cv >> 8)
spp := s + 1
improve(&best, spp-offset1, spp, cv32, 1)
improve(&best, spp-offset2, spp, cv32, 2)
improve(&best, spp-offset3, spp, cv32, 3)
if best.rep < 0 {
cv32 = uint32(cv >> 24)
spp += 2
improve(&best, spp-offset1, spp, cv32, 1)
improve(&best, spp-offset2, spp, cv32, 2)
improve(&best, spp-offset3, spp, cv32, 3)
}
}
}
// Load next and check...
@ -244,41 +290,44 @@ encodeLoop:
if s >= sLimit {
break encodeLoop
}
cv = load6432(src, s)
continue
}
s++
candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
cv = load6432(src, s)
cv2 := load6432(src, s+1)
cv = load6432(src, s+1)
cv2 := load6432(src, s+2)
candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
// Short at s+1
improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1)
// Long at s+1, s+2
improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
improve(&best, candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
improve(&best, candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1)
improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1)
improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1)
improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1)
if false {
// Short at s+3.
// Too often worse...
improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1)
}
// See if we can find a better match by checking where the current best ends.
// Use that offset to see if we can find a better full match.
if sAt := best.s + best.length; sAt < sLimit {
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
candidateEnd := e.longTable[nextHashL]
// Start check at a fixed offset to allow for a few mismatches.
// For this compression level 2 yields the best results.
const skipBeginning = 2
if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
// Start check at a fixed offset to allow for a few mismatches.
// For this compression level 2 yields the best results.
// We cannot do this if we have already indexed this position.
const skipBeginning = 2
if best.s > s-skipBeginning {
// See if we can find a better match by checking where the current best ends.
// Use that offset to see if we can find a better full match.
if sAt := best.s + best.length; sAt < sLimit {
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
candidateEnd := e.longTable[nextHashL]
if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 {
improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 {
improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
}
}
}
}
@ -292,51 +341,34 @@ encodeLoop:
// We have a match, we can store the forward value
if best.rep > 0 {
s = best.s
var seq seq
seq.matchLen = uint32(best.length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
start := best.s
// We end the search early, so we don't risk 0 literals
// and have to do special offset treatment.
startLimit := nextEmit + 1
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
if debugAsserts && s <= nextEmit {
panic("s <= nextEmit")
}
repIndex := best.offset
for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
repIndex--
start--
seq.matchLen++
}
addLiterals(&seq, start)
addLiterals(&seq, best.s)
// rep 0
seq.offset = uint32(best.rep)
// Repeat. If bit 4 is set, this is a non-lit repeat.
seq.offset = uint32(best.rep & 3)
if debugSequences {
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
// Index match start+1 (long) -> s - 1
index0 := s
// Index old s + 1 -> s - 1
index0 := s + 1
s = best.s + best.length
nextEmit = s
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, best.length)
}
break encodeLoop
}
// Index skipped...
off := index0 + e.cur
for index0 < s-1 {
for index0 < s {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@ -346,17 +378,19 @@ encodeLoop:
index0++
}
switch best.rep {
case 2:
case 2, 4 | 1:
offset1, offset2 = offset2, offset1
case 3:
case 3, 4 | 2:
offset1, offset2, offset3 = offset3, offset1, offset2
case 4 | 3:
offset1, offset2, offset3 = offset1-1, offset1, offset2
}
cv = load6432(src, s)
continue
}
// A 4-byte match has been found. Update recent offsets.
// We'll later see if more than 4 bytes.
index0 := s + 1
s = best.s
t := best.offset
offset1, offset2, offset3 = s-t, offset1, offset2
@ -369,22 +403,9 @@ encodeLoop:
panic("invalid offset")
}
// Extend the n-byte match as long as possible.
l := best.length
// Extend backwards
tMin := s - e.maxMatchOff
if tMin < 0 {
tMin = 0
}
for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
s--
t--
l++
}
// Write our sequence
var seq seq
l := best.length
seq.litLen = uint32(s - nextEmit)
seq.matchLen = uint32(l - zstdMinMatch)
if seq.litLen > 0 {
@ -401,10 +422,8 @@ encodeLoop:
break encodeLoop
}
// Index match start+1 (long) -> s - 1
index0 := s - l + 1
// every entry
for index0 < s-1 {
// Index old s + 1 -> s - 1
for index0 < s {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@ -413,50 +432,6 @@ encodeLoop:
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
index0++
}
cv = load6432(src, s)
if !canRepeat {
continue
}
// Check offset 2
for {
o2 := s - offset2
if load3232(src, o2) != uint32(cv) {
// Do regular search
break
}
// Store this, since we have it.
nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
// We have at least 4 byte match.
// No need to check backwards. We come straight from a match
l := 4 + e.matchlen(s+4, o2+4, src)
e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset}
seq.matchLen = uint32(l) - zstdMinMatch
seq.litLen = 0
// Since litlen is always 0, this is offset 1.
seq.offset = 1
s += l
nextEmit = s
if debugSequences {
println("sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
// Swap offset 1 and 2.
offset1, offset2 = offset2, offset1
if s >= sLimit {
// Finished
break encodeLoop
}
cv = load6432(src, s)
}
}
if int(nextEmit) < len(src) {

View file

@ -277,23 +277,9 @@ func (e *Encoder) nextBlock(final bool) error {
s.eofWritten = true
}
err := errIncompressible
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
}
switch err {
case errIncompressible:
if debugEncoder {
println("Storing incompressible block as raw")
}
blk.encodeRaw(src)
// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
case nil:
default:
s.err = err
return err
s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
if s.err != nil {
return s.err
}
_, s.err = s.w.Write(blk.output)
s.nWritten += int64(len(blk.output))
@ -343,22 +329,8 @@ func (e *Encoder) nextBlock(final bool) error {
}
s.wWg.Done()
}()
err := errIncompressible
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
}
switch err {
case errIncompressible:
if debugEncoder {
println("Storing incompressible block as raw")
}
blk.encodeRaw(src)
// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
case nil:
default:
s.writeErr = err
s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
if s.writeErr != nil {
return
}
_, s.writeErr = s.w.Write(blk.output)
@ -568,25 +540,15 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
err := errIncompressible
oldout := blk.output
if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
// Output directly to dst
blk.output = dst
err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
}
// Output directly to dst
blk.output = dst
switch err {
case errIncompressible:
if debugEncoder {
println("Storing incompressible block as raw")
}
dst = blk.encodeRawTo(dst, src)
case nil:
dst = blk.output
default:
err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
if err != nil {
panic(err)
}
dst = blk.output
blk.output = oldout
} else {
enc.Reset(e.o.dict, false)
@ -605,25 +567,11 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
if len(src) == 0 {
blk.last = true
}
err := errIncompressible
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
}
switch err {
case errIncompressible:
if debugEncoder {
println("Storing incompressible block as raw")
}
dst = blk.encodeRawTo(dst, todo)
blk.popOffsets()
case nil:
dst = append(dst, blk.output...)
default:
err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
if err != nil {
panic(err)
}
dst = append(dst, blk.output...)
blk.reset(nil)
}
}
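
With errIncompressible gone, blk.encode falls back to raw blocks internally, so any error that still escapes is treated as fatal (hence the panics above). Callers keep the same one-shot API; a minimal usage sketch:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil) // nil writer: EncodeAll-only usage
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	src := []byte("payload, compressible or not")
	dst := enc.EncodeAll(src, nil) // incompressible input becomes raw blocks
	fmt.Println(len(src), "->", len(dst))
}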

View file

@ -39,7 +39,7 @@ func (o *encoderOptions) setDefault() {
blockSize: maxCompressedBlockSize,
windowSize: 8 << 20,
level: SpeedDefault,
allLitEntropy: true,
allLitEntropy: false,
lowMem: false,
}
}
@ -238,7 +238,7 @@ func WithEncoderLevel(l EncoderLevel) EOption {
}
}
if !o.customALEntropy {
o.allLitEntropy = l > SpeedFastest
o.allLitEntropy = l > SpeedDefault
}
return nil
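
allLitEntropy is now off by default and only switches on for levels above SpeedDefault. A sketch of opting back in explicitly, assuming the exported WithAllLitEntropyCompression option name:

package main

import (
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(os.Stdout,
		zstd.WithEncoderLevel(zstd.SpeedDefault),
		zstd.WithAllLitEntropyCompression(true), // override the new default
	)
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	enc.Write([]byte("hello zstd"))
}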

View file

@ -293,13 +293,9 @@ func (d *frameDec) next(block *blockDec) error {
return nil
}
// checkCRC will check the checksum if the frame has one.
// checkCRC will check the checksum, assuming the frame has one.
// Will return ErrCRCMismatch if crc check failed, otherwise nil.
func (d *frameDec) checkCRC() error {
if !d.HasCheckSum {
return nil
}
// We can overwrite upper tmp now
buf, err := d.rawInput.readSmall(4)
if err != nil {
@ -307,10 +303,6 @@ func (d *frameDec) checkCRC() error {
return err
}
if d.o.ignoreChecksum {
return nil
}
want := binary.LittleEndian.Uint32(buf[:4])
got := uint32(d.crc.Sum64())
@ -326,17 +318,13 @@ func (d *frameDec) checkCRC() error {
return nil
}
// consumeCRC reads the checksum data if the frame has one.
// consumeCRC skips over the checksum, assuming the frame has one.
func (d *frameDec) consumeCRC() error {
if d.HasCheckSum {
_, err := d.rawInput.readSmall(4)
if err != nil {
println("CRC missing?", err)
return err
}
_, err := d.rawInput.readSmall(4)
if err != nil {
println("CRC missing?", err)
}
return nil
return err
}
// runDecoder will run the decoder for the remainder of the frame.
@ -415,15 +403,8 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if d.o.ignoreChecksum {
err = d.consumeCRC()
} else {
var n int
n, err = d.crc.Write(dst[crcStart:])
if err == nil {
if n != len(dst)-crcStart {
err = io.ErrShortWrite
} else {
err = d.checkCRC()
}
}
d.crc.Write(dst[crcStart:])
err = d.checkCRC()
}
}
}
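
checkCRC now assumes a checksum is present (callers gate on HasCheckSum), and ignoring checksums still consumes the trailing four bytes via consumeCRC. At the API surface this corresponds to the decoder option; a sketch assuming the exported IgnoreChecksum option:

package main

import (
	"bytes"
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var frame bytes.Buffer // assume this holds a zstd frame with a checksum

	// IgnoreChecksum(true) still reads the 4 checksum bytes at the end of
	// the frame (consumeCRC above) but skips the comparison.
	dec, err := zstd.NewReader(&frame, zstd.IgnoreChecksum(true))
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	io.Copy(os.Stdout, dec)
}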

View file

@ -236,9 +236,12 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
maxBlockSize = s.windowSize
}
if debugDecoder {
println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
}
for i := seqs - 1; i >= 0; i-- {
if br.overread() {
printf("reading sequence %d, exceeded available data\n", seqs-i)
printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain())
return io.ErrUnexpectedEOF
}
var ll, mo, ml int

View file

@ -5,6 +5,7 @@ package zstd
import (
"fmt"
"io"
"github.com/klauspost/compress/internal/cpuinfo"
)
@ -134,6 +135,9 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
ctx.ll, ctx.litRemain+ctx.ll)
case errorOverread:
return true, io.ErrUnexpectedEOF
case errorNotEnoughSpace:
size := ctx.outPosition + ctx.ll + ctx.ml
if debugDecoder {
@ -202,6 +206,9 @@ const errorNotEnoughLiterals = 4
// error reported when capacity of `out` is too small
const errorNotEnoughSpace = 5
// error reported when bits are overread.
const errorOverread = 6
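
The new code is raised from the assembly below when the bit counter climbs past 64, meaning the fill loops ran out of input while decoding kept shifting. A sketch of the invariant behind the added CMPQ/JA guards (names are illustrative):

// overread reports whether a bit reader has consumed more bits than the
// stream contained; the asm compares its bits-read register against
// 0x40 (64) after each fill and jumps to error_overread when above it.
func overread(bitsRead uint) bool {
	return bitsRead > 64
}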
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
@ -247,6 +254,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
litRemain: len(s.literals),
}
if debugDecoder {
println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream")
}
s.seqSize = 0
lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
var errCode int
@ -277,6 +288,8 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
case errorNotEnoughLiterals:
ll := ctx.seqs[i].ll
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
case errorOverread:
return io.ErrUnexpectedEOF
}
return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
@ -291,6 +304,9 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
if s.seqSize > maxBlockSize {
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
if debugDecoder {
println("decode: ", br.remain(), "bits remain on stream. code:", errCode)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)

View file

@ -38,7 +38,7 @@ sequenceDecs_decode_amd64_main_loop:
sequenceDecs_decode_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decode_amd64_fill_end
JLE sequenceDecs_decode_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decode_amd64_fill_end
SHLQ $0x08, DX
@ -49,6 +49,10 @@ sequenceDecs_decode_amd64_fill_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decode_amd64_fill_byte_by_byte
sequenceDecs_decode_amd64_fill_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decode_amd64_fill_end:
// Update offset
MOVQ R9, AX
@ -105,7 +109,7 @@ sequenceDecs_decode_amd64_ml_update_zero:
sequenceDecs_decode_amd64_fill_2_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decode_amd64_fill_2_end
JLE sequenceDecs_decode_amd64_fill_2_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decode_amd64_fill_2_end
SHLQ $0x08, DX
@ -116,6 +120,10 @@ sequenceDecs_decode_amd64_fill_2_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte
sequenceDecs_decode_amd64_fill_2_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decode_amd64_fill_2_end:
// Update literal length
MOVQ DI, AX
@ -320,6 +328,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: CMOV
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
@ -356,7 +369,7 @@ sequenceDecs_decode_56_amd64_main_loop:
sequenceDecs_decode_56_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decode_56_amd64_fill_end
JLE sequenceDecs_decode_56_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decode_56_amd64_fill_end
SHLQ $0x08, DX
@ -367,6 +380,10 @@ sequenceDecs_decode_56_amd64_fill_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte
sequenceDecs_decode_56_amd64_fill_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decode_56_amd64_fill_end:
// Update offset
MOVQ R9, AX
@ -613,6 +630,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
@ -649,7 +671,7 @@ sequenceDecs_decode_bmi2_main_loop:
sequenceDecs_decode_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decode_bmi2_fill_end
JLE sequenceDecs_decode_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decode_bmi2_fill_end
SHLQ $0x08, AX
@ -660,6 +682,10 @@ sequenceDecs_decode_bmi2_fill_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decode_bmi2_fill_byte_by_byte
sequenceDecs_decode_bmi2_fill_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decode_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@ -700,7 +726,7 @@ sequenceDecs_decode_bmi2_fill_end:
sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decode_bmi2_fill_2_end
JLE sequenceDecs_decode_bmi2_fill_2_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decode_bmi2_fill_2_end
SHLQ $0x08, AX
@ -711,6 +737,10 @@ sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte
sequenceDecs_decode_bmi2_fill_2_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decode_bmi2_fill_2_end:
// Update literal length
MOVQ $0x00000808, CX
@ -889,6 +919,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
@ -925,7 +960,7 @@ sequenceDecs_decode_56_bmi2_main_loop:
sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decode_56_bmi2_fill_end
JLE sequenceDecs_decode_56_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decode_56_bmi2_fill_end
SHLQ $0x08, AX
@ -936,6 +971,10 @@ sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte
sequenceDecs_decode_56_bmi2_fill_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decode_56_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@ -1140,6 +1179,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
// Requires: SSE
TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9
@ -1804,7 +1848,7 @@ sequenceDecs_decodeSync_amd64_main_loop:
sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decodeSync_amd64_fill_end
JLE sequenceDecs_decodeSync_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_amd64_fill_end
SHLQ $0x08, DX
@ -1815,6 +1859,10 @@ sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte
sequenceDecs_decodeSync_amd64_fill_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decodeSync_amd64_fill_end:
// Update offset
MOVQ R9, AX
@ -1871,7 +1919,7 @@ sequenceDecs_decodeSync_amd64_ml_update_zero:
sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decodeSync_amd64_fill_2_end
JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_amd64_fill_2_end
SHLQ $0x08, DX
@ -1882,6 +1930,10 @@ sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
sequenceDecs_decodeSync_amd64_fill_2_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decodeSync_amd64_fill_2_end:
// Update literal length
MOVQ DI, AX
@ -2291,6 +2343,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX
@ -2356,7 +2413,7 @@ sequenceDecs_decodeSync_bmi2_main_loop:
sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decodeSync_bmi2_fill_end
JLE sequenceDecs_decodeSync_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_bmi2_fill_end
SHLQ $0x08, AX
@ -2367,6 +2424,10 @@ sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
sequenceDecs_decodeSync_bmi2_fill_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decodeSync_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@ -2407,7 +2468,7 @@ sequenceDecs_decodeSync_bmi2_fill_end:
sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decodeSync_bmi2_fill_2_end
JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_bmi2_fill_2_end
SHLQ $0x08, AX
@ -2418,6 +2479,10 @@ sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
sequenceDecs_decodeSync_bmi2_fill_2_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decodeSync_bmi2_fill_2_end:
// Update literal length
MOVQ $0x00000808, CX
@ -2801,6 +2866,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX
@ -2866,7 +2936,7 @@ sequenceDecs_decodeSync_safe_amd64_main_loop:
sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decodeSync_safe_amd64_fill_end
JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_safe_amd64_fill_end
SHLQ $0x08, DX
@ -2877,6 +2947,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
sequenceDecs_decodeSync_safe_amd64_fill_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decodeSync_safe_amd64_fill_end:
// Update offset
MOVQ R9, AX
@ -2933,7 +3007,7 @@ sequenceDecs_decodeSync_safe_amd64_ml_update_zero:
sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
CMPQ SI, $0x00
JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
SHLQ $0x08, DX
@ -2944,6 +3018,10 @@ sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
ORQ AX, DX
JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread:
CMPQ BX, $0x40
JA error_overread
sequenceDecs_decodeSync_safe_amd64_fill_2_end:
// Update literal length
MOVQ DI, AX
@ -3455,6 +3533,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX
@ -3520,7 +3603,7 @@ sequenceDecs_decodeSync_safe_bmi2_main_loop:
sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
SHLQ $0x08, AX
@ -3531,6 +3614,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
sequenceDecs_decodeSync_safe_bmi2_fill_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decodeSync_safe_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@ -3571,7 +3658,7 @@ sequenceDecs_decodeSync_safe_bmi2_fill_end:
sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
CMPQ BX, $0x00
JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
SHLQ $0x08, AX
@ -3582,6 +3669,10 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
ORQ CX, AX
JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread:
CMPQ DX, $0x40
JA error_overread
sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
// Update literal length
MOVQ $0x00000808, CX
@ -4067,6 +4158,11 @@ error_not_enough_literals:
MOVQ $0x00000004, ret+24(FP)
RET
// Return with overread error
error_overread:
MOVQ $0x00000006, ret+24(FP)
RET
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX

View file

@ -128,11 +128,11 @@ func matchLen(a, b []byte) (n int) {
}
func load3232(b []byte, i int32) uint32 {
return binary.LittleEndian.Uint32(b[i:])
return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
}
func load6432(b []byte, i int32) uint64 {
return binary.LittleEndian.Uint64(b[i:])
return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:])
}
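
The three-index slice pins the capacity of the value being indexed to len(b), so no slice derived from it can reach stale bytes living between len and cap of the backing array. A small demonstration of the mechanism in general (not the loaders themselves):

package main

import "fmt"

func main() {
	backing := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	b := backing[:4] // len 4, cap 8: bytes beyond len are stale data

	// A two-index slice may legally extend into the capacity region:
	fmt.Println(b[2:6]) // [3 4 5 6] -- reads past len(b)

	// The full slice expression caps capacity at len, so the same
	// expression panics instead of exposing the stale bytes.
	defer func() { fmt.Println("recovered:", recover()) }()
	_ = b[:len(b):len(b)][2:6]
}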
type byter interface {

View file

@ -3,7 +3,7 @@ GOARCH ?= $(shell go env GOARCH)
GOOS_GOARCH := $(GOOS)_$(GOARCH)
GOOS_GOARCH_NATIVE := $(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH)
LIBZSTD_NAME := libzstd_$(GOOS_GOARCH).a
ZSTD_VERSION ?= v1.5.4
ZSTD_VERSION ?= v1.5.5
MUSL_BUILDER_IMAGE=golang:1.20.1-alpine
BUILDER_IMAGE := local/builder_musl:2.0.0-$(shell echo $(MUSL_BUILDER_IMAGE) | tr : _)-1

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -106,7 +106,7 @@ extern "C" {
/*------ Version ------*/
#define ZSTD_VERSION_MAJOR 1
#define ZSTD_VERSION_MINOR 5
#define ZSTD_VERSION_RELEASE 4
#define ZSTD_VERSION_RELEASE 5
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
/*! ZSTD_versionNumber() :
@ -148,7 +148,8 @@ ZSTDLIB_API const char* ZSTD_versionString(void);
***************************************/
/*! ZSTD_compress() :
* Compresses `src` content as a single zstd compressed frame into already allocated `dst`.
* Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
* NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have
* enough space to successfully compress the data.
* @return : compressed size written into `dst` (<= `dstCapacity),
* or an error code if it fails (which can be tested using ZSTD_isError()). */
ZSTDLIB_API size_t ZSTD_compress( void* dst, size_t dstCapacity,
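
The bound referenced in the note above has a simple closed form. A Go transcription of the documented margin, assuming the zstd v1.5.x ZSTD_COMPRESSBOUND macro and omitting its overflow guard for huge inputs:

package main

import "fmt"

// compressBound mirrors the documented margin: src + src/256, plus a
// small ramp for inputs below 128 KiB.
func compressBound(srcSize int) int {
	bound := srcSize + srcSize>>8
	if srcSize < 128<<10 {
		bound += ((128 << 10) - srcSize) >> 11
	}
	return bound
}

func main() {
	fmt.Println(compressBound(1 << 20)) // 1048576 + 4096 = 1052672
}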
@ -578,7 +579,8 @@ ZSTDLIB_API size_t ZSTD_CCtx_reset(ZSTD_CCtx* cctx, ZSTD_ResetDirective reset);
* Should cctx hold data from a previously unfinished frame, everything about it is forgotten.
* - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()
* - The function is always blocking, returns when compression is completed.
* Hint : compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
* NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have
* enough space to successfully compress the data, though it is possible it fails for other reasons.
* @return : compressed size written into `dst` (<= `dstCapacity),
* or an error code if it fails (which can be tested using ZSTD_isError()).
*/
@ -1018,9 +1020,11 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
* Advanced dictionary and prefix API (Requires v1.4.0+)
*
* This API allows dictionaries to be used with ZSTD_compress2(),
* ZSTD_compressStream2(), and ZSTD_decompressDCtx(). Dictionaries are sticky, and
* only reset with the context is reset with ZSTD_reset_parameters or
* ZSTD_reset_session_and_parameters. Prefixes are single-use.
* ZSTD_compressStream2(), and ZSTD_decompressDCtx().
* Dictionaries are sticky, they remain valid when same context is re-used,
* they only reset when the context is reset
* with ZSTD_reset_parameters or ZSTD_reset_session_and_parameters.
* In contrast, Prefixes are single-use.
******************************************************************************/
@ -1041,7 +1045,11 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
* Use experimental ZSTD_CCtx_loadDictionary_byReference() to reference content instead.
* In such a case, dictionary buffer must outlive its users.
* Note 4 : Use ZSTD_CCtx_loadDictionary_advanced()
* to precisely select how dictionary content must be interpreted. */
* to precisely select how dictionary content must be interpreted.
* Note 5 : This method does not benefit from LDM (long distance mode).
* If you want to employ LDM on some large dictionary content,
* prefer employing ZSTD_CCtx_refPrefix() described below.
*/
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
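
From Go, this dictionary machinery is driven through the vendored gozstd wrapper; a sketch assuming its NewCDict/NewDDict/CompressDict/DecompressDict helpers:

package main

import (
	"fmt"

	"github.com/valyala/gozstd"
)

func main() {
	dict := []byte("raw content dictionary shared by both sides")

	cd, err := gozstd.NewCDict(dict)
	if err != nil {
		panic(err)
	}
	defer cd.Release()
	dd, err := gozstd.NewDDict(dict)
	if err != nil {
		panic(err)
	}
	defer dd.Release()

	compressed := gozstd.CompressDict(nil, []byte("payload"), cd)
	plain, err := gozstd.DecompressDict(nil, compressed, dd)
	fmt.Println(string(plain), err)
}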
/*! ZSTD_CCtx_refCDict() : Requires v1.4.0+
@ -1064,6 +1072,7 @@ ZSTDLIB_API size_t ZSTD_CCtx_refCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict);
* Decompression will need same prefix to properly regenerate data.
* Compressing with a prefix is similar in outcome as performing a diff and compressing it,
* but performs much faster, especially during decompression (compression speed is tunable with compression level).
* This method is compatible with LDM (long distance mode).
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
* Special: Adding any prefix (including NULL) invalidates any previous prefix or dictionary
* Note 1 : Prefix buffer is referenced. It **must** outlive compression.
@ -1387,7 +1396,7 @@ typedef enum {
} ZSTD_paramSwitch_e;
/***************************************
* Frame size functions
* Frame header and size functions
***************************************/
/*! ZSTD_findDecompressedSize() :
@ -1434,6 +1443,30 @@ ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size
* or an error code (if srcSize is too small) */
ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
typedef struct {
unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
unsigned long long windowSize; /* can be very large, up to <= frameContentSize */
unsigned blockSizeMax;
ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
unsigned headerSize;
unsigned dictID;
unsigned checksumFlag;
unsigned _reserved1;
unsigned _reserved2;
} ZSTD_frameHeader;
/*! ZSTD_getFrameHeader() :
* decode Frame Header, or requires larger `srcSize`.
* @return : 0, `zfhPtr` is correctly filled,
* >0, `srcSize` is too small, value is wanted `srcSize` amount,
* or an error code, which can be tested using ZSTD_isError() */
ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */
/*! ZSTD_getFrameHeader_advanced() :
* same as ZSTD_getFrameHeader(),
* with added capability to select a format (like ZSTD_f_zstd1_magicless) */
ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
/*! ZSTD_decompressionMargin() :
* Zstd supports in-place decompression, where the input and output buffers overlap.
* In this case, the output buffer must be at least (Margin + Output_Size) bytes large,
@ -1803,12 +1836,26 @@ ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
/*! ZSTD_CCtx_setCParams() :
* Set all parameters provided within @cparams into the working @cctx.
* Set all parameters provided within @p cparams into the working @p cctx.
* Note : if modifying parameters during compression (MT mode only),
* note that changes to the .windowLog parameter will be ignored.
* @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
* @return 0 on success, or an error code (can be checked with ZSTD_isError()).
* On failure, no parameters are updated.
*/
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams);
/*! ZSTD_CCtx_setFParams() :
* Set all parameters provided within @p fparams into the working @p cctx.
* @return 0 on success, or an error code (can be checked with ZSTD_isError()).
*/
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setFParams(ZSTD_CCtx* cctx, ZSTD_frameParameters fparams);
/*! ZSTD_CCtx_setParams() :
* Set all parameters provided within @p params into the working @p cctx.
* @return 0 on success, or an error code (can be checked with ZSTD_isError()).
*/
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setParams(ZSTD_CCtx* cctx, ZSTD_parameters params);
/*! ZSTD_compress_advanced() :
* Note : this function is now DEPRECATED.
* It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
@ -2134,7 +2181,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
* This parameter can be used to set an upper bound on the blocksize
* that overrides the default ZSTD_BLOCKSIZE_MAX. It cannot be used to set upper
* bounds greater than ZSTD_BLOCKSIZE_MAX or bounds lower than 1KB (will make
* compressBound() innacurate). Only currently meant to be used for testing.
* compressBound() inaccurate). Only currently meant to be used for testing.
*
*/
#define ZSTD_c_maxBlockSize ZSTD_c_experimentalParam18
@ -2452,12 +2499,9 @@ size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
int compressionLevel);
/*! ZSTD_initCStream_advanced() :
* This function is DEPRECATED, and is approximately equivalent to:
* This function is DEPRECATED, and is equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* // Pseudocode: Set each zstd parameter and leave the rest as-is.
* for ((param, value) : params) {
* ZSTD_CCtx_setParameter(zcs, param, value);
* }
* ZSTD_CCtx_setParams(zcs, params);
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
* ZSTD_CCtx_loadDictionary(zcs, dict, dictSize);
*
@ -2486,12 +2530,9 @@ ZSTDLIB_STATIC_API
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
/*! ZSTD_initCStream_usingCDict_advanced() :
* This function is DEPRECATED, and is approximately equivalent to:
* This function is DEPRECATED, and is equivalent to:
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
* // Pseudocode: Set each zstd frame parameter and leave the rest as-is.
* for ((fParam, value) : fParams) {
* ZSTD_CCtx_setParameter(zcs, fParam, value);
* }
* ZSTD_CCtx_setFParams(zcs, fParams);
* ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);
* ZSTD_CCtx_refCDict(zcs, cdict);
*
@ -2598,214 +2639,6 @@ ZSTD_DEPRECATED("use ZSTD_DCtx_reset, see zstd.h for detailed instructions")
ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
/*********************************************************************
* Buffer-less and synchronous inner streaming functions
*
* This is an advanced API, giving full control over buffer management, for users which need direct control over memory.
* But it's also a complex one, with several restrictions, documented below.
* Prefer normal streaming API for an easier experience.
********************************************************************* */
/**
Buffer-less streaming compression (synchronous mode)
A ZSTD_CCtx object is required to track streaming operations.
Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
ZSTD_CCtx object can be re-used multiple times within successive compression operations.
Start by initializing a context.
Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.
Then, consume your input using ZSTD_compressContinue().
There are some important considerations to keep in mind when using this advanced function :
- ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
- Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.
- Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.
Worst case evaluation is provided by ZSTD_compressBound().
ZSTD_compressContinue() doesn't guarantee recover after a failed compression.
- ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
It remembers all previous contiguous blocks, plus one separated memory segment (which can itself consist of multiple contiguous blocks)
- ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.
In which case, it will "discard" the relevant memory section from its history.
Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
It's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.
Without last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
`ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
*/
/*===== Buffer-less streaming compression functions =====*/
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
ZSTD_DEPRECATED("This function will likely be removed in a future release. It is misleading and has very limited utility.")
ZSTDLIB_STATIC_API
size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
ZSTDLIB_STATIC_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTDLIB_STATIC_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/* The ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning */
ZSTD_DEPRECATED("use advanced API to access custom parameters")
ZSTDLIB_STATIC_API
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
ZSTD_DEPRECATED("use advanced API to access custom parameters")
ZSTDLIB_STATIC_API
size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
/**
Buffer-less streaming decompression (synchronous mode)
A ZSTD_DCtx object is required to track streaming operations.
Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
A ZSTD_DCtx object can be re-used multiple times.
First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
Data fragment must be large enough to ensure successful decoding.
`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
>0 : `srcSize` is too small, please provide at least result bytes on next attempt.
errorCode, which can be tested using ZSTD_isError().
It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
As a consequence, check that values remain within valid application range.
For example, do not allocate memory blindly, check that `windowSize` is within expectation.
Each application can set its own limits, depending on local restrictions.
For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
ZSTD_decompressContinue() is very sensitive to contiguity,
if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
or that previous contiguous segment is large enough to properly handle maximum back-reference distance.
There are multiple ways to guarantee this condition.
The most memory efficient way is to use a round buffer of sufficient size.
Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
which can return an error code if required value is too large for current system (in 32-bits mode).
In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.
At which point, decoding can resume from the beginning of the buffer.
Note that already decoded data stored in the buffer should be flushed before being overwritten.
There are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
Finally, if you control the compression process, you can also ignore all buffer size rules,
as long as the encoder and decoder progress in "lock-step",
aka use exactly the same buffer sizes, break contiguity at the same place, etc.
Once buffers are setup, start decompression, with ZSTD_decompressBegin().
If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.
ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
It can also be an error code, which can be tested with ZSTD_isError().
A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
Context can then be reset to start a new decompression.
Note : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().
This information is not required to properly decode a frame.
== Special case : skippable frames ==
Skippable frames allow integration of user-defined data into a flow of concatenated frames.
Skippable frames will be ignored (skipped) by decompressor.
The format of skippable frames is as follows :
a) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F
b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits
c) Frame Content - any content (User Data) of length equal to Frame Size
For skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
For skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content.
*/
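
The skippable-frame layout spelled out above is simple enough to emit by hand; a sketch grounded in rules a) through c):

package main

import (
	"encoding/binary"
	"io"
	"os"
)

// writeSkippableFrame emits user data as a skippable frame: a magic in
// 0x184D2A50..0x184D2A5F, a 4-byte little-endian content size, then the
// content itself. Compliant zstd decoders skip it entirely.
func writeSkippableFrame(w io.Writer, nibble uint32, userData []byte) error {
	var hdr [8]byte
	binary.LittleEndian.PutUint32(hdr[0:4], 0x184D2A50|(nibble&0xF))
	binary.LittleEndian.PutUint32(hdr[4:8], uint32(len(userData)))
	if _, err := w.Write(hdr[:]); err != nil {
		return err
	}
	_, err := w.Write(userData)
	return err
}

func main() {
	writeSkippableFrame(os.Stdout, 0, []byte("metadata ignored by zstd decoders"))
}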
/*===== Buffer-less streaming decompression functions =====*/
typedef enum { ZSTD_frame, ZSTD_skippableFrame } ZSTD_frameType_e;
typedef struct {
unsigned long long frameContentSize; /* if == ZSTD_CONTENTSIZE_UNKNOWN, it means this field is not available. 0 means "empty" */
unsigned long long windowSize; /* can be very large, up to <= frameContentSize */
unsigned blockSizeMax;
ZSTD_frameType_e frameType; /* if == ZSTD_skippableFrame, frameContentSize is the size of skippable content */
unsigned headerSize;
unsigned dictID;
unsigned checksumFlag;
unsigned _reserved1;
unsigned _reserved2;
} ZSTD_frameHeader;
/*! ZSTD_getFrameHeader() :
* decode Frame Header, or requires larger `srcSize`.
* @return : 0, `zfhPtr` is correctly filled,
* >0, `srcSize` is too small, value is wanted `srcSize` amount,
* or an error code, which can be tested using ZSTD_isError() */
ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize); /**< doesn't consume input */
/*! ZSTD_getFrameHeader_advanced() :
* same as ZSTD_getFrameHeader(),
* with added capability to select a format (like ZSTD_f_zstd1_magicless) */
ZSTDLIB_STATIC_API size_t ZSTD_getFrameHeader_advanced(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format);
ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/* misc */
ZSTD_DEPRECATED("This function will likely be removed in the next minor release. It is misleading and has very limited utility.")
ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
/* ============================ */
/** Block level API */
/* ============================ */
/*!
Block functions produce and decode raw zstd blocks, without frame metadata.
Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
But users must take charge of the metadata needed to regenerate the data, such as compressed and content sizes.
A few rules to respect :
- Compressing and decompressing require a context structure
+ Use ZSTD_createCCtx() and ZSTD_createDCtx()
- It is necessary to initialize the context before starting
+ compression : any ZSTD_compressBegin*() variant, including with dictionary
+ decompression : any ZSTD_decompressBegin*() variant, including with dictionary
- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ If input is larger than a block size, it's necessary to split input data into multiple blocks
+ For inputs larger than a single block, consider using regular ZSTD_compress() instead.
Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
- When a block is considered not compressible enough, the ZSTD_compressBlock() result will be 0 (zero)!
===> In that case, nothing is produced into `dst`!
+ The user __must__ test for such an outcome and deal directly with the uncompressed data
+ A block cannot be declared incompressible if the ZSTD_compressBlock() return value was != 0.
Doing so would mess up the statistics history, leading to potential data corruption.
+ ZSTD_decompressBlock() _doesn't accept uncompressed data as input_!
+ In case of multiple successive blocks, should some of them be uncompressed,
the decoder must be informed of their existence in order to follow proper history.
Use ZSTD_insertBlock() in such a case.
*/
/*===== Raw zstd block functions =====*/
ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
ZSTDLIB_STATIC_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
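As a hedged illustration of the incompressible-block rule above, the hypothetical helper below (not part of the diff) compresses one raw block and falls back to storing the input verbatim when ZSTD_compressBlock() returns 0. It assumes the caller initialized cctx with a ZSTD_compressBegin*() variant, keeps srcSize <= ZSTD_getBlockSize(cctx), and provides dstCapacity >= srcSize:

#include <string.h>   /* memcpy */

/* Returns bytes written to dst, or a zstd error code. *stored==1 signals an
 * uncompressed block that the decoder must replay via ZSTD_insertBlock(). */
static size_t compress_one_block(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity,
                                 const void* src, size_t srcSize, int* stored)
{
    size_t const cSize = ZSTD_compressBlock(cctx, dst, dstCapacity, src, srcSize);
    if (ZSTD_isError(cSize)) return cSize;   /* propagate the error code */
    if (cSize == 0) {                        /* not compressible: nothing was written to dst */
        memcpy(dst, src, srcSize);
        *stored = 1;
        return srcSize;
    }
    *stored = 0;
    return cSize;
}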
/* ********************* BLOCK-LEVEL SEQUENCE PRODUCER API *********************
*
* *** OVERVIEW ***
@ -2967,6 +2800,219 @@ ZSTD_registerSequenceProducer(
ZSTD_sequenceProducer_F* sequenceProducer
);
/*********************************************************************
* Buffer-less and synchronous inner streaming functions (DEPRECATED)
*
* This API is deprecated, and will be removed in a future version.
* It allows streaming (de)compression with user allocated buffers.
* However, it is hard to use, and not as well tested as the rest of
* our API.
*
* Please use the normal streaming API instead: ZSTD_compressStream2,
* and ZSTD_decompressStream.
* If there is functionality that you need which it doesn't provide,
* please open an issue on our GitHub.
********************************************************************* */
/**
Buffer-less streaming compression (synchronous mode)
A ZSTD_CCtx object is required to track streaming operations.
Use ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.
ZSTD_CCtx object can be re-used multiple times within successive compression operations.
Start by initializing a context.
Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.
Then, consume your input using ZSTD_compressContinue().
There are some important considerations to keep in mind when using this advanced function :
- ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.
- The interface is synchronous: input is consumed entirely and produces 1+ compressed blocks.
- The caller must ensure there is enough space in `dst` to store the compressed data under the worst-case scenario.
Worst-case evaluation is provided by ZSTD_compressBound().
ZSTD_compressContinue() doesn't guarantee recovery after a failed compression.
- ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).
It remembers all previous contiguous blocks, plus one separate memory segment (which can itself consist of multiple contiguous blocks)
- ZSTD_compressContinue() detects that prior input has been overwritten when the `src` buffer overlaps.
In that case, it will "discard" the relevant memory section from its history.
Finish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.
It's possible to use srcSize==0, in which case it will write a final empty block to end the frame.
Without the last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.
`ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again.
*/
/*===== Buffer-less streaming compression functions =====*/
ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
ZSTD_DEPRECATED("This function will likely be removed in a future release. It is misleading and has very limited utility.")
ZSTDLIB_STATIC_API
size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTD_DEPRECATED("The buffer-less API is deprecated in favor of the normal streaming API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/* The ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning */
ZSTD_DEPRECATED("use advanced API to access custom parameters")
ZSTDLIB_STATIC_API
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
ZSTD_DEPRECATED("use advanced API to access custom parameters")
ZSTDLIB_STATIC_API
size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
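To make the begin/continue/end flow above concrete, here is a hedged sketch (not part of the header) that compresses a list of chunks into one frame. Compression level 3 is an arbitrary choice, the caller is assumed to have sized dst via ZSTD_compressBound(), and the deprecated functions will trigger compiler warnings as noted:

/* Compress n chunks into a single frame. Each chunk must stay accessible and
 * unmodified while compression is in progress (window history). */
static size_t compress_bufferless(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity,
                                  const void* const* chunks, const size_t* sizes, int n)
{
    size_t pos = 0;
    size_t r = ZSTD_compressBegin(cctx, 3);                /* level 3 */
    if (ZSTD_isError(r)) return r;
    for (int i = 0; i < n; i++) {
        r = ZSTD_compressContinue(cctx, (char*)dst + pos, dstCapacity - pos,
                                  chunks[i], sizes[i]);
        if (ZSTD_isError(r)) return r;
        pos += r;                                          /* 1+ blocks emitted */
    }
    /* Write the last block(s) and optional checksum; srcSize==0 just ends the frame. */
    r = ZSTD_compressEnd(cctx, (char*)dst + pos, dstCapacity - pos, NULL, 0);
    if (ZSTD_isError(r)) return r;
    return pos + r;
}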
/**
Buffer-less streaming decompression (synchronous mode)
A ZSTD_DCtx object is required to track streaming operations.
Use ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.
A ZSTD_DCtx object can be re-used multiple times.
First typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().
The frame header is extracted from the beginning of the compressed frame, so providing only the frame's beginning is enough.
The data fragment must be large enough to ensure successful decoding.
`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
>0 : `srcSize` is too small, please provide at least result bytes on the next attempt.
errorCode, which can be tested using ZSTD_isError().
It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
such as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).
Note that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.
As a consequence, check that values remain within valid application range.
For example, do not allocate memory blindly; check that `windowSize` is within expectation.
Each application can set its own limits, depending on local restrictions.
For extended interoperability, it is recommended to support `windowSize` of at least 8 MB.
ZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.
ZSTD_decompressContinue() is very sensitive to contiguity:
if 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,
or that the previous contiguous segment is large enough to properly handle the maximum back-reference distance.
There are multiple ways to guarantee this condition.
The most memory-efficient way is to use a round buffer of sufficient size.
Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
which can return an error code if the required value is too large for the current system (in 32-bit mode).
In a round-buffer methodology, ZSTD_decompressContinue() decompresses each block next to the previous one,
up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
whose maximum size is provided in the `ZSTD_frameHeader` structure, field `blockSizeMax`.
At that point, decoding can resume from the beginning of the buffer.
Note that already-decoded data stored in the buffer should be flushed before being overwritten.
Alternatives are possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.
Finally, if you control the compression process, you can also ignore all buffer size rules,
as long as the encoder and decoder progress in "lock-step",
aka use exactly the same buffer sizes, break contiguity at the same place, etc.
Once buffers are set up, start decompression with ZSTD_decompressBegin().
If decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().
Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternately.
ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
The result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
It can be zero: it just means ZSTD_decompressContinue() has decoded some metadata item.
It can also be an error code, which can be tested with ZSTD_isError().
A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.
Context can then be reset to start a new decompression.
Note : it's possible to know whether the next input to present is a header or a block, using ZSTD_nextInputType().
This information is not required to properly decode a frame.
== Special case : skippable frames ==
Skippable frames allow integration of user-defined data into a flow of concatenated frames.
Skippable frames are ignored (skipped) by the decompressor.
The format of skippable frames is as follows :
a) Skippable frame ID - 4 Bytes, little-endian format, any value from 0x184D2A50 to 0x184D2A5F
b) Frame Size - 4 Bytes, little-endian format, unsigned 32-bit value
c) Frame Content - any content (User Data) of length equal to Frame Size
For skippable frames, ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.
For skippable frames, ZSTD_decompressContinue() always returns 0: it only skips the content.
*/
/*===== Buffer-less streaming decompression functions =====*/
ZSTDLIB_STATIC_API size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize); /**< when frame content size is not known, pass in frameContentSize == ZSTD_CONTENTSIZE_UNKNOWN */
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx);
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize);
ZSTDLIB_STATIC_API size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict);
ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
/* misc */
ZSTD_DEPRECATED("This function will likely be removed in the next minor release. It is misleading and has very limited utility.")
ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
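A hedged sketch of the decode loop described above, for the simple case where the whole frame is held in memory and `dst` is large enough for the full content, so no round-buffer management is needed; a production decoder would size a round buffer with ZSTD_decodingBufferSize_min() instead:

/* Decode one frame already held in memory. Returns bytes written to dst,
 * or a zstd error code. Assumes ZSTD_STATIC_LINKING_ONLY is defined. */
static size_t decompress_bufferless(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity,
                                    const void* src, size_t srcSize)
{
    const char* ip = (const char*)src;
    const char* const iend = ip + srcSize;
    char* op = (char*)dst;
    char* const oend = op + dstCapacity;
    size_t r = ZSTD_decompressBegin(dctx);
    if (ZSTD_isError(r)) return r;
    for (;;) {
        size_t const toRead = ZSTD_nextSrcSizeToDecompress(dctx);
        if (toRead == 0) break;                      /* frame fully decoded */
        if (toRead > (size_t)(iend - ip))            /* truncated input */
            return (size_t)-1;                       /* hypothetical sentinel; real code should surface a proper error */
        /* ZSTD_decompressContinue() requires exactly toRead bytes */
        size_t const out = ZSTD_decompressContinue(dctx, op, (size_t)(oend - op), ip, toRead);
        if (ZSTD_isError(out)) return out;
        ip += toRead;
        op += out;                                   /* out==0 means a metadata item was decoded */
    }
    return (size_t)(op - (char*)dst);
}

The loop mirrors the contract above: ZSTD_nextSrcSizeToDecompress() returning zero ends the frame, and zero-byte outputs from ZSTD_decompressContinue() are legal metadata steps.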
/* ========================================= */
/** Block level API (DEPRECATED) */
/* ========================================= */
/*!
This API is deprecated in favor of the regular compression API.
You can get the frame header down to 2 bytes, as sketched after the declarations below, by setting:
- ZSTD_c_format = ZSTD_f_zstd1_magicless
- ZSTD_c_contentSizeFlag = 0
- ZSTD_c_checksumFlag = 0
- ZSTD_c_dictIDFlag = 0
This API is not as well tested as our normal API, so we recommend not using it.
We will be removing it in a future version. If the normal API doesn't provide
the functionality you need, please open a GitHub issue.
Block functions produce and decode raw zstd blocks, without frame metadata.
Frame metadata cost is typically ~12 bytes, which can be non-negligible for very small blocks (< 100 bytes).
But users must take charge of the metadata needed to regenerate the data, such as compressed and content sizes.
A few rules to respect :
- Compressing and decompressing require a context structure
+ Use ZSTD_createCCtx() and ZSTD_createDCtx()
- It is necessary to initialize the context before starting
+ compression : any ZSTD_compressBegin*() variant, including with dictionary
+ decompression : any ZSTD_decompressBegin*() variant, including with dictionary
- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
+ If input is larger than a block size, it's necessary to split input data into multiple blocks
+ For inputs larger than a single block, consider using regular ZSTD_compress() instead.
Frame metadata is not that costly, and quickly becomes negligible as source size grows larger than a block.
- When a block is considered not compressible enough, the ZSTD_compressBlock() result will be 0 (zero)!
===> In that case, nothing is produced into `dst`!
+ The user __must__ test for such an outcome and deal directly with the uncompressed data
+ A block cannot be declared incompressible if the ZSTD_compressBlock() return value was != 0.
Doing so would mess up the statistics history, leading to potential data corruption.
+ ZSTD_decompressBlock() _doesn't accept uncompressed data as input_!
+ In case of multiple successive blocks, should some of them be uncompressed,
the decoder must be informed of their existence in order to follow proper history.
Use ZSTD_insertBlock() in such a case.
*/
/*===== Raw zstd block functions =====*/
ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_getBlockSize (const ZSTD_CCtx* cctx);
ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_compressBlock (ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
ZSTD_DEPRECATED("The block API is deprecated in favor of the normal compression API. See docs.")
ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
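For reference, a hedged sketch of the recommended replacement: the regular one-shot API configured with the parameters listed above. ZSTD_c_format / ZSTD_d_format are experimental parameters, so ZSTD_STATIC_LINKING_ONLY is still required; the buffers src, dst, out and their capacities are assumed to be in scope:

ZSTD_CCtx* const cctx = ZSTD_createCCtx();
ZSTD_CCtx_setParameter(cctx, ZSTD_c_format, ZSTD_f_zstd1_magicless);
ZSTD_CCtx_setParameter(cctx, ZSTD_c_contentSizeFlag, 0);
ZSTD_CCtx_setParameter(cctx, ZSTD_c_checksumFlag, 0);
ZSTD_CCtx_setParameter(cctx, ZSTD_c_dictIDFlag, 0);
size_t const cSize = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);   /* check ZSTD_isError(cSize) */
/* The decoder must opt in to the magicless format as well: */
ZSTD_DCtx* const dctx = ZSTD_createDCtx();
ZSTD_DCtx_setParameter(dctx, ZSTD_d_format, ZSTD_f_zstd1_magicless);
size_t const dSize = ZSTD_decompressDCtx(dctx, out, outCapacity, dst, cSize); /* check ZSTD_isError(dSize) */
ZSTD_freeCCtx(cctx); ZSTD_freeDCtx(dctx);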
#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */
#if defined (__cplusplus)

View file

@ -88,13 +88,9 @@ func (p *pipe) Write(d []byte) (n int, err error) {
p.c.L = &p.mu
}
defer p.c.Signal()
if p.err != nil {
if p.err != nil || p.breakErr != nil {
return 0, errClosedPipeWrite
}
if p.breakErr != nil {
p.unread += len(d)
return len(d), nil // discard when there is no reader
}
return p.b.Write(d)
}

View file

@ -1822,15 +1822,18 @@ func (sc *serverConn) processData(f *DataFrame) error {
}
if len(data) > 0 {
st.bodyBytes += int64(len(data))
wrote, err := st.body.Write(data)
if err != nil {
// The handler has closed the request body.
// Return the connection-level flow control for the discarded data,
// but not the stream-level flow control.
sc.sendWindowUpdate(nil, int(f.Length)-wrote)
return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
return nil
}
if wrote != len(data) {
panic("internal error: bad Writer")
}
st.bodyBytes += int64(len(data))
}
// Return any padded flow control now, since we won't

View file

@ -560,10 +560,11 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
traceGotConn(req, cc, reused)
res, err := cc.RoundTrip(req)
if err != nil && retry <= 6 {
roundTripErr := err
if req, err = shouldRetryRequest(req, err); err == nil {
// After the first retry, do exponential backoff with 10% jitter.
if retry == 0 {
t.vlogf("RoundTrip retrying after failure: %v", err)
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
}
backoff := float64(uint(1) << (uint(retry) - 1))
@ -572,7 +573,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
timer := backoffNewTimer(d)
select {
case <-timer.C:
t.vlogf("RoundTrip retrying after failure: %v", err)
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
case <-req.Context().Done():
timer.Stop()
@ -2555,6 +2556,9 @@ func (b transportResponseBody) Close() error {
cs := b.cs
cc := cs.cc
cs.bufPipe.BreakWithError(errClosedResponseBody)
cs.abortStream(errClosedResponseBody)
unread := cs.bufPipe.Len()
if unread > 0 {
cc.mu.Lock()
@ -2573,9 +2577,6 @@ func (b transportResponseBody) Close() error {
cc.wmu.Unlock()
}
cs.bufPipe.BreakWithError(errClosedResponseBody)
cs.abortStream(errClosedResponseBody)
select {
case <-cs.donec:
case <-cs.ctx.Done():

vendor/golang.org/x/sys/unix/ioctl_signed.go (generated, vendored, new file)
View file

@ -0,0 +1,70 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build aix || solaris
// +build aix solaris
package unix
import (
"unsafe"
)
// ioctl itself should not be exposed directly, but additional get/set
// functions for specific types are permissible.
// IoctlSetInt performs an ioctl operation which sets an integer value
// on fd, using the specified request number.
func IoctlSetInt(fd int, req int, value int) error {
return ioctl(fd, req, uintptr(value))
}
// IoctlSetPointerInt performs an ioctl operation which sets an
// integer value on fd, using the specified request number. The ioctl
// argument is called with a pointer to the integer value, rather than
// passing the integer value directly.
func IoctlSetPointerInt(fd int, req int, value int) error {
v := int32(value)
return ioctlPtr(fd, req, unsafe.Pointer(&v))
}
// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
//
// To change fd's window size, the req argument should be TIOCSWINSZ.
func IoctlSetWinsize(fd int, req int, value *Winsize) error {
// TODO: if we get the chance, remove the req parameter and
// hardcode TIOCSWINSZ.
return ioctlPtr(fd, req, unsafe.Pointer(value))
}
// IoctlSetTermios performs an ioctl on fd with a *Termios.
//
// The req value will usually be TCSETA or TIOCSETA.
func IoctlSetTermios(fd int, req int, value *Termios) error {
// TODO: if we get the chance, remove the req parameter.
return ioctlPtr(fd, req, unsafe.Pointer(value))
}
// IoctlGetInt performs an ioctl operation which gets an integer value
// from fd, using the specified request number.
//
// A few ioctl requests use the return value as an output parameter;
// for those, IoctlRetInt should be used instead of this function.
func IoctlGetInt(fd int, req int) (int, error) {
var value int
err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return value, err
}
func IoctlGetWinsize(fd int, req int) (*Winsize, error) {
var value Winsize
err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return &value, err
}
func IoctlGetTermios(fd int, req int) (*Termios, error) {
var value Termios
err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return &value, err
}

View file

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build aix || darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd || solaris
// +build aix darwin dragonfly freebsd hurd linux netbsd openbsd solaris
//go:build darwin || dragonfly || freebsd || hurd || linux || netbsd || openbsd
// +build darwin dragonfly freebsd hurd linux netbsd openbsd
package unix

View file

@ -17,14 +17,14 @@ import (
// IoctlSetInt performs an ioctl operation which sets an integer value
// on fd, using the specified request number.
func IoctlSetInt(fd int, req uint, value int) error {
func IoctlSetInt(fd int, req int, value int) error {
return ioctl(fd, req, uintptr(value))
}
// IoctlSetWinsize performs an ioctl on fd with a *Winsize argument.
//
// To change fd's window size, the req argument should be TIOCSWINSZ.
func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
func IoctlSetWinsize(fd int, req int, value *Winsize) error {
// TODO: if we get the chance, remove the req parameter and
// hardcode TIOCSWINSZ.
return ioctlPtr(fd, req, unsafe.Pointer(value))
@ -33,7 +33,7 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error {
// IoctlSetTermios performs an ioctl on fd with a *Termios.
//
// The req value is expected to be TCSETS, TCSETSW, or TCSETSF
func IoctlSetTermios(fd int, req uint, value *Termios) error {
func IoctlSetTermios(fd int, req int, value *Termios) error {
if (req != TCSETS) && (req != TCSETSW) && (req != TCSETSF) {
return ENOSYS
}
@ -47,13 +47,13 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error {
//
// A few ioctl requests use the return value as an output parameter;
// for those, IoctlRetInt should be used instead of this function.
func IoctlGetInt(fd int, req uint) (int, error) {
func IoctlGetInt(fd int, req int) (int, error) {
var value int
err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return value, err
}
func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
func IoctlGetWinsize(fd int, req int) (*Winsize, error) {
var value Winsize
err := ioctlPtr(fd, req, unsafe.Pointer(&value))
return &value, err
@ -62,7 +62,7 @@ func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
// IoctlGetTermios performs an ioctl on fd with a *Termios.
//
// The req value is expected to be TCGETS
func IoctlGetTermios(fd int, req uint) (*Termios, error) {
func IoctlGetTermios(fd int, req int) (*Termios, error) {
var value Termios
if req != TCGETS {
return &value, ENOSYS

View file

@ -66,6 +66,7 @@ includes_Darwin='
#include <sys/ptrace.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/un.h>
#include <sys/sockio.h>
#include <sys/sys_domain.h>
@ -521,6 +522,7 @@ ccflags="$@"
$2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ ||
$2 ~ /^NFC_.*_(MAX)?SIZE$/ ||
$2 ~ /^RAW_PAYLOAD_/ ||
$2 ~ /^[US]F_/ ||
$2 ~ /^TP_STATUS_/ ||
$2 ~ /^FALLOC_/ ||
$2 ~ /^ICMPV?6?_(FILTER|SEC)/ ||

View file

@ -408,8 +408,8 @@ func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 }
func (w WaitStatus) TrapCause() int { return -1 }
//sys ioctl(fd int, req uint, arg uintptr) (err error)
//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = ioctl
//sys ioctl(fd int, req int, arg uintptr) (err error)
//sys ioctlPtr(fd int, req int, arg unsafe.Pointer) (err error) = ioctl
// fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX
// There is no way to create a custom fcntl and to keep //sys fcntl easily,

View file

@ -8,7 +8,6 @@
package unix
//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = getrlimit64
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error) = setrlimit64
//sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek64
//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)

View file

@ -8,7 +8,6 @@
package unix
//sysnb Getrlimit(resource int, rlim *Rlimit) (err error)
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
//sys Seek(fd int, offset int64, whence int) (off int64, err error) = lseek
//sys mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) = mmap64

View file

@ -613,6 +613,7 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
//sys Rmdir(path string) (err error)
//sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
//sys Setattrlist(path string, attrlist *Attrlist, attrBuf []byte, options int) (err error)
//sys Setegid(egid int) (err error)
//sysnb Seteuid(euid int) (err error)
//sysnb Setgid(gid int) (err error)
@ -622,7 +623,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
//sys Setprivexec(flag int) (err error)
//sysnb Setregid(rgid int, egid int) (err error)
//sysnb Setreuid(ruid int, euid int) (err error)
//sysnb Setrlimit(which int, lim *Rlimit) (err error)
//sysnb Setsid() (pid int, err error)
//sysnb Settimeofday(tp *Timeval) (err error)
//sysnb Setuid(uid int) (err error)
@ -676,7 +676,6 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
// Kqueue_from_portset_np
// Kqueue_portset
// Getattrlist
// Setattrlist
// Getdirentriesattr
// Searchfs
// Delete

View file

@ -326,7 +326,6 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
//sysnb Setreuid(ruid int, euid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(which int, lim *Rlimit) (err error)
//sysnb Setsid() (pid int, err error)
//sysnb Settimeofday(tp *Timeval) (err error)
//sysnb Setuid(uid int) (err error)

View file

@ -433,7 +433,6 @@ func Dup3(oldfd, newfd, flags int) error {
//sysnb Setreuid(ruid int, euid int) (err error)
//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
//sysnb Setresuid(ruid int, euid int, suid int) (err error)
//sysnb Setrlimit(which int, lim *Rlimit) (err error)
//sysnb Setsid() (pid int, err error)
//sysnb Settimeofday(tp *Timeval) (err error)
//sysnb Setuid(uid int) (err error)

View file

@ -1873,7 +1873,6 @@ func Getpgrp() (pid int) {
//sys OpenTree(dfd int, fileName string, flags uint) (r int, err error)
//sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error)
//sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
//sysnb Prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64
//sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)
//sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6
//sys read(fd int, p []byte) (n int, err error)
@ -1887,6 +1886,15 @@ func Getpgrp() (pid int) {
//sysnb Settimeofday(tv *Timeval) (err error)
//sys Setns(fd int, nstype int) (err error)
//go:linkname syscall_prlimit syscall.prlimit
func syscall_prlimit(pid, resource int, newlimit, old *syscall.Rlimit) error
func Prlimit(pid, resource int, newlimit, old *Rlimit) error {
// Just call the syscall version, because as of Go 1.21
// it will affect starting a new process.
return syscall_prlimit(pid, resource, (*syscall.Rlimit)(newlimit), (*syscall.Rlimit)(old))
}
// PrctlRetInt performs a prctl operation specified by option and further
// optional arguments arg2 through arg5 depending on option. It returns a
// non-negative integer that is returned by the prctl syscall.

View file

@ -97,33 +97,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) {
return
}
//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT
func Setrlimit(resource int, rlim *Rlimit) (err error) {
err = Prlimit(0, resource, rlim, nil)
if err != ENOSYS {
return err
}
rl := rlimit32{}
if rlim.Cur == rlimInf64 {
rl.Cur = rlimInf32
} else if rlim.Cur < uint64(rlimInf32) {
rl.Cur = uint32(rlim.Cur)
} else {
return EINVAL
}
if rlim.Max == rlimInf64 {
rl.Max = rlimInf32
} else if rlim.Max < uint64(rlimInf32) {
rl.Max = uint32(rlim.Max)
} else {
return EINVAL
}
return setrlimit(resource, &rl)
}
func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
newoffset, errno := seek(fd, offset, whence)
if errno != 0 {

View file

@ -46,7 +46,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
//sys setfsgid(gid int) (prev int, err error)
//sys setfsuid(uid int) (prev int, err error)
//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
//sys Shutdown(fd int, how int) (err error)
//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)

View file

@ -171,33 +171,6 @@ func Getrlimit(resource int, rlim *Rlimit) (err error) {
return
}
//sysnb setrlimit(resource int, rlim *rlimit32) (err error) = SYS_SETRLIMIT
func Setrlimit(resource int, rlim *Rlimit) (err error) {
err = Prlimit(0, resource, rlim, nil)
if err != ENOSYS {
return err
}
rl := rlimit32{}
if rlim.Cur == rlimInf64 {
rl.Cur = rlimInf32
} else if rlim.Cur < uint64(rlimInf32) {
rl.Cur = uint32(rlim.Cur)
} else {
return EINVAL
}
if rlim.Max == rlimInf64 {
rl.Max = rlimInf32
} else if rlim.Max < uint64(rlimInf32) {
rl.Max = uint32(rlim.Max)
} else {
return EINVAL
}
return setrlimit(resource, &rl)
}
func (r *PtraceRegs) PC() uint64 { return uint64(r.Uregs[15]) }
func (r *PtraceRegs) SetPC(pc uint64) { r.Uregs[15] = uint32(pc) }

Some files were not shown because too many files have changed in this diff.