Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2025-01-10 15:14:09 +00:00)

Commit 5c8a01aecc: Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

80 changed files with 1475 additions and 435 deletions
.github/workflows/check-licenses.yml (vendored, 2 changes)

@@ -17,7 +17,7 @@ jobs:
 - name: Setup Go
 uses: actions/setup-go@main
 with:
-go-version: 1.20.0
+go-version: 1.21.0
 id: go
 - name: Code checkout
 uses: actions/checkout@master
.github/workflows/codeql-analysis.yml (vendored, 2 changes)

@@ -57,7 +57,7 @@ jobs:
 - name: Set up Go
 uses: actions/setup-go@v3
 with:
-go-version: 1.20.0
+go-version: 1.20.1
 check-latest: true
 cache: true
 if: ${{ matrix.language == 'go' }}
.github/workflows/main.yml (vendored, 6 changes)

@@ -32,7 +32,7 @@ jobs:
 - name: Setup Go
 uses: actions/setup-go@v3
 with:
-go-version: 1.20.0
+go-version: 1.20.1
 check-latest: true
 cache: true

@@ -56,7 +56,7 @@ jobs:
 - name: Setup Go
 uses: actions/setup-go@v3
 with:
-go-version: 1.20.0
+go-version: 1.20.1
 check-latest: true
 cache: true

@@ -81,7 +81,7 @@ jobs:
 id: go
 uses: actions/setup-go@v3
 with:
-go-version: 1.20.0
+go-version: 1.20.1
 check-latest: true
 cache: true
@@ -1161,7 +1161,7 @@ The shortlist of configuration flags is the following:
 -rule.configCheckInterval duration
 Interval for checking for changes in '-rule' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes. DEPRECATED - see '-configCheckInterval' instead
 -rule.maxResolveDuration duration
-Limits the maximum duration for automatic alert expiration, which is by default equal to 3 evaluation intervals of the parent group.
+Limits the maximum duration for automatic alert expiration, which by default is 4 times evaluationInterval of the parent group.
 -rule.resendDelay duration
 Minimum amount of time to wait before resending an alert to notifier
 -rule.templates array
@@ -63,7 +63,7 @@ absolute path to all .tpl files in root.`)
 validateTemplates = flag.Bool("rule.validateTemplates", true, "Whether to validate annotation and label templates")
 validateExpressions = flag.Bool("rule.validateExpressions", true, "Whether to validate rules expressions via MetricsQL engine")
 maxResolveDuration = flag.Duration("rule.maxResolveDuration", 0, "Limits the maximum duration for automatic alert expiration, "+
-"which is by default equal to 3 evaluation intervals of the parent group.")
+"which by default is 4 times evaluationInterval of the parent group.")
 resendDelay = flag.Duration("rule.resendDelay", 0, "Minimum amount of time to wait before resending an alert to notifier")
 ruleUpdateEntriesLimit = flag.Int("rule.updateEntriesLimit", 20, "Defines the max number of rule's state updates stored in-memory. "+
 "Rule's updates are available on rule's Details page and are used for debugging purposes. The number of stored updates can be overriden per rule via update_entries_limit param.")
@@ -40,6 +40,7 @@ var aggrFuncs = map[string]aggrFunc{
 	"outliersk": aggrFuncOutliersK,
 	"quantile": aggrFuncQuantile,
 	"quantiles": aggrFuncQuantiles,
+	"share": aggrFuncShare,
 	"stddev": newAggrFunc(aggrFuncStddev),
 	"stdvar": newAggrFunc(aggrFuncStdvar),
 	"sum": newAggrFunc(aggrFuncSum),

@@ -455,6 +456,37 @@ func aggrFuncMode(tss []*timeseries) []*timeseries {
 	return tss[:1]
 }

+func aggrFuncShare(afa *aggrFuncArg) ([]*timeseries, error) {
+	tss, err := getAggrTimeseries(afa.args)
+	if err != nil {
+		return nil, err
+	}
+	afe := func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
+		for i := range tss[0].Values {
+			// Calculate sum for non-negative points at position i.
+			var sum float64
+			for _, ts := range tss {
+				v := ts.Values[i]
+				if math.IsNaN(v) || v < 0 {
+					continue
+				}
+				sum += v
+			}
+			// Divide every non-negative value at position i by sum in order to get its share.
+			for _, ts := range tss {
+				v := ts.Values[i]
+				if math.IsNaN(v) || v < 0 {
+					ts.Values[i] = nan
+				} else {
+					ts.Values[i] = v / sum
+				}
+			}
+		}
+		return tss
+	}
+	return aggrFuncExt(afe, tss, &afa.ae.Modifier, afa.ae.Limit, true)
+}
+
 func aggrFuncZScore(afa *aggrFuncArg) ([]*timeseries, error) {
 	tss, err := getAggrTimeseries(afa.args)
 	if err != nil {

@@ -491,10 +523,6 @@ func aggrFuncZScore(afa *aggrFuncArg) ([]*timeseries, error) {
 				ts.Values[i] = (v - avg) / stddev
 			}
 		}
-		// Remove MetricGroup from all the tss.
-		for _, ts := range tss {
-			ts.MetricName.ResetMetricGroup()
-		}
 		return tss
 	}
 	return aggrFuncExt(afe, tss, &afa.ae.Modifier, afa.ae.Limit, true)
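For orientation, the invariant behind the new aggregate can be stated as a one-line MetricsQL check; this is only an illustrative sketch and the metric name is an assumption:

```metricsql
# share() splits non-negative series into fractions, so the fractions sum to 1 at every timestamp.
sum(share(rate(vm_http_requests_total[5m])))
```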
@@ -4795,6 +4795,85 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
+	t.Run(`share()`, func(t *testing.T) {
+		t.Parallel()
+		q := `sort_by_label(round(share((
+			label_set(time()/100+10, "k", "v1"),
+			label_set(time()/200+5, "k", "v2"),
+			label_set(time()/110-10, "k", "v3"),
+			label_set(time()/90-5, "k", "v4"),
+		)), 0.001), "k")`
+		r1 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{0.554, 0.521, 0.487, 0.462, 0.442, 0.426},
+			Timestamps: timestampsExpected,
+		}
+		r1.MetricName.Tags = []storage.Tag{{
+			Key:   []byte("k"),
+			Value: []byte("v1"),
+		}}
+		r2 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{0.277, 0.26, 0.243, 0.231, 0.221, 0.213},
+			Timestamps: timestampsExpected,
+		}
+		r2.MetricName.Tags = []storage.Tag{{
+			Key:   []byte("k"),
+			Value: []byte("v2"),
+		}}
+		r3 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{nan, 0.022, 0.055, 0.081, 0.1, 0.116},
+			Timestamps: timestampsExpected,
+		}
+		r3.MetricName.Tags = []storage.Tag{{
+			Key:   []byte("k"),
+			Value: []byte("v3"),
+		}}
+		r4 := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{0.169, 0.197, 0.214, 0.227, 0.237, 0.245},
+			Timestamps: timestampsExpected,
+		}
+		r4.MetricName.Tags = []storage.Tag{{
+			Key:   []byte("k"),
+			Value: []byte("v4"),
+		}}
+		resultExpected := []netstorage.Result{r1, r2, r3, r4}
+		f(q, resultExpected)
+	})
+	t.Run(`sum(share())`, func(t *testing.T) {
+		t.Parallel()
+		q := `round(sum(share((
+			label_set(time()/100+10, "k", "v1"),
+			label_set(time()/200+5, "k", "v2"),
+			label_set(time()/110-10, "k", "v3"),
+			label_set(time()/90-5, "k", "v4"),
+		))), 0.001)`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{1, 1, 1, 1, 1, 1},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
+	t.Run(`sum(share() by (k))`, func(t *testing.T) {
+		t.Parallel()
+		q := `round(sum(share((
+			label_set(time()/100+10, "k", "v1"),
+			label_set(time()/200+5, "k", "v2", "a", "b"),
+			label_set(time()/110-10, "k", "v1", "a", "b"),
+			label_set(time()/90-5, "k", "v2"),
+		)) by (k)), 0.001)`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{2, 2, 2, 2, 2, 2},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
 	t.Run(`zscore()`, func(t *testing.T) {
 		t.Parallel()
 		q := `sort_by_label(round(zscore((
@@ -6503,6 +6582,17 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r1, r2}
 		f(q, resultExpected)
 	})
+	t.Run(`range_trim_outliers()`, func(t *testing.T) {
+		t.Parallel()
+		q := `range_trim_outliers(0.5, time())`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{nan, nan, 1400, 1600, nan, nan},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
 	t.Run(`range_trim_spikes()`, func(t *testing.T) {
 		t.Parallel()
 		q := `range_trim_spikes(0.2, time())`
@@ -6514,6 +6604,28 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
+	t.Run(`range_trim_zscore()`, func(t *testing.T) {
+		t.Parallel()
+		q := `range_trim_zscore(0.9, time())`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{nan, 1200, 1400, 1600, 1800, nan},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
+	t.Run(`range_zscore()`, func(t *testing.T) {
+		t.Parallel()
+		q := `round(range_zscore(time()), 0.1)`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{-1.5, -0.9, -0.3, 0.3, 0.9, 1.5},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
 	t.Run(`range_quantile(0.5)`, func(t *testing.T) {
 		t.Parallel()
 		q := `range_quantile(0.5, time())`
@@ -7059,6 +7171,17 @@ func TestExecSuccess(t *testing.T) {
 		resultExpected := []netstorage.Result{r}
 		f(q, resultExpected)
 	})
+	t.Run(`range_mad(time())`, func(t *testing.T) {
+		t.Parallel()
+		q := `range_mad(time())`
+		r := netstorage.Result{
+			MetricName: metricNameExpected,
+			Values:     []float64{300, 300, 300, 300, 300, 300},
+			Timestamps: timestampsExpected,
+		}
+		resultExpected := []netstorage.Result{r}
+		f(q, resultExpected)
+	})
 	t.Run(`range_max(time())`, func(t *testing.T) {
 		t.Parallel()
 		q := `range_max(time())`
@@ -8317,8 +8440,12 @@ func TestExecError(t *testing.T) {
 	f(`end(1)`)
 	f(`step(1)`)
 	f(`running_sum(1, 2)`)
+	f(`range_mad()`)
 	f(`range_sum(1, 2)`)
+	f(`range_trim_outliers()`)
 	f(`range_trim_spikes()`)
+	f(`range_trim_zscore()`)
+	f(`range_zscore()`)
 	f(`range_first(1, 2)`)
 	f(`range_last(1, 2)`)
 	f(`range_linear_regression(1, 2)`)
@@ -8373,6 +8500,7 @@ func TestExecError(t *testing.T) {
 	f(`rate_over_sum()`)
 	f(`zscore_over_time()`)
 	f(`mode()`)
+	f(`share()`)
 	f(`zscore()`)
 	f(`prometheus_buckets()`)
 	f(`buckets_limit()`)
@@ -1219,18 +1219,21 @@ func rollupMAD(rfa *rollupFuncArg) float64 {
 	// There is no need in handling NaNs here, since they must be cleaned up
 	// before calling rollup funcs.
+	return mad(rfa.values)
+}
+
+func mad(values []float64) float64 {
 	// See https://en.wikipedia.org/wiki/Median_absolute_deviation
-	values := rfa.values
 	median := quantile(0.5, values)
 	a := getFloat64s()
 	ds := a.A[:0]
 	for _, v := range values {
 		ds = append(ds, math.Abs(v-median))
 	}
-	mad := quantile(0.5, ds)
+	v := quantile(0.5, ds)
 	a.A = ds
 	putFloat64s(a)
-	return mad
+	return v
 }

 func rollupHistogram(rfa *rollupFuncArg) float64 {
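The mad() helper extracted above backs the new MAD-based functions; as a usage illustration, a hedged MetricsQL sketch (the metric name is an assumption):

```metricsql
# Median absolute deviation of raw samples over the trailing 10 minutes, per series.
mad_over_time(process_resident_memory_bytes[10m])
```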
@@ -89,6 +89,7 @@ var transformFuncs = map[string]transformFunc{
 	"range_first": transformRangeFirst,
 	"range_last": transformRangeLast,
 	"range_linear_regression": transformRangeLinearRegression,
+	"range_mad": transformRangeMAD,
 	"range_max": newTransformFuncRange(runningMax),
 	"range_min": newTransformFuncRange(runningMin),
 	"range_normalize": transformRangeNormalize,

@@ -96,7 +97,10 @@ var transformFuncs = map[string]transformFunc{
 	"range_stddev": transformRangeStddev,
 	"range_stdvar": transformRangeStdvar,
 	"range_sum": newTransformFuncRange(runningSum),
+	"range_trim_outliers": transformRangeTrimOutliers,
 	"range_trim_spikes": transformRangeTrimSpikes,
+	"range_trim_zscore": transformRangeTrimZscore,
+	"range_zscore": transformRangeZscore,
 	"remove_resets": transformRemoveResets,
 	"round": transformRound,
 	"running_avg": newTransformFuncRunning(runningAvg),
@@ -1275,6 +1279,92 @@ func transformRangeNormalize(tfa *transformFuncArg) ([]*timeseries, error) {
 	return rvs, nil
 }

+func transformRangeTrimZscore(tfa *transformFuncArg) ([]*timeseries, error) {
+	args := tfa.args
+	if err := expectTransformArgsNum(args, 2); err != nil {
+		return nil, err
+	}
+	zs, err := getScalar(args[0], 0)
+	if err != nil {
+		return nil, err
+	}
+	z := float64(0)
+	if len(zs) > 0 {
+		z = math.Abs(zs[0])
+	}
+	// Trim samples with z-score above z.
+	rvs := args[1]
+	for _, ts := range rvs {
+		values := ts.Values
+		qStddev := stddev(values)
+		avg := mean(values)
+		for i, v := range values {
+			zCurr := math.Abs(v-avg) / qStddev
+			if zCurr > z {
+				values[i] = nan
+			}
+		}
+	}
+	return rvs, nil
+}
+
+func transformRangeZscore(tfa *transformFuncArg) ([]*timeseries, error) {
+	args := tfa.args
+	if err := expectTransformArgsNum(args, 1); err != nil {
+		return nil, err
+	}
+	rvs := args[0]
+	for _, ts := range rvs {
+		values := ts.Values
+		qStddev := stddev(values)
+		avg := mean(values)
+		for i, v := range values {
+			values[i] = (v - avg) / qStddev
+		}
+	}
+	return rvs, nil
+}
+
+func mean(values []float64) float64 {
+	var sum float64
+	var n int
+	for _, v := range values {
+		if !math.IsNaN(v) {
+			sum += v
+			n++
+		}
+	}
+	return sum / float64(n)
+}
+
+func transformRangeTrimOutliers(tfa *transformFuncArg) ([]*timeseries, error) {
+	args := tfa.args
+	if err := expectTransformArgsNum(args, 2); err != nil {
+		return nil, err
+	}
+	ks, err := getScalar(args[0], 0)
+	if err != nil {
+		return nil, err
+	}
+	k := float64(0)
+	if len(ks) > 0 {
+		k = ks[0]
+	}
+	// Trim samples satisfying the `abs(v - range_median(q)) > k*range_mad(q)`
+	rvs := args[1]
+	for _, ts := range rvs {
+		values := ts.Values
+		dMax := k * mad(values)
+		qMedian := quantile(0.5, values)
+		for i, v := range values {
+			if math.Abs(v-qMedian) > dMax {
+				values[i] = nan
+			}
+		}
+	}
+	return rvs, nil
+}
+
 func transformRangeTrimSpikes(tfa *transformFuncArg) ([]*timeseries, error) {
 	args := tfa.args
 	if err := expectTransformArgsNum(args, 2); err != nil {
@@ -1344,6 +1434,22 @@ func transformRangeLinearRegression(tfa *transformFuncArg) ([]*timeseries, error
 	return rvs, nil
 }

+func transformRangeMAD(tfa *transformFuncArg) ([]*timeseries, error) {
+	args := tfa.args
+	if err := expectTransformArgsNum(args, 1); err != nil {
+		return nil, err
+	}
+	rvs := args[0]
+	for _, ts := range rvs {
+		values := ts.Values
+		v := mad(values)
+		for i := range values {
+			values[i] = v
+		}
+	}
+	return rvs, nil
+}
+
 func transformRangeStddev(tfa *transformFuncArg) ([]*timeseries, error) {
 	args := tfa.args
 	if err := expectTransformArgsNum(args, 1); err != nil {
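To show how these new range handlers compose at query time, a hedged MetricsQL sketch (the metric name is an assumption):

```metricsql
# Drop points farther than 2*range_stddev(q) from range_avg(q), then rescale the remainder into z-scores.
range_zscore(range_trim_zscore(2, node_load5))
```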
@@ -1,7 +1,7 @@
 {
   "files": {
     "main.css": "./static/css/main.b9c2d13c.css",
-    "main.js": "./static/js/main.a0c293ea.js",
+    "main.js": "./static/js/main.44784d74.js",
     "static/js/27.c1ccfd29.chunk.js": "./static/js/27.c1ccfd29.chunk.js",
     "static/media/Lato-Regular.ttf": "./static/media/Lato-Regular.d714fec1633b69a9c2e9.ttf",
     "static/media/Lato-Bold.ttf": "./static/media/Lato-Bold.32360ba4b57802daa4d6.ttf",

@@ -9,6 +9,6 @@
   },
   "entrypoints": [
     "static/css/main.b9c2d13c.css",
-    "static/js/main.a0c293ea.js"
+    "static/js/main.44784d74.js"
   ]
 }
@@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=no"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.a0c293ea.js"></script><link href="./static/css/main.b9c2d13c.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=no"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.44784d74.js"></script><link href="./static/css/main.b9c2d13c.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
File diff suppressed because one or more lines are too long
@@ -1,4 +1,4 @@
-FROM node:18-alpine3.15
+FROM node:18-alpine3.17

 RUN apk update && apk upgrade
 RUN apk add --no-cache bash bash-doc bash-completion libtool autoconf automake nasm pkgconfig libpng gcc make g++ zlib-dev gawk
@@ -1,4 +1,4 @@
-FROM golang:1.19.5 as build-web-stage
+FROM golang:1.20.1 as build-web-stage
 COPY build /build

 WORKDIR /build
@@ -62,11 +62,16 @@ const ChartTooltip: FC<ChartTooltipProps> = ({
   const showQueryNum = groups.size > 1;
   const group = metrics[seriesIdx-1]?.group || 0;

-  const metric = metrics[seriesIdx-1]?.metric || {};
-
-  const fields = useMemo(() => {
+  const fullMetricName = useMemo(() => {
+    const metric = metrics[seriesIdx-1]?.metric || {};
     const labelNames = Object.keys(metric).filter(x => x != "__name__");
-    return labelNames.map(key => `${key}=${JSON.stringify(metric[key])}`);
+    const labels = labelNames.map(key => `${key}=${JSON.stringify(metric[key])}`);
+    let metricName = metric["__name__"] || "";
+    if (labels.length > 0) {
+      metricName += "{" + labels.join(",") + "}";
+    }
+    return metricName;
   }, [metrics, seriesIdx]);

   const handleClose = () => {

@@ -177,19 +182,12 @@ const ChartTooltip: FC<ChartTooltipProps> = ({
         style={{ background: color }}
       />
       <div>
-        curr:<b>{valueFormat}{unit}</b>, median:<b>{calculations.median}</b><br/>
-        min:<b>{calculations.min}</b>, max:<b>{calculations.max}</b>, last:<b>{calculations.last}</b>
+        <b>{valueFormat}{unit}</b><br/>
+        median:<b>{calculations.median}</b>, min:<b>{calculations.min}</b>, max:<b>{calculations.max}</b>
       </div>
     </div>
     <div className="vm-chart-tooltip-info">
-      {metric["__name__"]}
-      {
-        {fields.map((f, i) => (
-          <span key="{i}">
-            {f}{i +1 < fields.length && ","}
-          </span>
-        ))}
-      }
+      {fullMetricName}
     </div>
   </div>
 ), targetPortal);
@@ -4,7 +4,7 @@ DOCKER_NAMESPACE := victoriametrics

 ROOT_IMAGE ?= alpine:3.17.2
 CERTS_IMAGE := alpine:3.17.2
-GO_BUILDER_IMAGE := golang:1.20.0-alpine
+GO_BUILDER_IMAGE := golang:1.20.1-alpine
 BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
 BASE_IMAGE := local/base:1.1.4-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)
@@ -77,7 +77,7 @@ with listed targets for scraping.

 ## vmalert

-vmalert evaluates alerting rules [alerts.yml(https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/alerts.yml)
+vmalert evaluates alerting rules [alerts.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/alerts.yml)
 to track VictoriaMetrics health state. It is connected with AlertManager for firing alerts,
 and with VictoriaMetrics for executing queries and storing alert's state.

@@ -86,7 +86,7 @@ and with VictoriaMetrics for executing queries and storing alert's state.
 ## alertmanager

 AlertManager accepts notifications from `vmalert` and fires alerts.
-All notifications are blackholed according to `alertmanager.yml` config.
+All notifications are blackholed according to [alertmanager.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/alertmanager.yml) config.

 [Web interface link](http://localhost:9093/).

@@ -108,4 +108,4 @@ Grafana is provisioned by default with following entities:
 * `VictoriaMetrics - vmagent` dashboard
 * `VictoriaMetrics - vmalert` dashboard

-Remember to pick `VictoriaMetrics - cluster` datasource when viewing `VictoriaMetrics - cluster` dashboard.
+Remember to pick `VictoriaMetrics - cluster` datasource when viewing `VictoriaMetrics - cluster` dashboard.
@@ -16,12 +16,18 @@ The following tip changes can be tested by building VictoriaMetrics components f
 ## tip

 * SECURITY: upgrade base docker image (alpine) from 3.17.1 to 3.17.2. See [alpine 3.17.2 release notes](https://alpinelinux.org/posts/Alpine-3.17.2-released.html).
 * SECURITY: upgrade Go builder from Go1.20.0 to Go1.20.1. See [the list of issues addressed in Go1.20.1](https://github.com/golang/go/issues?q=milestone%3AGo1.20.1+label%3ACherryPickApproved).

 * FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): add the ability to limit the number of concurrent requests on a per-user basis via `-maxConcurrentPerUserRequests` command-line flag and via `max_concurrent_requests` config option. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3346) and [these docs](https://docs.victoriametrics.com/vmauth.html#concurrency-limiting).
 * FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): automatically retry failing `GET` requests on all [the configured backends](https://docs.victoriametrics.com/vmauth.html#load-balancing). Previously the backend error has been immediately returned to the client without retrying the request on the remaining backends.
 * FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): choose the backend with the minimum number of concurrently executed requests [among the configured backends](https://docs.victoriametrics.com/vmauth.html#load-balancing) in a round-robin manner for serving the incoming requests. This allows spreading the load among backends more evenly, while improving the response time.
 * FEATURE: [vmalert enterprise](https://docs.victoriametrics.com/vmalert.html): add ability to read alerting and recording rules from S3, GCS or S3-compatible object storage. See [these docs](https://docs.victoriametrics.com/vmalert.html#reading-rules-from-object-storage).
+* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): add [share(q)](https://docs.victoriametrics.com/MetricsQL.html#share) aggregate function.
+* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): add `mad_over_time(m[d])` function for calculating the [median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation) over raw samples on the lookbehind window `d`. See [this feature request](https://github.com/prometheus/prometheus/issues/5514).
+* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): add `range_mad(q)` function for calculating the [median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation) over points per each time series returned by `q`.
+* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): add `range_zscore(q)` function for calculating [z-score](https://en.wikipedia.org/wiki/Standard_score) over points per each time series returned from `q`.
+* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): add `range_trim_outliers(k, q)` function for dropping outliers located farther than `k*range_mad(q)` from the `range_median(q)`. This should help removing outliers during query time at [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3759).
+* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): add `range_trim_zscore(z, q)` function for dropping outliers located farther than `z*range_stddev(q)` from `range_avg(q)`. This should help removing outliers during query time at [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3759).
 * FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): show `median` instead of `avg` in graph tooltip and line legend, since `median` is more tolerant against spikes. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3706).

 * BUGFIX: prevent from possible data ingestion slowdown and query performance slowdown during [background merges of big parts](https://docs.victoriametrics.com/#storage) on systems with small number of CPU cores (1 or 2 CPU cores). The issue has been introduced in [v1.85.0](https://docs.victoriametrics.com/CHANGELOG.html#v1850) when implementing [this feature](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3337). See also [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3790).
@@ -508,7 +508,7 @@ See also [duration_over_time](#duration_over_time) and [lag](#lag).
 `mad_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates [median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation)
 over raw samples on the given lookbehind window `d` per each time series returned from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

-See also [mad](#mad).
+See also [mad](#mad) and [range_mad](#range_mad).

 #### max_over_time
@ -809,12 +809,14 @@ See also [min_over_time](#min_over_time).
 #### zscore_over_time

-`zscore_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which calculates returns [z-score](https://en.wikipedia.org/wiki/Standard_score)
+`zscore_over_time(series_selector[d])` is a [rollup function](#rollup-functions), which returns [z-score](https://en.wikipedia.org/wiki/Standard_score)
 for raw samples on the given lookbehind window `d`. It is calculated independently per each time series returned
 from the given [series_selector](https://docs.victoriametrics.com/keyConcepts.html#filtering).

 Metric names are stripped from the resulting rollups. Add [keep_metric_names](#keep_metric_names) modifier in order to keep metric names.

+See also [zscore](#zscore) and [range_trim_zscore](#range_trim_zscore).
+
 ### Transform functions
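As a usage illustration for the rollup documented above, a minimal hedged sketch (the metric name is an assumption):

```metricsql
# Z-score of raw samples over the trailing hour, per series; values far from 0 hint at anomalies.
zscore_over_time(go_goroutines[1h])
```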
@@ -1221,6 +1223,13 @@ See also [rand](#rand) and [rand_exponential](#rand_exponential).
 `range_linear_regression(q)` is a [transform function](#transform-functions), which calculates [simple linear regression](https://en.wikipedia.org/wiki/Simple_linear_regression)
 over the selected time range per each time series returned by `q`. This function is useful for capacity planning and predictions.

+#### range_mad
+
+`range_mad(q)` is a [transform function](#transform-functions), which calculates the [median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation)
+across points per each time series returned by `q`.
+
+See also [mad](#mad) and [mad_over_time](#mad_over_time).
+
 #### range_max

 `range_max(q)` is a [transform function](#transform-functions), which calculates the max value across points per each time series returned by `q`.

@@ -1238,6 +1247,8 @@ over the selected time range per each time series returned by `q`. This function
 `range_normalize(q1, ...)` is a [transform function](#transform-functions), which normalizes values for time series returned by `q1, ...` into `[0 ... 1]` range.
 This function is useful for correlating time series with distinct value ranges.

+See also [share](#share).
+
 #### range_quantile

 `range_quantile(phi, q)` is a [transform function](#transform-functions), which returns `phi`-quantile across points per each time series returned by `q`.
@@ -1257,11 +1268,32 @@ per each time series returned by `q` on the selected time range.

 `range_sum(q)` is a [transform function](#transform-functions), which calculates the sum of points per each time series returned by `q`.

+#### range_trim_outliers
+
+`range_trim_outliers(k, q)` is a [transform function](#transform-functions), which drops points located farther than `k*range_mad(q)`
+from the `range_median(q)`. E.g. it is equivalent to the following query: `q ifnot (abs(q - range_median(q)) > k*range_mad(q))`.
+
+See also [range_trim_spikes](#range_trim_spikes) and [range_trim_zscore](#range_trim_zscore).
+
 #### range_trim_spikes

 `range_trim_spikes(phi, q)` is a [transform function](#transform-functions), which drops `phi` percent of biggest spikes from time series returned by `q`.
 The `phi` must be in the range `[0..1]`, where `0` means `0%` and `1` means `100%`.

+See also [range_trim_outliers](#range_trim_outliers) and [range_trim_zscore](#range_trim_zscore).
+
+#### range_trim_zscore
+
+`range_trim_zscore(z, q)` is a [transform function](#transform-functions), which drops points located farther than `z*range_stddev(q)`
+from the `range_avg(q)`. E.g. it is equivalent to the following query: `q ifnot (abs(q - range_avg(q)) > z*range_avg(q))`.
+
+See also [range_trim_outliers](#range_trim_outliers) and [range_trim_spikes](#range_trim_spikes).
+
+#### range_zscore
+
+`range_zscore(q)` is a [transform function](#transform-functions), which calculates [z-score](https://en.wikipedia.org/wiki/Standard_score)
+for points returned by `q`, e.g. it is equivalent to the following query: `(q - range_avg(q)) / range_stddev(q)`.
+
 #### remove_resets

 `remove_resets(q)` is a [transform function](#transform-functions), which removes counter resets from time series returned by `q`.
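To make the trimming behaviour concrete, a small hedged sketch (the metric name is an assumption):

```metricsql
# Keep only points within 3 median absolute deviations of the per-series range median.
range_trim_outliers(3, temperature_celsius)
```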
@@ -1731,7 +1763,7 @@ See also [limit_offset](#limit_offset).
 `mad(q) by (group_labels)` is [aggregate function](#aggregate-functions), which returns the [Median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation)
 per each `group_labels` for all the time series returned by `q`. The aggregate is calculated individually per each group of points with the same timestamp.

-See also [outliers_mad](#outliers_mad) and [stddev](#stddev).
+See also [range_mad](#range_mad), [mad_over_time](#mad_over_time), [outliers_mad](#outliers_mad) and [stddev](#stddev).

 #### max
@@ -1788,6 +1820,24 @@ The aggregate is calculated individually per each group of points with the same

 See also [quantile](#quantile).

+#### share
+
+`share(q) by (group_labels)` is [aggregate function](#aggregate-functions), which returns shares in the range `[0..1]`
+for every non-negative points returned by `q` per each timestamp, so the sum of shares per each `group_labels` equals 1.
+
+This function is useful for normalizing [histogram bucket](https://docs.victoriametrics.com/keyConcepts.html#histogram) shares
+into `[0..1]` range:
+
+```metricsql
+share(
+  sum(
+    rate(http_request_duration_seconds_bucket[5m])
+  ) by (le, vmrange)
+)
+```
+
+See also [range_normalize](#range_normalize).
+
 #### stddev

 `stddev(q) by (group_labels)` is [aggregate function](#aggregate-functions), which calculates standard deviation per each `group_labels`
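Beyond histogram buckets, the same aggregate splits any group of non-negative series into fractions; a hedged sketch with assumed metric and label names:

```metricsql
# Fraction of the total ingestion rate handled by each instance, computed per job.
share(
  sum(rate(vm_rows_inserted_total[5m])) by (job, instance)
) by (job)
```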
@@ -1872,6 +1922,8 @@ See also [bottomk_min](#bottomk_min).
 per each `group_labels` for all the time series returned by `q`. The aggregate is calculated individually per each group of points with the same timestamp.
 This function is useful for detecting anomalies in the group of related time series.

+See also [zscore_over_time](#zscore_over_time) and [range_trim_zscore](#range_trim_zscore).
+
 ## Subqueries

 MetricsQL supports and extends PromQL subqueries. See [this article](https://valyala.medium.com/prometheus-subqueries-in-victoriametrics-9b1492b720b3) for details.
@@ -1165,7 +1165,7 @@ The shortlist of configuration flags is the following:
 -rule.configCheckInterval duration
 Interval for checking for changes in '-rule' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes. DEPRECATED - see '-configCheckInterval' instead
 -rule.maxResolveDuration duration
-Limits the maximum duration for automatic alert expiration, which is by default equal to 3 evaluation intervals of the parent group.
+Limits the maximum duration for automatic alert expiration, which by default is 4 times evaluationInterval of the parent group.
 -rule.resendDelay duration
 Minimum amount of time to wait before resending an alert to notifier
 -rule.templates array
go.mod (38 changes)

@@ -12,11 +12,11 @@ require (
 	// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
 	github.com/VictoriaMetrics/fasthttp v1.1.0
 	github.com/VictoriaMetrics/metrics v1.23.1
-	github.com/VictoriaMetrics/metricsql v0.53.0
+	github.com/VictoriaMetrics/metricsql v0.56.1
 	github.com/aws/aws-sdk-go-v2 v1.17.4
-	github.com/aws/aws-sdk-go-v2/config v1.18.12
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.51
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.30.2
+	github.com/aws/aws-sdk-go-v2/config v1.18.13
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.53
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.30.3
 	github.com/cespare/xxhash/v2 v2.2.0
 	github.com/cheggaaa/pb/v3 v3.1.0
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect

@@ -32,42 +32,42 @@ require (
 	github.com/oklog/ulid v1.3.1
 	github.com/prometheus/common v0.39.0 // indirect
 	github.com/prometheus/prometheus v0.42.0
-	github.com/urfave/cli/v2 v2.24.3
+	github.com/urfave/cli/v2 v2.24.4
 	github.com/valyala/fastjson v1.6.4
 	github.com/valyala/fastrand v1.1.0
 	github.com/valyala/fasttemplate v1.2.2
-	github.com/valyala/gozstd v1.17.0
+	github.com/valyala/gozstd v1.18.0
 	github.com/valyala/histogram v1.2.0
 	github.com/valyala/quicktemplate v1.7.0
-	golang.org/x/net v0.6.0
+	golang.org/x/net v0.7.0
 	golang.org/x/oauth2 v0.5.0
 	golang.org/x/sys v0.5.0
-	google.golang.org/api v0.109.0
+	google.golang.org/api v0.110.0
 	gopkg.in/yaml.v2 v2.4.0
 )

 require (
-	cloud.google.com/go v0.109.0 // indirect
+	cloud.google.com/go v0.110.0 // indirect
 	cloud.google.com/go/compute v1.18.0 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
-	cloud.google.com/go/iam v0.10.0 // indirect
+	cloud.google.com/go/iam v0.12.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.2 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
-	github.com/aws/aws-sdk-go v1.44.198 // indirect
+	github.com/aws/aws-sdk-go v1.44.204 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.13.12 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.13.13 // indirect
 	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.22 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.28 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.22 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/ini v1.3.29 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.19 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.20 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.23 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.22 // indirect
 	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.22 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.12.1 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.12.2 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.2 // indirect
 	github.com/aws/aws-sdk-go-v2/service/sts v1.18.3 // indirect
 	github.com/aws/smithy-go v1.13.5 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect

@@ -82,7 +82,7 @@ require (
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/go-cmp v0.5.9 // indirect
 	github.com/google/uuid v1.3.0 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.2.2 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
 	github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect

@@ -106,14 +106,14 @@ require (
 	go.opentelemetry.io/otel/metric v0.36.0 // indirect
 	go.opentelemetry.io/otel/trace v1.13.0 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
-	go.uber.org/goleak v1.2.0 // indirect
-	golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
+	go.uber.org/goleak v1.2.1 // indirect
+	golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb // indirect
 	golang.org/x/sync v0.1.0 // indirect
 	golang.org/x/text v0.7.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc // indirect
+	google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44 // indirect
 	google.golang.org/grpc v1.53.0 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
go.sum (79 changes)
@ -13,8 +13,8 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
|
|||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.109.0 h1:38CZoKGlCnPZjGdyj0ZfpoGae0/wgNfy5F0byyxg0Gk=
|
||||
cloud.google.com/go v0.109.0/go.mod h1:2sYycXt75t/CSB5R9M2wPU1tJmire7AQZTPtITcGBVE=
|
||||
cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
|
||||
cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
|
@ -27,8 +27,8 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB
|
|||
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/iam v0.10.0 h1:fpP/gByFs6US1ma53v7VxhvbJpO2Aapng6wabJ99MuI=
|
||||
cloud.google.com/go/iam v0.10.0/go.mod h1:nXAECrMt2qHpF6RZUZseteD6QyanL68reN4OXPw0UWM=
|
||||
cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE=
|
||||
cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY=
|
||||
cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
|
@ -69,8 +69,8 @@ github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR
|
|||
github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
|
||||
github.com/VictoriaMetrics/metrics v1.23.1 h1:/j8DzeJBxSpL2qSIdqnRFLvQQhbJyJbbEi22yMm7oL0=
|
||||
github.com/VictoriaMetrics/metrics v1.23.1/go.mod h1:rAr/llLpEnAdTehiNlUxKgnjcOuROSzpw0GvjpEbvFc=
|
||||
github.com/VictoriaMetrics/metricsql v0.53.0 h1:R//oEGo+G0DtmNxF111ClM2e2pjC4sG14geyZzXfbjU=
|
||||
github.com/VictoriaMetrics/metricsql v0.53.0/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
|
||||
github.com/VictoriaMetrics/metricsql v0.56.1 h1:j+W4fA/gtozsZb4PlKDU0Ma2VOgl88xla4FEf29w94g=
|
||||
github.com/VictoriaMetrics/metricsql v0.56.1/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0=
|
||||
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
|
||||
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
|
||||
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
|
||||
|
@ -87,28 +87,28 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
|
|||
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
||||
github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
|
||||
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
|
||||
github.com/aws/aws-sdk-go v1.44.198 h1:kgnvxQv4/kP5M0nbxBx0Ac0so9ndr9f8Ti0g+NmPQF8=
|
||||
github.com/aws/aws-sdk-go v1.44.198/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||
github.com/aws/aws-sdk-go v1.44.204 h1:7/tPUXfNOHB390A63t6fJIwmlwVQAkAwcbzKsU2/6OQ=
|
||||
github.com/aws/aws-sdk-go v1.44.204/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.4 h1:wyC6p9Yfq6V2y98wfDsj6OnNQa4w2BLGCLIxzNhwOGY=
|
||||
github.com/aws/aws-sdk-go-v2 v1.17.4/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
|
||||
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.12 h1:fKs/I4wccmfrNRO9rdrbMO1NgLxct6H9rNMiPdBxHWw=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.12/go.mod h1:J36fOhj1LQBr+O4hJCiT8FwVvieeoSGOtPuvhKlsNu8=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.12 h1:Cb+HhuEnV19zHRaYYVglwvdHGMJWbdsyP4oHhw04xws=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.12/go.mod h1:37HG2MBroXK3jXfxVGtbM2J48ra2+Ltu+tmwr/jO0KA=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.13 h1:v0xlYqbO6/EVlM8tUn2QEOA7btQxcgidEq2JRDBPTho=
|
||||
github.com/aws/aws-sdk-go-v2/config v1.18.13/go.mod h1:r39wGSZB7wPDW1i54JyQXUpc5KsWjh5z/3S5D9eCqDg=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.13 h1:zw1KAc1kl00NYd3ofVmFrb09qnYlSQMeh+fmlQRAihI=
|
||||
github.com/aws/aws-sdk-go-v2/credentials v1.13.13/go.mod h1:DW9nbIIF9MrIja0cBQrUpeWYQMSlNmP8fevLUyF9W38=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.22 h1:3aMfcTmoXtTZnaT86QlVaYh+BRMbvrrmZwIQ5jWqCZQ=
|
||||
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.22/go.mod h1:YGSIJyQ6D6FjKMQh16hVFSIUD54L4F7zTGePqYMYYJU=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.51 h1:iTFYCAdKzSAjGnVIUe88Hxvix0uaBqr0Rv7qJEOX5hE=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.51/go.mod h1:7Grl2gV+dx9SWrUIgwwlUvU40t7+lOSbx34XwfmsTkY=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.53 h1:h1MmqGtYgkf49DhG2BSjGukpm8c+BJ9CL+bBbdFGzlk=
|
||||
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.53/go.mod h1:mlWLxwKZNeEwE+3Pko07lSr1NvHZwUtdzmo9AiGn7QU=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.28 h1:r+XwaCLpIvCKjBIYy/HVZujQS9tsz5ohHG3ZIe0wKoE=
|
||||
github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.28/go.mod h1:3lwChorpIM/BhImY/hy+Z6jekmN92cXGPI1QJasVPYY=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.22 h1:7AwGYXDdqRQYsluvKFmWoqpcOQJ4bH634SkYf3FNj/A=
|
||||
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.22/go.mod h1:EqK7gVrIGAHyZItrD1D8B0ilgwMD1GiWAmbU4u/JHNk=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.29 h1:J4xhFd6zHhdF9jPP0FQJ6WknzBboGMBNjKOv4iTuw4A=
|
||||
github.com/aws/aws-sdk-go-v2/internal/ini v1.3.29/go.mod h1:TwuqRBGzxjQJIwH16/fOZodwXt2Zxa9/cwJC5ke4j7s=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.19 h1:FGvpyTg2LKEmMrLlpjOgkoNp9XF5CGeyAyo33LdqZW8=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.19/go.mod h1:8W88sW3PjamQpKFUQvHWWKay6ARsNvZnzU7+a4apubw=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.20 h1:YIvKIfPXQVp0EhXUV644kmQo6cQPPSRmC44A1HSoJeg=
|
||||
github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.20/go.mod h1:8W88sW3PjamQpKFUQvHWWKay6ARsNvZnzU7+a4apubw=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.23 h1:c5+bNdV8E4fIPteWx4HZSkqI07oY9exbfQ7JH7Yx4PI=
|
||||
|
@ -117,12 +117,12 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.22 h1:LjFQf8hFu
|
|||
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.22/go.mod h1:xt0Au8yPIwYXf/GYPy/vl4K3CgwhfQMYbrH7DlUUIws=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.22 h1:ISLJ2BKXe4zzyZ7mp5ewKECiw0U7KpLgS3S6OxY9Cm0=
|
||||
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.22/go.mod h1:QFVbqK54XArazLvn2wvWMRBi/jGrWii46qbr5DyPGjc=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.2 h1:5EQWIFO+Hc8E2hFcXQJ1vm6ufl/PMt/6RVRDZRju2vM=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.2/go.mod h1:SXDHd6fI2RhqB7vmAzyYQCTQnpZrIprVJvYxpzW3JAM=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.1 h1:lQKN/LNa3qqu2cDOQZybP7oL4nMGGiFqob0jZJaR8/4=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.1/go.mod h1:IgV8l3sj22nQDd5qcAGY0WenwCzCphqdbFOpfktZPrI=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.1 h1:0bLhH6DRAqox+g0LatcjGKjjhU6Eudyys6HB6DJVPj8=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.1/go.mod h1:O1YSOg3aekZibh2SngvCRRG+cRHKKlYgxf/JBF/Kr/k=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.3 h1:PVieHTwugdlHedlxLpYLQsOZAq736RScuEb/m4zhzc4=
|
||||
github.com/aws/aws-sdk-go-v2/service/s3 v1.30.3/go.mod h1:XN3YcdmnWYZ3Hrnojvo5p2mc/wfF973nkq3ClXPDMHk=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.2 h1:EN102fWY7hI5u/2FPheTrwwMHkSXfl49RYkeEnJsrCU=
|
||||
github.com/aws/aws-sdk-go-v2/service/sso v1.12.2/go.mod h1:IgV8l3sj22nQDd5qcAGY0WenwCzCphqdbFOpfktZPrI=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.2 h1:f1lmlce7r13CX1BPyPqt9oh/H+uqOWc9367lDoGGwNQ=
|
||||
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.2/go.mod h1:O1YSOg3aekZibh2SngvCRRG+cRHKKlYgxf/JBF/Kr/k=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.3 h1:s49mSnsBZEXjfGBkRfmK+nPqzT7Lt3+t2SmAKNyHblw=
|
||||
github.com/aws/aws-sdk-go-v2/service/sts v1.18.3/go.mod h1:b+psTJn33Q4qGoDaM7ZiOVVG8uVjGI6HaZ8WBHdgDgU=
|
||||
github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8=
|
||||
|
@ -257,7 +257,7 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
|
|||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
|
||||
github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
|
@ -269,8 +269,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
|
|||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.2 h1:jUqbmxlR+gGPQq/uvQviKpS1bSQecfs2t7o6F14sk9s=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.2/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
|
||||
|
@ -423,8 +423,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/urfave/cli/v2 v2.24.3 h1:7Q1w8VN8yE0MJEHP06bv89PjYsN4IHWED2s1v/Zlfm0=
|
||||
github.com/urfave/cli/v2 v2.24.3/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
|
||||
github.com/urfave/cli/v2 v2.24.4 h1:0gyJJEBYtCV87zI/x2nZCPyDxD51K6xM8SkwjHFCNEU=
|
||||
github.com/urfave/cli/v2 v2.24.4/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
|
||||
|
@ -434,8 +434,8 @@ github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G
|
|||
github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
|
||||
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
|
||||
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/valyala/gozstd v1.17.0 h1:M4Ds4MIrw+pD+s6vYtuFZ8D3iEw9htzfdytOV3C3iQU=
|
||||
github.com/valyala/gozstd v1.17.0/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
|
||||
github.com/valyala/gozstd v1.18.0 h1:f4BskcUZBnDrEJ2F+lVbNCMGOFBoGHEw71RBkCNR4IM=
|
||||
github.com/valyala/gozstd v1.18.0/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ=
|
||||
github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ=
|
||||
github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY=
|
||||
github.com/valyala/quicktemplate v1.7.0 h1:LUPTJmlVcb46OOUY3IeD9DojFpAVbsG+5WFTcjMJzCM=
|
||||
|
@ -466,8 +466,8 @@ go.opentelemetry.io/otel/trace v1.13.0 h1:CBgRZ6ntv+Amuj1jDsMhZtlAPT6gbyIRdaIzFh
|
|||
go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds=
|
||||
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
||||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
|
||||
go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo=
|
||||
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
|
||||
go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
|
@ -487,8 +487,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
|
||||
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb h1:PaBZQdo+iSDyHT053FjUCgZQ/9uqVwPOcl7KSWhKn6w=
|
||||
golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
|
@ -501,7 +501,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
|
|||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
|
@ -547,8 +546,8 @@ golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qx
|
|||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/net v0.6.0 h1:L4ZwwTvKW9gr0ZMS1yrHD9GZhIuVjOBBnaKH+SPQK0Q=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -702,8 +701,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
|
|||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/api v0.109.0 h1:sW9hgHyX497PP5//NUM7nqfV8D0iDfBApqq7sOh1XR8=
|
||||
google.golang.org/api v0.109.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
|
||||
google.golang.org/api v0.110.0 h1:l+rh0KYUooe9JGbGVx71tbFo4SMbMTXK3I3ia2QSEeU=
|
||||
google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
|
@ -741,8 +740,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
|
|||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc h1:ijGwO+0vL2hJt5gaygqP2j6PfflOBrRot0IczKbmtio=
|
||||
google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
|
||||
google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44 h1:EfLuoKW5WfkgVdDy7dTK8qSbH37AX5mj/MFh+bGPz14=
|
||||
google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
@ -1,4 +1,4 @@
|
|||
GO_VERSION ?=1.20.0
|
||||
GO_VERSION ?=1.20.1
|
||||
SNAP_BUILDER_IMAGE := local/snap-builder:2.0.0-$(shell echo $(GO_VERSION) | tr :/ __)
14
vendor/cloud.google.com/go/iam/CHANGES.md
generated
vendored
|
@ -1,5 +1,19 @@
|
|||
# Changes
|
||||
|
||||
## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.11.0...iam/v0.12.0) (2023-02-17)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **iam:** Migrate to new stubs ([a61ddcd](https://github.com/googleapis/google-cloud-go/commit/a61ddcd3041c7af4a15109dc4431f9b327c497fb))
|
||||
|
||||
## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.10.0...iam/v0.11.0) (2023-02-16)
|
||||
|
||||
|
||||
### Features
|
||||
|
||||
* **iam:** Start generating proto stubs ([970d763](https://github.com/googleapis/google-cloud-go/commit/970d763531b54b2bc75d7ff26a20b6e05150cab8))
|
||||
|
||||
## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.9.0...iam/v0.10.0) (2023-01-04)
|
||||
|
||||
|
||||
|
|
2
vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
generated
vendored
|
@ -15,7 +15,7 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.21.9
|
||||
// protoc v3.21.12
|
||||
// source: google/iam/v1/iam_policy.proto
|
||||
|
||||
package iampb
|
||||
|
|
2
vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
generated
vendored
|
@ -15,7 +15,7 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.21.9
|
||||
// protoc v3.21.12
|
||||
// source: google/iam/v1/options.proto
|
||||
|
||||
package iampb
|
||||
|
|
2
vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
generated
vendored
|
@ -15,7 +15,7 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.21.9
|
||||
// protoc v3.21.12
|
||||
// source: google/iam/v1/policy.proto
|
||||
|
||||
package iampb
|
||||
|
|
2
vendor/cloud.google.com/go/iam/iam.go
generated
vendored
|
@ -26,8 +26,8 @@ import (
|
|||
"fmt"
|
||||
"time"
|
||||
|
||||
pb "cloud.google.com/go/iam/apiv1/iampb"
|
||||
gax "github.com/googleapis/gax-go/v2"
|
||||
pb "google.golang.org/genproto/googleapis/iam/v1"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
|
|
18
vendor/cloud.google.com/go/internal/.repo-metadata-full.json
generated
vendored
|
@ -719,6 +719,15 @@
|
|||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/discoveryengine/apiv1beta": {
|
||||
"distribution_name": "cloud.google.com/go/discoveryengine/apiv1beta",
|
||||
"description": "Discovery Engine API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/discoveryengine/latest/apiv1beta",
|
||||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/dlp/apiv2": {
|
||||
"distribution_name": "cloud.google.com/go/dlp/apiv2",
|
||||
"description": "Cloud Data Loss Prevention (DLP) API",
|
||||
|
@ -1088,6 +1097,15 @@
|
|||
"release_level": "beta",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha": {
|
||||
"distribution_name": "cloud.google.com/go/maps/mapsplatformdatasets/apiv1alpha",
|
||||
"description": "Maps Platform Datasets API",
|
||||
"language": "Go",
|
||||
"client_library_type": "generated",
|
||||
"docs_url": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/mapsplatformdatasets/apiv1alpha",
|
||||
"release_level": "alpha",
|
||||
"library_type": "GAPIC_AUTO"
|
||||
},
|
||||
"cloud.google.com/go/maps/routing/apiv2": {
|
||||
"distribution_name": "cloud.google.com/go/maps/routing/apiv2",
|
||||
"description": "Routes API",
|
||||
|
|
17
vendor/cloud.google.com/go/internal/README.md
generated
vendored
|
@ -24,3 +24,20 @@ To kick off a build locally run from the repo root:
|
|||
```bash
|
||||
gcloud builds submit --project=cloud-devrel-kokoro-resources --config=internal/cloudbuild.yaml
|
||||
```
|
||||
|
||||
### Updating OwlBot SHA
|
||||
|
||||
You may want to manually update which version of the post processor will be
|
||||
used -- to do this you need to update the SHA in the OwlBot lock file. Start by
|
||||
running the following commands:
|
||||
|
||||
```bash
|
||||
docker pull gcr.io/cloud-devrel-public-resources/owlbot-go:latest
|
||||
docker inspect --format='{{index .RepoDigests 0}}' gcr.io/cloud-devrel-public-resources/owlbot-go:latest
|
||||
```
|
||||
|
||||
This will give you a SHA. You can use this value to update the value in
|
||||
`.github/.OwlBot.lock.yaml`.
|
||||
|
||||
*Note*: OwlBot will eventually open a pull request to update this value if it
|
||||
discovers a new version of the container.
|
||||
|
|
1
vendor/github.com/VictoriaMetrics/metricsql/aggr.go
generated
vendored
|
@ -29,6 +29,7 @@ var aggrFuncs = map[string]bool{
|
|||
"outliersk": true,
|
||||
"quantile": true,
|
||||
"quantiles": true,
|
||||
"share": true,
|
||||
"stddev": true,
|
||||
"stdvar": true,
|
||||
"sum": true,
|
||||
|
|
3
vendor/github.com/VictoriaMetrics/metricsql/optimizer.go
generated
vendored
|
@ -392,7 +392,8 @@ func getTransformArgIdxForOptimization(funcName string, args []Expr) int {
|
|||
return -1
|
||||
case "limit_offset":
|
||||
return 2
|
||||
case "buckets_limit", "histogram_quantile", "histogram_share", "range_quantile", "range_trim_spikes":
|
||||
case "buckets_limit", "histogram_quantile", "histogram_share", "range_quantile",
|
||||
"range_trim_outliers", "range_trim_spikes", "range_trim_zscore":
|
||||
return 1
|
||||
case "histogram_quantiles":
|
||||
return len(args) - 1
|
||||
|
|
4
vendor/github.com/VictoriaMetrics/metricsql/transform.go
generated
vendored
|
@ -74,6 +74,7 @@ var transformFuncs = map[string]bool{
|
|||
"range_first": true,
|
||||
"range_last": true,
|
||||
"range_linear_regression": true,
|
||||
"range_mad": true,
|
||||
"range_max": true,
|
||||
"range_min": true,
|
||||
"range_normalize": true,
|
||||
|
@ -81,7 +82,10 @@ var transformFuncs = map[string]bool{
|
|||
"range_stddev": true,
|
||||
"range_stdvar": true,
|
||||
"range_sum": true,
|
||||
"range_trim_outliers": true,
|
||||
"range_trim_spikes": true,
|
||||
"range_trim_zscore": true,
|
||||
"range_zscore": true,
|
||||
"remove_resets": true,
|
||||
"round": true,
|
||||
"running_avg": true,
|
||||
|
|
4
vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
generated
vendored
|
@ -1,3 +1,7 @@
|
|||
# v1.18.13 (2023-02-15)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.18.12 (2023-02-03)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
generated
vendored
|
@ -3,4 +3,4 @@
|
|||
package config
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.18.12"
|
||||
const goModuleVersion = "1.18.13"
|
||||
|
|
4
vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
generated
vendored
|
@ -1,3 +1,7 @@
|
|||
# v1.13.13 (2023-02-15)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.13.12 (2023-02-03)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
generated
vendored
|
@ -3,4 +3,4 @@
|
|||
package credentials
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.13.12"
|
||||
const goModuleVersion = "1.13.13"
|
||||
|
|
8
vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md
generated
vendored
|
@ -1,3 +1,11 @@
|
|||
# v1.11.53 (2023-02-15)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.11.52 (2023-02-14)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.11.51 (2023-02-03)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go
generated
vendored
|
@ -3,4 +3,4 @@
|
|||
package manager
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.11.51"
|
||||
const goModuleVersion = "1.11.53"
|
||||
|
|
4
vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md
generated
vendored
|
@ -1,3 +1,7 @@
|
|||
# v1.0.20 (2023-02-14)
|
||||
|
||||
* No change notes available for this release.
|
||||
|
||||
# v1.0.19 (2023-02-03)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go
generated
vendored
|
@ -3,4 +3,4 @@
|
|||
package v4a
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.0.19"
|
||||
const goModuleVersion = "1.0.20"
|
||||
|
|
4
vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md
generated
vendored
|
@ -1,3 +1,7 @@
|
|||
# v1.30.3 (2023-02-14)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
||||
# v1.30.2 (2023-02-03)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go
generated
vendored
|
@ -3,4 +3,4 @@
|
|||
package s3
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.30.2"
|
||||
const goModuleVersion = "1.30.3"
|
||||
|
|
5
vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
generated
vendored
|
@ -1,3 +1,8 @@
|
|||
# v1.12.2 (2023-02-15)
|
||||
|
||||
* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
|
||||
* **Bug Fix**: Correct error type parsing for restJson services.
|
||||
|
||||
# v1.12.1 (2023-02-03)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
|
48
vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go
generated
vendored
|
@ -86,9 +86,9 @@ func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Resp
|
|||
errorCode := "UnknownError"
|
||||
errorMessage := errorCode
|
||||
|
||||
code := response.Header.Get("X-Amzn-ErrorType")
|
||||
if len(code) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(code)
|
||||
headerCode := response.Header.Get("X-Amzn-ErrorType")
|
||||
if len(headerCode) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(headerCode)
|
||||
}
|
||||
|
||||
var buff [1024]byte
|
||||
|
@ -97,7 +97,7 @@ func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Resp
|
|||
body := io.TeeReader(errorBody, ringBuffer)
|
||||
decoder := json.NewDecoder(body)
|
||||
decoder.UseNumber()
|
||||
code, message, err := restjson.GetErrorInfo(decoder)
|
||||
jsonCode, message, err := restjson.GetErrorInfo(decoder)
|
||||
if err != nil {
|
||||
var snapshot bytes.Buffer
|
||||
io.Copy(&snapshot, ringBuffer)
|
||||
|
@ -109,8 +109,8 @@ func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Resp
|
|||
}
|
||||
|
||||
errorBody.Seek(0, io.SeekStart)
|
||||
if len(code) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(code)
|
||||
if len(headerCode) == 0 && len(jsonCode) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(jsonCode)
|
||||
}
|
||||
if len(message) != 0 {
|
||||
errorMessage = message
|
||||
|
@ -242,9 +242,9 @@ func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Respon
|
|||
errorCode := "UnknownError"
|
||||
errorMessage := errorCode
|
||||
|
||||
code := response.Header.Get("X-Amzn-ErrorType")
|
||||
if len(code) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(code)
|
||||
headerCode := response.Header.Get("X-Amzn-ErrorType")
|
||||
if len(headerCode) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(headerCode)
|
||||
}
|
||||
|
||||
var buff [1024]byte
|
||||
|
@ -253,7 +253,7 @@ func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Respon
|
|||
body := io.TeeReader(errorBody, ringBuffer)
|
||||
decoder := json.NewDecoder(body)
|
||||
decoder.UseNumber()
|
||||
code, message, err := restjson.GetErrorInfo(decoder)
|
||||
jsonCode, message, err := restjson.GetErrorInfo(decoder)
|
||||
if err != nil {
|
||||
var snapshot bytes.Buffer
|
||||
io.Copy(&snapshot, ringBuffer)
|
||||
|
@ -265,8 +265,8 @@ func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Respon
|
|||
}
|
||||
|
||||
errorBody.Seek(0, io.SeekStart)
|
||||
if len(code) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(code)
|
||||
if len(headerCode) == 0 && len(jsonCode) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(jsonCode)
|
||||
}
|
||||
if len(message) != 0 {
|
||||
errorMessage = message
|
||||
|
@ -407,9 +407,9 @@ func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response,
|
|||
errorCode := "UnknownError"
|
||||
errorMessage := errorCode
|
||||
|
||||
code := response.Header.Get("X-Amzn-ErrorType")
|
||||
if len(code) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(code)
|
||||
headerCode := response.Header.Get("X-Amzn-ErrorType")
|
||||
if len(headerCode) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(headerCode)
|
||||
}
|
||||
|
||||
var buff [1024]byte
|
||||
|
@ -418,7 +418,7 @@ func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response,
|
|||
body := io.TeeReader(errorBody, ringBuffer)
|
||||
decoder := json.NewDecoder(body)
|
||||
decoder.UseNumber()
|
||||
code, message, err := restjson.GetErrorInfo(decoder)
|
||||
jsonCode, message, err := restjson.GetErrorInfo(decoder)
|
||||
if err != nil {
|
||||
var snapshot bytes.Buffer
|
||||
io.Copy(&snapshot, ringBuffer)
|
||||
|
@ -430,8 +430,8 @@ func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response,
|
|||
}
|
||||
|
||||
errorBody.Seek(0, io.SeekStart)
|
||||
if len(code) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(code)
|
||||
if len(headerCode) == 0 && len(jsonCode) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(jsonCode)
|
||||
}
|
||||
if len(message) != 0 {
|
||||
errorMessage = message
|
||||
|
@ -550,9 +550,9 @@ func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metada
|
|||
errorCode := "UnknownError"
|
||||
errorMessage := errorCode
|
||||
|
||||
code := response.Header.Get("X-Amzn-ErrorType")
|
||||
if len(code) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(code)
|
||||
headerCode := response.Header.Get("X-Amzn-ErrorType")
|
||||
if len(headerCode) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(headerCode)
|
||||
}
|
||||
|
||||
var buff [1024]byte
|
||||
|
@ -561,7 +561,7 @@ func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metada
|
|||
body := io.TeeReader(errorBody, ringBuffer)
|
||||
decoder := json.NewDecoder(body)
|
||||
decoder.UseNumber()
|
||||
code, message, err := restjson.GetErrorInfo(decoder)
|
||||
jsonCode, message, err := restjson.GetErrorInfo(decoder)
|
||||
if err != nil {
|
||||
var snapshot bytes.Buffer
|
||||
io.Copy(&snapshot, ringBuffer)
|
||||
|
@ -573,8 +573,8 @@ func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metada
|
|||
}
|
||||
|
||||
errorBody.Seek(0, io.SeekStart)
|
||||
if len(code) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(code)
|
||||
if len(headerCode) == 0 && len(jsonCode) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(jsonCode)
|
||||
}
|
||||
if len(message) != 0 {
|
||||
errorMessage = message
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
generated
vendored
|
@ -3,4 +3,4 @@
|
|||
package sso
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.12.1"
|
||||
const goModuleVersion = "1.12.2"
|
||||
|
|
5
vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
generated
vendored
|
@ -1,3 +1,8 @@
|
|||
# v1.14.2 (2023-02-15)
|
||||
|
||||
* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
|
||||
* **Bug Fix**: Correct error type parsing for restJson services.
|
||||
|
||||
# v1.14.1 (2023-02-03)
|
||||
|
||||
* **Dependency Update**: Updated to the latest SDK module versions
|
||||
|
|
36
vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go
generated
vendored
|
@ -85,9 +85,9 @@ func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, m
|
|||
errorCode := "UnknownError"
|
||||
errorMessage := errorCode
|
||||
|
||||
code := response.Header.Get("X-Amzn-ErrorType")
|
||||
if len(code) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(code)
|
||||
headerCode := response.Header.Get("X-Amzn-ErrorType")
|
||||
if len(headerCode) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(headerCode)
|
||||
}
|
||||
|
||||
var buff [1024]byte
|
||||
|
@ -96,7 +96,7 @@ func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, m
|
|||
body := io.TeeReader(errorBody, ringBuffer)
|
||||
decoder := json.NewDecoder(body)
|
||||
decoder.UseNumber()
|
||||
code, message, err := restjson.GetErrorInfo(decoder)
|
||||
jsonCode, message, err := restjson.GetErrorInfo(decoder)
|
||||
if err != nil {
|
||||
var snapshot bytes.Buffer
|
||||
io.Copy(&snapshot, ringBuffer)
|
||||
|
@ -108,8 +108,8 @@ func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, m
|
|||
}
|
||||
|
||||
errorBody.Seek(0, io.SeekStart)
|
||||
if len(code) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(code)
|
||||
if len(headerCode) == 0 && len(jsonCode) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(jsonCode)
|
||||
}
|
||||
if len(message) != 0 {
|
||||
errorMessage = message
|
||||
|
@ -306,9 +306,9 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response
|
|||
errorCode := "UnknownError"
|
||||
errorMessage := errorCode
|
||||
|
||||
code := response.Header.Get("X-Amzn-ErrorType")
|
||||
if len(code) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(code)
|
||||
headerCode := response.Header.Get("X-Amzn-ErrorType")
|
||||
if len(headerCode) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(headerCode)
|
||||
}
|
||||
|
||||
var buff [1024]byte
|
||||
|
@ -317,7 +317,7 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response
|
|||
body := io.TeeReader(errorBody, ringBuffer)
|
||||
decoder := json.NewDecoder(body)
|
||||
decoder.UseNumber()
|
||||
code, message, err := restjson.GetErrorInfo(decoder)
|
||||
jsonCode, message, err := restjson.GetErrorInfo(decoder)
|
||||
if err != nil {
|
||||
var snapshot bytes.Buffer
|
||||
io.Copy(&snapshot, ringBuffer)
|
||||
|
@ -329,8 +329,8 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response
|
|||
}
|
||||
|
||||
errorBody.Seek(0, io.SeekStart)
|
||||
if len(code) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(code)
|
||||
if len(headerCode) == 0 && len(jsonCode) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(jsonCode)
|
||||
}
|
||||
if len(message) != 0 {
|
||||
errorMessage = message
|
||||
|
@ -519,9 +519,9 @@ func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhtt
|
|||
errorCode := "UnknownError"
|
||||
errorMessage := errorCode
|
||||
|
||||
code := response.Header.Get("X-Amzn-ErrorType")
|
||||
if len(code) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(code)
|
||||
headerCode := response.Header.Get("X-Amzn-ErrorType")
|
||||
if len(headerCode) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(headerCode)
|
||||
}
|
||||
|
||||
var buff [1024]byte
|
||||
|
@ -530,7 +530,7 @@ func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhtt
|
|||
body := io.TeeReader(errorBody, ringBuffer)
|
||||
decoder := json.NewDecoder(body)
|
||||
decoder.UseNumber()
|
||||
code, message, err := restjson.GetErrorInfo(decoder)
|
||||
jsonCode, message, err := restjson.GetErrorInfo(decoder)
|
||||
if err != nil {
|
||||
var snapshot bytes.Buffer
|
||||
io.Copy(&snapshot, ringBuffer)
|
||||
|
@ -542,8 +542,8 @@ func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhtt
|
|||
}
|
||||
|
||||
errorBody.Seek(0, io.SeekStart)
|
||||
if len(code) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(code)
|
||||
if len(headerCode) == 0 && len(jsonCode) != 0 {
|
||||
errorCode = restjson.SanitizeErrorCode(jsonCode)
|
||||
}
|
||||
if len(message) != 0 {
|
||||
errorMessage = message
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
generated
vendored
|
@ -3,4 +3,4 @@
|
|||
package ssooidc
|
||||
|
||||
// goModuleVersion is the tagged release for this module
|
||||
const goModuleVersion = "1.14.1"
|
||||
const goModuleVersion = "1.14.2"
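The sso and ssooidc changelog entries above describe the restJson error-type fix; the deserializer hunks split the former single `code` variable into `headerCode` and `jsonCode`. The following is a minimal sketch (not the SDK's actual code, and omitting its `SanitizeErrorCode` step) of the precedence rule those hunks implement: the `X-Amzn-ErrorType` header wins, and the code found in the JSON body is only used when the header is absent.

```go
package main

import "fmt"

// resolveErrorCode mirrors the patched precedence: header first, body as fallback.
func resolveErrorCode(headerCode, jsonCode string) string {
	errorCode := "UnknownError"
	if headerCode != "" {
		errorCode = headerCode
	}
	if headerCode == "" && jsonCode != "" {
		errorCode = jsonCode
	}
	return errorCode
}

func main() {
	fmt.Println(resolveErrorCode("ValidationException", "InternalFailure")) // header wins
	fmt.Println(resolveErrorCode("", "InvalidRequestException"))            // body used as fallback
}
```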
|
||||
|
|
24
vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
generated
vendored
|
@ -226,12 +226,24 @@ func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider))
|
|||
return credentials.NewCredentials(p)
|
||||
}
|
||||
|
||||
type credentialProcessResponse struct {
|
||||
Version int
|
||||
AccessKeyID string `json:"AccessKeyId"`
|
||||
// A CredentialProcessResponse is the AWS credentials format that must be
|
||||
// returned when executing an external credential_process.
|
||||
type CredentialProcessResponse struct {
|
||||
// As of this writing, the Version key must be set to 1. This might
|
||||
// increment over time as the structure evolves.
|
||||
Version int
|
||||
|
||||
// The access key ID that identifies the temporary security credentials.
|
||||
AccessKeyID string `json:"AccessKeyId"`
|
||||
|
||||
// The secret access key that can be used to sign requests.
|
||||
SecretAccessKey string
|
||||
SessionToken string
|
||||
Expiration *time.Time
|
||||
|
||||
// The token that users must pass to the service API to use the temporary credentials.
|
||||
SessionToken string
|
||||
|
||||
// The date on which the current credentials expire.
|
||||
Expiration *time.Time
|
||||
}
|
||||
|
||||
// Retrieve executes the 'credential_process' and returns the credentials.
|
||||
|
@ -242,7 +254,7 @@ func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
|
|||
}
|
||||
|
||||
// Serialize and validate response
|
||||
resp := &credentialProcessResponse{}
|
||||
resp := &CredentialProcessResponse{}
|
||||
if err = json.Unmarshal(out, resp); err != nil {
|
||||
return credentials.Value{ProviderName: ProviderName}, awserr.New(
|
||||
ErrCodeProcessProviderParse,
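The hunk above exports `CredentialProcessResponse` and documents its fields. As a hedged illustration, here is a locally declared struct mirroring those fields, parsing the kind of JSON a `credential_process` helper is expected to print; the sample payload and field values are illustrative, not taken from the commit.

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// credentialProcessOutput is a local mirror of the documented response shape.
type credentialProcessOutput struct {
	Version         int
	AccessKeyID     string `json:"AccessKeyId"`
	SecretAccessKey string
	SessionToken    string
	Expiration      *time.Time
}

func main() {
	payload := []byte(`{
		"Version": 1,
		"AccessKeyId": "AKIDEXAMPLE",
		"SecretAccessKey": "example-secret",
		"SessionToken": "example-token",
		"Expiration": "2023-02-16T00:00:00Z"
	}`)
	var out credentialProcessOutput
	if err := json.Unmarshal(payload, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}
```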
|
||||
|
|
163
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
|
@ -2605,6 +2605,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ap-southeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
|
@ -4060,6 +4063,21 @@ var awsPartition = partition{
|
|||
},
|
||||
"cases": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "ap-southeast-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "fips-us-east-1",
|
||||
}: endpoint{
|
||||
|
@ -6588,6 +6606,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ap-southeast-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "eu-west-2",
|
||||
}: endpoint{},
|
||||
|
@ -9523,6 +9544,15 @@ var awsPartition = partition{
|
|||
}: endpoint{
|
||||
Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-south-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-south-2",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-1",
|
||||
}: endpoint{},
|
||||
|
@ -9685,6 +9715,15 @@ var awsPartition = partition{
|
|||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips-ap-south-2",
|
||||
}: endpoint{
|
||||
Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "ap-south-2",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips-ap-southeast-1",
|
||||
}: endpoint{
|
||||
|
@ -12633,6 +12672,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ap-south-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-south-2",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-1",
|
||||
}: endpoint{},
|
||||
|
@ -14319,6 +14361,11 @@ var awsPartition = partition{
|
|||
}: endpoint{
|
||||
Hostname: "kendra-ranking.ap-east-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-1",
|
||||
}: endpoint{
|
||||
Hostname: "kendra-ranking.ap-northeast-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-northeast-2",
|
||||
}: endpoint{
|
||||
|
@ -14329,6 +14376,11 @@ var awsPartition = partition{
|
|||
}: endpoint{
|
||||
Hostname: "kendra-ranking.ap-northeast-3.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-south-1",
|
||||
}: endpoint{
|
||||
Hostname: "kendra-ranking.ap-south-1.api.aws",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "ap-south-2",
|
||||
}: endpoint{
|
||||
|
@ -19755,6 +19807,9 @@ var awsPartition = partition{
|
|||
endpointKey{
|
||||
Region: "ap-southeast-3",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ap-southeast-4",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "ca-central-1",
|
||||
}: endpoint{},
|
||||
|
@ -30306,6 +30361,21 @@ var awscnPartition = partition{
|
|||
},
|
||||
},
|
||||
},
|
||||
"servicequotas": service{
|
||||
Defaults: endpointDefaults{
|
||||
defaultKey{}: endpoint{
|
||||
Protocols: []string{"https"},
|
||||
},
|
||||
},
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "cn-north-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "cn-northwest-1",
|
||||
}: endpoint{},
|
||||
},
|
||||
},
|
||||
"sms": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
|
@ -32471,6 +32541,24 @@ var awsusgovPartition = partition{
|
|||
Region: "us-gov-east-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-east-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1-fips",
|
||||
}: endpoint{
|
||||
Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-east-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
}: endpoint{
|
||||
|
@ -32479,6 +32567,24 @@ var awsusgovPartition = partition{
|
|||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1-fips",
|
||||
}: endpoint{
|
||||
Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
},
|
||||
},
|
||||
"elasticfilesystem": service{
|
||||
|
@ -32916,21 +33022,43 @@ var awsusgovPartition = partition{
|
|||
"glacier": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
Region: "fips-us-gov-east-1",
|
||||
}: endpoint{
|
||||
Hostname: "glacier.us-gov-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-east-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "fips-us-gov-west-1",
|
||||
}: endpoint{
|
||||
Hostname: "glacier.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "glacier.us-gov-east-1.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
}: endpoint{
|
||||
Protocols: []string{"http", "https"},
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "glacier.us-gov-west-1.amazonaws.com",
|
||||
Protocols: []string{"http", "https"},
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -34151,20 +34279,40 @@ var awsusgovPartition = partition{
|
|||
"outposts": service{
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
Region: "fips-us-gov-east-1",
|
||||
}: endpoint{
|
||||
Hostname: "outposts.us-gov-east-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-east-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
Region: "fips-us-gov-west-1",
|
||||
}: endpoint{
|
||||
Hostname: "outposts.us-gov-west-1.amazonaws.com",
|
||||
CredentialScope: credentialScope{
|
||||
Region: "us-gov-west-1",
|
||||
},
|
||||
Deprecated: boxedTrue,
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "outposts.us-gov-east-1.amazonaws.com",
|
||||
},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
Variant: fipsVariant,
|
||||
}: endpoint{
|
||||
Hostname: "outposts.us-gov-west-1.amazonaws.com",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -34679,6 +34827,9 @@ var awsusgovPartition = partition{
|
|||
},
|
||||
},
|
||||
Endpoints: serviceEndpoints{
|
||||
endpointKey{
|
||||
Region: "us-gov-east-1",
|
||||
}: endpoint{},
|
||||
endpointKey{
|
||||
Region: "us-gov-west-1",
|
||||
}: endpoint{},
|
||||
|
|
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
|
@ -5,4 +5,4 @@ package aws
|
|||
const SDKName = "aws-sdk-go"
|
||||
|
||||
// SDKVersion is the version of this SDK
|
||||
const SDKVersion = "1.44.198"
|
||||
const SDKVersion = "1.44.204"
|
||||
|
|
16
vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go
generated
vendored
|
@ -25,8 +25,6 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/rpc"
|
||||
"os"
|
||||
"os/exec"
|
||||
|
@ -54,17 +52,6 @@ func (c *Connection) Close() error {
|
|||
return werr
|
||||
}
|
||||
|
||||
// If ECP Logging is enabled return true
|
||||
// Otherwise return false
|
||||
func enableECPLogging() bool {
|
||||
if os.Getenv("ENABLE_ENTERPRISE_CERTIFICATE_LOGS") != "" {
|
||||
return true
|
||||
}
|
||||
|
||||
log.SetOutput(ioutil.Discard)
|
||||
return false
|
||||
}
|
||||
|
||||
func init() {
|
||||
gob.Register(crypto.SHA256)
|
||||
gob.Register(&rsa.PSSOptions{})
|
||||
|
@ -97,7 +84,7 @@ func (k *Key) Close() error {
|
|||
}
|
||||
// Wait for cmd to exit and release resources. Since the process is forcefully killed, this
|
||||
// will return a non-nil error (varies by OS), which we will ignore.
|
||||
k.cmd.Wait()
|
||||
_ = k.cmd.Wait()
|
||||
// The Pipes connecting the RPC client should have been closed when the signer subprocess was killed.
|
||||
// Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault.
|
||||
if err := k.client.Close(); err.Error() != "close |0: file already closed" {
|
||||
|
@ -132,7 +119,6 @@ var ErrCredUnavailable = errors.New("Cred is unavailable")
|
|||
//
|
||||
// The config file also specifies which certificate the signer should use.
|
||||
func Cred(configFilePath string) (*Key, error) {
|
||||
enableECPLogging()
|
||||
if configFilePath == "" {
|
||||
configFilePath = util.GetDefaultConfigFilePath()
|
||||
}
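One detail in the client.go hunk above is the switch from `k.cmd.Wait()` to `_ = k.cmd.Wait()`: after the signer subprocess is forcefully killed, Wait is expected to return a non-nil error, so its result is discarded on purpose. A small standalone sketch of that pattern (the `sleep` command is just a stand-in for the signer helper):

```go
package main

import (
	"os/exec"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	time.Sleep(100 * time.Millisecond)
	_ = cmd.Process.Kill() // force the helper process to exit
	_ = cmd.Wait()         // returns a non-nil error after Kill; ignored deliberately
}
```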
|
||||
|
|
4
vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go
generated
vendored
|
@ -17,7 +17,7 @@ package util
|
|||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
|
@ -50,7 +50,7 @@ func LoadSignerBinaryPath(configFilePath string) (path string, err error) {
|
|||
return "", err
|
||||
}
|
||||
|
||||
byteValue, err := ioutil.ReadAll(jsonFile)
|
||||
byteValue, err := io.ReadAll(jsonFile)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
6
vendor/github.com/urfave/cli/v2/flag.go
generated
vendored
|
@ -4,7 +4,7 @@ import (
|
|||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
"regexp"
|
||||
"runtime"
|
||||
|
@ -178,7 +178,7 @@ func flagSet(name string, flags []Flag, spec separatorSpec) (*flag.FlagSet, erro
|
|||
return nil, err
|
||||
}
|
||||
}
|
||||
set.SetOutput(ioutil.Discard)
|
||||
set.SetOutput(io.Discard)
|
||||
return set, nil
|
||||
}
|
||||
|
||||
|
@ -384,7 +384,7 @@ func flagFromEnvOrFile(envVars []string, filePath string) (value string, fromWhe
|
|||
}
|
||||
for _, fileVar := range strings.Split(filePath, ",") {
|
||||
if fileVar != "" {
|
||||
if data, err := ioutil.ReadFile(fileVar); err == nil {
|
||||
if data, err := os.ReadFile(fileVar); err == nil {
|
||||
return string(data), fmt.Sprintf("file %q", filePath), true
|
||||
}
|
||||
}
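The flag.go and util.go hunks above are part of the broader `io/ioutil` deprecation cleanup in this vendor update. A short hedged sketch of the replacement pattern they apply (the file path below is illustrative):

```go
package main

import (
	"flag"
	"fmt"
	"io"
	"os"
)

func main() {
	set := flag.NewFlagSet("example", flag.ContinueOnError)
	set.SetOutput(io.Discard) // was: ioutil.Discard

	data, err := os.ReadFile("/etc/hostname") // was: ioutil.ReadFile
	if err == nil {
		fmt.Print(string(data))
	}
}
```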
|
||||
|
|
4
vendor/github.com/valyala/gozstd/Makefile
generated
vendored
|
@ -3,8 +3,8 @@ GOARCH ?= $(shell go env GOARCH)
|
|||
GOOS_GOARCH := $(GOOS)_$(GOARCH)
|
||||
GOOS_GOARCH_NATIVE := $(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH)
|
||||
LIBZSTD_NAME := libzstd_$(GOOS_GOARCH).a
|
||||
ZSTD_VERSION ?= v1.5.2
|
||||
MUSL_BUILDER_IMAGE=golang:1.8.1-alpine
|
||||
ZSTD_VERSION ?= v1.5.4
|
||||
MUSL_BUILDER_IMAGE=golang:1.20.1-alpine
|
||||
BUILDER_IMAGE := local/builder_musl:2.0.0-$(shell echo $(MUSL_BUILDER_IMAGE) | tr : _)-1
|
||||
|
||||
.PHONY: libzstd.a $(LIBZSTD_NAME)
|
||||
|
|
BIN
vendor/github.com/valyala/gozstd/libzstd_linux_amd64.a
generated
vendored
Binary file not shown.
BIN
vendor/github.com/valyala/gozstd/libzstd_linux_arm.a
generated
vendored
Binary file not shown.
BIN
vendor/github.com/valyala/gozstd/libzstd_linux_arm64.a
generated
vendored
Binary file not shown.
BIN
vendor/github.com/valyala/gozstd/libzstd_linux_musl_amd64.a
generated
vendored
Binary file not shown.
BIN
vendor/github.com/valyala/gozstd/libzstd_linux_musl_arm64.a
generated
vendored
Binary file not shown.
7
vendor/github.com/valyala/gozstd/reader.go
generated
vendored
|
@ -15,7 +15,12 @@ package gozstd
|
|||
// See https://github.com/golang/go/issues/24450 .
|
||||
|
||||
static size_t ZSTD_initDStream_usingDDict_wrapper(uintptr_t ds, uintptr_t dict) {
|
||||
return ZSTD_initDStream_usingDDict((ZSTD_DStream*)ds, (ZSTD_DDict*)dict);
|
||||
ZSTD_DStream *zds = (ZSTD_DStream *)ds;
|
||||
size_t rv = ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
|
||||
if (rv != 0) {
|
||||
return rv;
|
||||
}
|
||||
return ZSTD_DCtx_refDDict(zds, (ZSTD_DDict *)dict);
|
||||
}
|
||||
|
||||
static size_t ZSTD_freeDStream_wrapper(uintptr_t ds) {
|
||||
|
|
84
vendor/github.com/valyala/gozstd/zdict.h
generated
vendored
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) Yann Collet, Facebook, Inc.
|
||||
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
|
@ -8,32 +8,43 @@
|
|||
* You may select, at your option, one of the above-listed licenses.
|
||||
*/
|
||||
|
||||
#ifndef DICTBUILDER_H_001
|
||||
#define DICTBUILDER_H_001
|
||||
|
||||
#if defined (__cplusplus)
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
#ifndef ZSTD_ZDICT_H
|
||||
#define ZSTD_ZDICT_H
|
||||
|
||||
/*====== Dependencies ======*/
|
||||
#include <stddef.h> /* size_t */
|
||||
|
||||
|
||||
/* ===== ZDICTLIB_API : control library symbols visibility ===== */
|
||||
#ifndef ZDICTLIB_VISIBILITY
|
||||
# if defined(__GNUC__) && (__GNUC__ >= 4)
|
||||
# define ZDICTLIB_VISIBILITY __attribute__ ((visibility ("default")))
|
||||
#ifndef ZDICTLIB_VISIBLE
|
||||
/* Backwards compatibility with old macro name */
|
||||
# ifdef ZDICTLIB_VISIBILITY
|
||||
# define ZDICTLIB_VISIBLE ZDICTLIB_VISIBILITY
|
||||
# elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
|
||||
# define ZDICTLIB_VISIBLE __attribute__ ((visibility ("default")))
|
||||
# else
|
||||
# define ZDICTLIB_VISIBILITY
|
||||
# define ZDICTLIB_VISIBLE
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifndef ZDICTLIB_HIDDEN
|
||||
# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
|
||||
# define ZDICTLIB_HIDDEN __attribute__ ((visibility ("hidden")))
|
||||
# else
|
||||
# define ZDICTLIB_HIDDEN
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
|
||||
# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBILITY
|
||||
# define ZDICTLIB_API __declspec(dllexport) ZDICTLIB_VISIBLE
|
||||
#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
|
||||
# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
|
||||
# define ZDICTLIB_API __declspec(dllimport) ZDICTLIB_VISIBLE /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
|
||||
#else
|
||||
# define ZDICTLIB_API ZDICTLIB_VISIBILITY
|
||||
# define ZDICTLIB_API ZDICTLIB_VISIBLE
|
||||
#endif
|
||||
|
||||
/*******************************************************************************
|
||||
|
@ -110,8 +121,8 @@ extern "C" {
|
|||
* The zstd CLI defaults to a 110KB dictionary. You likely don't need a
|
||||
* dictionary larger than that. But, most use cases can get away with a
|
||||
* smaller dictionary. The advanced dictionary builders can automatically
|
||||
* shrink the dictionary for you, and select a the smallest size that
|
||||
* doesn't hurt compression ratio too much. See the `shrinkDict` parameter.
|
||||
* shrink the dictionary for you, and select the smallest size that doesn't
|
||||
* hurt compression ratio too much. See the `shrinkDict` parameter.
|
||||
* A smaller dictionary can save memory, and potentially speed up
|
||||
* compression.
|
||||
*
|
||||
|
@ -201,9 +212,9 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCap
|
|||
const size_t* samplesSizes, unsigned nbSamples);
|
||||
|
||||
typedef struct {
|
||||
int compressionLevel; /*< optimize for a specific zstd compression level; 0 means default */
|
||||
unsigned notificationLevel; /*< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
|
||||
unsigned dictID; /*< force dictID value; 0 means auto mode (32-bits random value)
|
||||
int compressionLevel; /**< optimize for a specific zstd compression level; 0 means default */
|
||||
unsigned notificationLevel; /**< Write log to stderr; 0 = none (default); 1 = errors; 2 = progression; 3 = details; 4 = debug; */
|
||||
unsigned dictID; /**< force dictID value; 0 means auto mode (32-bits random value)
|
||||
* NOTE: The zstd format reserves some dictionary IDs for future use.
|
||||
* You may use them in private settings, but be warned that they
|
||||
* may be used by zstd in a public dictionary registry in the future.
|
||||
|
@ -260,9 +271,21 @@ ZDICTLIB_API size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictS
|
|||
ZDICTLIB_API unsigned ZDICT_isError(size_t errorCode);
|
||||
ZDICTLIB_API const char* ZDICT_getErrorName(size_t errorCode);
|
||||
|
||||
#endif /* ZSTD_ZDICT_H */
|
||||
|
||||
#if defined(ZDICT_STATIC_LINKING_ONLY) && !defined(ZSTD_ZDICT_H_STATIC)
|
||||
#define ZSTD_ZDICT_H_STATIC
|
||||
|
||||
#ifdef ZDICT_STATIC_LINKING_ONLY
|
||||
/* This can be overridden externally to hide static symbols. */
|
||||
#ifndef ZDICTLIB_STATIC_API
|
||||
# if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
|
||||
# define ZDICTLIB_STATIC_API __declspec(dllexport) ZDICTLIB_VISIBLE
|
||||
# elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
|
||||
# define ZDICTLIB_STATIC_API __declspec(dllimport) ZDICTLIB_VISIBLE
|
||||
# else
|
||||
# define ZDICTLIB_STATIC_API ZDICTLIB_VISIBLE
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/* ====================================================================================
|
||||
* The definitions in this section are considered experimental.
|
||||
|
@ -318,7 +341,7 @@ typedef struct {
|
|||
* In general, it's recommended to provide a few thousands samples, though this can vary a lot.
|
||||
* It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
|
||||
*/
|
||||
ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
|
||||
ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_cover(
|
||||
void *dictBuffer, size_t dictBufferCapacity,
|
||||
const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
|
||||
ZDICT_cover_params_t parameters);
|
||||
|
@ -340,7 +363,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_cover(
|
|||
* See ZDICT_trainFromBuffer() for details on failure modes.
|
||||
* Note: ZDICT_optimizeTrainFromBuffer_cover() requires about 8 bytes of memory for each input byte and additionally another 5 bytes of memory for each byte of memory for each thread.
|
||||
*/
|
||||
ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
|
||||
ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_cover(
|
||||
void* dictBuffer, size_t dictBufferCapacity,
|
||||
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
|
||||
ZDICT_cover_params_t* parameters);
|
||||
|
@ -361,7 +384,7 @@ ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_cover(
|
|||
* In general, it's recommended to provide a few thousands samples, though this can vary a lot.
|
||||
* It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
|
||||
*/
|
||||
ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer,
|
||||
ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer,
|
||||
size_t dictBufferCapacity, const void *samplesBuffer,
|
||||
const size_t *samplesSizes, unsigned nbSamples,
|
||||
ZDICT_fastCover_params_t parameters);
|
||||
|
@ -384,7 +407,7 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_fastCover(void *dictBuffer,
|
|||
* See ZDICT_trainFromBuffer() for details on failure modes.
|
||||
* Note: ZDICT_optimizeTrainFromBuffer_fastCover() requires about 6 * 2^f bytes of memory for each thread.
|
||||
*/
|
||||
ZDICTLIB_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer,
|
||||
ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_fastCover(void* dictBuffer,
|
||||
size_t dictBufferCapacity, const void* samplesBuffer,
|
||||
const size_t* samplesSizes, unsigned nbSamples,
|
||||
ZDICT_fastCover_params_t* parameters);
|
||||
|
@ -409,7 +432,7 @@ typedef struct {
|
|||
* It's recommended that total size of all samples be about ~x100 times the target size of dictionary.
|
||||
* Note: ZDICT_trainFromBuffer_legacy() will send notifications into stderr if instructed to, using notificationLevel>0.
|
||||
*/
|
||||
ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy(
|
||||
ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_legacy(
|
||||
void* dictBuffer, size_t dictBufferCapacity,
|
||||
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,
|
||||
ZDICT_legacy_params_t parameters);
|
||||
|
@ -421,32 +444,31 @@ ZDICTLIB_API size_t ZDICT_trainFromBuffer_legacy(
|
|||
or _CRT_SECURE_NO_WARNINGS in Visual.
|
||||
Otherwise, it's also possible to manually define ZDICT_DISABLE_DEPRECATE_WARNINGS */
|
||||
#ifdef ZDICT_DISABLE_DEPRECATE_WARNINGS
|
||||
# define ZDICT_DEPRECATED(message) ZDICTLIB_API /* disable deprecation warnings */
|
||||
# define ZDICT_DEPRECATED(message) /* disable deprecation warnings */
|
||||
#else
|
||||
# define ZDICT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
|
||||
# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
|
||||
# define ZDICT_DEPRECATED(message) [[deprecated(message)]] ZDICTLIB_API
|
||||
# define ZDICT_DEPRECATED(message) [[deprecated(message)]]
|
||||
# elif defined(__clang__) || (ZDICT_GCC_VERSION >= 405)
|
||||
# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated(message)))
|
||||
# define ZDICT_DEPRECATED(message) __attribute__((deprecated(message)))
|
||||
# elif (ZDICT_GCC_VERSION >= 301)
|
||||
# define ZDICT_DEPRECATED(message) ZDICTLIB_API __attribute__((deprecated))
|
||||
# define ZDICT_DEPRECATED(message) __attribute__((deprecated))
|
||||
# elif defined(_MSC_VER)
|
||||
# define ZDICT_DEPRECATED(message) ZDICTLIB_API __declspec(deprecated(message))
|
||||
# define ZDICT_DEPRECATED(message) __declspec(deprecated(message))
|
||||
# else
|
||||
# pragma message("WARNING: You need to implement ZDICT_DEPRECATED for this compiler")
|
||||
# define ZDICT_DEPRECATED(message) ZDICTLIB_API
|
||||
# define ZDICT_DEPRECATED(message)
|
||||
# endif
|
||||
#endif /* ZDICT_DISABLE_DEPRECATE_WARNINGS */
|
||||
|
||||
ZDICT_DEPRECATED("use ZDICT_finalizeDictionary() instead")
|
||||
ZDICTLIB_STATIC_API
|
||||
size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,
|
||||
const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples);
|
||||
|
||||
|
||||
#endif /* ZDICT_STATIC_LINKING_ONLY */
|
||||
#endif /* ZSTD_ZDICT_H_STATIC */
|
||||
|
||||
#if defined (__cplusplus)
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* DICTBUILDER_H_001 */
|
||||
|
|
579
vendor/github.com/valyala/gozstd/zstd.h
generated
vendored
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) Yann Collet, Facebook, Inc.
|
||||
* Copyright (c) Meta Platforms, Inc. and affiliates.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This source code is licensed under both the BSD-style license (found in the
|
||||
|
@ -14,21 +14,31 @@ extern "C" {
|
|||
#ifndef ZSTD_H_235446
|
||||
#define ZSTD_H_235446
|
||||
|
||||
/* ====== Dependency ======*/
|
||||
/* ====== Dependencies ======*/
|
||||
#include <limits.h> /* INT_MAX */
|
||||
#include <stddef.h> /* size_t */
|
||||
|
||||
|
||||
/* ===== ZSTDLIB_API : control library symbols visibility ===== */
|
||||
#ifndef ZSTDLIB_VISIBLE
|
||||
# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
|
||||
/* Backwards compatibility with old macro name */
|
||||
# ifdef ZSTDLIB_VISIBILITY
|
||||
# define ZSTDLIB_VISIBLE ZSTDLIB_VISIBILITY
|
||||
# elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
|
||||
# define ZSTDLIB_VISIBLE __attribute__ ((visibility ("default")))
|
||||
# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden")))
|
||||
# else
|
||||
# define ZSTDLIB_VISIBLE
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifndef ZSTDLIB_HIDDEN
|
||||
# if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
|
||||
# define ZSTDLIB_HIDDEN __attribute__ ((visibility ("hidden")))
|
||||
# else
|
||||
# define ZSTDLIB_HIDDEN
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
|
||||
# define ZSTDLIB_API __declspec(dllexport) ZSTDLIB_VISIBLE
|
||||
#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
|
||||
|
@ -37,6 +47,28 @@ extern "C" {
|
|||
# define ZSTDLIB_API ZSTDLIB_VISIBLE
|
||||
#endif
|
||||
|
||||
/* Deprecation warnings :
|
||||
* Should these warnings be a problem, it is generally possible to disable them,
|
||||
* typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual.
|
||||
* Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS.
|
||||
*/
|
||||
#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS
|
||||
# define ZSTD_DEPRECATED(message) /* disable deprecation warnings */
|
||||
#else
|
||||
# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
|
||||
# define ZSTD_DEPRECATED(message) [[deprecated(message)]]
|
||||
# elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__)
|
||||
# define ZSTD_DEPRECATED(message) __attribute__((deprecated(message)))
|
||||
# elif defined(__GNUC__) && (__GNUC__ >= 3)
|
||||
# define ZSTD_DEPRECATED(message) __attribute__((deprecated))
|
||||
# elif defined(_MSC_VER)
|
||||
# define ZSTD_DEPRECATED(message) __declspec(deprecated(message))
|
||||
# else
|
||||
# pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler")
|
||||
# define ZSTD_DEPRECATED(message)
|
||||
# endif
|
||||
#endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */
|
||||
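For example, a translation unit that still calls deprecated entry points could opt out of these warnings as sketched below (the helper is purely illustrative; defining the macro on the compiler command line works the same way):

    /* must appear before the first #include <zstd.h> in this translation unit */
    #define ZSTD_DISABLE_DEPRECATE_WARNINGS
    #include <zstd.h>

    /* Calls a deprecated entry point without triggering -Wdeprecated-declarations. */
    unsigned long long legacy_content_size(const void* src, size_t srcSize)
    {
        return ZSTD_getDecompressedSize(src, srcSize);
    }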
|
||||
|
||||
/*******************************************************************************
|
||||
Introduction
|
||||
|
@ -74,7 +106,7 @@ extern "C" {
|
|||
/*------ Version ------*/
|
||||
#define ZSTD_VERSION_MAJOR 1
|
||||
#define ZSTD_VERSION_MINOR 5
|
||||
#define ZSTD_VERSION_RELEASE 2
|
||||
#define ZSTD_VERSION_RELEASE 4
|
||||
#define ZSTD_VERSION_NUMBER (ZSTD_VERSION_MAJOR *100*100 + ZSTD_VERSION_MINOR *100 + ZSTD_VERSION_RELEASE)
|
||||
|
||||
/*! ZSTD_versionNumber() :
|
||||
|
@ -165,7 +197,9 @@ ZSTDLIB_API unsigned long long ZSTD_getFrameContentSize(const void *src, size_t
|
|||
* "empty", "unknown" and "error" results to the same return value (0),
|
||||
* while ZSTD_getFrameContentSize() gives them separate return values.
|
||||
* @return : decompressed size of `src` frame content _if known and not empty_, 0 otherwise. */
|
||||
ZSTDLIB_API unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
|
||||
ZSTD_DEPRECATED("Replaced by ZSTD_getFrameContentSize")
|
||||
ZSTDLIB_API
|
||||
unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize);
|
||||
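As a usage sketch (the helper name is illustrative), the replacement call distinguishes the outcomes that ZSTD_getDecompressedSize() collapses into 0:

    #include <zstd.h>

    /* Return the exact frame content size, or 0 when it is unknown, invalid, or genuinely empty. */
    static unsigned long long content_size_or_zero(const void* src, size_t srcSize)
    {
        unsigned long long const r = ZSTD_getFrameContentSize(src, srcSize);
        if (r == ZSTD_CONTENTSIZE_UNKNOWN) return 0;  /* size not recorded: fall back to streaming */
        if (r == ZSTD_CONTENTSIZE_ERROR)   return 0;  /* not a zstd frame, or srcSize too small */
        return r;                                     /* may be 0 for an empty frame */
    }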
|
||||
/*! ZSTD_findFrameCompressedSize() : Requires v1.4.0+
|
||||
* `src` should point to the start of a ZSTD frame or skippable frame.
|
||||
|
@ -177,8 +211,30 @@ ZSTDLIB_API size_t ZSTD_findFrameCompressedSize(const void* src, size_t srcSize)
|
|||
|
||||
|
||||
/*====== Helper functions ======*/
|
||||
#define ZSTD_COMPRESSBOUND(srcSize) ((srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
|
||||
ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
|
||||
/* ZSTD_compressBound() :
|
||||
* maximum compressed size in worst case single-pass scenario.
|
||||
* When invoking `ZSTD_compress()` or any other one-pass compression function,
|
||||
* it's recommended to provide @dstCapacity >= ZSTD_compressBound(srcSize)
|
||||
* as it eliminates one potential failure scenario,
|
||||
* aka not enough room in dst buffer to write the compressed frame.
|
||||
* Note : ZSTD_compressBound() itself can fail, if @srcSize > ZSTD_MAX_INPUT_SIZE .
|
||||
* In which case, ZSTD_compressBound() will return an error code
|
||||
* which can be tested using ZSTD_isError().
|
||||
*
|
||||
* ZSTD_COMPRESSBOUND() :
|
||||
* same as ZSTD_compressBound(), but as a macro.
|
||||
* It can be used to produce constants, which can be useful for static allocation,
|
||||
* for example to size a static array on stack.
|
||||
* Will produce constant value 0 if srcSize too large.
|
||||
*/
|
||||
#define ZSTD_MAX_INPUT_SIZE ((sizeof(size_t)==8) ? 0xFF00FF00FF00FF00LLU : 0xFF00FF00U)
|
||||
#define ZSTD_COMPRESSBOUND(srcSize) (((size_t)(srcSize) >= ZSTD_MAX_INPUT_SIZE) ? 0 : (srcSize) + ((srcSize)>>8) + (((srcSize) < (128<<10)) ? (((128<<10) - (srcSize)) >> 11) /* margin, from 64 to 0 */ : 0)) /* this formula ensures that bound(A) + bound(B) <= bound(A+B) as long as A and B >= 128 KB */
|
||||
ZSTDLIB_API size_t ZSTD_compressBound(size_t srcSize); /*!< maximum compressed size in worst case single-pass scenario */
|
||||
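For illustration (not part of the header), a minimal single-pass sketch that sizes the destination buffer with ZSTD_compressBound() and checks the result with ZSTD_isError(); the helper name is made up and the allocation strategy is up to the caller:

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    /* Compress src into a freshly malloc'd buffer sized with ZSTD_compressBound(). */
    static void* compress_simple(const void* src, size_t srcSize, size_t* cSizePtr)
    {
        size_t const dstCapacity = ZSTD_compressBound(srcSize);   /* worst-case single-pass bound */
        void* const dst = malloc(dstCapacity);
        if (dst == NULL) return NULL;
        {
            size_t const cSize = ZSTD_compress(dst, dstCapacity, src, srcSize, ZSTD_CLEVEL_DEFAULT);
            if (ZSTD_isError(cSize)) {                 /* cSize is an error code on failure */
                fprintf(stderr, "compression failed: %s\n", ZSTD_getErrorName(cSize));
                free(dst);
                return NULL;
            }
            *cSizePtr = cSize;
        }
        return dst;
    }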
/* ZSTD_isError() :
|
||||
* Most ZSTD_* functions returning a size_t value can be tested for error,
|
||||
* using ZSTD_isError().
|
||||
* @return 1 if error, 0 otherwise
|
||||
*/
|
||||
ZSTDLIB_API unsigned ZSTD_isError(size_t code); /*!< tells if a `size_t` function result is an error code */
|
||||
ZSTDLIB_API const char* ZSTD_getErrorName(size_t code); /*!< provides readable string from an error code */
|
||||
ZSTDLIB_API int ZSTD_minCLevel(void); /*!< minimum negative compression level allowed, requires v1.4.0+ */
|
||||
|
@ -421,6 +477,9 @@ typedef enum {
|
|||
* ZSTD_c_validateSequences
|
||||
* ZSTD_c_useBlockSplitter
|
||||
* ZSTD_c_useRowMatchFinder
|
||||
* ZSTD_c_prefetchCDictTables
|
||||
* ZSTD_c_enableSeqProducerFallback
|
||||
* ZSTD_c_maxBlockSize
|
||||
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
|
||||
* note : never ever use experimentalParam? names directly;
|
||||
* also, the enums values themselves are unstable and can still change.
|
||||
|
@ -439,7 +498,11 @@ typedef enum {
|
|||
ZSTD_c_experimentalParam12=1009,
|
||||
ZSTD_c_experimentalParam13=1010,
|
||||
ZSTD_c_experimentalParam14=1011,
|
||||
ZSTD_c_experimentalParam15=1012
|
||||
ZSTD_c_experimentalParam15=1012,
|
||||
ZSTD_c_experimentalParam16=1013,
|
||||
ZSTD_c_experimentalParam17=1014,
|
||||
ZSTD_c_experimentalParam18=1015,
|
||||
ZSTD_c_experimentalParam19=1016
|
||||
} ZSTD_cParameter;
|
||||
|
||||
typedef struct {
|
||||
|
@ -502,7 +565,7 @@ typedef enum {
|
|||
* They will be used to compress next frame.
|
||||
* Resetting session never fails.
|
||||
* - The parameters : changes all parameters back to "default".
|
||||
* This removes any reference to any dictionary too.
|
||||
* This also removes any reference to any dictionary or external sequence producer.
|
||||
* Parameters can only be changed between 2 sessions (i.e. no compression is currently ongoing)
|
||||
* otherwise the reset fails, and function returns an error value (which can be tested using ZSTD_isError())
|
||||
* - Both : similar to resetting the session, followed by resetting parameters.
|
||||
|
@ -552,13 +615,15 @@ typedef enum {
|
|||
* ZSTD_d_stableOutBuffer
|
||||
* ZSTD_d_forceIgnoreChecksum
|
||||
* ZSTD_d_refMultipleDDicts
|
||||
* ZSTD_d_disableHuffmanAssembly
|
||||
* Because they are not stable, it's necessary to define ZSTD_STATIC_LINKING_ONLY to access them.
|
||||
* note : never ever use experimentalParam? names directly
|
||||
*/
|
||||
ZSTD_d_experimentalParam1=1000,
|
||||
ZSTD_d_experimentalParam2=1001,
|
||||
ZSTD_d_experimentalParam3=1002,
|
||||
ZSTD_d_experimentalParam4=1003
|
||||
ZSTD_d_experimentalParam4=1003,
|
||||
ZSTD_d_experimentalParam5=1004
|
||||
|
||||
} ZSTD_dParameter;
|
||||
|
||||
|
@ -737,8 +802,6 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output
|
|||
* This following is a legacy streaming API, available since v1.0+ .
|
||||
* It can be replaced by ZSTD_CCtx_reset() and ZSTD_compressStream2().
|
||||
* It is redundant, but remains fully supported.
|
||||
* Streaming in combination with advanced parameters and dictionary compression
|
||||
* can only be used through the new API.
|
||||
******************************************************************************/
|
||||
|
||||
/*!
|
||||
|
@ -747,6 +810,9 @@ ZSTDLIB_API size_t ZSTD_CStreamOutSize(void); /**< recommended size for output
|
|||
* ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);
|
||||
* ZSTD_CCtx_refCDict(zcs, NULL); // clear the dictionary (if any)
|
||||
* ZSTD_CCtx_setParameter(zcs, ZSTD_c_compressionLevel, compressionLevel);
|
||||
*
|
||||
* Note that ZSTD_initCStream() clears any previously set dictionary. Use the new API
|
||||
* to compress with a dictionary.
|
||||
*/
|
||||
ZSTDLIB_API size_t ZSTD_initCStream(ZSTD_CStream* zcs, int compressionLevel);
|
||||
/*!
|
||||
|
@ -797,13 +863,31 @@ ZSTDLIB_API size_t ZSTD_freeDStream(ZSTD_DStream* zds); /* accept NULL pointer
|
|||
|
||||
/*===== Streaming decompression functions =====*/
|
||||
|
||||
/* This function is redundant with the advanced API and equivalent to:
|
||||
/*! ZSTD_initDStream() :
|
||||
* Initialize/reset DStream state for new decompression operation.
|
||||
* Call before new decompression operation using same DStream.
|
||||
*
|
||||
* Note : This function is redundant with the advanced API and equivalent to:
|
||||
* ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
|
||||
* ZSTD_DCtx_refDDict(zds, NULL);
|
||||
*/
|
||||
ZSTDLIB_API size_t ZSTD_initDStream(ZSTD_DStream* zds);
|
||||
|
||||
/*! ZSTD_decompressStream() :
|
||||
* Streaming decompression function.
|
||||
* Call repetitively to consume full input updating it as necessary.
|
||||
* Function will update both input and output `pos` fields exposing current state via these fields:
|
||||
* - `input.pos < input.size`, some input remaining and caller should provide remaining input
|
||||
* on the next call.
|
||||
* - `output.pos < output.size`, decoder finished and flushed all remaining buffers.
|
||||
* - `output.pos == output.size`, potentially unflushed data present in the internal buffers,
|
||||
* call ZSTD_decompressStream() again to flush remaining data to output.
|
||||
* Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.
|
||||
*
|
||||
* @return : 0 when a frame is completely decoded and fully flushed,
|
||||
* or an error code, which can be tested using ZSTD_isError(),
|
||||
* or any other value > 0, which means there is some decoding or flushing to do to complete current frame.
|
||||
*/
|
||||
ZSTDLIB_API size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input);
|
||||
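A usage sketch of the loop described above (the helper name and FILE*-based I/O are illustrative; allocation checks omitted). It mirrors the pattern from zstd's streaming examples: read a chunk, decompress until the chunk is consumed, and treat a non-zero final return value as a truncated frame:

    #include <stdio.h>
    #include <stdlib.h>
    #include <zstd.h>

    /* Decompress zstd frames from fin to fout; returns 0 on success, -1 on error/truncation. */
    static int decompress_file(FILE* fin, FILE* fout)
    {
        size_t const inCap  = ZSTD_DStreamInSize();
        size_t const outCap = ZSTD_DStreamOutSize();
        void*  const inBuf  = malloc(inCap);
        void*  const outBuf = malloc(outCap);
        ZSTD_DStream* const zds = ZSTD_createDStream();
        size_t lastRet = 0;
        size_t readSize;
        ZSTD_initDStream(zds);
        while ((readSize = fread(inBuf, 1, inCap, fin)) > 0) {
            ZSTD_inBuffer input = { inBuf, readSize, 0 };
            while (input.pos < input.size) {
                ZSTD_outBuffer output = { outBuf, outCap, 0 };
                size_t const ret = ZSTD_decompressStream(zds, &output, &input);
                if (ZSTD_isError(ret)) { lastRet = ret; goto done; }
                fwrite(outBuf, 1, output.pos, fout);
                lastRet = ret;          /* 0 only when a frame is fully decoded and flushed */
            }
        }
    done:
        ZSTD_freeDStream(zds);
        free(inBuf); free(outBuf);
        return (ZSTD_isError(lastRet) || lastRet != 0) ? -1 : 0;
    }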
|
||||
ZSTDLIB_API size_t ZSTD_DStreamInSize(void); /*!< recommended size for input buffer */
|
||||
|
@ -922,7 +1006,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict);
|
|||
* If @return == 0, the dictID could not be decoded.
|
||||
* This could for one of the following reasons :
|
||||
* - The frame does not require a dictionary to be decoded (most common case).
|
||||
* - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden information.
|
||||
* - The frame was built with dictID intentionally removed. Whatever dictionary is necessary is a hidden piece of information.
|
||||
* Note : this use case also happens when using a non-conformant dictionary.
|
||||
* - `srcSize` is too small, and as a result, the frame header could not be decoded (only possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`).
|
||||
* - This is not a Zstandard frame.
|
||||
|
@ -946,8 +1030,9 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
|
|||
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
|
||||
* Special: Loading a NULL (or 0-size) dictionary invalidates previous dictionary,
|
||||
* meaning "return to no-dictionary mode".
|
||||
* Note 1 : Dictionary is sticky, it will be used for all future compressed frames.
|
||||
* To return to "no-dictionary" situation, load a NULL dictionary (or reset parameters).
|
||||
* Note 1 : Dictionary is sticky, it will be used for all future compressed frames,
|
||||
* until parameters are reset, a new dictionary is loaded, or the dictionary
|
||||
* is explicitly invalidated by loading a NULL dictionary.
|
||||
* Note 2 : Loading a dictionary involves building tables.
|
||||
* It's also a CPU consuming operation, with non-negligible impact on latency.
|
||||
* Tables are dependent on compression parameters, and for this reason,
|
||||
|
@ -960,7 +1045,7 @@ ZSTDLIB_API unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize);
|
|||
ZSTDLIB_API size_t ZSTD_CCtx_loadDictionary(ZSTD_CCtx* cctx, const void* dict, size_t dictSize);
|
||||
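A minimal dictionary-compression sketch (the dictionary bytes are assumed to be available already, e.g. trained offline; the helper name is illustrative). The loaded dictionary is sticky, so further frames compressed on the same cctx reuse it without reloading:

    #include <zstd.h>

    /* Load a dictionary once, then compress one frame with it. */
    static size_t compress_with_dict(ZSTD_CCtx* cctx,
                                     const void* dict, size_t dictSize,
                                     void* dst, size_t dstCapacity,
                                     const void* src, size_t srcSize)
    {
        size_t const err = ZSTD_CCtx_loadDictionary(cctx, dict, dictSize);
        if (ZSTD_isError(err)) return err;
        return ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);   /* dictionary stays loaded */
    }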
|
||||
/*! ZSTD_CCtx_refCDict() : Requires v1.4.0+
|
||||
* Reference a prepared dictionary, to be used for all next compressed frames.
|
||||
* Reference a prepared dictionary, to be used for all future compressed frames.
|
||||
* Note that compression parameters are enforced from within CDict,
|
||||
* and supersede any compression parameter previously set within CCtx.
|
||||
* The parameters ignored are labelled as "superseded-by-cdict" in the ZSTD_cParameter enum docs.
|
||||
|
@ -995,9 +1080,9 @@ ZSTDLIB_API size_t ZSTD_CCtx_refPrefix(ZSTD_CCtx* cctx,
|
|||
const void* prefix, size_t prefixSize);
|
||||
|
||||
/*! ZSTD_DCtx_loadDictionary() : Requires v1.4.0+
|
||||
* Create an internal DDict from dict buffer,
|
||||
* to be used to decompress next frames.
|
||||
* The dictionary remains valid for all future frames, until explicitly invalidated.
|
||||
* Create an internal DDict from dict buffer, to be used to decompress all future frames.
|
||||
* The dictionary remains valid for all future frames, until explicitly invalidated, or
|
||||
* a new dictionary is loaded.
|
||||
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
|
||||
* Special : Adding a NULL (or 0-size) dictionary invalidates any previous dictionary,
|
||||
* meaning "return to no-dictionary mode".
|
||||
|
@ -1021,9 +1106,10 @@ ZSTDLIB_API size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, s
|
|||
* The memory for the table is allocated on the first call to refDDict, and can be
|
||||
* freed with ZSTD_freeDCtx().
|
||||
*
|
||||
* If called with ZSTD_d_refMultipleDDicts disabled (the default), only one dictionary
|
||||
* will be managed, and referencing a dictionary effectively "discards" any previous one.
|
||||
*
|
||||
* @result : 0, or an error code (which can be tested with ZSTD_isError()).
|
||||
* Note 1 : Currently, only one dictionary can be managed.
|
||||
* Referencing a new dictionary effectively "discards" any previous one.
|
||||
* Special: referencing a NULL DDict means "return to no-dictionary mode".
|
||||
* Note 2 : DDict is just referenced, its lifetime must outlive its usage from DCtx.
|
||||
*/
|
||||
|
@ -1086,28 +1172,6 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
|
|||
# endif
|
||||
#endif
|
||||
|
||||
/* Deprecation warnings :
|
||||
* Should these warnings be a problem, it is generally possible to disable them,
|
||||
* typically with -Wno-deprecated-declarations for gcc or _CRT_SECURE_NO_WARNINGS in Visual.
|
||||
* Otherwise, it's also possible to define ZSTD_DISABLE_DEPRECATE_WARNINGS.
|
||||
*/
|
||||
#ifdef ZSTD_DISABLE_DEPRECATE_WARNINGS
|
||||
# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API /* disable deprecation warnings */
|
||||
#else
|
||||
# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
|
||||
# define ZSTD_DEPRECATED(message) [[deprecated(message)]] ZSTDLIB_STATIC_API
|
||||
# elif (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))) || defined(__clang__)
|
||||
# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated(message)))
|
||||
# elif defined(__GNUC__) && (__GNUC__ >= 3)
|
||||
# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __attribute__((deprecated))
|
||||
# elif defined(_MSC_VER)
|
||||
# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API __declspec(deprecated(message))
|
||||
# else
|
||||
# pragma message("WARNING: You need to implement ZSTD_DEPRECATED for this compiler")
|
||||
# define ZSTD_DEPRECATED(message) ZSTDLIB_STATIC_API
|
||||
# endif
|
||||
#endif /* ZSTD_DISABLE_DEPRECATE_WARNINGS */
|
||||
|
||||
/****************************************************************************************
|
||||
* experimental API (static linking only)
|
||||
****************************************************************************************
|
||||
|
@ -1142,6 +1206,7 @@ ZSTDLIB_API size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict);
|
|||
#define ZSTD_TARGETLENGTH_MIN 0 /* note : comparing this constant to an unsigned results in a tautological test */
|
||||
#define ZSTD_STRATEGY_MIN ZSTD_fast
|
||||
#define ZSTD_STRATEGY_MAX ZSTD_btultra2
|
||||
#define ZSTD_BLOCKSIZE_MAX_MIN (1 << 10) /* The minimum valid max blocksize. Maximum blocksizes smaller than this make compressBound() inaccurate. */
|
||||
|
||||
|
||||
#define ZSTD_OVERLAPLOG_MIN 0
|
||||
|
@ -1369,33 +1434,89 @@ ZSTDLIB_STATIC_API unsigned long long ZSTD_decompressBound(const void* src, size
|
|||
* or an error code (if srcSize is too small) */
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize);
|
||||
|
||||
/*! ZSTD_decompressionMargin() :
|
||||
* Zstd supports in-place decompression, where the input and output buffers overlap.
|
||||
* In this case, the output buffer must be at least (Margin + Output_Size) bytes large,
|
||||
* and the input buffer must be at the end of the output buffer.
|
||||
*
|
||||
*  _______________________ Output Buffer ________________________
* |                                                              |
* |                                        ____ Input Buffer ____|
* |                                       |                      |
* v                                       v                      v
* |---------------------------------------|-----------|----------|
* ^                                       ^           ^
* |___________________ Output_Size ___________________|_ Margin _|
|
||||
*
|
||||
* NOTE: See also ZSTD_DECOMPRESSION_MARGIN().
|
||||
* NOTE: This applies only to single-pass decompression through ZSTD_decompress() or
|
||||
* ZSTD_decompressDCtx().
|
||||
* NOTE: This function supports multi-frame input.
|
||||
*
|
||||
* @param src The compressed frame(s)
|
||||
* @param srcSize The size of the compressed frame(s)
|
||||
* @returns The decompression margin or an error that can be checked with ZSTD_isError().
|
||||
*/
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_decompressionMargin(const void* src, size_t srcSize);
|
||||
|
||||
/*! ZSTD_DECOMPRESSION_MARGIN() :
|
||||
* Similar to ZSTD_decompressionMargin(), but instead of computing the margin from
|
||||
* the compressed frame, compute it from the original size and the blockSizeLog.
|
||||
* See ZSTD_decompressionMargin() for details.
|
||||
*
|
||||
* WARNING: This macro does not support multi-frame input; the input must be a single
|
||||
* zstd frame. If you need that support, use the function, or implement it yourself.
|
||||
*
|
||||
* @param originalSize The original uncompressed size of the data.
|
||||
* @param blockSize The block size == MIN(windowSize, ZSTD_BLOCKSIZE_MAX).
|
||||
* Unless you explicitly set the windowLog smaller than
|
||||
* ZSTD_BLOCKSIZELOG_MAX you can just use ZSTD_BLOCKSIZE_MAX.
|
||||
*/
|
||||
#define ZSTD_DECOMPRESSION_MARGIN(originalSize, blockSize) ((size_t)( \
|
||||
ZSTD_FRAMEHEADERSIZE_MAX /* Frame header */ + \
|
||||
4 /* checksum */ + \
|
||||
((originalSize) == 0 ? 0 : 3 * (((originalSize) + (blockSize) - 1) / blockSize)) /* 3 bytes per block */ + \
|
||||
(blockSize) /* One block of margin */ \
|
||||
))
|
||||
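A rough in-place decompression sketch built on the two helpers above (static-linking-only API; the helper name is illustrative, and it assumes the frame records its exact content size): the compressed frame is copied to the end of a buffer of size content-size + margin, then decompressed into the front of that same buffer:

    #define ZSTD_STATIC_LINKING_ONLY   /* ZSTD_decompressionMargin() is an experimental API */
    #include <stdlib.h>
    #include <string.h>
    #include <zstd.h>

    /* Decompress src "in place"; returns a malloc'd buffer holding the output, or NULL. */
    static void* decompress_in_place(const void* src, size_t srcSize, size_t* dSizePtr)
    {
        unsigned long long const dSize = ZSTD_getFrameContentSize(src, srcSize);
        size_t const margin = ZSTD_decompressionMargin(src, srcSize);
        if (dSize == ZSTD_CONTENTSIZE_ERROR || dSize == ZSTD_CONTENTSIZE_UNKNOWN) return NULL;
        if (ZSTD_isError(margin)) return NULL;
        {
            size_t const bufSize = (size_t)dSize + margin;       /* Output_Size + Margin */
            char*  const buf = malloc(bufSize);
            if (buf == NULL) return NULL;
            memcpy(buf + bufSize - srcSize, src, srcSize);       /* input sits at the end */
            {
                size_t const r = ZSTD_decompress(buf, bufSize, buf + bufSize - srcSize, srcSize);
                if (ZSTD_isError(r)) { free(buf); return NULL; }
                *dSizePtr = r;
            }
            return buf;
        }
    }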
|
||||
typedef enum {
|
||||
ZSTD_sf_noBlockDelimiters = 0, /* Representation of ZSTD_Sequence has no block delimiters, sequences only */
|
||||
ZSTD_sf_explicitBlockDelimiters = 1 /* Representation of ZSTD_Sequence contains explicit block delimiters */
|
||||
} ZSTD_sequenceFormat_e;
|
||||
|
||||
/*! ZSTD_sequenceBound() :
|
||||
* `srcSize` : size of the input buffer
|
||||
* @return : upper-bound for the number of sequences that can be generated
|
||||
* from a buffer of srcSize bytes
|
||||
*
|
||||
* note : returns number of sequences - to get bytes, multiply by sizeof(ZSTD_Sequence).
|
||||
*/
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_sequenceBound(size_t srcSize);
|
||||
|
||||
/*! ZSTD_generateSequences() :
|
||||
* Generate sequences using ZSTD_compress2, given a source buffer.
|
||||
* Generate sequences using ZSTD_compress2(), given a source buffer.
|
||||
*
|
||||
* Each block will end with a dummy sequence
|
||||
* with offset == 0, matchLength == 0, and litLength == length of last literals.
|
||||
* litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)
|
||||
* simply acts as a block delimiter.
|
||||
*
|
||||
* zc can be used to insert custom compression params.
|
||||
* This function invokes ZSTD_compress2
|
||||
* @zc can be used to insert custom compression params.
|
||||
* This function invokes ZSTD_compress2().
|
||||
*
|
||||
* The output of this function can be fed into ZSTD_compressSequences() with CCtx
|
||||
* setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters
|
||||
* @return : number of sequences generated
|
||||
*/
|
||||
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* outSeqs,
|
||||
size_t outSeqsSize, const void* src, size_t srcSize);
|
||||
ZSTDLIB_STATIC_API size_t
|
||||
ZSTD_generateSequences( ZSTD_CCtx* zc,
|
||||
ZSTD_Sequence* outSeqs, size_t outSeqsSize,
|
||||
const void* src, size_t srcSize);
|
||||
|
||||
/*! ZSTD_mergeBlockDelimiters() :
|
||||
* Given an array of ZSTD_Sequence, remove all sequences that represent block delimiters/last literals
|
||||
* by merging them into into the literals of the next sequence.
|
||||
* by merging them into the literals of the next sequence.
|
||||
*
|
||||
* As such, the final generated result has no explicit representation of block boundaries,
|
||||
* and the final last literals segment is not represented in the sequences.
|
||||
|
@ -1407,7 +1528,9 @@ ZSTDLIB_STATIC_API size_t ZSTD_generateSequences(ZSTD_CCtx* zc, ZSTD_Sequence* o
|
|||
ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, size_t seqsSize);
|
||||
|
||||
/*! ZSTD_compressSequences() :
|
||||
* Compress an array of ZSTD_Sequence, generated from the original source buffer, into dst.
|
||||
* Compress an array of ZSTD_Sequence, associated with @src buffer, into dst.
|
||||
* @src contains the entire input (not just the literals).
|
||||
* If @srcSize > sum(sequence.length), the remaining bytes are considered all literals
|
||||
* If a dictionary is included, then the cctx should reference the dict. (see: ZSTD_CCtx_refCDict(), ZSTD_CCtx_loadDictionary(), etc.)
|
||||
* The entire source is compressed into a single frame.
|
||||
*
|
||||
|
@ -1432,17 +1555,18 @@ ZSTDLIB_STATIC_API size_t ZSTD_mergeBlockDelimiters(ZSTD_Sequence* sequences, si
|
|||
* Note: Repcodes are, as of now, always re-calculated within this function, so ZSTD_Sequence::rep is unused.
|
||||
* Note 2: Once we integrate ability to ingest repcodes, the explicit block delims mode must respect those repcodes exactly,
|
||||
* and cannot emit an RLE block that disagrees with the repcode history
|
||||
* @return : final compressed size or a ZSTD error.
|
||||
* @return : final compressed size, or a ZSTD error code.
|
||||
*/
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_compressSequences(ZSTD_CCtx* const cctx, void* dst, size_t dstSize,
|
||||
const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
|
||||
const void* src, size_t srcSize);
|
||||
ZSTDLIB_STATIC_API size_t
|
||||
ZSTD_compressSequences( ZSTD_CCtx* cctx, void* dst, size_t dstSize,
|
||||
const ZSTD_Sequence* inSeqs, size_t inSeqsSize,
|
||||
const void* src, size_t srcSize);
|
||||
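A rough sketch of the round trip described above (static-linking-only API; helper name illustrative, allocation checks trimmed): generate sequences with explicit block delimiters, then feed them back through ZSTD_compressSequences():

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdlib.h>
    #include <zstd.h>

    /* Re-compress src via its generated sequences; returns compressed size or an error code. */
    static size_t recompress_via_sequences(void* dst, size_t dstCapacity,
                                           const void* src, size_t srcSize)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        size_t const maxSeqs  = ZSTD_sequenceBound(srcSize);
        ZSTD_Sequence* const seqs = malloc(maxSeqs * sizeof(ZSTD_Sequence));
        size_t const nbSeqs = ZSTD_generateSequences(cctx, seqs, maxSeqs, src, srcSize);
        size_t cSize = nbSeqs;                        /* propagate an eventual error code */
        if (!ZSTD_isError(nbSeqs)) {
            /* generated sequences carry explicit block delimiters, so declare that format */
            ZSTD_CCtx_reset(cctx, ZSTD_reset_session_and_parameters);
            ZSTD_CCtx_setParameter(cctx, ZSTD_c_blockDelimiters, ZSTD_sf_explicitBlockDelimiters);
            cSize = ZSTD_compressSequences(cctx, dst, dstCapacity, seqs, nbSeqs, src, srcSize);
        }
        free(seqs);
        ZSTD_freeCCtx(cctx);
        return cSize;
    }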
|
||||
|
||||
/*! ZSTD_writeSkippableFrame() :
|
||||
* Generates a zstd skippable frame containing data given by src, and writes it to dst buffer.
|
||||
*
|
||||
* Skippable frames begin with a a 4-byte magic number. There are 16 possible choices of magic number,
|
||||
* Skippable frames begin with a 4-byte magic number. There are 16 possible choices of magic number,
|
||||
* ranging from ZSTD_MAGIC_SKIPPABLE_START to ZSTD_MAGIC_SKIPPABLE_START+15.
|
||||
* As such, the parameter magicVariant controls the exact skippable frame magic number variant used, so
|
||||
* the magic number used will be ZSTD_MAGIC_SKIPPABLE_START + magicVariant.
|
||||
|
@ -1500,8 +1624,11 @@ ZSTDLIB_API unsigned ZSTD_isSkippableFrame(const void* buffer, size_t size);
|
|||
* and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().
|
||||
* Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.
|
||||
*
|
||||
* Note 2 : only single-threaded compression is supported.
|
||||
* Note : only single-threaded compression is supported.
|
||||
* ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
|
||||
*
|
||||
* Note 2 : ZSTD_estimateCCtxSize* functions are not compatible with the Block-Level Sequence Producer API at this time.
|
||||
* Size estimates assume that no external sequence producer is registered.
|
||||
*/
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize(int compressionLevel);
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_estimateCCtxSize_usingCParams(ZSTD_compressionParameters cParams);
|
||||
|
@ -1520,7 +1647,12 @@ ZSTDLIB_STATIC_API size_t ZSTD_estimateDCtxSize(void);
|
|||
* or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();
|
||||
* Note : if streaming is init with function ZSTD_init?Stream_usingDict(),
|
||||
* an internal ?Dict will be created, which additional size is not estimated here.
|
||||
* In this case, get total size by adding ZSTD_estimate?DictSize */
|
||||
* In this case, get total size by adding ZSTD_estimate?DictSize
|
||||
* Note 2 : only single-threaded compression is supported.
|
||||
* ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.
|
||||
* Note 3 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time.
|
||||
* Size estimates assume that no external sequence producer is registered.
|
||||
*/
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize(int compressionLevel);
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCParams(ZSTD_compressionParameters cParams);
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_estimateCStreamSize_usingCCtxParams(const ZSTD_CCtx_params* params);
|
||||
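As a small usage sketch (static-linking-only API; the helper name is illustrative), the estimates can be logged to budget memory up front for single-threaded contexts:

    #define ZSTD_STATIC_LINKING_ONLY
    #include <stdio.h>
    #include <zstd.h>

    /* Report worst-case memory needs for one-shot and streaming compression at a given level. */
    static void report_compression_budget(int level)
    {
        size_t const cctxSize    = ZSTD_estimateCCtxSize(level);
        size_t const cstreamSize = ZSTD_estimateCStreamSize(level);
        printf("level %d: CCtx ~%zu bytes, CStream ~%zu bytes\n", level, cctxSize, cstreamSize);
    }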
|
@ -1670,22 +1802,31 @@ ZSTDLIB_STATIC_API size_t ZSTD_checkCParams(ZSTD_compressionParameters params);
|
|||
* This function never fails (wide contract) */
|
||||
ZSTDLIB_STATIC_API ZSTD_compressionParameters ZSTD_adjustCParams(ZSTD_compressionParameters cPar, unsigned long long srcSize, size_t dictSize);
|
||||
|
||||
/*! ZSTD_CCtx_setCParams() :
|
||||
* Set all parameters provided within @cparams into the working @cctx.
|
||||
* Note : if modifying parameters during compression (MT mode only),
|
||||
* note that changes to the .windowLog parameter will be ignored.
|
||||
* @return 0 on success, or an error code (can be checked with ZSTD_isError()) */
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_CCtx_setCParams(ZSTD_CCtx* cctx, ZSTD_compressionParameters cparams);
|
||||
|
||||
/*! ZSTD_compress_advanced() :
|
||||
* Note : this function is now DEPRECATED.
|
||||
* It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_setParameter() and other parameter setters.
|
||||
* This prototype will generate compilation warnings. */
|
||||
ZSTD_DEPRECATED("use ZSTD_compress2")
|
||||
ZSTDLIB_STATIC_API
|
||||
size_t ZSTD_compress_advanced(ZSTD_CCtx* cctx,
|
||||
void* dst, size_t dstCapacity,
|
||||
const void* src, size_t srcSize,
|
||||
const void* dict,size_t dictSize,
|
||||
ZSTD_parameters params);
|
||||
void* dst, size_t dstCapacity,
|
||||
const void* src, size_t srcSize,
|
||||
const void* dict,size_t dictSize,
|
||||
ZSTD_parameters params);
|
||||
|
||||
/*! ZSTD_compress_usingCDict_advanced() :
|
||||
* Note : this function is now DEPRECATED.
|
||||
* It can be replaced by ZSTD_compress2(), in combination with ZSTD_CCtx_loadDictionary() and other parameter setters.
|
||||
* This prototype will generate compilation warnings. */
|
||||
ZSTD_DEPRECATED("use ZSTD_compress2 with ZSTD_CCtx_loadDictionary")
|
||||
ZSTDLIB_STATIC_API
|
||||
size_t ZSTD_compress_usingCDict_advanced(ZSTD_CCtx* cctx,
|
||||
void* dst, size_t dstCapacity,
|
||||
const void* src, size_t srcSize,
|
||||
|
@ -1829,13 +1970,16 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
|
|||
* Experimental parameter.
|
||||
* Default is 0 == disabled. Set to 1 to enable.
|
||||
*
|
||||
* Tells the compressor that the ZSTD_inBuffer will ALWAYS be the same
|
||||
* between calls, except for the modifications that zstd makes to pos (the
|
||||
* caller must not modify pos). This is checked by the compressor, and
|
||||
* compression will fail if it ever changes. This means the only flush
|
||||
* mode that makes sense is ZSTD_e_end, so zstd will error if ZSTD_e_end
|
||||
* is not used. The data in the ZSTD_inBuffer in the range [src, src + pos)
|
||||
* MUST not be modified during compression or you will get data corruption.
|
||||
* Tells the compressor that input data presented with ZSTD_inBuffer
|
||||
* will ALWAYS be the same between calls.
|
||||
* Technically, the @src pointer must never be changed,
|
||||
* and the @pos field can only be updated by zstd.
|
||||
* However, it's possible to increase the @size field,
|
||||
* allowing scenarios where more data can be appended after compressions starts.
|
||||
* These conditions are checked by the compressor,
|
||||
* and compression will fail if they are not respected.
|
||||
* Also, data in the ZSTD_inBuffer within the range [src, src + pos)
|
||||
* MUST not be modified during compression or it will result in data corruption.
|
||||
*
|
||||
* When this flag is enabled zstd won't allocate an input window buffer,
|
||||
* because the user guarantees it can reference the ZSTD_inBuffer until
|
||||
|
@ -1843,18 +1987,15 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
|
|||
* large enough to fit a block (see ZSTD_c_stableOutBuffer). This will also
|
||||
* avoid the memcpy() from the input buffer to the input window buffer.
|
||||
*
|
||||
* NOTE: ZSTD_compressStream2() will error if ZSTD_e_end is not used.
|
||||
* That means this flag cannot be used with ZSTD_compressStream().
|
||||
*
|
||||
* NOTE: So long as the ZSTD_inBuffer always points to valid memory, using
|
||||
* this flag is ALWAYS memory safe, and will never access out-of-bounds
|
||||
* memory. However, compression WILL fail if you violate the preconditions.
|
||||
* memory. However, compression WILL fail if conditions are not respected.
|
||||
*
|
||||
* WARNING: The data in the ZSTD_inBuffer in the range [dst, dst + pos) MUST
|
||||
* not be modified during compression or you will get data corruption. This
|
||||
* is because zstd needs to reference data in the ZSTD_inBuffer to find
|
||||
* WARNING: The data in the ZSTD_inBuffer in the range [src, src + pos) MUST
|
||||
* not be modified during compression or it will result in data corruption.
|
||||
* This is because zstd needs to reference data in the ZSTD_inBuffer to find
|
||||
* matches. Normally zstd maintains its own window buffer for this purpose,
|
||||
* but passing this flag tells zstd to use the user provided buffer.
|
||||
* but passing this flag tells zstd to rely on user provided buffer instead.
|
||||
*/
|
||||
#define ZSTD_c_stableInBuffer ZSTD_c_experimentalParam9
|
||||
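A hypothetical single-shot sketch of this mode (experimental, static-linking-only; helper name illustrative). The whole input is presented once and must stay untouched, ZSTD_e_end is the only legal directive, and dstCapacity is assumed to be at least ZSTD_compressBound(srcSize):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Compress src with a caller-owned, stable input buffer (no internal input copy). */
    static size_t compress_stable_input(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity,
                                        const void* src, size_t srcSize)
    {
        ZSTD_inBuffer  input  = { src, srcSize, 0 };
        ZSTD_outBuffer output = { dst, dstCapacity, 0 };
        size_t ret;
        ZSTD_CCtx_reset(cctx, ZSTD_reset_session_only);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_stableInBuffer, 1);
        do {
            ret = ZSTD_compressStream2(cctx, &output, &input, ZSTD_e_end); /* ZSTD_e_end required */
            if (ZSTD_isError(ret)) return ret;
        } while (ret != 0);                 /* ret > 0 : frame not fully flushed yet */
        return output.pos;                  /* compressed size */
    }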
|
||||
|
@ -1899,7 +2040,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
|
|||
* Without validation, providing a sequence that does not conform to the zstd spec will cause
|
||||
* undefined behavior, and may produce a corrupted block.
|
||||
*
|
||||
* With validation enabled, a if sequence is invalid (see doc/zstd_compression_format.md for
|
||||
* With validation enabled, if sequence is invalid (see doc/zstd_compression_format.md for
|
||||
* specifics regarding offset/matchlength requirements) then the function will bail out and
|
||||
* return an error.
|
||||
*
|
||||
|
@ -1949,6 +2090,79 @@ ZSTDLIB_STATIC_API size_t ZSTD_CCtx_refPrefix_advanced(ZSTD_CCtx* cctx, const vo
|
|||
*/
|
||||
#define ZSTD_c_deterministicRefPrefix ZSTD_c_experimentalParam15
|
||||
|
||||
/* ZSTD_c_prefetchCDictTables
|
||||
* Controlled with ZSTD_paramSwitch_e enum. Default is ZSTD_ps_auto.
|
||||
*
|
||||
* In some situations, zstd uses CDict tables in-place rather than copying them
|
||||
* into the working context. (See docs on ZSTD_dictAttachPref_e above for details).
|
||||
* In such situations, compression speed is seriously impacted when CDict tables are
|
||||
* "cold" (outside CPU cache). This parameter instructs zstd to prefetch CDict tables
|
||||
* when they are used in-place.
|
||||
*
|
||||
* For sufficiently small inputs, the cost of the prefetch will outweigh the benefit.
|
||||
* For sufficiently large inputs, zstd will by default memcpy() CDict tables
|
||||
* into the working context, so there is no need to prefetch. This parameter is
|
||||
* targeted at a middle range of input sizes, where a prefetch is cheap enough to be
|
||||
* useful but memcpy() is too expensive. The exact range of input sizes where this
|
||||
* makes sense is best determined by careful experimentation.
|
||||
*
|
||||
* Note: for this parameter, ZSTD_ps_auto is currently equivalent to ZSTD_ps_disable,
|
||||
* but in the future zstd may conditionally enable this feature via an auto-detection
|
||||
* heuristic for cold CDicts.
|
||||
* Use ZSTD_ps_disable to opt out of prefetching under any circumstances.
|
||||
*/
|
||||
#define ZSTD_c_prefetchCDictTables ZSTD_c_experimentalParam16
|
||||
|
||||
/* ZSTD_c_enableSeqProducerFallback
|
||||
* Allowed values are 0 (disable) and 1 (enable). The default setting is 0.
|
||||
*
|
||||
* Controls whether zstd will fall back to an internal sequence producer if an
|
||||
* external sequence producer is registered and returns an error code. This fallback
|
||||
* is block-by-block: the internal sequence producer will only be called for blocks
|
||||
* where the external sequence producer returns an error code. Fallback parsing will
|
||||
* follow any other cParam settings, such as compression level, the same as in a
|
||||
* normal (fully-internal) compression operation.
|
||||
*
|
||||
* The user is strongly encouraged to read the full Block-Level Sequence Producer API
|
||||
* documentation (below) before setting this parameter. */
|
||||
#define ZSTD_c_enableSeqProducerFallback ZSTD_c_experimentalParam17
|
||||
|
||||
/* ZSTD_c_maxBlockSize
|
||||
* Allowed values are between 1KB and ZSTD_BLOCKSIZE_MAX (128KB).
|
||||
* The default is ZSTD_BLOCKSIZE_MAX, and setting to 0 will set to the default.
|
||||
*
|
||||
* This parameter can be used to set an upper bound on the blocksize
|
||||
* that overrides the default ZSTD_BLOCKSIZE_MAX. It cannot be used to set upper
|
||||
* bounds greater than ZSTD_BLOCKSIZE_MAX or bounds lower than 1KB (will make
|
||||
* compressBound() inaccurate). Only currently meant to be used for testing.
|
||||
*
|
||||
*/
|
||||
#define ZSTD_c_maxBlockSize ZSTD_c_experimentalParam18
|
||||
|
||||
/* ZSTD_c_searchForExternalRepcodes
|
||||
* This parameter affects how zstd parses external sequences, such as sequences
|
||||
* provided through the compressSequences() API or from an external block-level
|
||||
* sequence producer.
|
||||
*
|
||||
* If set to ZSTD_ps_enable, the library will check for repeated offsets in
|
||||
* external sequences, even if those repcodes are not explicitly indicated in
|
||||
* the "rep" field. Note that this is the only way to exploit repcode matches
|
||||
* while using compressSequences() or an external sequence producer, since zstd
|
||||
* currently ignores the "rep" field of external sequences.
|
||||
*
|
||||
* If set to ZSTD_ps_disable, the library will not exploit repeated offsets in
|
||||
* external sequences, regardless of whether the "rep" field has been set. This
|
||||
* reduces sequence compression overhead by about 25% while sacrificing some
|
||||
* compression ratio.
|
||||
*
|
||||
* The default value is ZSTD_ps_auto, for which the library will enable/disable
|
||||
* based on compression level.
|
||||
*
|
||||
* Note: for now, this param only has an effect if ZSTD_c_blockDelimiters is
|
||||
* set to ZSTD_sf_explicitBlockDelimiters. That may change in the future.
|
||||
*/
|
||||
#define ZSTD_c_searchForExternalRepcodes ZSTD_c_experimentalParam19
|
||||
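A small configuration sketch tying these switches together (static-linking-only; the chosen values are purely illustrative and should be validated by measurement):

    #define ZSTD_STATIC_LINKING_ONLY
    #include <zstd.h>

    /* Apply the experimental tuning knobs documented above to a compression context. */
    static void tune_cctx(ZSTD_CCtx* cctx)
    {
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, 3);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_prefetchCDictTables, ZSTD_ps_enable);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableSeqProducerFallback, 1);
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_maxBlockSize, 64 << 10);   /* 64 KB, testing only */
        ZSTD_CCtx_setParameter(cctx, ZSTD_c_searchForExternalRepcodes, ZSTD_ps_disable);
    }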
|
||||
/*! ZSTD_CCtx_getParameter() :
|
||||
* Get the requested compression parameter value, selected by enum ZSTD_cParameter,
|
||||
* and store it into int* value.
|
||||
|
@ -2105,7 +2319,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete
|
|||
* in the range [dst, dst + pos) MUST not be modified during decompression
|
||||
* or you will get data corruption.
|
||||
*
|
||||
* When this flags is enabled zstd won't allocate an output buffer, because
|
||||
* When this flag is enabled zstd won't allocate an output buffer, because
|
||||
* it can write directly to the ZSTD_outBuffer, but it will still allocate
|
||||
* an input buffer large enough to fit any compressed block. This will also
|
||||
* avoid the memcpy() from the internal output buffer to the ZSTD_outBuffer.
|
||||
|
@ -2158,6 +2372,17 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete
|
|||
*/
|
||||
#define ZSTD_d_refMultipleDDicts ZSTD_d_experimentalParam4
|
||||
|
||||
/* ZSTD_d_disableHuffmanAssembly
|
||||
* Set to 1 to disable the Huffman assembly implementation.
|
||||
* The default value is 0, which allows zstd to use the Huffman assembly
|
||||
* implementation if available.
|
||||
*
|
||||
* This parameter can be used to disable Huffman assembly at runtime.
|
||||
* If you want to disable it at compile time you can define the macro
|
||||
* ZSTD_DISABLE_ASM.
|
||||
*/
|
||||
#define ZSTD_d_disableHuffmanAssembly ZSTD_d_experimentalParam5
|
||||
|
||||
|
||||
/*! ZSTD_DCtx_setFormat() :
|
||||
* This function is REDUNDANT. Prefer ZSTD_DCtx_setParameter().
|
||||
|
@ -2166,6 +2391,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_DCtx_getParameter(ZSTD_DCtx* dctx, ZSTD_dParamete
|
|||
* such ZSTD_f_zstd1_magicless for example.
|
||||
* @return : 0, or an error code (which can be tested using ZSTD_isError()). */
|
||||
ZSTD_DEPRECATED("use ZSTD_DCtx_setParameter() instead")
|
||||
ZSTDLIB_STATIC_API
|
||||
size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format);
|
||||
|
||||
/*! ZSTD_decompressStream_simpleArgs() :
|
||||
|
@ -2202,6 +2428,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_decompressStream_simpleArgs (
|
|||
* This prototype will generate compilation warnings.
|
||||
*/
|
||||
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
|
||||
ZSTDLIB_STATIC_API
|
||||
size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
|
||||
int compressionLevel,
|
||||
unsigned long long pledgedSrcSize);
|
||||
|
@ -2219,6 +2446,7 @@ size_t ZSTD_initCStream_srcSize(ZSTD_CStream* zcs,
|
|||
* This prototype will generate compilation warnings.
|
||||
*/
|
||||
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
|
||||
ZSTDLIB_STATIC_API
|
||||
size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
|
||||
const void* dict, size_t dictSize,
|
||||
int compressionLevel);
|
||||
|
@ -2239,6 +2467,7 @@ size_t ZSTD_initCStream_usingDict(ZSTD_CStream* zcs,
|
|||
* This prototype will generate compilation warnings.
|
||||
*/
|
||||
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
|
||||
ZSTDLIB_STATIC_API
|
||||
size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
|
||||
const void* dict, size_t dictSize,
|
||||
ZSTD_parameters params,
|
||||
|
@ -2253,6 +2482,7 @@ size_t ZSTD_initCStream_advanced(ZSTD_CStream* zcs,
|
|||
* This prototype will generate compilation warnings.
|
||||
*/
|
||||
ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
|
||||
ZSTDLIB_STATIC_API
|
||||
size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
|
||||
|
||||
/*! ZSTD_initCStream_usingCDict_advanced() :
|
||||
|
@ -2271,6 +2501,7 @@ size_t ZSTD_initCStream_usingCDict(ZSTD_CStream* zcs, const ZSTD_CDict* cdict);
|
|||
* This prototype will generate compilation warnings.
|
||||
*/
|
||||
ZSTD_DEPRECATED("use ZSTD_CCtx_reset and ZSTD_CCtx_refCDict, see zstd.h for detailed instructions")
|
||||
ZSTDLIB_STATIC_API
|
||||
size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
|
||||
const ZSTD_CDict* cdict,
|
||||
ZSTD_frameParameters fParams,
|
||||
|
@ -2295,6 +2526,7 @@ size_t ZSTD_initCStream_usingCDict_advanced(ZSTD_CStream* zcs,
|
|||
* This prototype will generate compilation warnings.
|
||||
*/
|
||||
ZSTD_DEPRECATED("use ZSTD_CCtx_reset, see zstd.h for detailed instructions")
|
||||
ZSTDLIB_STATIC_API
|
||||
size_t ZSTD_resetCStream(ZSTD_CStream* zcs, unsigned long long pledgedSrcSize);
|
||||
|
||||
|
||||
|
@ -2340,8 +2572,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_toFlushNow(ZSTD_CCtx* cctx);
|
|||
* ZSTD_DCtx_loadDictionary(zds, dict, dictSize);
|
||||
*
|
||||
* note: no dictionary will be used if dict == NULL or dictSize < 8
|
||||
* Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
|
||||
*/
|
||||
ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_loadDictionary, see zstd.h for detailed instructions")
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize);
|
||||
|
||||
/*!
|
||||
|
@ -2351,8 +2583,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const vo
|
|||
* ZSTD_DCtx_refDDict(zds, ddict);
|
||||
*
|
||||
* note : ddict is referenced, it must outlive decompression session
|
||||
* Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
|
||||
*/
|
||||
ZSTD_DEPRECATED("use ZSTD_DCtx_reset + ZSTD_DCtx_refDDict, see zstd.h for detailed instructions")
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const ZSTD_DDict* ddict);
|
||||
|
||||
/*!
|
||||
|
@ -2361,8 +2593,8 @@ ZSTDLIB_STATIC_API size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* zds, const Z
|
|||
* ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);
|
||||
*
|
||||
* re-use decompression parameters from previous init; saves dictionary loading
|
||||
* Note : this prototype will be marked as deprecated and generate compilation warnings on reaching v1.5.x
|
||||
*/
|
||||
ZSTD_DEPRECATED("use ZSTD_DCtx_reset, see zstd.h for detailed instructions")
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
|
||||
|
||||
|
||||
|
@ -2383,7 +2615,6 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
|
|||
|
||||
Start by initializing a context.
|
||||
Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.
|
||||
It's also possible to duplicate a reference context which has already been initialized, using ZSTD_copyCCtx()
|
||||
|
||||
Then, consume your input using ZSTD_compressContinue().
|
||||
There are some important considerations to keep in mind when using this advanced function :
|
||||
|
@ -2408,15 +2639,20 @@ ZSTDLIB_STATIC_API size_t ZSTD_resetDStream(ZSTD_DStream* zds);
|
|||
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin(ZSTD_CCtx* cctx, int compressionLevel);
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, int compressionLevel);
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx* cctx, const ZSTD_CDict* cdict); /**< note: fails if cdict==NULL */
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
|
||||
|
||||
ZSTD_DEPRECATED("This function will likely be removed in a future release. It is misleading and has very limited utility.")
|
||||
ZSTDLIB_STATIC_API
|
||||
size_t ZSTD_copyCCtx(ZSTD_CCtx* cctx, const ZSTD_CCtx* preparedCCtx, unsigned long long pledgedSrcSize); /**< note: if pledgedSrcSize is not known, use ZSTD_CONTENTSIZE_UNKNOWN */
|
||||
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_compressContinue(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
|
||||
ZSTDLIB_STATIC_API size_t ZSTD_compressEnd(ZSTD_CCtx* cctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
|
||||
|
||||
/* The ZSTD_compressBegin_advanced() and ZSTD_compressBegin_usingCDict_advanced() are now DEPRECATED and will generate a compiler warning */
|
||||
ZSTD_DEPRECATED("use advanced API to access custom parameters")
|
||||
ZSTDLIB_STATIC_API
|
||||
size_t ZSTD_compressBegin_advanced(ZSTD_CCtx* cctx, const void* dict, size_t dictSize, ZSTD_parameters params, unsigned long long pledgedSrcSize); /**< pledgedSrcSize : If srcSize is not known at init time, use ZSTD_CONTENTSIZE_UNKNOWN */
|
||||
ZSTD_DEPRECATED("use advanced API to access custom parameters")
|
||||
ZSTDLIB_STATIC_API
|
||||
size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_CDict* const cdict, ZSTD_frameParameters const fParams, unsigned long long const pledgedSrcSize); /* compression parameters are already set within cdict. pledgedSrcSize must be correct. If srcSize is not known, use macro ZSTD_CONTENTSIZE_UNKNOWN */
|
||||
/**
|
||||
Buffer-less streaming decompression (synchronous mode)
|
||||
|
@ -2429,8 +2665,8 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
|
|||
Frame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.
|
||||
Data fragment must be large enough to ensure successful decoding.
|
||||
`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.
|
||||
@result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
|
||||
>0 : `srcSize` is too small, please provide at least @result bytes on next attempt.
|
||||
result : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.
|
||||
>0 : `srcSize` is too small, please provide at least result bytes on next attempt.
|
||||
errorCode, which can be tested using ZSTD_isError().
|
||||
|
||||
It fills a ZSTD_frameHeader structure with important information to correctly decode the frame,
|
||||
|
@ -2449,7 +2685,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
|
|||
|
||||
The most memory efficient way is to use a round buffer of sufficient size.
|
||||
Sufficient size is determined by invoking ZSTD_decodingBufferSize_min(),
|
||||
which can @return an error code if required value is too large for current system (in 32-bits mode).
|
||||
which can return an error code if required value is too large for current system (in 32-bits mode).
|
||||
In a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,
|
||||
up to the moment there is not enough room left in the buffer to guarantee decoding another full block,
|
||||
which maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.
|
||||
|
@ -2469,7 +2705,7 @@ size_t ZSTD_compressBegin_usingCDict_advanced(ZSTD_CCtx* const cctx, const ZSTD_
|
|||
ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().
|
||||
ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.
|
||||
|
||||
@result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
|
||||
result of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).
|
||||
It can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.
|
||||
It can also be an error code, which can be tested with ZSTD_isError().
|
||||
|
||||
|
@ -2501,6 +2737,8 @@ typedef struct {
|
|||
unsigned headerSize;
|
||||
unsigned dictID;
|
||||
unsigned checksumFlag;
|
||||
unsigned _reserved1;
|
||||
unsigned _reserved2;
|
||||
} ZSTD_frameHeader;
|
||||
|
||||
/*! ZSTD_getFrameHeader() :
|
||||
|
@ -2523,6 +2761,7 @@ ZSTDLIB_STATIC_API size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx);
|
|||
ZSTDLIB_STATIC_API size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize);
|
||||
|
||||
/* misc */
|
||||
ZSTD_DEPRECATED("This function will likely be removed in the next minor release. It is misleading and has very limited utility.")
|
||||
ZSTDLIB_STATIC_API void ZSTD_copyDCtx(ZSTD_DCtx* dctx, const ZSTD_DCtx* preparedDCtx);
|
||||
typedef enum { ZSTDnit_frameHeader, ZSTDnit_blockHeader, ZSTDnit_block, ZSTDnit_lastBlock, ZSTDnit_checksum, ZSTDnit_skippableFrame } ZSTD_nextInputType_e;
|
||||
ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
|
||||
|
@ -2545,7 +2784,6 @@ ZSTDLIB_STATIC_API ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx);
|
|||
- It is necessary to init context before starting
|
||||
+ compression : any ZSTD_compressBegin*() variant, including with dictionary
|
||||
+ decompression : any ZSTD_decompressBegin*() variant, including with dictionary
|
||||
+ copyCCtx() and copyDCtx() can be used too
|
||||
- Block size is limited, it must be <= ZSTD_getBlockSize() <= ZSTD_BLOCKSIZE_MAX == 128 KB
|
||||
+ If input is larger than a block size, it's necessary to split input data into multiple blocks
|
||||
+ For inputs larger than a single block, consider using regular ZSTD_compress() instead.
|
||||
|
@ -2568,6 +2806,167 @@ ZSTDLIB_STATIC_API size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx, void* dst, size_
|
|||
ZSTDLIB_STATIC_API size_t ZSTD_insertBlock (ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize); /**< insert uncompressed block into `dctx` history. Useful for multi-blocks decompression. */
|
||||
|
||||
|
||||
/* ********************* BLOCK-LEVEL SEQUENCE PRODUCER API *********************
|
||||
*
|
||||
* *** OVERVIEW ***
|
||||
* The Block-Level Sequence Producer API allows users to provide their own custom
|
||||
* sequence producer which libzstd invokes to process each block. The produced list
|
||||
* of sequences (literals and matches) is then post-processed by libzstd to produce
|
||||
* valid compressed blocks.
|
||||
*
|
||||
* This block-level offload API is a more granular complement of the existing
|
||||
* frame-level offload API compressSequences() (introduced in v1.5.1). It offers
|
||||
* an easier migration story for applications already integrated with libzstd: the
|
||||
* user application continues to invoke the same compression functions
|
||||
* ZSTD_compress2() or ZSTD_compressStream2() as usual, and transparently benefits
|
||||
* from the specific advantages of the external sequence producer. For example,
|
||||
* the sequence producer could be tuned to take advantage of known characteristics
|
||||
* of the input, to offer better speed / ratio, or could leverage hardware
|
||||
* acceleration not available within libzstd itself.
|
||||
*
|
||||
* See contrib/externalSequenceProducer for an example program employing the
|
||||
* Block-Level Sequence Producer API.
|
||||
*
|
||||
* *** USAGE ***
|
||||
* The user is responsible for implementing a function of type
|
||||
* ZSTD_sequenceProducer_F. For each block, zstd will pass the following
|
||||
* arguments to the user-provided function:
|
||||
*
|
||||
* - sequenceProducerState: a pointer to a user-managed state for the sequence
|
||||
* producer.
|
||||
*
|
||||
* - outSeqs, outSeqsCapacity: an output buffer for the sequence producer.
|
||||
* outSeqsCapacity is guaranteed >= ZSTD_sequenceBound(srcSize). The memory
|
||||
* backing outSeqs is managed by the CCtx.
|
||||
*
|
||||
* - src, srcSize: an input buffer for the sequence producer to parse.
|
||||
* srcSize is guaranteed to be <= ZSTD_BLOCKSIZE_MAX.
|
||||
*
|
||||
* - dict, dictSize: a history buffer, which may be empty, which the sequence
|
||||
* producer may reference as it parses the src buffer. Currently, zstd will
|
||||
* always pass dictSize == 0 into external sequence producers, but this will
|
||||
* change in the future.
|
||||
*
|
||||
* - compressionLevel: a signed integer representing the zstd compression level
|
||||
* set by the user for the current operation. The sequence producer may choose
|
||||
 * to use this information to change its compression strategy and speed/ratio
 * tradeoff. Note: the compression level does not reflect zstd parameters set
 * through the advanced API.
 *
 * - windowSize: a size_t representing the maximum allowed offset for external
 *   sequences. Note that sequence offsets are sometimes allowed to exceed the
 *   windowSize if a dictionary is present, see doc/zstd_compression_format.md
 *   for details.
 *
 * The user-provided function shall return a size_t representing the number of
 * sequences written to outSeqs. This return value will be treated as an error
 * code if it is greater than outSeqsCapacity. The return value must be non-zero
 * if srcSize is non-zero. The ZSTD_SEQUENCE_PRODUCER_ERROR macro is provided
 * for convenience, but any value greater than outSeqsCapacity will be treated as
 * an error code.
 *
 * If the user-provided function does not return an error code, the sequences
 * written to outSeqs must be a valid parse of the src buffer. Data corruption may
 * occur if the parse is not valid. A parse is defined to be valid if the
 * following conditions hold:
 *   - The sum of matchLengths and literalLengths must equal srcSize.
 *   - All sequences in the parse, except for the final sequence, must have
 *     matchLength >= ZSTD_MINMATCH_MIN. The final sequence must have
 *     matchLength >= ZSTD_MINMATCH_MIN or matchLength == 0.
 *   - All offsets must respect the windowSize parameter as specified in
 *     doc/zstd_compression_format.md.
 *   - If the final sequence has matchLength == 0, it must also have offset == 0.
 *
 * zstd will only validate these conditions (and fail compression if they do not
 * hold) if the ZSTD_c_validateSequences cParam is enabled. Note that sequence
 * validation has a performance cost.
 *
 * If the user-provided function returns an error, zstd will either fall back
 * to an internal sequence producer or fail the compression operation. The user can
 * choose between the two behaviors by setting the ZSTD_c_enableSeqProducerFallback
 * cParam. Fallback compression will follow any other cParam settings, such as
 * compression level, the same as in a normal compression operation.
 *
 * The user shall instruct zstd to use a particular ZSTD_sequenceProducer_F
 * function by calling
 *         ZSTD_registerSequenceProducer(cctx,
 *                                       sequenceProducerState,
 *                                       sequenceProducer)
 * This setting will persist until the next parameter reset of the CCtx.
 *
 * The sequenceProducerState must be initialized by the user before calling
 * ZSTD_registerSequenceProducer(). The user is responsible for destroying the
 * sequenceProducerState.
 *
 * *** LIMITATIONS ***
 * This API is compatible with all zstd compression APIs which respect advanced parameters.
 * However, there are three limitations:
 *
 * First, the ZSTD_c_enableLongDistanceMatching cParam is not currently supported.
 * COMPRESSION WILL FAIL if it is enabled and the user tries to compress with a block-level
 * external sequence producer.
 *   - Note that ZSTD_c_enableLongDistanceMatching is auto-enabled by default in some
 *     cases (see its documentation for details). Users must explicitly set
 *     ZSTD_c_enableLongDistanceMatching to ZSTD_ps_disable in such cases if an external
 *     sequence producer is registered.
 *   - As of this writing, ZSTD_c_enableLongDistanceMatching is disabled by default
 *     whenever ZSTD_c_windowLog < 128MB, but that's subject to change. Users should
 *     check the docs on ZSTD_c_enableLongDistanceMatching whenever the Block-Level Sequence
 *     Producer API is used in conjunction with advanced settings (like ZSTD_c_windowLog).
 *
 * Second, history buffers are not currently supported. Concretely, zstd will always pass
 * dictSize == 0 to the external sequence producer (for now). This has two implications:
 *   - Dictionaries are not currently supported. Compression will *not* fail if the user
 *     references a dictionary, but the dictionary won't have any effect.
 *   - Stream history is not currently supported. All advanced compression APIs, including
 *     streaming APIs, work with external sequence producers, but each block is treated as
 *     an independent chunk without history from previous blocks.
 *
 * Third, multi-threading within a single compression is not currently supported. In other words,
 * COMPRESSION WILL FAIL if ZSTD_c_nbWorkers > 0 and an external sequence producer is registered.
 * Multi-threading across compressions is fine: simply create one CCtx per thread.
 *
 * Long-term, we plan to overcome all three limitations. There is no technical blocker to
 * overcoming them. It is purely a question of engineering effort.
 */

#define ZSTD_SEQUENCE_PRODUCER_ERROR ((size_t)(-1))

typedef size_t ZSTD_sequenceProducer_F (
  void* sequenceProducerState,
  ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
  const void* src, size_t srcSize,
  const void* dict, size_t dictSize,
  int compressionLevel,
  size_t windowSize
);
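Editorial note, not part of the vendored diff: a minimal, hypothetical sketch of a producer satisfying the contract above by emitting a single literal-only sequence for the whole block. It assumes the ZSTD_Sequence fields (offset, litLength, matchLength, rep) declared elsewhere in zstd.h; a real producer would of course emit actual matches.

/* Hypothetical sketch: one literal-only sequence is a valid parse per the
 * conditions above (lengths sum to srcSize; final sequence has
 * matchLength == 0 and therefore offset == 0). */
#include <limits.h>   /* UINT_MAX */

static size_t literalOnlyProducer(void* sequenceProducerState,
                                  ZSTD_Sequence* outSeqs, size_t outSeqsCapacity,
                                  const void* src, size_t srcSize,
                                  const void* dict, size_t dictSize,
                                  int compressionLevel, size_t windowSize)
{
    (void)sequenceProducerState; (void)src; (void)dict; (void)dictSize;
    (void)compressionLevel; (void)windowSize;
    if (srcSize == 0) return 0;                       /* nothing to produce */
    if (outSeqsCapacity < 1 || srcSize > UINT_MAX)
        return ZSTD_SEQUENCE_PRODUCER_ERROR;          /* report failure; see fallback below */
    outSeqs[0].litLength   = (unsigned)srcSize;       /* whole block as literals */
    outSeqs[0].matchLength = 0;                       /* final sequence: no match ... */
    outSeqs[0].offset      = 0;                       /* ... so offset must be 0 */
    outSeqs[0].rep         = 0;
    return 1;                                         /* one sequence written to outSeqs */
}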
/*! ZSTD_registerSequenceProducer() :
 * Instruct zstd to use a block-level external sequence producer function.
 *
 * The sequenceProducerState must be initialized by the caller, and the caller is
 * responsible for managing its lifetime. This parameter is sticky across
 * compressions. It will remain set until the user explicitly resets compression
 * parameters.
 *
 * Sequence producer registration is considered to be an "advanced parameter",
 * part of the "advanced API". This means it will only have an effect on compression
 * APIs which respect advanced parameters, such as compress2() and compressStream2().
 * Older compression APIs such as compressCCtx(), which predate the introduction of
 * "advanced parameters", will ignore any external sequence producer setting.
 *
 * The sequence producer can be "cleared" by registering a NULL function pointer. This
 * removes all limitations described above in the "LIMITATIONS" section of the API docs.
 *
 * The user is strongly encouraged to read the full API documentation (above) before
 * calling this function. */
ZSTDLIB_STATIC_API void
ZSTD_registerSequenceProducer(
  ZSTD_CCtx* cctx,
  void* sequenceProducerState,
  ZSTD_sequenceProducer_F* sequenceProducer
);

#endif /* ZSTD_H_ZSTD_STATIC_LINKING_ONLY */

#if defined (__cplusplus)
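Again an editorial sketch rather than part of the diff: registering the hypothetical producer from the previous sketch on a compression context. This assumes the standard zstd context APIs (ZSTD_createCCtx, ZSTD_CCtx_setParameter, ZSTD_compress2, ZSTD_freeCCtx) and uses the ZSTD_c_enableSeqProducerFallback parameter described above, so zstd falls back to its internal producer whenever the external one reports an error.

static size_t compressWithExternalProducer(void* dst, size_t dstCapacity,
                                           const void* src, size_t srcSize)
{
    ZSTD_CCtx* cctx = ZSTD_createCCtx();
    /* No producer state is needed for the stateless sketch above, so pass NULL. */
    ZSTD_registerSequenceProducer(cctx, NULL, literalOnlyProducer);
    /* Fall back to the internal producer if the external one reports an error. */
    ZSTD_CCtx_setParameter(cctx, ZSTD_c_enableSeqProducerFallback, 1);
    size_t const ret = ZSTD_compress2(cctx, dst, dstCapacity, src, srcSize);
    ZSTD_freeCCtx(cctx);
    return ret;   /* callers should check the result with ZSTD_isError() */
}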

35
vendor/github.com/valyala/gozstd/zstd_errors.h
generated
vendored

@ -1,5 +1,5 @@
/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the

@ -20,19 +20,31 @@ extern "C" {

/* ===== ZSTDERRORLIB_API : control library symbols visibility ===== */
#ifndef ZSTDERRORLIB_VISIBILITY
#  if defined(__GNUC__) && (__GNUC__ >= 4)
#    define ZSTDERRORLIB_VISIBILITY __attribute__ ((visibility ("default")))
#ifndef ZSTDERRORLIB_VISIBLE
   /* Backwards compatibility with old macro name */
#  ifdef ZSTDERRORLIB_VISIBILITY
#    define ZSTDERRORLIB_VISIBLE ZSTDERRORLIB_VISIBILITY
#  elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
#    define ZSTDERRORLIB_VISIBLE __attribute__ ((visibility ("default")))
#  else
#    define ZSTDERRORLIB_VISIBILITY
#    define ZSTDERRORLIB_VISIBLE
#  endif
#endif

#ifndef ZSTDERRORLIB_HIDDEN
#  if defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__MINGW32__)
#    define ZSTDERRORLIB_HIDDEN __attribute__ ((visibility ("hidden")))
#  else
#    define ZSTDERRORLIB_HIDDEN
#  endif
#endif

#if defined(ZSTD_DLL_EXPORT) && (ZSTD_DLL_EXPORT==1)
#  define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBILITY
#  define ZSTDERRORLIB_API __declspec(dllexport) ZSTDERRORLIB_VISIBLE
#elif defined(ZSTD_DLL_IMPORT) && (ZSTD_DLL_IMPORT==1)
#  define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
#  define ZSTDERRORLIB_API __declspec(dllimport) ZSTDERRORLIB_VISIBLE /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
#else
#  define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBILITY
#  define ZSTDERRORLIB_API ZSTDERRORLIB_VISIBLE
#endif

/*-*********************************************

@ -58,14 +70,17 @@ typedef enum {
  ZSTD_error_frameParameter_windowTooLarge = 16,
  ZSTD_error_corruption_detected = 20,
  ZSTD_error_checksum_wrong = 22,
  ZSTD_error_literals_headerWrong = 24,
  ZSTD_error_dictionary_corrupted = 30,
  ZSTD_error_dictionary_wrong = 32,
  ZSTD_error_dictionaryCreation_failed = 34,
  ZSTD_error_parameter_unsupported = 40,
  ZSTD_error_parameter_combination_unsupported = 41,
  ZSTD_error_parameter_outOfBound = 42,
  ZSTD_error_tableLog_tooLarge = 44,
  ZSTD_error_maxSymbolValue_tooLarge = 46,
  ZSTD_error_maxSymbolValue_tooSmall = 48,
  ZSTD_error_stabilityCondition_notRespected = 50,
  ZSTD_error_stage_wrong = 60,
  ZSTD_error_init_missing = 62,
  ZSTD_error_memory_allocation = 64,

@ -73,11 +88,15 @@ typedef enum {
  ZSTD_error_dstSize_tooSmall = 70,
  ZSTD_error_srcSize_wrong = 72,
  ZSTD_error_dstBuffer_null = 74,
  ZSTD_error_noForwardProgress_destFull = 80,
  ZSTD_error_noForwardProgress_inputEmpty = 82,
  /* following error codes are __NOT STABLE__, they can be removed or changed in future versions */
  ZSTD_error_frameIndex_tooLarge = 100,
  ZSTD_error_seekableIO = 102,
  ZSTD_error_dstBuffer_wrong = 104,
  ZSTD_error_srcBuffer_wrong = 105,
  ZSTD_error_sequenceProducer_failed = 106,
  ZSTD_error_externalSequences_invalid = 107,
  ZSTD_error_maxCode = 120 /* never EVER use this value directly, it can change in future versions! Use ZSTD_isError() instead */
} ZSTD_ErrorCode;

10
vendor/go.uber.org/goleak/CHANGELOG.md
generated
vendored

@ -4,6 +4,12 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).

## [1.2.1]
### Changed
- Drop golang/x/lint dependency.

[1.2.1]: https://github.com/uber-go/goleak/compare/v1.2.0...v1.2.1

## [1.2.0]
### Added
- Add Cleanup option that can be used for registering cleanup callbacks. (#78)

@ -47,5 +53,7 @@ Thanks to @denis-tingajkin for their contributions to this release.

[1.0.0]: https://github.com/uber-go/goleak/compare/v0.10.0...v1.0.0

## 0.10.0
## [0.10.0]
- Initial release.

[0.10.0]: https://github.com/uber-go/goleak/compare/v0.10.0...HEAD

10
vendor/go.uber.org/goleak/Makefile
generated
vendored

@ -1,6 +1,6 @@
export GOBIN ?= $(shell pwd)/bin

GOLINT = $(GOBIN)/golint
REVIVE = $(GOBIN)/revive

GO_FILES := $(shell \
	find . '(' -path '*/.*' -o -path './vendor' ')' -prune \

@ -24,18 +24,18 @@ cover:
	go test -race -coverprofile=cover.out -coverpkg=./... ./...
	go tool cover -html=cover.out -o cover.html

$(GOLINT):
	go install golang.org/x/lint/golint
$(REVIVE):
	cd tools && go install github.com/mgechev/revive

.PHONY: lint
lint: $(GOLINT)
lint: $(REVIVE)
	@rm -rf lint.log
	@echo "Checking formatting..."
	@gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
	@echo "Checking vet..."
	@go vet ./... 2>&1 | tee -a lint.log
	@echo "Checking lint..."
	@$(GOLINT) ./... 2>&1 | tee -a lint.log
	@$(REVIVE) -set_exit_status ./... 2>&1 | tee -a lint.log
	@echo "Checking for unresolved FIXMEs..."
	@git grep -i fixme | grep -v -e '^vendor/' -e '^Makefile' | tee -a lint.log
	@[ ! -s lint.log ]

5
vendor/go.uber.org/goleak/README.md
generated
vendored

@ -8,7 +8,9 @@ You can use `go get` to get the latest version:

`go get -u go.uber.org/goleak`

`goleak` also supports semver releases. It is compatible with Go 1.5+.
`goleak` also supports semver releases.

Note that go-leak only [supports][release] the two most recent minor versions of Go.

## Quick Start

@ -69,3 +71,4 @@ No breaking changes will be made to exported APIs before 2.0.
[ci]: https://github.com/uber-go/goleak/actions/workflows/go.yml
[cov-img]: https://codecov.io/gh/uber-go/goleak/branch/master/graph/badge.svg
[cov]: https://codecov.io/gh/uber-go/goleak
[release]: https://go.dev/doc/devel/release#policy

22
vendor/go.uber.org/goleak/internal/stack/doc.go
generated
vendored
Normal file

@ -0,0 +1,22 @@
// Copyright (c) 2017-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

// Package stack is used for parsing stacks from `runtime.Stack`.
package stack

11
vendor/golang.org/x/net/http2/frame.go
generated
vendored

@ -662,6 +662,15 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
// It is the caller's responsibility not to violate the maximum frame size
// and to not call other Write methods concurrently.
func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
	if err := f.startWriteDataPadded(streamID, endStream, data, pad); err != nil {
		return err
	}
	return f.endWrite()
}

// startWriteDataPadded is WriteDataPadded, but only writes the frame to the Framer's internal buffer.
// The caller should call endWrite to flush the frame to the underlying writer.
func (f *Framer) startWriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
	if !validStreamID(streamID) && !f.AllowIllegalWrites {
		return errStreamID
	}

@ -691,7 +700,7 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by
	}
	f.wbuf = append(f.wbuf, data...)
	f.wbuf = append(f.wbuf, pad...)
	return f.endWrite()
	return nil
}

// A SettingsFrame conveys configuration parameters that affect how

85
vendor/golang.org/x/net/http2/hpack/hpack.go
generated
vendored

@ -359,6 +359,7 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {

	var hf HeaderField
	wantStr := d.emitEnabled || it.indexed()
	var undecodedName undecodedString
	if nameIdx > 0 {
		ihf, ok := d.at(nameIdx)
		if !ok {

@ -366,15 +367,27 @@ func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
		}
		hf.Name = ihf.Name
	} else {
		hf.Name, buf, err = d.readString(buf, wantStr)
		undecodedName, buf, err = d.readString(buf)
		if err != nil {
			return err
		}
	}
	hf.Value, buf, err = d.readString(buf, wantStr)
	undecodedValue, buf, err := d.readString(buf)
	if err != nil {
		return err
	}
	if wantStr {
		if nameIdx <= 0 {
			hf.Name, err = d.decodeString(undecodedName)
			if err != nil {
				return err
			}
		}
		hf.Value, err = d.decodeString(undecodedValue)
		if err != nil {
			return err
		}
	}
	d.buf = buf
	if it.indexed() {
		d.dynTab.add(hf)

@ -459,46 +472,52 @@ func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
	return 0, origP, errNeedMore
}

// readString decodes an hpack string from p.
// readString reads an hpack string from p.
//
// wantStr is whether s will be used. If false, decompression and
// []byte->string garbage are skipped if s will be ignored
// anyway. This does mean that huffman decoding errors for non-indexed
// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
// is returning an error anyway, and because they're not indexed, the error
// won't affect the decoding state.
func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
// It returns a reference to the encoded string data to permit deferring decode costs
// until after the caller verifies all data is present.
func (d *Decoder) readString(p []byte) (u undecodedString, remain []byte, err error) {
	if len(p) == 0 {
		return "", p, errNeedMore
		return u, p, errNeedMore
	}
	isHuff := p[0]&128 != 0
	strLen, p, err := readVarInt(7, p)
	if err != nil {
		return "", p, err
		return u, p, err
	}
	if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
		return "", nil, ErrStringLength
		// Returning an error here means Huffman decoding errors
		// for non-indexed strings past the maximum string length
		// are ignored, but the server is returning an error anyway
		// and because the string is not indexed the error will not
		// affect the decoding state.
		return u, nil, ErrStringLength
	}
	if uint64(len(p)) < strLen {
		return "", p, errNeedMore
		return u, p, errNeedMore
	}
	if !isHuff {
		if wantStr {
			s = string(p[:strLen])
		}
		return s, p[strLen:], nil
	}

	if wantStr {
		buf := bufPool.Get().(*bytes.Buffer)
		buf.Reset() // don't trust others
		defer bufPool.Put(buf)
		if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
			buf.Reset()
			return "", nil, err
		}
		s = buf.String()
		buf.Reset() // be nice to GC
	}
	return s, p[strLen:], nil
	u.isHuff = isHuff
	u.b = p[:strLen]
	return u, p[strLen:], nil
}

type undecodedString struct {
	isHuff bool
	b      []byte
}

func (d *Decoder) decodeString(u undecodedString) (string, error) {
	if !u.isHuff {
		return string(u.b), nil
	}
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset() // don't trust others
	var s string
	err := huffmanDecode(buf, d.maxStrLen, u.b)
	if err == nil {
		s = buf.String()
	}
	buf.Reset() // be nice to GC
	bufPool.Put(buf)
	return s, err
}

18
vendor/golang.org/x/net/http2/server.go
generated
vendored

@ -843,8 +843,13 @@ type frameWriteResult struct {
// and then reports when it's done.
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {
	err := wr.write.writeFrame(sc)
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
	var err error
	if wd == nil {
		err = wr.write.writeFrame(sc)
	} else {
		err = sc.framer.endWrite()
	}
	sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
}

@ -1251,9 +1256,16 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
		sc.writingFrameAsync = false
		err := wr.write.writeFrame(sc)
		sc.wroteFrame(frameWriteResult{wr: wr, err: err})
	} else if wd, ok := wr.write.(*writeData); ok {
		// Encode the frame in the serve goroutine, to ensure we don't have
		// any lingering asynchronous references to data passed to Write.
		// See https://go.dev/issue/58446.
		sc.framer.startWriteDataPadded(wd.streamID, wd.endStream, wd.p, nil)
		sc.writingFrameAsync = true
		go sc.writeFrameAsync(wr, wd)
	} else {
		sc.writingFrameAsync = true
		go sc.writeFrameAsync(wr)
		go sc.writeFrameAsync(wr, nil)
	}
}

3
vendor/google.golang.org/api/internal/gensupport/resumable.go
generated
vendored

@ -209,7 +209,6 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err
			}
			return prepareReturn(resp, err)
		case <-pauseTimer.C:
			quitAfterTimer.Stop()
		case <-quitAfterTimer.C:
			pauseTimer.Stop()
			return prepareReturn(resp, err)

@ -231,7 +230,6 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err
		case <-quitAfterTimer.C:
			return prepareReturn(resp, err)
		default:
			quitAfterTimer.Stop()
		}

		resp, err = rx.transferChunk(ctx)

@ -243,6 +241,7 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err

		// Check if we should retry the request.
		if !errorFunc(status, err) {
			quitAfterTimer.Stop()
			break
		}

2
vendor/google.golang.org/api/internal/version.go
generated
vendored

@ -5,4 +5,4 @@
package internal

// Version is the current tagged release of the library.
const Version = "0.109.0"
const Version = "0.110.0"

4
vendor/google.golang.org/api/transport/cert/enterprise_cert.go
generated
vendored

@ -15,7 +15,6 @@ package cert
import (
	"crypto/tls"
	"errors"
	"os"

	"github.com/googleapis/enterprise-certificate-proxy/client"
)

@ -36,8 +35,7 @@ type ecpSource struct {
func NewEnterpriseCertificateProxySource(configFilePath string) (Source, error) {
	key, err := client.Cred(configFilePath)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			// Config file missing means Enterprise Certificate Proxy is not supported.
		if errors.Is(err, client.ErrCredUnavailable) {
			return nil, errSourceUnavailable
		}
		return nil, err

26
vendor/google.golang.org/api/transport/http/configure_http2_go116.go
generated
vendored

@ -1,26 +0,0 @@
// Copyright 2021 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.16
// +build go1.16

package http

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the
// transport. This allows broken idle connections to be pruned more quickly,
// preventing the client from attempting to re-use connections that will no
// longer work.
func configureHTTP2(trans *http.Transport) {
	http2Trans, err := http2.ConfigureTransports(trans)
	if err == nil {
		http2Trans.ReadIdleTimeout = time.Second * 31
	}
}

17
vendor/google.golang.org/api/transport/http/configure_http2_not_go116.go
generated
vendored

@ -1,17 +0,0 @@
// Copyright 2021 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !go1.16
// +build !go1.16

package http

import (
	"net/http"
)

// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the
// transport. The interface to do this is only available in Go 1.16 and up, so
// this performs a no-op.
func configureHTTP2(trans *http.Transport) {}

14
vendor/google.golang.org/api/transport/http/dial.go
generated
vendored

@ -16,6 +16,7 @@ import (
	"time"

	"go.opencensus.io/plugin/ochttp"
	"golang.org/x/net/http2"
	"golang.org/x/oauth2"
	"google.golang.org/api/googleapi/transport"
	"google.golang.org/api/internal"

@ -175,13 +176,22 @@ func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) htt
		}
	}

	// If possible, configure http2 transport in order to use ReadIdleTimeout
	// setting. This can only be done in Go 1.16 and up.
	configureHTTP2(trans)

	return trans
}

// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the
// transport. This allows broken idle connections to be pruned more quickly,
// preventing the client from attempting to re-use connections that will no
// longer work.
func configureHTTP2(trans *http.Transport) {
	http2Trans, err := http2.ConfigureTransports(trans)
	if err == nil {
		http2Trans.ReadIdleTimeout = time.Second * 31
	}
}

// fallbackBaseTransport is used in <go1.13 as well as in the rare case if
// http.DefaultTransport has been reassigned something that's not a
// *http.Transport.

38
vendor/modules.txt
vendored

@ -1,4 +1,4 @@
# cloud.google.com/go v0.109.0
# cloud.google.com/go v0.110.0
## explicit; go 1.19
cloud.google.com/go/internal
cloud.google.com/go/internal/optional

@ -10,7 +10,7 @@ cloud.google.com/go/compute/internal
# cloud.google.com/go/compute/metadata v0.2.3
## explicit; go 1.19
cloud.google.com/go/compute/metadata
# cloud.google.com/go/iam v0.10.0
# cloud.google.com/go/iam v0.12.0
## explicit; go 1.19
cloud.google.com/go/iam
cloud.google.com/go/iam/apiv1/iampb

@ -71,7 +71,7 @@ github.com/VictoriaMetrics/fasthttp/stackless
# github.com/VictoriaMetrics/metrics v1.23.1
## explicit; go 1.15
github.com/VictoriaMetrics/metrics
# github.com/VictoriaMetrics/metricsql v0.53.0
# github.com/VictoriaMetrics/metricsql v0.56.1
## explicit; go 1.13
github.com/VictoriaMetrics/metricsql
github.com/VictoriaMetrics/metricsql/binaryop

@ -81,7 +81,7 @@ github.com/VividCortex/ewma
# github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
## explicit; go 1.15
github.com/alecthomas/units
# github.com/aws/aws-sdk-go v1.44.198
# github.com/aws/aws-sdk-go v1.44.204
## explicit; go 1.11
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/awserr

@ -150,10 +150,10 @@ github.com/aws/aws-sdk-go-v2/internal/timeconv
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi
# github.com/aws/aws-sdk-go-v2/config v1.18.12
# github.com/aws/aws-sdk-go-v2/config v1.18.13
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/config
# github.com/aws/aws-sdk-go-v2/credentials v1.13.12
# github.com/aws/aws-sdk-go-v2/credentials v1.13.13
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/credentials
github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds

@ -166,7 +166,7 @@ github.com/aws/aws-sdk-go-v2/credentials/stscreds
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/feature/ec2/imds
github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config
# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.51
# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.53
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/feature/s3/manager
# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.28

@ -178,7 +178,7 @@ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2
# github.com/aws/aws-sdk-go-v2/internal/ini v1.3.29
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/internal/ini
# github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.19
# github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.20
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/internal/v4a
github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto

@ -197,19 +197,19 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url
github.com/aws/aws-sdk-go-v2/service/internal/s3shared
github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn
github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config
# github.com/aws/aws-sdk-go-v2/service/s3 v1.30.2
# github.com/aws/aws-sdk-go-v2/service/s3 v1.30.3
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/service/s3
github.com/aws/aws-sdk-go-v2/service/s3/internal/arn
github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations
github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints
github.com/aws/aws-sdk-go-v2/service/s3/types
# github.com/aws/aws-sdk-go-v2/service/sso v1.12.1
# github.com/aws/aws-sdk-go-v2/service/sso v1.12.2
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/service/sso
github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints
github.com/aws/aws-sdk-go-v2/service/sso/types
# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.1
# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.14.2
## explicit; go 1.15
github.com/aws/aws-sdk-go-v2/service/ssooidc
github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints

@ -313,7 +313,7 @@ github.com/google/go-cmp/cmp/internal/value
# github.com/google/uuid v1.3.0
## explicit
github.com/google/uuid
# github.com/googleapis/enterprise-certificate-proxy v0.2.2
# github.com/googleapis/enterprise-certificate-proxy v0.2.3
## explicit; go 1.19
github.com/googleapis/enterprise-certificate-proxy/client
github.com/googleapis/enterprise-certificate-proxy/client/util

@ -445,7 +445,7 @@ github.com/russross/blackfriday/v2
## explicit; go 1.13
github.com/stretchr/testify/assert
github.com/stretchr/testify/require
# github.com/urfave/cli/v2 v2.24.3
# github.com/urfave/cli/v2 v2.24.4
## explicit; go 1.18
github.com/urfave/cli/v2
# github.com/valyala/bytebufferpool v1.0.0

@ -461,7 +461,7 @@ github.com/valyala/fastrand
# github.com/valyala/fasttemplate v1.2.2
## explicit; go 1.12
github.com/valyala/fasttemplate
# github.com/valyala/gozstd v1.17.0
# github.com/valyala/gozstd v1.18.0
## explicit; go 1.12
github.com/valyala/gozstd
# github.com/valyala/histogram v1.2.0

@ -522,15 +522,15 @@ go.opentelemetry.io/otel/trace
# go.uber.org/atomic v1.10.0
## explicit; go 1.18
go.uber.org/atomic
# go.uber.org/goleak v1.2.0
# go.uber.org/goleak v1.2.1
## explicit; go 1.18
go.uber.org/goleak
go.uber.org/goleak/internal/stack
# golang.org/x/exp v0.0.0-20230206171751-46f607a40771
# golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb
## explicit; go 1.18
golang.org/x/exp/constraints
golang.org/x/exp/slices
# golang.org/x/net v0.6.0
# golang.org/x/net v0.7.0
## explicit; go 1.17
golang.org/x/net/context
golang.org/x/net/http/httpguts

@ -572,7 +572,7 @@ golang.org/x/time/rate
## explicit; go 1.17
golang.org/x/xerrors
golang.org/x/xerrors/internal
# google.golang.org/api v0.109.0
# google.golang.org/api v0.110.0
## explicit; go 1.19
google.golang.org/api/googleapi
google.golang.org/api/googleapi/transport

@ -605,7 +605,7 @@ google.golang.org/appengine/internal/socket
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/socket
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc
# google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44
## explicit; go 1.19
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations