Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git

Commit a72dadb8f4: Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

129 changed files with 16242 additions and 25865 deletions
@@ -187,18 +187,28 @@ func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64) (
     if err != nil {
         return Metric{}, fmt.Errorf("failed to send GET request to %q: %s", q, err)
     }
+    /*
+        There are three potential failures here, none of which should kill the entire
+        migration run:
+        1. bad response code
+        2. failure to read response body
+        3. bad format of response body
+    */
     if resp.StatusCode != 200 {
-        return Metric{}, fmt.Errorf("Bad return from OpenTSDB: %q: %v", resp.StatusCode, resp)
+        log.Println(fmt.Sprintf("bad response code from OpenTSDB query %v...skipping", resp.StatusCode))
+        return Metric{}, nil
     }
     defer func() { _ = resp.Body.Close() }()
     body, err := ioutil.ReadAll(resp.Body)
     if err != nil {
-        return Metric{}, fmt.Errorf("could not retrieve series data from %q: %s", q, err)
+        log.Println("couldn't read response body from OpenTSDB query...skipping")
+        return Metric{}, nil
     }
     var output []OtsdbMetric
     err = json.Unmarshal(body, &output)
     if err != nil {
-        return Metric{}, fmt.Errorf("failed to unmarshal response from %q [%v]: %s", q, body, err)
+        log.Println(fmt.Sprintf("couldn't marshall response body from OpenTSDB query (%s)...skipping", body))
+        return Metric{}, nil
     }
     /*
         We expect results to look like:
@@ -227,6 +237,8 @@ func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64) (
         An empty array doesn't cast to a OtsdbMetric struct well, and there's no reason to try, so we should just skip it
         Because we're trying to migrate data without transformations, seeing aggregate tags could mean
         we're dropping series on the floor.
+
+        In all "bad" cases, we don't end the migration, we just don't process that particular message
     */
     if len(output) < 1 {
         // no results returned...return an empty object without error
@@ -234,11 +246,11 @@ func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64) (
     }
     if len(output) > 1 {
         // multiple series returned for a single query. We can't process this right, so...
-        return Metric{}, fmt.Errorf("Query returned multiple results: %v", output)
+        return Metric{}, nil
     }
     if len(output[0].AggregateTags) > 0 {
         // This failure means we've suppressed potential series somehow...
-        return Metric{}, fmt.Errorf("Query somehow has aggregate tags: %v", output[0].AggregateTags)
+        return Metric{}, nil
     }
     data := Metric{}
     data.Metric = output[0].Metric
@@ -249,7 +261,7 @@ func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64) (
     */
     data, err = modifyData(data, c.Normalize)
     if err != nil {
-        return Metric{}, fmt.Errorf("invalid series data from %q: %s", q, err)
+        return Metric{}, nil
     }

     /*

@@ -6270,6 +6270,22 @@ func TestExecSuccess(t *testing.T) {
         resultExpected := []netstorage.Result{r}
         f(q, resultExpected)
     })
+    t.Run(`increase_prometheus(time())`, func(t *testing.T) {
+        t.Parallel()
+        q := `increase_prometheus(time())`
+        f(q, nil)
+    })
+    t.Run(`increase_prometheus(time()[201s])`, func(t *testing.T) {
+        t.Parallel()
+        q := `increase_prometheus(time()[201s])`
+        r := netstorage.Result{
+            MetricName: metricNameExpected,
+            Values: []float64{200, 200, 200, 200, 200, 200},
+            Timestamps: timestampsExpected,
+        }
+        resultExpected := []netstorage.Result{r}
+        f(q, resultExpected)
+    })
     t.Run(`running_max(1)`, func(t *testing.T) {
         t.Parallel()
         q := `running_max(1)`
@@ -6512,6 +6528,22 @@ func TestExecSuccess(t *testing.T) {
         resultExpected := []netstorage.Result{r}
         f(q, resultExpected)
     })
+    t.Run(`delta_prometheus(time())`, func(t *testing.T) {
+        t.Parallel()
+        q := `delta_prometheus(time())`
+        f(q, nil)
+    })
+    t.Run(`delta_prometheus(time()[201s])`, func(t *testing.T) {
+        t.Parallel()
+        q := `delta_prometheus(time()[201s])`
+        r := netstorage.Result{
+            MetricName: metricNameExpected,
+            Values: []float64{200, 200, 200, 200, 200, 200},
+            Timestamps: timestampsExpected,
+        }
+        resultExpected := []netstorage.Result{r}
+        f(q, resultExpected)
+    })
     t.Run(`median_over_time("foo")`, func(t *testing.T) {
         t.Parallel()
         q := `median_over_time("foo")`
@@ -7503,6 +7535,12 @@ func TestExecError(t *testing.T) {
     f(`bitmap_xor()`)
     f(`quantiles()`)
     f(`limit_offset()`)
+    f(`increase()`)
+    f(`increase_prometheus()`)
+    f(`changes()`)
+    f(`changes_prometheus()`)
+    f(`delta()`)
+    f(`delta_prometheus()`)

     // Invalid argument type
     f(`median_over_time({}, 2)`)

@@ -24,6 +24,7 @@ var rollupFuncs = map[string]newRollupFunc{
     "ascent_over_time": newRollupFuncOneArg(rollupAscentOverTime),
     "avg_over_time": newRollupFuncOneArg(rollupAvg),
     "changes": newRollupFuncOneArg(rollupChanges),
+    "changes_prometheus": newRollupFuncOneArg(rollupChangesPrometheus),
     "count_eq_over_time": newRollupCountEQ,
     "count_gt_over_time": newRollupCountGT,
     "count_le_over_time": newRollupCountLE,
@@ -32,6 +33,7 @@ var rollupFuncs = map[string]newRollupFunc{
     "decreases_over_time": newRollupFuncOneArg(rollupDecreases),
     "default_rollup": newRollupFuncOneArg(rollupDefault), // default rollup func
     "delta": newRollupFuncOneArg(rollupDelta),
+    "delta_prometheus": newRollupFuncOneArg(rollupDeltaPrometheus),
     "deriv": newRollupFuncOneArg(rollupDerivSlow),
     "deriv_fast": newRollupFuncOneArg(rollupDerivFast),
     "descent_over_time": newRollupFuncOneArg(rollupDescentOverTime),
@@ -46,6 +48,7 @@ var rollupFuncs = map[string]newRollupFunc{
     "idelta": newRollupFuncOneArg(rollupIdelta),
     "ideriv": newRollupFuncOneArg(rollupIderiv),
     "increase": newRollupFuncOneArg(rollupDelta), // + rollupFuncsRemoveCounterResets
+    "increase_prometheus": newRollupFuncOneArg(rollupDeltaPrometheus), // + rollupFuncsRemoveCounterResets
     "increase_pure": newRollupFuncOneArg(rollupIncreasePure), // + rollupFuncsRemoveCounterResets
     "increases_over_time": newRollupFuncOneArg(rollupIncreases),
     "integrate": newRollupFuncOneArg(rollupIntegrate),
@@ -162,6 +165,7 @@ var rollupFuncsCanAdjustWindow = map[string]bool{

 var rollupFuncsRemoveCounterResets = map[string]bool{
     "increase": true,
+    "increase_prometheus": true,
     "increase_pure": true,
     "irate": true,
     "rate": true,
@@ -1485,6 +1489,18 @@ func rollupDelta(rfa *rollupFuncArg) float64 {
     return values[len(values)-1] - prevValue
 }

+func rollupDeltaPrometheus(rfa *rollupFuncArg) float64 {
+    // There is no need in handling NaNs here, since they must be cleaned up
+    // before calling rollup funcs.
+    values := rfa.values
+    // Just return the difference between the last and the first sample like Prometheus does.
+    // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1962
+    if len(values) < 2 {
+        return nan
+    }
+    return values[len(values)-1] - values[0]
+}
+
 func rollupIdelta(rfa *rollupFuncArg) float64 {
     // There is no need in handling NaNs here, since they must be cleaned up
     // before calling rollup funcs.
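
For context, a self-contained sketch (not part of this commit) of the behavioural difference the new function introduces: deltaPrometheus mirrors the added rollupDeltaPrometheus (last sample minus first sample inside the lookbehind window), while deltaVM is a simplified stand-in for the existing rollupDelta, which subtracts the last sample seen before the window when one exists. The real rollupDelta handles more edge cases than shown here.

// Standalone sketch only; function names deltaVM and deltaPrometheus are illustrative.
package main

import (
    "fmt"
    "math"
)

// deltaPrometheus: last sample minus first sample inside the window.
func deltaPrometheus(values []float64) float64 {
    if len(values) < 2 {
        return math.NaN()
    }
    return values[len(values)-1] - values[0]
}

// deltaVM: simplified MetricsQL-style delta, anchored on the last sample
// before the window when it is available.
func deltaVM(prevValue float64, values []float64) float64 {
    if len(values) == 0 {
        return math.NaN()
    }
    if math.IsNaN(prevValue) {
        // No sample before the window: fall back to the first sample inside it.
        prevValue = values[0]
    }
    return values[len(values)-1] - prevValue
}

func main() {
    window := []float64{10, 12, 15} // raw samples inside the lookbehind window
    prev := 7.0                     // last raw sample before the window
    fmt.Println(deltaVM(prev, window))   // 8: 15 - 7
    fmt.Println(deltaPrometheus(window)) // 5: 15 - 10
}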

@@ -1644,6 +1660,26 @@ func rollupScrapeInterval(rfa *rollupFuncArg) float64 {
     return (float64(timestamps[len(timestamps)-1]-rfa.prevTimestamp) / 1e3) / float64(len(timestamps))
 }

+func rollupChangesPrometheus(rfa *rollupFuncArg) float64 {
+    // There is no need in handling NaNs here, since they must be cleaned up
+    // before calling rollup funcs.
+    values := rfa.values
+    // Do not take into account rfa.prevValue like Prometheus does.
+    // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1962
+    if len(values) < 1 {
+        return nan
+    }
+    prevValue := values[0]
+    n := 0
+    for _, v := range values[1:] {
+        if v != prevValue {
+            n++
+            prevValue = v
+        }
+    }
+    return float64(n)
+}
+
 func rollupChanges(rfa *rollupFuncArg) float64 {
     // There is no need in handling NaNs here, since they must be cleaned up
     // before calling rollup funcs.
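
A similar standalone sketch (again not the commit's code) contrasts the two counting strategies: changesPrometheus follows the added rollupChangesPrometheus and only counts changes between samples inside the window, while changesVM approximates the existing rollupChanges by also comparing against the last sample before the window.

// Standalone sketch only; changesVM is a simplified approximation of rollupChanges.
package main

import (
    "fmt"
    "math"
)

func changesPrometheus(values []float64) float64 {
    if len(values) < 1 {
        return math.NaN()
    }
    prev := values[0]
    n := 0
    for _, v := range values[1:] {
        if v != prev {
            n++
            prev = v
        }
    }
    return float64(n)
}

func changesVM(prevValue float64, values []float64) float64 {
    if len(values) == 0 {
        return math.NaN()
    }
    prev := prevValue
    if math.IsNaN(prev) {
        prev = values[0]
    }
    n := 0
    for _, v := range values {
        if v != prev {
            n++
            prev = v
        }
    }
    return float64(n)
}

func main() {
    window := []float64{5, 5, 6, 6} // samples inside the window: one change (5 -> 6)
    prev := 4.0                     // last sample before the window
    fmt.Println(changesVM(prev, window))   // 2: 4 -> 5 and 5 -> 6
    fmt.Println(changesPrometheus(window)) // 1: 5 -> 6
}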

@@ -490,11 +490,14 @@ func TestRollupNewRollupFuncSuccess(t *testing.T) {

     f("default_rollup", 34)
     f("changes", 11)
+    f("changes_prometheus", 10)
     f("delta", 34)
+    f("delta_prometheus", -89)
     f("deriv", -266.85860231406093)
     f("deriv_fast", -712)
     f("idelta", 0)
     f("increase", 398)
+    f("increase_prometheus", 275)
     f("irate", 0)
     f("rate", 2200)
     f("resets", 5)
@@ -851,6 +854,20 @@ func TestRollupFuncsNoWindow(t *testing.T) {
         timestampsExpected := []int64{0, 40, 80, 120, 160}
         testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
     })
+    t.Run("delta_prometheus", func(t *testing.T) {
+        rc := rollupConfig{
+            Func: rollupDeltaPrometheus,
+            Start: 0,
+            End: 160,
+            Step: 40,
+            Window: 0,
+        }
+        rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+        values := rc.Do(nil, testValues, testTimestamps)
+        valuesExpected := []float64{nan, -102, -42, -10, nan}
+        timestampsExpected := []int64{0, 40, 80, 120, 160}
+        testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
+    })
     t.Run("idelta", func(t *testing.T) {
         rc := rollupConfig{
             Func: rollupIdelta,
@@ -949,6 +966,20 @@ func TestRollupFuncsNoWindow(t *testing.T) {
         timestampsExpected := []int64{0, 40, 80, 120, 160}
         testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
     })
+    t.Run("changes_prometheus", func(t *testing.T) {
+        rc := rollupConfig{
+            Func: rollupChangesPrometheus,
+            Start: 0,
+            End: 160,
+            Step: 40,
+            Window: 0,
+        }
+        rc.Timestamps = getTimestamps(rc.Start, rc.End, rc.Step)
+        values := rc.Do(nil, testValues, testTimestamps)
+        valuesExpected := []float64{nan, 3, 3, 2, 0}
+        timestampsExpected := []int64{0, 40, 80, 120, 160}
+        testRowsEqual(t, values, rc.Timestamps, valuesExpected, timestampsExpected)
+    })
     t.Run("changes_small_window", func(t *testing.T) {
         rc := rollupConfig{
             Func: rollupChanges,

@@ -1,19 +1,12 @@
 {
   "files": {
-    "main.css": "./static/css/main.83d9ae2d.chunk.css",
-    "main.js": "./static/js/main.fa15a895.chunk.js",
-    "runtime-main.js": "./static/js/runtime-main.c4b656b8.js",
-    "static/css/2.77671664.chunk.css": "./static/css/2.77671664.chunk.css",
-    "static/js/2.ef1db8c8.chunk.js": "./static/js/2.ef1db8c8.chunk.js",
-    "static/js/3.65648506.chunk.js": "./static/js/3.65648506.chunk.js",
-    "index.html": "./index.html",
-    "static/js/2.ef1db8c8.chunk.js.LICENSE.txt": "./static/js/2.ef1db8c8.chunk.js.LICENSE.txt"
+    "main.css": "./static/css/main.a33903a8.css",
+    "main.js": "./static/js/main.4305bd17.js",
+    "static/js/27.85f0e2b0.chunk.js": "./static/js/27.85f0e2b0.chunk.js",
+    "index.html": "./index.html"
   },
   "entrypoints": [
-    "static/js/runtime-main.c4b656b8.js",
-    "static/css/2.77671664.chunk.css",
-    "static/js/2.ef1db8c8.chunk.js",
-    "static/css/main.83d9ae2d.chunk.css",
-    "static/js/main.fa15a895.chunk.js"
+    "static/css/main.a33903a8.css",
+    "static/js/main.4305bd17.js"
   ]
 }

@@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><link href="./static/css/2.77671664.chunk.css" rel="stylesheet"><link href="./static/css/main.83d9ae2d.chunk.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div><script>!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"65648506"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"==typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([])</script><script src="./static/js/2.ef1db8c8.chunk.js"></script><script src="./static/js/main.fa15a895.chunk.js"></script></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script defer="defer" src="./static/js/main.4305bd17.js"></script><link href="./static/css/main.a33903a8.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

@@ -1 +0,0 @@
.uplot,.uplot *,.uplot :after,.uplot :before{box-sizing:border-box}.uplot{font-family:system-ui,-apple-system,"Segoe UI",Roboto,"Helvetica Neue",Arial,"Noto Sans",sans-serif,"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";line-height:1.5;width:-webkit-min-content;width:min-content}.u-title{text-align:center;font-size:18px;font-weight:700}.u-wrap{position:relative;-webkit-user-select:none;-ms-user-select:none;user-select:none}.u-over,.u-under{position:absolute}.u-under{overflow:hidden}.uplot canvas{display:block;position:relative;width:100%;height:100%}.u-axis{position:absolute}.u-legend{font-size:14px;margin:auto;text-align:center}.u-inline{display:block}.u-inline *{display:inline-block}.u-inline tr{margin-right:16px}.u-legend th{font-weight:600}.u-legend th>*{vertical-align:middle;display:inline-block}.u-legend .u-marker{width:1em;height:1em;margin-right:4px;background-clip:padding-box!important}.u-inline.u-live th:after{content:":";vertical-align:middle}.u-inline:not(.u-live) .u-value{display:none}.u-series>*{padding:4px}.u-series th{cursor:pointer}.u-legend .u-off>*{opacity:.3}.u-select{background:rgba(0,0,0,.07)}.u-cursor-x,.u-cursor-y,.u-select{position:absolute;pointer-events:none}.u-cursor-x,.u-cursor-y{left:0;top:0;will-change:transform;z-index:100}.u-hz .u-cursor-x,.u-vt .u-cursor-y{height:100%;border-right:1px dashed #607d8b}.u-hz .u-cursor-y,.u-vt .u-cursor-x{width:100%;border-bottom:1px dashed #607d8b}.u-cursor-pt{position:absolute;top:0;left:0;border-radius:50%;border:0 solid;pointer-events:none;will-change:transform;z-index:100;background-clip:padding-box!important}.u-axis.u-off,.u-cursor-pt.u-off,.u-cursor-x.u-off,.u-cursor-y.u-off,.u-select.u-off{display:none}

@@ -1 +0,0 @@
body{font-family:-apple-system,BlinkMacSystemFont,"Segoe UI","Roboto","Oxygen","Ubuntu","Cantarell","Fira Sans","Droid Sans","Helvetica Neue",sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}code{font-family:source-code-pro,Menlo,Monaco,Consolas,"Courier New",monospace}.MuiAccordionSummary-content{margin:0!important}.cm-activeLine{background-color:inherit!important}.cm-editor{border:none;border-radius:4px;font-size:10px}.cm-gutters{border-radius:4px 0 0 4px;height:100%;overflow:hidden;border:none!important}.cm-activeLineGutter,.cm-gutters{background-color:#fff!important}.query-editor .cm-scroller{align-items:center!important}.query-editor .cm-editor.cm-focused{outline:none}.query-editor-container{position:relative;padding:12px;border:1px solid #b9b9b9;border-radius:4px}.query-editor-container_focus{border:1px solid #3f51b5}.query-editor-container_error{border-color:#ff4141}.query-editor-container-one-line .query-editor .cm-editor{height:22px}.query-editor-container-one-line{padding:6px}.query-editor-label{font-weight:400;font-size:12px;line-height:1;letter-spacing:normal;color:rgba(0,0,0,.6);padding:0 5px;white-space:nowrap;overflow:hidden;text-overflow:ellipsis;max-width:calc(133% - 24px);position:absolute;left:4px;top:-.71875em;z-index:1;background-color:#fff;-webkit-transform:scale(.75);transform:scale(.75)}.query-editor-container_error .query-editor-label{color:#ff4141}.u-tooltip{position:absolute;display:none;grid-gap:12px;max-width:300px;padding:8px;border-radius:4px;background:rgba(57,57,57,.9);color:#fff;font-size:10px;line-height:1.4em;font-weight:500;word-wrap:break-word;font-family:monospace;pointer-events:none;z-index:100}.u-tooltip-data{display:flex;flex-wrap:wrap;align-items:center;font-size:11px;line-height:150%}.u-tooltip-data__value{padding:4px;font-weight:700}.u-tooltip__info{display:grid;grid-gap:4px}.u-tooltip__marker{width:12px;height:12px;margin-right:4px}.legendWrapper{display:grid;grid-template-columns:repeat(auto-fit,minmax(400px,1fr));grid-gap:20px;margin-top:20px;cursor:default}.legendGroup{margin-bottom:24px}.legendGroupTitle{display:flex;align-items:center;padding:10px 0 5px;font-size:11px}.legendGroupLine{margin:0 10px}.legendItem{display:inline-grid;grid-template-columns:auto auto;grid-gap:6px;align-items:start;justify-content:start;padding:5px 10px;background-color:#fff;cursor:pointer;transition:.2s ease}.legendItemHide{text-decoration:line-through;opacity:.5}.legendItem:hover{background-color:rgba(0,0,0,.1)}.legendMarker{width:12px;height:12px;border-width:2px;border-style:solid;box-sizing:border-box;transition:.2s ease;margin:3px 0}.legendLabel{font-size:11px;font-weight:400}

app/vmselect/vmui/static/css/main.a33903a8.css (new file)
@@ -0,0 +1 @@
body{-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif}code{font-family:source-code-pro,Menlo,Monaco,Consolas,Courier New,monospace}.MuiAccordionSummary-content{margin:0!important}.cm-activeLine{background-color:inherit!important}.cm-editor{border:none;border-radius:4px;font-size:10px}.cm-gutters{border:none!important;border-radius:4px 0 0 4px;height:100%;overflow:hidden}.cm-activeLineGutter,.cm-gutters{background-color:#fff!important}.query-editor .cm-scroller{align-items:center!important}.query-editor .cm-editor.cm-focused{outline:none}.query-editor-container{border:1px solid #b9b9b9;border-radius:4px;padding:12px;position:relative}.query-editor-container_focus{border:1px solid #3f51b5}.query-editor-container_error{border-color:#ff4141}.query-editor-container-one-line .query-editor .cm-editor{height:22px}.query-editor-container-one-line{padding:6px}.query-editor-label{background-color:#fff;color:rgba(0,0,0,.6);font-size:12px;font-weight:400;left:4px;letter-spacing:normal;line-height:1;max-width:calc(133% - 24px);overflow:hidden;padding:0 5px;position:absolute;text-overflow:ellipsis;top:-.71875em;-webkit-transform:scale(.75);transform:scale(.75);white-space:nowrap;z-index:1}.query-editor-container_error .query-editor-label{color:#ff4141}.uplot,.uplot *,.uplot :after,.uplot :before{box-sizing:border-box}.uplot{font-family:system-ui,-apple-system,Segoe UI,Roboto,Helvetica Neue,Arial,Noto Sans,sans-serif,Apple Color Emoji,Segoe UI Emoji,Segoe UI Symbol,Noto Color Emoji;line-height:1.5;width:-webkit-min-content;width:min-content}.u-title{font-size:18px;font-weight:700;text-align:center}.u-wrap{position:relative;-webkit-user-select:none;-ms-user-select:none;user-select:none}.u-over,.u-under{position:absolute}.u-under{overflow:hidden}.uplot canvas{display:block;height:100%;position:relative;width:100%}.u-axis{position:absolute}.u-legend{font-size:14px;margin:auto;text-align:center}.u-inline{display:block}.u-inline *{display:inline-block}.u-inline tr{margin-right:16px}.u-legend th{font-weight:600}.u-legend th>*{display:inline-block;vertical-align:middle}.u-legend .u-marker{background-clip:padding-box!important;height:1em;margin-right:4px;width:1em}.u-inline.u-live th:after{content:":";vertical-align:middle}.u-inline:not(.u-live) .u-value{display:none}.u-series>*{padding:4px}.u-series th{cursor:pointer}.u-legend .u-off>*{opacity:.3}.u-select{background:rgba(0,0,0,.07)}.u-cursor-x,.u-cursor-y,.u-select{pointer-events:none;position:absolute}.u-cursor-x,.u-cursor-y{left:0;top:0;will-change:transform;z-index:100}.u-hz .u-cursor-x,.u-vt .u-cursor-y{border-right:1px dashed #607d8b;height:100%}.u-hz .u-cursor-y,.u-vt .u-cursor-x{border-bottom:1px dashed #607d8b;width:100%}.u-cursor-pt{background-clip:padding-box!important;border:0 
solid;border-radius:50%;left:0;pointer-events:none;position:absolute;top:0;will-change:transform;z-index:100}.u-axis.u-off,.u-cursor-pt.u-off,.u-cursor-x.u-off,.u-cursor-y.u-off,.u-select.u-off,.u-tooltip{display:none}.u-tooltip{grid-gap:12px;word-wrap:break-word;background:rgba(57,57,57,.9);border-radius:4px;color:#fff;font-family:monospace;font-size:10px;font-weight:500;line-height:1.4em;max-width:300px;padding:8px;pointer-events:none;position:absolute;z-index:100}.u-tooltip-data{align-items:center;display:flex;flex-wrap:wrap;font-size:11px;line-height:150%}.u-tooltip-data__value{font-weight:700;padding:4px}.u-tooltip__info{grid-gap:4px;display:grid}.u-tooltip__marker{height:12px;margin-right:4px;width:12px}.legendWrapper{grid-gap:20px;cursor:default;display:grid;grid-template-columns:repeat(auto-fit,minmax(400px,1fr));margin-top:20px}.legendGroup{margin-bottom:24px}.legendGroupTitle{align-items:center;display:flex;font-size:11px;padding:10px 0 5px}.legendGroupLine{margin:0 10px}.legendItem{grid-gap:6px;align-items:start;background-color:#fff;cursor:pointer;display:inline-grid;grid-template-columns:auto auto;justify-content:start;padding:5px 10px;transition:.2s ease}.legendItemHide{opacity:.5;text-decoration:line-through}.legendItem:hover{background-color:rgba(0,0,0,.1)}.legendMarker{border-style:solid;border-width:2px;box-sizing:border-box;height:12px;margin:3px 0;transition:.2s ease;width:12px}.legendLabel{font-size:11px;font-weight:400}

File diff suppressed because one or more lines are too long

app/vmselect/vmui/static/js/27.85f0e2b0.chunk.js (new file)
@@ -0,0 +1 @@
"use strict";(self.webpackChunkvmui=self.webpackChunkvmui||[]).push([[27],{4027:function(e,n,t){t.r(n),t.d(n,{getCLS:function(){return y},getFCP:function(){return g},getFID:function(){return C},getLCP:function(){return P},getTTFB:function(){return D}});var i,r,a,o,u=function(e,n){return{name:e,value:void 0===n?-1:n,delta:0,entries:[],id:"v2-".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12)}},c=function(e,n){try{if(PerformanceObserver.supportedEntryTypes.includes(e)){if("first-input"===e&&!("PerformanceEventTiming"in self))return;var t=new PerformanceObserver((function(e){return e.getEntries().map(n)}));return t.observe({type:e,buffered:!0}),t}}catch(e){}},f=function(e,n){var t=function t(i){"pagehide"!==i.type&&"hidden"!==document.visibilityState||(e(i),n&&(removeEventListener("visibilitychange",t,!0),removeEventListener("pagehide",t,!0)))};addEventListener("visibilitychange",t,!0),addEventListener("pagehide",t,!0)},s=function(e){addEventListener("pageshow",(function(n){n.persisted&&e(n)}),!0)},m=function(e,n,t){var i;return function(r){n.value>=0&&(r||t)&&(n.delta=n.value-(i||0),(n.delta||void 0===i)&&(i=n.value,e(n)))}},v=-1,p=function(){return"hidden"===document.visibilityState?0:1/0},d=function(){f((function(e){var n=e.timeStamp;v=n}),!0)},l=function(){return v<0&&(v=p(),d(),s((function(){setTimeout((function(){v=p(),d()}),0)}))),{get firstHiddenTime(){return v}}},g=function(e,n){var t,i=l(),r=u("FCP"),a=function(e){"first-contentful-paint"===e.name&&(f&&f.disconnect(),e.startTime<i.firstHiddenTime&&(r.value=e.startTime,r.entries.push(e),t(!0)))},o=window.performance&&performance.getEntriesByName&&performance.getEntriesByName("first-contentful-paint")[0],f=o?null:c("paint",a);(o||f)&&(t=m(e,r,n),o&&a(o),s((function(i){r=u("FCP"),t=m(e,r,n),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,t(!0)}))}))})))},h=!1,T=-1,y=function(e,n){h||(g((function(e){T=e.value})),h=!0);var t,i=function(n){T>-1&&e(n)},r=u("CLS",0),a=0,o=[],v=function(e){if(!e.hadRecentInput){var n=o[0],i=o[o.length-1];a&&e.startTime-i.startTime<1e3&&e.startTime-n.startTime<5e3?(a+=e.value,o.push(e)):(a=e.value,o=[e]),a>r.value&&(r.value=a,r.entries=o,t())}},p=c("layout-shift",v);p&&(t=m(i,r,n),f((function(){p.takeRecords().map(v),t(!0)})),s((function(){a=0,T=-1,r=u("CLS",0),t=m(i,r,n)})))},E={passive:!0,capture:!0},w=new Date,L=function(e,n){i||(i=n,r=e,a=new Date,F(removeEventListener),S())},S=function(){if(r>=0&&r<a-w){var e={entryType:"first-input",name:i.type,target:i.target,cancelable:i.cancelable,startTime:i.timeStamp,processingStart:i.timeStamp+r};o.forEach((function(n){n(e)})),o=[]}},b=function(e){if(e.cancelable){var n=(e.timeStamp>1e12?new Date:performance.now())-e.timeStamp;"pointerdown"==e.type?function(e,n){var t=function(){L(e,n),r()},i=function(){r()},r=function(){removeEventListener("pointerup",t,E),removeEventListener("pointercancel",i,E)};addEventListener("pointerup",t,E),addEventListener("pointercancel",i,E)}(n,e):L(n,e)}},F=function(e){["mousedown","keydown","touchstart","pointerdown"].forEach((function(n){return e(n,b,E)}))},C=function(e,n){var t,a=l(),v=u("FID"),p=function(e){e.startTime<a.firstHiddenTime&&(v.value=e.processingStart-e.startTime,v.entries.push(e),t(!0))},d=c("first-input",p);t=m(e,v,n),d&&f((function(){d.takeRecords().map(p),d.disconnect()}),!0),d&&s((function(){var a;v=u("FID"),t=m(e,v,n),o=[],r=-1,i=null,F(addEventListener),a=p,o.push(a),S()}))},k={},P=function(e,n){var 
t,i=l(),r=u("LCP"),a=function(e){var n=e.startTime;n<i.firstHiddenTime&&(r.value=n,r.entries.push(e)),t()},o=c("largest-contentful-paint",a);if(o){t=m(e,r,n);var v=function(){k[r.id]||(o.takeRecords().map(a),o.disconnect(),k[r.id]=!0,t(!0))};["keydown","click"].forEach((function(e){addEventListener(e,v,{once:!0,capture:!0})})),f(v,!0),s((function(i){r=u("LCP"),t=m(e,r,n),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,k[r.id]=!0,t(!0)}))}))}))}},D=function(e){var n,t=u("TTFB");n=function(){try{var n=performance.getEntriesByType("navigation")[0]||function(){var e=performance.timing,n={entryType:"navigation",startTime:0};for(var t in e)"navigationStart"!==t&&"toJSON"!==t&&(n[t]=Math.max(e[t]-e.navigationStart,0));return n}();if(t.value=t.delta=n.responseStart,t.value<0||t.value>performance.now())return;t.entries=[n],e(t)}catch(e){}},"complete"===document.readyState?setTimeout(n,0):addEventListener("pageshow",n)}}}]);

@@ -1 +0,0 @@
(this.webpackJsonpvmui=this.webpackJsonpvmui||[]).push([[3],{356:function(e,t,n){"use strict";n.r(t),n.d(t,"getCLS",(function(){return y})),n.d(t,"getFCP",(function(){return g})),n.d(t,"getFID",(function(){return C})),n.d(t,"getLCP",(function(){return k})),n.d(t,"getTTFB",(function(){return D}));var i,r,a,o,u=function(e,t){return{name:e,value:void 0===t?-1:t,delta:0,entries:[],id:"v2-".concat(Date.now(),"-").concat(Math.floor(8999999999999*Math.random())+1e12)}},c=function(e,t){try{if(PerformanceObserver.supportedEntryTypes.includes(e)){if("first-input"===e&&!("PerformanceEventTiming"in self))return;var n=new PerformanceObserver((function(e){return e.getEntries().map(t)}));return n.observe({type:e,buffered:!0}),n}}catch(e){}},f=function(e,t){var n=function n(i){"pagehide"!==i.type&&"hidden"!==document.visibilityState||(e(i),t&&(removeEventListener("visibilitychange",n,!0),removeEventListener("pagehide",n,!0)))};addEventListener("visibilitychange",n,!0),addEventListener("pagehide",n,!0)},s=function(e){addEventListener("pageshow",(function(t){t.persisted&&e(t)}),!0)},m=function(e,t,n){var i;return function(r){t.value>=0&&(r||n)&&(t.delta=t.value-(i||0),(t.delta||void 0===i)&&(i=t.value,e(t)))}},v=-1,p=function(){return"hidden"===document.visibilityState?0:1/0},d=function(){f((function(e){var t=e.timeStamp;v=t}),!0)},l=function(){return v<0&&(v=p(),d(),s((function(){setTimeout((function(){v=p(),d()}),0)}))),{get firstHiddenTime(){return v}}},g=function(e,t){var n,i=l(),r=u("FCP"),a=function(e){"first-contentful-paint"===e.name&&(f&&f.disconnect(),e.startTime<i.firstHiddenTime&&(r.value=e.startTime,r.entries.push(e),n(!0)))},o=window.performance&&performance.getEntriesByName&&performance.getEntriesByName("first-contentful-paint")[0],f=o?null:c("paint",a);(o||f)&&(n=m(e,r,t),o&&a(o),s((function(i){r=u("FCP"),n=m(e,r,t),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,n(!0)}))}))})))},h=!1,T=-1,y=function(e,t){h||(g((function(e){T=e.value})),h=!0);var n,i=function(t){T>-1&&e(t)},r=u("CLS",0),a=0,o=[],v=function(e){if(!e.hadRecentInput){var t=o[0],i=o[o.length-1];a&&e.startTime-i.startTime<1e3&&e.startTime-t.startTime<5e3?(a+=e.value,o.push(e)):(a=e.value,o=[e]),a>r.value&&(r.value=a,r.entries=o,n())}},p=c("layout-shift",v);p&&(n=m(i,r,t),f((function(){p.takeRecords().map(v),n(!0)})),s((function(){a=0,T=-1,r=u("CLS",0),n=m(i,r,t)})))},E={passive:!0,capture:!0},w=new Date,L=function(e,t){i||(i=t,r=e,a=new Date,F(removeEventListener),S())},S=function(){if(r>=0&&r<a-w){var e={entryType:"first-input",name:i.type,target:i.target,cancelable:i.cancelable,startTime:i.timeStamp,processingStart:i.timeStamp+r};o.forEach((function(t){t(e)})),o=[]}},b=function(e){if(e.cancelable){var t=(e.timeStamp>1e12?new Date:performance.now())-e.timeStamp;"pointerdown"==e.type?function(e,t){var n=function(){L(e,t),r()},i=function(){r()},r=function(){removeEventListener("pointerup",n,E),removeEventListener("pointercancel",i,E)};addEventListener("pointerup",n,E),addEventListener("pointercancel",i,E)}(t,e):L(t,e)}},F=function(e){["mousedown","keydown","touchstart","pointerdown"].forEach((function(t){return e(t,b,E)}))},C=function(e,t){var n,a=l(),v=u("FID"),p=function(e){e.startTime<a.firstHiddenTime&&(v.value=e.processingStart-e.startTime,v.entries.push(e),n(!0))},d=c("first-input",p);n=m(e,v,t),d&&f((function(){d.takeRecords().map(p),d.disconnect()}),!0),d&&s((function(){var 
a;v=u("FID"),n=m(e,v,t),o=[],r=-1,i=null,F(addEventListener),a=p,o.push(a),S()}))},P={},k=function(e,t){var n,i=l(),r=u("LCP"),a=function(e){var t=e.startTime;t<i.firstHiddenTime&&(r.value=t,r.entries.push(e)),n()},o=c("largest-contentful-paint",a);if(o){n=m(e,r,t);var v=function(){P[r.id]||(o.takeRecords().map(a),o.disconnect(),P[r.id]=!0,n(!0))};["keydown","click"].forEach((function(e){addEventListener(e,v,{once:!0,capture:!0})})),f(v,!0),s((function(i){r=u("LCP"),n=m(e,r,t),requestAnimationFrame((function(){requestAnimationFrame((function(){r.value=performance.now()-i.timeStamp,P[r.id]=!0,n(!0)}))}))}))}},D=function(e){var t,n=u("TTFB");t=function(){try{var t=performance.getEntriesByType("navigation")[0]||function(){var e=performance.timing,t={entryType:"navigation",startTime:0};for(var n in e)"navigationStart"!==n&&"toJSON"!==n&&(t[n]=Math.max(e[n]-e.navigationStart,0));return t}();if(n.value=n.delta=t.responseStart,n.value<0||n.value>performance.now())return;n.entries=[t],e(n)}catch(e){}},"complete"===document.readyState?setTimeout(t,0):addEventListener("pageshow",t)}}}]);

app/vmselect/vmui/static/js/main.4305bd17.js (new file)
File diff suppressed because one or more lines are too long

@@ -20,7 +20,7 @@ object-assign
  * @license MIT
  */

-/** @license MUI v5.2.0
+/** @license MUI v5.2.4
  *
  * This source code is licensed under the MIT license found in the
  * LICENSE file in the root directory of this source tree.

File diff suppressed because one or more lines are too long

@@ -1 +0,0 @@
!function(e){function r(r){for(var n,i,a=r[0],c=r[1],l=r[2],s=0,p=[];s<a.length;s++)i=a[s],Object.prototype.hasOwnProperty.call(o,i)&&o[i]&&p.push(o[i][0]),o[i]=0;for(n in c)Object.prototype.hasOwnProperty.call(c,n)&&(e[n]=c[n]);for(f&&f(r);p.length;)p.shift()();return u.push.apply(u,l||[]),t()}function t(){for(var e,r=0;r<u.length;r++){for(var t=u[r],n=!0,a=1;a<t.length;a++){var c=t[a];0!==o[c]&&(n=!1)}n&&(u.splice(r--,1),e=i(i.s=t[0]))}return e}var n={},o={1:0},u=[];function i(r){if(n[r])return n[r].exports;var t=n[r]={i:r,l:!1,exports:{}};return e[r].call(t.exports,t,t.exports,i),t.l=!0,t.exports}i.e=function(e){var r=[],t=o[e];if(0!==t)if(t)r.push(t[2]);else{var n=new Promise((function(r,n){t=o[e]=[r,n]}));r.push(t[2]=n);var u,a=document.createElement("script");a.charset="utf-8",a.timeout=120,i.nc&&a.setAttribute("nonce",i.nc),a.src=function(e){return i.p+"static/js/"+({}[e]||e)+"."+{3:"65648506"}[e]+".chunk.js"}(e);var c=new Error;u=function(r){a.onerror=a.onload=null,clearTimeout(l);var t=o[e];if(0!==t){if(t){var n=r&&("load"===r.type?"missing":r.type),u=r&&r.target&&r.target.src;c.message="Loading chunk "+e+" failed.\n("+n+": "+u+")",c.name="ChunkLoadError",c.type=n,c.request=u,t[1](c)}o[e]=void 0}};var l=setTimeout((function(){u({type:"timeout",target:a})}),12e4);a.onerror=a.onload=u,document.head.appendChild(a)}return Promise.all(r)},i.m=e,i.c=n,i.d=function(e,r,t){i.o(e,r)||Object.defineProperty(e,r,{enumerable:!0,get:t})},i.r=function(e){"undefined"!==typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},i.t=function(e,r){if(1&r&&(e=i(e)),8&r)return e;if(4&r&&"object"===typeof e&&e&&e.__esModule)return e;var t=Object.create(null);if(i.r(t),Object.defineProperty(t,"default",{enumerable:!0,value:e}),2&r&&"string"!=typeof e)for(var n in e)i.d(t,n,function(r){return e[r]}.bind(null,n));return t},i.n=function(e){var r=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(r,"a",r),r},i.o=function(e,r){return Object.prototype.hasOwnProperty.call(e,r)},i.p="./",i.oe=function(e){throw console.error(e),e};var a=this.webpackJsonpvmui=this.webpackJsonpvmui||[],c=a.push.bind(a);a.push=r,a=a.slice();for(var l=0;l<a.length;l++)r(a[l]);var f=c;t()}([]);

app/vmui/packages/vmui/package-lock.json (generated)
File diff suppressed because it is too large

@@ -5,7 +5,7 @@
   "homepage": "./",
   "dependencies": {
     "@codemirror/autocomplete": "^0.19.9",
-    "@codemirror/basic-setup": "^0.19.0",
+    "@codemirror/basic-setup": "^0.19.1",
     "@codemirror/commands": "^0.19.6",
     "@codemirror/highlight": "^0.19.6",
     "@codemirror/state": "^0.19.6",
@@ -13,9 +13,9 @@
     "@date-io/dayjs": "^2.11.0",
     "@emotion/react": "^11.7.1",
     "@emotion/styled": "^11.6.0",
-    "@mui/icons-material": "^5.2.1",
-    "@mui/lab": "^5.0.0-alpha.59",
-    "@mui/material": "^5.2.3",
+    "@mui/icons-material": "^5.2.4",
+    "@mui/lab": "^5.0.0-alpha.60",
+    "@mui/material": "^5.2.4",
     "@mui/styles": "^5.2.3",
     "@testing-library/jest-dom": "^5.16.1",
     "@testing-library/react": "^12.1.2",
@@ -24,7 +24,7 @@
     "@types/lodash.debounce": "^4.0.6",
     "@types/lodash.get": "^4.4.6",
     "@types/lodash.throttle": "^4.1.6",
-    "@types/node": "^16.11.12",
+    "@types/node": "^17.0.1",
     "@types/numeral": "^2.0.2",
     "@types/qs": "^6.9.7",
     "@types/react": "^17.0.37",
@@ -41,9 +41,9 @@
     "react-dom": "^17.0.2",
     "react-draggable": "^4.4.4",
     "react-measure": "^2.5.2",
-    "react-scripts": "4.0.3",
-    "typescript": "~4.5.3",
-    "uplot": "^1.6.17",
+    "react-scripts": "5.0.0",
+    "typescript": "~4.5.4",
+    "uplot": "^1.6.18",
     "web-vitals": "^2.1.2"
   },
   "scripts": {
@@ -73,9 +73,9 @@
     ]
   },
   "devDependencies": {
-    "@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.0",
-    "@typescript-eslint/eslint-plugin": "^5.6.0",
-    "@typescript-eslint/parser": "^5.6.0",
+    "@babel/plugin-proposal-nullish-coalescing-operator": "^7.16.5",
+    "@typescript-eslint/eslint-plugin": "^5.7.0",
+    "@typescript-eslint/parser": "^5.7.0",
     "customize-cra": "^1.0.0",
     "eslint-plugin-react": "^7.27.1",
     "react-app-rewired": "^2.1.8"

File diff suppressed because it is too large

@@ -216,6 +216,16 @@ groups:
          description: "Vmagent dropped {{ $value | humanize1024 }} from persistent queue
            on instance {{ $labels.instance }} for the last 10m."

+      - alert: RejectedRemoteWriteDataBlocksAreDropped
+        expr: sum(increase(vmagent_remotewrite_packets_dropped_total[5m])) by (job, instance) > 0
+        for: 15m
+        labels:
+          severity: warning
+        annotations:
+          dashboard: "http://localhost:3000/d/G7Z9GzMGz?viewPanel=79&var-instance={{ $labels.instance }}"
+          summary: "Job \"{{ $labels.job }}\" on instance {{ $labels.instance }} drops the rejected by
+            remote-write server data blocks. Check the logs to find the reason for rejects."
+
       - alert: TooManyScrapeErrors
         expr: sum(increase(vm_promscrape_scrapes_failed_total[5m])) by (job, instance) > 0
         for: 15m
@@ -261,6 +271,30 @@ groups:
            This usually means that `-remoteWrite.queues` command-line flag must be increased in order to increase
            the number of connections per each remote storage."

+      - alert: PersistentQueueForWritesIsSaturated
+        expr: rate(vm_persistentqueue_write_duration_seconds_total[5m]) > 0.9
+        for: 15m
+        labels:
+          severity: warning
+        annotations:
+          dashboard: "http://localhost:3000/d/G7Z9GzMGz?viewPanel=98&var-instance={{ $labels.instance }}"
+          summary: "Persistent queue writes for instance {{ $labels.instance }} are saturated"
+          description: "Persistent queue writes for vmagent \"{{ $labels.job }}\" (instance {{ $labels.instance }})
+            are saturated by more than 90% and vmagent won't be able to keep up with flushing data on disk.
+            In this case, consider to decrease load on the vmagent or improve the disk throughput."
+
+      - alert: PersistentQueueForReadsIsSaturated
+        expr: rate(vm_persistentqueue_read_duration_seconds_total[5m]) > 0.9
+        for: 15m
+        labels:
+          severity: warning
+        annotations:
+          dashboard: "http://localhost:3000/d/G7Z9GzMGz?viewPanel=99&var-instance={{ $labels.instance }}"
+          summary: "Persistent queue reads for instance {{ $labels.instance }} are saturated"
+          description: "Persistent queue reads for vmagent \"{{ $labels.job }}\" (instance {{ $labels.instance }})
+            are saturated by more than 90% and vmagent won't be able to keep up with reading data from the disk.
+            In this case, consider to decrease load on the vmagent or improve the disk throughput."
+
       - alert: SeriesLimitHourReached
         expr: (vmagent_hourly_series_limit_current_series / vmagent_hourly_series_limit_max_series) > 0.9
         labels:

@@ -17,6 +17,7 @@ sort: 15
 * FEATURE: preserve the order of time series passed to [limit_offset](https://docs.victoriametrics.com/MetricsQL.html#limit_offset) function. This allows implementing series paging via `limit_offset(limit, offset, sort_by_label(...))`. See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1920) and [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/951) issues.
 * FEATURE: automaticall convert `(value1|...|valueN)` into `{value1,...,valueN}` inside `__graphite__` pseudo-label. This allows using [Grafana multi-value template variables](https://grafana.com/docs/grafana/latest/variables/formatting-multi-value-variables/) inside `__graphite__` pseudo-label. For example, `{__graphite__=~"foo.($bar)"}` is expanded to `{__graphite__=~"foo.{x,y}"}` if both `x` and `y` are selected for `$bar` template variable. See [these docs](https://docs.victoriametrics.com/#selecting-graphite-metrics) for details.
 * FEATURE: add [timestamp_with_name](https://docs.victoriametrics.com/MetricsQL.html#timestamp_with_name) function. It works the same as [timestamp](https://docs.victoriametrics.com/MetricsQL.html#timestamp), but leaves the original time series names, so it can be used in queries, which match multiple time series names: `timestamp_with_name({foo="bar"}[1h])`. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/949#issuecomment-995222388) for more context.
+* FEATURE: add [changes_prometheus](https://docs.victoriametrics.com/MetricsQL.html#changes_prometheus), [increase_prometheus](https://docs.victoriametrics.com/MetricsQL.html#increase_prometheus) and [delta_prometheus](https://docs.victoriametrics.com/MetricsQL.html#delta_prometheus) functions, which don't take into account the previous sample before the given lookbehind window specified in square brackets. These functions may be used when the Prometheus behaviour for `changes()`, `increase()` and `delta()` functions is needed to be preserved. VictoriaMetrics uses slightly different behaviour for `changes()`, `increase()` and `delta()` functions by default - see [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1962).

 * BUGFIX: fix `unaligned 64-bit atomic operation` panic on 32-bit architectures, which has been introduced in v1.70.0. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1944).
 * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): restore the ability to use `$labels.alertname` in labels templating. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1921).

docs/FAQ.md
@@ -282,6 +282,13 @@ If old time series are constantly substituted by new time series at a high rate,
 * Increased size of inverted index, which is stored at `<-storageDataPath>/indexdb`, since the inverted index contains entries for every label of every time series with at least a single ingested sample
 * Slow down of queries over multiple days.

+The main reason for high churn rate is a metric label with frequently changed value. Examples of such labels:
+
+* `queryid`, which changes with each query at `postgres_exporter`.
+* `app_name` or `deployment_id`, which changes with each new deployment in Kubernetes.
+* A label derived from the current time such as `timestamp`, `minute` or `hour`.
+* A `hash` or `uuid` label, which changes frequently.
+
 The solution against high churn rate is to identify and eliminate labels with frequently changed values. The [/api/v1/status/tsdb](https://docs.victoriametrics.com/#tsdb-stats) page can help determining these labels.


@@ -323,3 +330,11 @@ Please see [these docs](https://docs.victoriametrics.com/vmctl.html#migrating-da
 ## How to migrate data from Graphite to VictoriaMetrics?

 Please use [whisper-to-graphite](https://github.com/bzed/whisper-to-graphite) tool for reading the data from Graphite and pushing it to VictoriaMetrics via [Graphite import API](https://docs.victoriametrics.com/#how-to-send-data-from-graphite-compatible-agents-such-as-statsd).
+
+
+## Why do same metrics have differences in VictoriaMetrics and Prometheus dashboards?
+
+There could be a slight difference in stored values for time series. Due to different compression algorithms, VM may reduce precision for float values with more than 12 significant decimal digits. Please see [this article](https://valyala.medium.com/evaluating-performance-and-correctness-victoriametrics-response-e27315627e87)
+
+The query engine may behave differently for some functions. Please see [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e)
+
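
The FAQ entry above notes that VictoriaMetrics may reduce precision for floats with more than 12 significant decimal digits. A toy Go snippet (not VictoriaMetrics' actual encoding, just generic rounding to 12 significant digits) shows how small such a difference typically is:

// Toy illustration only: VictoriaMetrics' real compression works differently.
package main

import (
    "fmt"
    "math"
)

// roundToSignificantFigures rounds v to n significant decimal digits.
func roundToSignificantFigures(v float64, n int) float64 {
    if v == 0 {
        return 0
    }
    exp := math.Ceil(math.Log10(math.Abs(v)))
    scale := math.Pow(10, float64(n)-exp)
    return math.Round(v*scale) / scale
}

func main() {
    original := 123456.789012345678 // more than 12 significant digits
    stored := roundToSignificantFigures(original, 12)
    fmt.Println(original)          // the ingested value
    fmt.Println(stored)            // 123456.789012
    fmt.Println(original - stored) // roughly 3.5e-7 -- negligible on a dashboard
}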
@@ -92,7 +92,11 @@ See also [implicit query conversions](#implicit-query-conversions).

 #### changes

-`changes(series_selector[d])` calculates the number of times the raw samples changed on the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). Metric names are stripped from the resulting rollups. This function is supported by PromQL.
+`changes(series_selector[d])` calculates the number of times the raw samples changed on the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). Unlike `changes()` in Prometheus, it takes into account the change from the last sample before the given lookbehind window `d`. See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details. Metric names are stripped from the resulting rollups. This function is supported by PromQL. See also [changes_prometheus](#changes_prometheus).
+
+#### changes_prometheus
+
+`changes_prometheus(series_selector[d])` calculates the number of times the raw samples changed on the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). It doesn't take into account the change from the last sample before the given lookbehind window `d` in the same way as Prometheus does. See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details. Metric names are stripped from the resulting rollups. This function is supported by PromQL. See also [changes](#changes).
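For illustration (not part of the upstream diff), a minimal sketch of the difference described above; the metric name `http_requests_total` and the `5m` window are assumptions:

```
# MetricsQL behaviour: the last sample just before the 5m window is also
# compared against the first sample inside the window.
changes(http_requests_total[5m])

# Prometheus-compatible behaviour: only samples inside the 5m window are compared.
changes_prometheus(http_requests_total[5m])
```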
 #### count_eq_over_time
@@ -124,7 +128,11 @@ See also [implicit query conversions](#implicit-query-conversions).

 #### delta

-`delta(series_selector[d])` calculates the difference between the first and the last point over the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). Metric names are stripped from the resulting rollups. This function is supported by PromQL. See also [increase](#increase).
+`delta(series_selector[d])` calculates the difference between the last sample before the given lookbehind window `d` and the last sample at the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). The behaviour of the `delta()` function in MetricsQL is slightly different to the behaviour of the `delta()` function in Prometheus. See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details. Metric names are stripped from the resulting rollups. This function is supported by PromQL. See also [increase](#increase) and [delta_prometheus](#delta_prometheus).
+
+#### delta_prometheus
+
+`delta_prometheus(series_selector[d])` calculates the difference between the first and the last samples at the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). The behaviour of `delta_prometheus()` is close to the behaviour of the `delta()` function in Prometheus. See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details. Metric names are stripped from the resulting rollups. See also [delta](#delta).
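A hedged sketch of the two variants (not part of the upstream diff); the metric name `process_resident_memory_bytes` and the `10m` window are illustrative assumptions:

```
# MetricsQL delta(): last sample before the 10m window vs. the last sample inside it.
delta(process_resident_memory_bytes[10m])

# Prometheus-like delta(): first sample inside the 10m window vs. the last sample inside it.
delta_prometheus(process_resident_memory_bytes[10m])
```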
 #### deriv
@@ -180,7 +188,11 @@ See also [implicit query conversions](#implicit-query-conversions).

 #### increase

-`increase(series_selector[d])` calculates the increase over the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). It is expected that the `series_selector` returns time series of [counter type](https://prometheus.io/docs/concepts/metric_types/#counter). Metric names are stripped from the resulting rollups. This function is supported by PromQL. See also [increase_pure](#increase_pure) and [delta](#delta).
+`increase(series_selector[d])` calculates the increase over the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). It is expected that the `series_selector` returns time series of [counter type](https://prometheus.io/docs/concepts/metric_types/#counter). Unlike Prometheus, it takes into account the last sample before the given lookbehind window `d` when calculating the result. See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details. Metric names are stripped from the resulting rollups. This function is supported by PromQL. See also [increase_pure](#increase_pure), [increase_prometheus](#increase_prometheus) and [delta](#delta).
+
+#### increase_prometheus
+
+`increase_prometheus(series_selector[d])` calculates the increase over the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). It is expected that the `series_selector` returns time series of [counter type](https://prometheus.io/docs/concepts/metric_types/#counter). It doesn't take into account the last sample before the given lookbehind window `d` when calculating the result in the same way as Prometheus does. See [this article](https://medium.com/@romanhavronenko/victoriametrics-promql-compliance-d4318203f51e) for details. Metric names are stripped from the resulting rollups. This function is supported by PromQL. See also [increase_pure](#increase_pure) and [increase](#increase).
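A minimal sketch of the difference (not part of the upstream diff), assuming a counter named `http_requests_total` and a `1h` window:

```
# MetricsQL increase(): the last sample before the 1h window is taken into account,
# so increases that happen right at the window boundary are not lost.
increase(http_requests_total[1h])

# Prometheus-compatible increase(): only samples inside the 1h window are used.
increase_prometheus(http_requests_total[1h])
```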
 #### increase_pure
go.mod (16 changed lines)

@@ -1,7 +1,6 @@
 module github.com/VictoriaMetrics/VictoriaMetrics

 require (
-	cloud.google.com/go v0.99.0 // indirect
 	cloud.google.com/go/storage v1.18.2
 	github.com/VictoriaMetrics/fastcache v1.8.0

@@ -9,13 +8,13 @@ require (
 	// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
 	github.com/VictoriaMetrics/fasthttp v1.1.0
 	github.com/VictoriaMetrics/metrics v1.18.1
-	github.com/VictoriaMetrics/metricsql v0.33.0
+	github.com/VictoriaMetrics/metricsql v0.34.0
 	github.com/VividCortex/ewma v1.2.0 // indirect
-	github.com/aws/aws-sdk-go v1.42.22
+	github.com/aws/aws-sdk-go v1.42.23
 	github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2
 	github.com/cheggaaa/pb/v3 v3.0.8
-	github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 // indirect
+	github.com/cncf/xds/go v0.0.0-20211216145620-d92e9ce0af51 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
 	github.com/envoyproxy/go-control-plane v0.10.1 // indirect
 	github.com/envoyproxy/protoc-gen-validate v0.6.2 // indirect

@@ -35,12 +34,11 @@ require (
 	github.com/valyala/fasttemplate v1.2.1
 	github.com/valyala/gozstd v1.14.2
 	github.com/valyala/quicktemplate v1.7.0
-	golang.org/x/net v0.0.0-20211209124913-491a49abca63
+	golang.org/x/net v0.0.0-20211216030914-fe4d6282115f
 	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
-	golang.org/x/sys v0.0.0-20211210111614-af8b64212486
+	golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
-	google.golang.org/api v0.62.0
+	google.golang.org/api v0.63.0
-	google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
-	google.golang.org/grpc v1.42.0 // indirect
+	google.golang.org/grpc v1.43.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0
 )
go.sum (30 changed lines)

@@ -27,7 +27,6 @@ cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aD
 cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
 cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
 cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
-cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
 cloud.google.com/go v0.99.0 h1:y/cM2iqGgGi5D5DQZl6D9STN/3dR/Vx5Mp8s752oJTY=
 cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=

@@ -110,8 +109,8 @@ github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR
 github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
 github.com/VictoriaMetrics/metrics v1.18.1 h1:OZ0+kTTto8oPfHnVAnTOoyl0XlRhRkoQrD2n2cOuRw0=
 github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
-github.com/VictoriaMetrics/metricsql v0.33.0 h1:UBj7+Tf4dhD47tIxMYfAiy/4GXJN6xPYTweCZ+sRqw0=
+github.com/VictoriaMetrics/metricsql v0.34.0 h1:zF9yzRyNCAxzgEBBnE4y/p0QYNpSQp2jGEBCVE2fUD0=
-github.com/VictoriaMetrics/metricsql v0.33.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
+github.com/VictoriaMetrics/metricsql v0.34.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
 github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=

@@ -156,8 +155,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
-github.com/aws/aws-sdk-go v1.42.22 h1:EwcM7/+Ytg6xK+jbeM2+f9OELHqPiEiEKetT/GgAr7I=
+github.com/aws/aws-sdk-go v1.42.23 h1:V0V5hqMEyVelgpu1e4gMPVCJ+KhmscdNxP/NWP1iCOA=
-github.com/aws/aws-sdk-go v1.42.22/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
+github.com/aws/aws-sdk-go v1.42.23/go.mod h1:gyRszuZ/icHmHAVE4gc/r+cfCmhA1AD+vqfWbgI+eHs=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
 github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=

@@ -210,8 +209,8 @@ github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWH
 github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 h1:KwaoQzs/WeUxxJqiJsZ4euOly1Az/IgZXXSxlD/UBNk=
+github.com/cncf/xds/go v0.0.0-20211216145620-d92e9ce0af51 h1:F6fR7MjvOIk+FLQOeBCAbbKItVgbdj0l9VWPiHeBEiY=
-github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211216145620-d92e9ce0af51/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
 github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=

@@ -1181,8 +1180,9 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY=
 golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM=
+golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@@ -1309,9 +1309,9 @@ golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211210111614-af8b64212486 h1:5hpz5aRr+W1erYCL5JRhSUBJRph7l9XkNveoExlrKYk=
 golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@@ -1460,8 +1460,8 @@ google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv
 google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
 google.golang.org/api v0.58.0/go.mod h1:cAbP2FsxoGVNwtgNAmmn3y5G1TWAiVYRmg4yku3lv+E=
 google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
-google.golang.org/api v0.62.0 h1:PhGymJMXfGBzc4lBRmrx9+1w4w2wEzURHNGF/sD/xGc=
+google.golang.org/api v0.63.0 h1:n2bqqK895ygnBpdPDYetfy23K7fJ22wsrZKCyfuRkkA=
-google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

@@ -1535,8 +1535,6 @@ google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEc
 google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/genproto v0.0.0-20211016002631-37fc39342514/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0=
 google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=

@@ -1573,8 +1571,8 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
 google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
 google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
 google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A=
+google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM=
-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
vendor/github.com/VictoriaMetrics/metricsql/rollup.go (3 changed lines, generated, vendored)

@@ -10,6 +10,7 @@ var rollupFuncs = map[string]bool{
 	"ascent_over_time": true,
 	"avg_over_time": true,
 	"changes": true,
+	"changes_prometheus": true,
 	"count_eq_over_time": true,
 	"count_gt_over_time": true,
 	"count_le_over_time": true,

@@ -18,6 +19,7 @@ var rollupFuncs = map[string]bool{
 	"decreases_over_time": true,
 	"default_rollup": true,
 	"delta": true,
+	"delta_prometheus": true,
 	"deriv": true,
 	"deriv_fast": true,
 	"descent_over_time": true,

@@ -32,6 +34,7 @@ var rollupFuncs = map[string]bool{
 	"idelta": true,
 	"ideriv": true,
 	"increase": true,
+	"increase_prometheus": true,
 	"increase_pure": true,
 	"increases_over_time": true,
 	"integrate": true,
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (126 changed lines, generated, vendored)

@@ -25,6 +25,7 @@ const (
 	ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai).
 	ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
 	ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
+	ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta).
 	CaCentral1RegionID = "ca-central-1" // Canada (Central).
 	EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt).
 	EuNorth1RegionID = "eu-north-1" // Europe (Stockholm).

@@ -161,6 +162,9 @@ var awsPartition = partition{
 	"ap-southeast-2": region{
 		Description: "Asia Pacific (Sydney)",
 	},
+	"ap-southeast-3": region{
+		Description: "Asia Pacific (Jakarta)",
+	},
 	"ca-central-1": region{
 		Description: "Canada (Central)",
 	},

The hunks at @@ -235,6 +239,9 @@, @@ -391,6 +398,9 @@, @@ -1896,6 +1914,9 @@, @@ -2466,6 +2487,9 @@, @@ -3108,6 +3132,9 @@, @@ -3813,6 +3840,9 @@, @@ -4746,6 +4776,9 @@, @@ -5412,6 +5445,9 @@, @@ -5564,6 +5600,9 @@, @@ -6012,6 +6051,9 @@, @@ -6316,6 +6358,9 @@, @@ -6488,6 +6533,9 @@, @@ -6753,6 +6801,9 @@, @@ -7403,6 +7454,9 @@, @@ -7536,6 +7590,9 @@, @@ -7874,6 +7931,9 @@, @@ -8010,6 +8070,9 @@, @@ -9237,6 +9300,9 @@, @@ -10940,6 +11006,9 @@, @@ -11695,6 +11779,9 @@, @@ -11995,6 +12082,9 @@, @@ -12794,6 +12884,9 @@, @@ -13100,6 +13193,9 @@, @@ -14894,6 +14990,9 @@, @@ -15174,6 +15273,9 @@, @@ -18515,6 +18617,9 @@, @@ -18648,6 +18753,9 @@, @@ -18778,6 +18886,9 @@, @@ -18972,6 +19083,9 @@, @@ -19258,6 +19372,9 @@, @@ -19335,6 +19452,9 @@, @@ -19483,6 +19603,9 @@ and @@ -19610,6 +19733,9 @@ each add the same entry to the endpoint map of the corresponding service, directly after the existing `ap-southeast-2` key:

+			endpointKey{
+				Region: "ap-southeast-3",
+			}: endpoint{},

@@ -1007,6 +1017,14 @@ (api.ecr) adds a region-scoped endpoint instead:

+			endpointKey{
+				Region: "ap-southeast-3",
+			}: endpoint{
+				Hostname: "api.ecr.ap-southeast-3.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "ap-southeast-3",
+				},
+			},

@@ -11043,6 +11112,9 @@ and @@ -11052,6 +11124,9 @@ ("kinesisanalytics") add:

+			endpointKey{
+				Region: "af-south-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "ap-northeast-3",
+			}: endpoint{},

@@ -11297,6 +11372,15 @@ adds plain and FIPS-variant entries next to the existing `ap-southeast-3-fips` KMS endpoint:

+			endpointKey{
+				Region: "ap-southeast-3",
+			}: endpoint{},
+			endpointKey{
+				Region:  "ap-southeast-3",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "kms-fips.ap-southeast-3.amazonaws.com",
+			},
vendor/github.com/aws/aws-sdk-go/aws/version.go (2 changed lines, generated, vendored)

@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"

 // SDKVersion is the version of this SDK
-const SDKVersion = "1.42.22"
+const SDKVersion = "1.42.23"
vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go (177 lines, generated, vendored, new file)

@@ -0,0 +1,177 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
|
// versions:
|
||||||
|
// protoc-gen-go v1.25.0
|
||||||
|
// protoc v3.18.0
|
||||||
|
// source: xds/core/v3/cidr.proto
|
||||||
|
|
||||||
|
package v3
|
||||||
|
|
||||||
|
import (
|
||||||
|
_ "github.com/cncf/xds/go/xds/annotations/v3"
|
||||||
|
_ "github.com/envoyproxy/protoc-gen-validate/validate"
|
||||||
|
proto "github.com/golang/protobuf/proto"
|
||||||
|
wrappers "github.com/golang/protobuf/ptypes/wrappers"
|
||||||
|
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||||
|
reflect "reflect"
|
||||||
|
sync "sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Verify that this generated code is sufficiently up-to-date.
|
||||||
|
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||||
|
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||||
|
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||||
|
)
|
||||||
|
|
||||||
|
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||||
|
// of the legacy proto package is being used.
|
||||||
|
const _ = proto.ProtoPackageIsVersion4
|
||||||
|
|
||||||
|
type CidrRange struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
AddressPrefix string `protobuf:"bytes,1,opt,name=address_prefix,json=addressPrefix,proto3" json:"address_prefix,omitempty"`
|
||||||
|
PrefixLen *wrappers.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *CidrRange) Reset() {
|
||||||
|
*x = CidrRange{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_xds_core_v3_cidr_proto_msgTypes[0]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *CidrRange) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*CidrRange) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *CidrRange) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_xds_core_v3_cidr_proto_msgTypes[0]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use CidrRange.ProtoReflect.Descriptor instead.
|
||||||
|
func (*CidrRange) Descriptor() ([]byte, []int) {
|
||||||
|
return file_xds_core_v3_cidr_proto_rawDescGZIP(), []int{0}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *CidrRange) GetAddressPrefix() string {
|
||||||
|
if x != nil {
|
||||||
|
return x.AddressPrefix
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *CidrRange) GetPrefixLen() *wrappers.UInt32Value {
|
||||||
|
if x != nil {
|
||||||
|
return x.PrefixLen
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var File_xds_core_v3_cidr_proto protoreflect.FileDescriptor
|
||||||
|
|
||||||
|
var file_xds_core_v3_cidr_proto_rawDesc = []byte{
|
||||||
|
0x0a, 0x16, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69,
|
||||||
|
0x64, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f,
|
||||||
|
0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
|
||||||
|
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||||
|
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
|
||||||
|
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73,
|
||||||
|
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
|
||||||
|
0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
|
||||||
|
0x82, 0x01, 0x0a, 0x09, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2e, 0x0a,
|
||||||
|
0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
|
||||||
|
0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d,
|
||||||
|
0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, 0x0a,
|
||||||
|
0x0a, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
|
||||||
|
0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||||
|
0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42,
|
||||||
|
0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x66, 0x69,
|
||||||
|
0x78, 0x4c, 0x65, 0x6e, 0x42, 0x56, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68,
|
||||||
|
0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e,
|
||||||
|
0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
|
||||||
|
0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63,
|
||||||
|
0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72,
|
||||||
|
0x65, 0x2f, 0x76, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x62, 0x06, 0x70, 0x72,
|
||||||
|
0x6f, 0x74, 0x6f, 0x33,
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
file_xds_core_v3_cidr_proto_rawDescOnce sync.Once
|
||||||
|
file_xds_core_v3_cidr_proto_rawDescData = file_xds_core_v3_cidr_proto_rawDesc
|
||||||
|
)
|
||||||
|
|
||||||
|
func file_xds_core_v3_cidr_proto_rawDescGZIP() []byte {
|
||||||
|
file_xds_core_v3_cidr_proto_rawDescOnce.Do(func() {
|
||||||
|
file_xds_core_v3_cidr_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_cidr_proto_rawDescData)
|
||||||
|
})
|
||||||
|
return file_xds_core_v3_cidr_proto_rawDescData
|
||||||
|
}
|
||||||
|
|
||||||
|
var file_xds_core_v3_cidr_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||||
|
var file_xds_core_v3_cidr_proto_goTypes = []interface{}{
|
||||||
|
(*CidrRange)(nil), // 0: xds.core.v3.CidrRange
|
||||||
|
(*wrappers.UInt32Value)(nil), // 1: google.protobuf.UInt32Value
|
||||||
|
}
|
||||||
|
var file_xds_core_v3_cidr_proto_depIdxs = []int32{
|
||||||
|
1, // 0: xds.core.v3.CidrRange.prefix_len:type_name -> google.protobuf.UInt32Value
|
||||||
|
1, // [1:1] is the sub-list for method output_type
|
||||||
|
1, // [1:1] is the sub-list for method input_type
|
||||||
|
1, // [1:1] is the sub-list for extension type_name
|
||||||
|
1, // [1:1] is the sub-list for extension extendee
|
||||||
|
0, // [0:1] is the sub-list for field type_name
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { file_xds_core_v3_cidr_proto_init() }
|
||||||
|
func file_xds_core_v3_cidr_proto_init() {
|
||||||
|
if File_xds_core_v3_cidr_proto != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !protoimpl.UnsafeEnabled {
|
||||||
|
file_xds_core_v3_cidr_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*CidrRange); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
type x struct{}
|
||||||
|
out := protoimpl.TypeBuilder{
|
||||||
|
File: protoimpl.DescBuilder{
|
||||||
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
|
RawDescriptor: file_xds_core_v3_cidr_proto_rawDesc,
|
||||||
|
NumEnums: 0,
|
||||||
|
NumMessages: 1,
|
||||||
|
NumExtensions: 0,
|
||||||
|
NumServices: 0,
|
||||||
|
},
|
||||||
|
GoTypes: file_xds_core_v3_cidr_proto_goTypes,
|
||||||
|
DependencyIndexes: file_xds_core_v3_cidr_proto_depIdxs,
|
||||||
|
MessageInfos: file_xds_core_v3_cidr_proto_msgTypes,
|
||||||
|
}.Build()
|
||||||
|
File_xds_core_v3_cidr_proto = out.File
|
||||||
|
file_xds_core_v3_cidr_proto_rawDesc = nil
|
||||||
|
file_xds_core_v3_cidr_proto_goTypes = nil
|
||||||
|
file_xds_core_v3_cidr_proto_depIdxs = nil
|
||||||
|
}
|
vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go (116 lines, generated, vendored, new file)

@@ -0,0 +1,116 @@
// Code generated by protoc-gen-validate. DO NOT EDIT.
|
||||||
|
// source: xds/core/v3/cidr.proto
|
||||||
|
|
||||||
|
package v3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/mail"
|
||||||
|
"net/url"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/types/known/anypb"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ensure the imports are used
|
||||||
|
var (
|
||||||
|
_ = bytes.MinRead
|
||||||
|
_ = errors.New("")
|
||||||
|
_ = fmt.Print
|
||||||
|
_ = utf8.UTFMax
|
||||||
|
_ = (*regexp.Regexp)(nil)
|
||||||
|
_ = (*strings.Reader)(nil)
|
||||||
|
_ = net.IPv4len
|
||||||
|
_ = time.Duration(0)
|
||||||
|
_ = (*url.URL)(nil)
|
||||||
|
_ = (*mail.Address)(nil)
|
||||||
|
_ = anypb.Any{}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Validate checks the field values on CidrRange with the rules defined in the
|
||||||
|
// proto definition for this message. If any rules are violated, an error is returned.
|
||||||
|
func (m *CidrRange) Validate() error {
|
||||||
|
if m == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if utf8.RuneCountInString(m.GetAddressPrefix()) < 1 {
|
||||||
|
return CidrRangeValidationError{
|
||||||
|
field: "AddressPrefix",
|
||||||
|
reason: "value length must be at least 1 runes",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if wrapper := m.GetPrefixLen(); wrapper != nil {
|
||||||
|
|
||||||
|
if wrapper.GetValue() > 128 {
|
||||||
|
return CidrRangeValidationError{
|
||||||
|
field: "PrefixLen",
|
||||||
|
reason: "value must be less than or equal to 128",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CidrRangeValidationError is the validation error returned by
|
||||||
|
// CidrRange.Validate if the designated constraints aren't met.
|
||||||
|
type CidrRangeValidationError struct {
|
||||||
|
field string
|
||||||
|
reason string
|
||||||
|
cause error
|
||||||
|
key bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Field function returns field value.
|
||||||
|
func (e CidrRangeValidationError) Field() string { return e.field }
|
||||||
|
|
||||||
|
// Reason function returns reason value.
|
||||||
|
func (e CidrRangeValidationError) Reason() string { return e.reason }
|
||||||
|
|
||||||
|
// Cause function returns cause value.
|
||||||
|
func (e CidrRangeValidationError) Cause() error { return e.cause }
|
||||||
|
|
||||||
|
// Key function returns key value.
|
||||||
|
func (e CidrRangeValidationError) Key() bool { return e.key }
|
||||||
|
|
||||||
|
// ErrorName returns error name.
|
||||||
|
func (e CidrRangeValidationError) ErrorName() string { return "CidrRangeValidationError" }
|
||||||
|
|
||||||
|
// Error satisfies the builtin error interface
|
||||||
|
func (e CidrRangeValidationError) Error() string {
|
||||||
|
cause := ""
|
||||||
|
if e.cause != nil {
|
||||||
|
cause = fmt.Sprintf(" | caused by: %v", e.cause)
|
||||||
|
}
|
||||||
|
|
||||||
|
key := ""
|
||||||
|
if e.key {
|
||||||
|
key = "key for "
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"invalid %sCidrRange.%s: %s%s",
|
||||||
|
key,
|
||||||
|
e.field,
|
||||||
|
e.reason,
|
||||||
|
cause)
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ error = CidrRangeValidationError{}
|
||||||
|
|
||||||
|
var _ interface {
|
||||||
|
Field() string
|
||||||
|
Reason() string
|
||||||
|
Key() bool
|
||||||
|
Cause() error
|
||||||
|
ErrorName() string
|
||||||
|
} = CidrRangeValidationError{}
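
The generated Validate method above enforces two rules: a non-empty address prefix and a prefix length of at most 128. A minimal sketch of how a consumer might exercise it (the struct fields are assumed from the generated accessors shown above, and wrapperspb supplies the wrapped uint32; this is illustrative only, not part of the commit):

// example_cidr_validate.go (illustrative sketch, not part of this diff)
package main

import (
	"fmt"

	xdscore "github.com/cncf/xds/go/xds/core/v3"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// A PrefixLen of 129 violates the "less than or equal to 128" rule above.
	r := &xdscore.CidrRange{
		AddressPrefix: "10.0.0.0",
		PrefixLen:     wrapperspb.UInt32(129),
	}
	if err := r.Validate(); err != nil {
		fmt.Println(err) // invalid CidrRange.PrefixLen: value must be less than or equal to 128
	}
}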

261 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go generated vendored Normal file
@@ -0,0 +1,261 @@
|
||||||
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
|
// versions:
|
||||||
|
// protoc-gen-go v1.25.0
|
||||||
|
// protoc v3.18.0
|
||||||
|
// source: xds/type/matcher/v3/ip.proto
|
||||||
|
|
||||||
|
package v3
|
||||||
|
|
||||||
|
import (
|
||||||
|
_ "github.com/cncf/xds/go/xds/annotations/v3"
|
||||||
|
v3 "github.com/cncf/xds/go/xds/core/v3"
|
||||||
|
_ "github.com/envoyproxy/protoc-gen-validate/validate"
|
||||||
|
proto "github.com/golang/protobuf/proto"
|
||||||
|
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||||
|
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||||
|
reflect "reflect"
|
||||||
|
sync "sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Verify that this generated code is sufficiently up-to-date.
|
||||||
|
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||||
|
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||||
|
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||||
|
)
|
||||||
|
|
||||||
|
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||||
|
// of the legacy proto package is being used.
|
||||||
|
const _ = proto.ProtoPackageIsVersion4
|
||||||
|
|
||||||
|
type IPMatcher struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
RangeMatchers []*IPMatcher_IPRangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *IPMatcher) Reset() {
|
||||||
|
*x = IPMatcher{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[0]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *IPMatcher) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*IPMatcher) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *IPMatcher) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[0]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use IPMatcher.ProtoReflect.Descriptor instead.
|
||||||
|
func (*IPMatcher) Descriptor() ([]byte, []int) {
|
||||||
|
return file_xds_type_matcher_v3_ip_proto_rawDescGZIP(), []int{0}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *IPMatcher) GetRangeMatchers() []*IPMatcher_IPRangeMatcher {
|
||||||
|
if x != nil {
|
||||||
|
return x.RangeMatchers
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type IPMatcher_IPRangeMatcher struct {
|
||||||
|
state protoimpl.MessageState
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
|
||||||
|
Ranges []*v3.CidrRange `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"`
|
||||||
|
OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"`
|
||||||
|
Exclusive bool `protobuf:"varint,3,opt,name=exclusive,proto3" json:"exclusive,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *IPMatcher_IPRangeMatcher) Reset() {
|
||||||
|
*x = IPMatcher_IPRangeMatcher{}
|
||||||
|
if protoimpl.UnsafeEnabled {
|
||||||
|
mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[1]
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *IPMatcher_IPRangeMatcher) String() string {
|
||||||
|
return protoimpl.X.MessageStringOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*IPMatcher_IPRangeMatcher) ProtoMessage() {}
|
||||||
|
|
||||||
|
func (x *IPMatcher_IPRangeMatcher) ProtoReflect() protoreflect.Message {
|
||||||
|
mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[1]
|
||||||
|
if protoimpl.UnsafeEnabled && x != nil {
|
||||||
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
|
if ms.LoadMessageInfo() == nil {
|
||||||
|
ms.StoreMessageInfo(mi)
|
||||||
|
}
|
||||||
|
return ms
|
||||||
|
}
|
||||||
|
return mi.MessageOf(x)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Deprecated: Use IPMatcher_IPRangeMatcher.ProtoReflect.Descriptor instead.
|
||||||
|
func (*IPMatcher_IPRangeMatcher) Descriptor() ([]byte, []int) {
|
||||||
|
return file_xds_type_matcher_v3_ip_proto_rawDescGZIP(), []int{0, 0}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *IPMatcher_IPRangeMatcher) GetRanges() []*v3.CidrRange {
|
||||||
|
if x != nil {
|
||||||
|
return x.Ranges
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *IPMatcher_IPRangeMatcher) GetOnMatch() *Matcher_OnMatch {
|
||||||
|
if x != nil {
|
||||||
|
return x.OnMatch
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x *IPMatcher_IPRangeMatcher) GetExclusive() bool {
|
||||||
|
if x != nil {
|
||||||
|
return x.Exclusive
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var File_xds_type_matcher_v3_ip_proto protoreflect.FileDescriptor
|
||||||
|
|
||||||
|
var file_xds_type_matcher_v3_ip_proto_rawDesc = []byte{
|
||||||
|
0x0a, 0x1c, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
|
||||||
|
0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x69, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13,
|
||||||
|
0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
|
||||||
|
0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
|
||||||
|
0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70,
|
||||||
|
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76,
|
||||||
|
0x33, 0x2f, 0x63, 0x69, 0x64, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64,
|
||||||
|
0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76,
|
||||||
|
0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
|
||||||
|
0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
|
||||||
|
0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x02, 0x0a, 0x09, 0x49, 0x50, 0x4d,
|
||||||
|
0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f,
|
||||||
|
0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d,
|
||||||
|
0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
|
||||||
|
0x72, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x50, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x49,
|
||||||
|
0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d, 0x72,
|
||||||
|
0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0xa9, 0x01, 0x0a,
|
||||||
|
0x0e, 0x49, 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12,
|
||||||
|
0x38, 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
|
||||||
|
0x16, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69,
|
||||||
|
0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08,
|
||||||
|
0x01, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f,
|
||||||
|
0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64,
|
||||||
|
0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76,
|
||||||
|
0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
|
||||||
|
0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78,
|
||||||
|
0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65,
|
||||||
|
0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x42, 0x66, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e,
|
||||||
|
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
|
||||||
|
0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x49, 0x50, 0x4d, 0x61,
|
||||||
|
0x74, 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69,
|
||||||
|
0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64,
|
||||||
|
0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61,
|
||||||
|
0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01,
|
||||||
|
0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
file_xds_type_matcher_v3_ip_proto_rawDescOnce sync.Once
|
||||||
|
file_xds_type_matcher_v3_ip_proto_rawDescData = file_xds_type_matcher_v3_ip_proto_rawDesc
|
||||||
|
)
|
||||||
|
|
||||||
|
func file_xds_type_matcher_v3_ip_proto_rawDescGZIP() []byte {
|
||||||
|
file_xds_type_matcher_v3_ip_proto_rawDescOnce.Do(func() {
|
||||||
|
file_xds_type_matcher_v3_ip_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_ip_proto_rawDescData)
|
||||||
|
})
|
||||||
|
return file_xds_type_matcher_v3_ip_proto_rawDescData
|
||||||
|
}
|
||||||
|
|
||||||
|
var file_xds_type_matcher_v3_ip_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||||
|
var file_xds_type_matcher_v3_ip_proto_goTypes = []interface{}{
|
||||||
|
(*IPMatcher)(nil), // 0: xds.type.matcher.v3.IPMatcher
|
||||||
|
(*IPMatcher_IPRangeMatcher)(nil), // 1: xds.type.matcher.v3.IPMatcher.IPRangeMatcher
|
||||||
|
(*v3.CidrRange)(nil), // 2: xds.core.v3.CidrRange
|
||||||
|
(*Matcher_OnMatch)(nil), // 3: xds.type.matcher.v3.Matcher.OnMatch
|
||||||
|
}
|
||||||
|
var file_xds_type_matcher_v3_ip_proto_depIdxs = []int32{
|
||||||
|
1, // 0: xds.type.matcher.v3.IPMatcher.range_matchers:type_name -> xds.type.matcher.v3.IPMatcher.IPRangeMatcher
|
||||||
|
2, // 1: xds.type.matcher.v3.IPMatcher.IPRangeMatcher.ranges:type_name -> xds.core.v3.CidrRange
|
||||||
|
3, // 2: xds.type.matcher.v3.IPMatcher.IPRangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch
|
||||||
|
3, // [3:3] is the sub-list for method output_type
|
||||||
|
3, // [3:3] is the sub-list for method input_type
|
||||||
|
3, // [3:3] is the sub-list for extension type_name
|
||||||
|
3, // [3:3] is the sub-list for extension extendee
|
||||||
|
0, // [0:3] is the sub-list for field type_name
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { file_xds_type_matcher_v3_ip_proto_init() }
|
||||||
|
func file_xds_type_matcher_v3_ip_proto_init() {
|
||||||
|
if File_xds_type_matcher_v3_ip_proto != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
file_xds_type_matcher_v3_matcher_proto_init()
|
||||||
|
if !protoimpl.UnsafeEnabled {
|
||||||
|
file_xds_type_matcher_v3_ip_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*IPMatcher); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_xds_type_matcher_v3_ip_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||||
|
switch v := v.(*IPMatcher_IPRangeMatcher); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
type x struct{}
|
||||||
|
out := protoimpl.TypeBuilder{
|
||||||
|
File: protoimpl.DescBuilder{
|
||||||
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
|
RawDescriptor: file_xds_type_matcher_v3_ip_proto_rawDesc,
|
||||||
|
NumEnums: 0,
|
||||||
|
NumMessages: 2,
|
||||||
|
NumExtensions: 0,
|
||||||
|
NumServices: 0,
|
||||||
|
},
|
||||||
|
GoTypes: file_xds_type_matcher_v3_ip_proto_goTypes,
|
||||||
|
DependencyIndexes: file_xds_type_matcher_v3_ip_proto_depIdxs,
|
||||||
|
MessageInfos: file_xds_type_matcher_v3_ip_proto_msgTypes,
|
||||||
|
}.Build()
|
||||||
|
File_xds_type_matcher_v3_ip_proto = out.File
|
||||||
|
file_xds_type_matcher_v3_ip_proto_rawDesc = nil
|
||||||
|
file_xds_type_matcher_v3_ip_proto_goTypes = nil
|
||||||
|
file_xds_type_matcher_v3_ip_proto_depIdxs = nil
|
||||||
|
}
|
214 vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.validate.go generated vendored Normal file
@@ -0,0 +1,214 @@
|
||||||
|
// Code generated by protoc-gen-validate. DO NOT EDIT.
|
||||||
|
// source: xds/type/matcher/v3/ip.proto
|
||||||
|
|
||||||
|
package v3
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/mail"
|
||||||
|
"net/url"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
"unicode/utf8"
|
||||||
|
|
||||||
|
"google.golang.org/protobuf/types/known/anypb"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ensure the imports are used
|
||||||
|
var (
|
||||||
|
_ = bytes.MinRead
|
||||||
|
_ = errors.New("")
|
||||||
|
_ = fmt.Print
|
||||||
|
_ = utf8.UTFMax
|
||||||
|
_ = (*regexp.Regexp)(nil)
|
||||||
|
_ = (*strings.Reader)(nil)
|
||||||
|
_ = net.IPv4len
|
||||||
|
_ = time.Duration(0)
|
||||||
|
_ = (*url.URL)(nil)
|
||||||
|
_ = (*mail.Address)(nil)
|
||||||
|
_ = anypb.Any{}
|
||||||
|
)
|
||||||
|
|
||||||
|
// Validate checks the field values on IPMatcher with the rules defined in the
|
||||||
|
// proto definition for this message. If any rules are violated, an error is returned.
|
||||||
|
func (m *IPMatcher) Validate() error {
|
||||||
|
if m == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for idx, item := range m.GetRangeMatchers() {
|
||||||
|
_, _ = idx, item
|
||||||
|
|
||||||
|
if v, ok := interface{}(item).(interface{ Validate() error }); ok {
|
||||||
|
if err := v.Validate(); err != nil {
|
||||||
|
return IPMatcherValidationError{
|
||||||
|
field: fmt.Sprintf("RangeMatchers[%v]", idx),
|
||||||
|
reason: "embedded message failed validation",
|
||||||
|
cause: err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPMatcherValidationError is the validation error returned by
|
||||||
|
// IPMatcher.Validate if the designated constraints aren't met.
|
||||||
|
type IPMatcherValidationError struct {
|
||||||
|
field string
|
||||||
|
reason string
|
||||||
|
cause error
|
||||||
|
key bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Field function returns field value.
|
||||||
|
func (e IPMatcherValidationError) Field() string { return e.field }
|
||||||
|
|
||||||
|
// Reason function returns reason value.
|
||||||
|
func (e IPMatcherValidationError) Reason() string { return e.reason }
|
||||||
|
|
||||||
|
// Cause function returns cause value.
|
||||||
|
func (e IPMatcherValidationError) Cause() error { return e.cause }
|
||||||
|
|
||||||
|
// Key function returns key value.
|
||||||
|
func (e IPMatcherValidationError) Key() bool { return e.key }
|
||||||
|
|
||||||
|
// ErrorName returns error name.
|
||||||
|
func (e IPMatcherValidationError) ErrorName() string { return "IPMatcherValidationError" }
|
||||||
|
|
||||||
|
// Error satisfies the builtin error interface
|
||||||
|
func (e IPMatcherValidationError) Error() string {
|
||||||
|
cause := ""
|
||||||
|
if e.cause != nil {
|
||||||
|
cause = fmt.Sprintf(" | caused by: %v", e.cause)
|
||||||
|
}
|
||||||
|
|
||||||
|
key := ""
|
||||||
|
if e.key {
|
||||||
|
key = "key for "
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"invalid %sIPMatcher.%s: %s%s",
|
||||||
|
key,
|
||||||
|
e.field,
|
||||||
|
e.reason,
|
||||||
|
cause)
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ error = IPMatcherValidationError{}
|
||||||
|
|
||||||
|
var _ interface {
|
||||||
|
Field() string
|
||||||
|
Reason() string
|
||||||
|
Key() bool
|
||||||
|
Cause() error
|
||||||
|
ErrorName() string
|
||||||
|
} = IPMatcherValidationError{}
|
||||||
|
|
||||||
|
// Validate checks the field values on IPMatcher_IPRangeMatcher with the rules
|
||||||
|
// defined in the proto definition for this message. If any rules are
|
||||||
|
// violated, an error is returned.
|
||||||
|
func (m *IPMatcher_IPRangeMatcher) Validate() error {
|
||||||
|
if m == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(m.GetRanges()) < 1 {
|
||||||
|
return IPMatcher_IPRangeMatcherValidationError{
|
||||||
|
field: "Ranges",
|
||||||
|
reason: "value must contain at least 1 item(s)",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for idx, item := range m.GetRanges() {
|
||||||
|
_, _ = idx, item
|
||||||
|
|
||||||
|
if v, ok := interface{}(item).(interface{ Validate() error }); ok {
|
||||||
|
if err := v.Validate(); err != nil {
|
||||||
|
return IPMatcher_IPRangeMatcherValidationError{
|
||||||
|
field: fmt.Sprintf("Ranges[%v]", idx),
|
||||||
|
reason: "embedded message failed validation",
|
||||||
|
cause: err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok {
|
||||||
|
if err := v.Validate(); err != nil {
|
||||||
|
return IPMatcher_IPRangeMatcherValidationError{
|
||||||
|
field: "OnMatch",
|
||||||
|
reason: "embedded message failed validation",
|
||||||
|
cause: err,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// no validation rules for Exclusive
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPMatcher_IPRangeMatcherValidationError is the validation error returned by
|
||||||
|
// IPMatcher_IPRangeMatcher.Validate if the designated constraints aren't met.
|
||||||
|
type IPMatcher_IPRangeMatcherValidationError struct {
|
||||||
|
field string
|
||||||
|
reason string
|
||||||
|
cause error
|
||||||
|
key bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Field function returns field value.
|
||||||
|
func (e IPMatcher_IPRangeMatcherValidationError) Field() string { return e.field }
|
||||||
|
|
||||||
|
// Reason function returns reason value.
|
||||||
|
func (e IPMatcher_IPRangeMatcherValidationError) Reason() string { return e.reason }
|
||||||
|
|
||||||
|
// Cause function returns cause value.
|
||||||
|
func (e IPMatcher_IPRangeMatcherValidationError) Cause() error { return e.cause }
|
||||||
|
|
||||||
|
// Key function returns key value.
|
||||||
|
func (e IPMatcher_IPRangeMatcherValidationError) Key() bool { return e.key }
|
||||||
|
|
||||||
|
// ErrorName returns error name.
|
||||||
|
func (e IPMatcher_IPRangeMatcherValidationError) ErrorName() string {
|
||||||
|
return "IPMatcher_IPRangeMatcherValidationError"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error satisfies the builtin error interface
|
||||||
|
func (e IPMatcher_IPRangeMatcherValidationError) Error() string {
|
||||||
|
cause := ""
|
||||||
|
if e.cause != nil {
|
||||||
|
cause = fmt.Sprintf(" | caused by: %v", e.cause)
|
||||||
|
}
|
||||||
|
|
||||||
|
key := ""
|
||||||
|
if e.key {
|
||||||
|
key = "key for "
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"invalid %sIPMatcher_IPRangeMatcher.%s: %s%s",
|
||||||
|
key,
|
||||||
|
e.field,
|
||||||
|
e.reason,
|
||||||
|
cause)
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ error = IPMatcher_IPRangeMatcherValidationError{}
|
||||||
|
|
||||||
|
var _ interface {
|
||||||
|
Field() string
|
||||||
|
Reason() string
|
||||||
|
Key() bool
|
||||||
|
Cause() error
|
||||||
|
ErrorName() string
|
||||||
|
} = IPMatcher_IPRangeMatcherValidationError{}
|
2 vendor/golang.org/x/net/http2/server.go generated vendored
@@ -722,7 +722,7 @@ func (sc *serverConn) canonicalHeader(v string) string {
 	// maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of
 	// entries in the canonHeader cache. This should be larger than the number
 	// of unique, uncommon header keys likely to be sent by the peer, while not
-	// so high as to permit unreaasonable memory usage if the peer sends an unbounded
+	// so high as to permit unreasonable memory usage if the peer sends an unbounded
 	// number of unique header keys.
 	const maxCachedCanonicalHeaders = 32
 	if len(sc.canonHeader) < maxCachedCanonicalHeaders {
2 vendor/golang.org/x/sys/unix/mkerrors.sh generated vendored
@@ -261,6 +261,7 @@ struct ltchars {
 #include <linux/vm_sockets.h>
 #include <linux/wait.h>
 #include <linux/watchdog.h>
+#include <linux/wireguard.h>
 
 #include <mtd/ubi-user.h>
 #include <mtd/mtd-user.h>
@@ -606,6 +607,7 @@ ccflags="$@"
 		$2 ~ /^MTD/ ||
 		$2 ~ /^OTP/ ||
 		$2 ~ /^MEM/ ||
+		$2 ~ /^WG/ ||
 		$2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)}
 		$2 ~ /^__WCOREFLAG$/ {next}
 		$2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)}
7 vendor/golang.org/x/sys/unix/zerrors_linux.go generated vendored
@@ -2826,6 +2826,13 @@ const (
 	WDIOS_TEMPPANIC      = 0x4
 	WDIOS_UNKNOWN        = -0x1
 	WEXITED              = 0x4
+	WGALLOWEDIP_A_MAX    = 0x3
+	WGDEVICE_A_MAX       = 0x8
+	WGPEER_A_MAX         = 0xa
+	WG_CMD_MAX           = 0x1
+	WG_GENL_NAME         = "wireguard"
+	WG_GENL_VERSION      = 0x1
+	WG_KEY_LEN           = 0x20
 	WIN_ACKMEDIACHANGE   = 0xdb
 	WIN_CHECKPOWERMODE1  = 0xe5
 	WIN_CHECKPOWERMODE2  = 0x98
vendor/golang.org/x/sys/unix/ztypes_linux.go
generated
vendored
75
vendor/golang.org/x/sys/unix/ztypes_linux.go
generated
vendored
|
@ -867,6 +867,7 @@ const (
|
||||||
CTRL_CMD_NEWMCAST_GRP = 0x7
|
CTRL_CMD_NEWMCAST_GRP = 0x7
|
||||||
CTRL_CMD_DELMCAST_GRP = 0x8
|
CTRL_CMD_DELMCAST_GRP = 0x8
|
||||||
CTRL_CMD_GETMCAST_GRP = 0x9
|
CTRL_CMD_GETMCAST_GRP = 0x9
|
||||||
|
CTRL_CMD_GETPOLICY = 0xa
|
||||||
CTRL_ATTR_UNSPEC = 0x0
|
CTRL_ATTR_UNSPEC = 0x0
|
||||||
CTRL_ATTR_FAMILY_ID = 0x1
|
CTRL_ATTR_FAMILY_ID = 0x1
|
||||||
CTRL_ATTR_FAMILY_NAME = 0x2
|
CTRL_ATTR_FAMILY_NAME = 0x2
|
||||||
|
@ -875,12 +876,19 @@ const (
|
||||||
CTRL_ATTR_MAXATTR = 0x5
|
CTRL_ATTR_MAXATTR = 0x5
|
||||||
CTRL_ATTR_OPS = 0x6
|
CTRL_ATTR_OPS = 0x6
|
||||||
CTRL_ATTR_MCAST_GROUPS = 0x7
|
CTRL_ATTR_MCAST_GROUPS = 0x7
|
||||||
|
CTRL_ATTR_POLICY = 0x8
|
||||||
|
CTRL_ATTR_OP_POLICY = 0x9
|
||||||
|
CTRL_ATTR_OP = 0xa
|
||||||
CTRL_ATTR_OP_UNSPEC = 0x0
|
CTRL_ATTR_OP_UNSPEC = 0x0
|
||||||
CTRL_ATTR_OP_ID = 0x1
|
CTRL_ATTR_OP_ID = 0x1
|
||||||
CTRL_ATTR_OP_FLAGS = 0x2
|
CTRL_ATTR_OP_FLAGS = 0x2
|
||||||
CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0
|
CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0
|
||||||
CTRL_ATTR_MCAST_GRP_NAME = 0x1
|
CTRL_ATTR_MCAST_GRP_NAME = 0x1
|
||||||
CTRL_ATTR_MCAST_GRP_ID = 0x2
|
CTRL_ATTR_MCAST_GRP_ID = 0x2
|
||||||
|
CTRL_ATTR_POLICY_UNSPEC = 0x0
|
||||||
|
CTRL_ATTR_POLICY_DO = 0x1
|
||||||
|
CTRL_ATTR_POLICY_DUMP = 0x2
|
||||||
|
CTRL_ATTR_POLICY_DUMP_MAX = 0x2
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -3968,3 +3976,70 @@ type MountAttr struct {
|
||||||
Propagation uint64
|
Propagation uint64
|
||||||
Userns_fd uint64
|
Userns_fd uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
WG_CMD_GET_DEVICE = 0x0
|
||||||
|
WG_CMD_SET_DEVICE = 0x1
|
||||||
|
WGDEVICE_F_REPLACE_PEERS = 0x1
|
||||||
|
WGDEVICE_A_UNSPEC = 0x0
|
||||||
|
WGDEVICE_A_IFINDEX = 0x1
|
||||||
|
WGDEVICE_A_IFNAME = 0x2
|
||||||
|
WGDEVICE_A_PRIVATE_KEY = 0x3
|
||||||
|
WGDEVICE_A_PUBLIC_KEY = 0x4
|
||||||
|
WGDEVICE_A_FLAGS = 0x5
|
||||||
|
WGDEVICE_A_LISTEN_PORT = 0x6
|
||||||
|
WGDEVICE_A_FWMARK = 0x7
|
||||||
|
WGDEVICE_A_PEERS = 0x8
|
||||||
|
WGPEER_F_REMOVE_ME = 0x1
|
||||||
|
WGPEER_F_REPLACE_ALLOWEDIPS = 0x2
|
||||||
|
WGPEER_F_UPDATE_ONLY = 0x4
|
||||||
|
WGPEER_A_UNSPEC = 0x0
|
||||||
|
WGPEER_A_PUBLIC_KEY = 0x1
|
||||||
|
WGPEER_A_PRESHARED_KEY = 0x2
|
||||||
|
WGPEER_A_FLAGS = 0x3
|
||||||
|
WGPEER_A_ENDPOINT = 0x4
|
||||||
|
WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL = 0x5
|
||||||
|
WGPEER_A_LAST_HANDSHAKE_TIME = 0x6
|
||||||
|
WGPEER_A_RX_BYTES = 0x7
|
||||||
|
WGPEER_A_TX_BYTES = 0x8
|
||||||
|
WGPEER_A_ALLOWEDIPS = 0x9
|
||||||
|
WGPEER_A_PROTOCOL_VERSION = 0xa
|
||||||
|
WGALLOWEDIP_A_UNSPEC = 0x0
|
||||||
|
WGALLOWEDIP_A_FAMILY = 0x1
|
||||||
|
WGALLOWEDIP_A_IPADDR = 0x2
|
||||||
|
WGALLOWEDIP_A_CIDR_MASK = 0x3
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
NL_ATTR_TYPE_INVALID = 0x0
|
||||||
|
NL_ATTR_TYPE_FLAG = 0x1
|
||||||
|
NL_ATTR_TYPE_U8 = 0x2
|
||||||
|
NL_ATTR_TYPE_U16 = 0x3
|
||||||
|
NL_ATTR_TYPE_U32 = 0x4
|
||||||
|
NL_ATTR_TYPE_U64 = 0x5
|
||||||
|
NL_ATTR_TYPE_S8 = 0x6
|
||||||
|
NL_ATTR_TYPE_S16 = 0x7
|
||||||
|
NL_ATTR_TYPE_S32 = 0x8
|
||||||
|
NL_ATTR_TYPE_S64 = 0x9
|
||||||
|
NL_ATTR_TYPE_BINARY = 0xa
|
||||||
|
NL_ATTR_TYPE_STRING = 0xb
|
||||||
|
NL_ATTR_TYPE_NUL_STRING = 0xc
|
||||||
|
NL_ATTR_TYPE_NESTED = 0xd
|
||||||
|
NL_ATTR_TYPE_NESTED_ARRAY = 0xe
|
||||||
|
NL_ATTR_TYPE_BITFIELD32 = 0xf
|
||||||
|
|
||||||
|
NL_POLICY_TYPE_ATTR_UNSPEC = 0x0
|
||||||
|
NL_POLICY_TYPE_ATTR_TYPE = 0x1
|
||||||
|
NL_POLICY_TYPE_ATTR_MIN_VALUE_S = 0x2
|
||||||
|
NL_POLICY_TYPE_ATTR_MAX_VALUE_S = 0x3
|
||||||
|
NL_POLICY_TYPE_ATTR_MIN_VALUE_U = 0x4
|
||||||
|
NL_POLICY_TYPE_ATTR_MAX_VALUE_U = 0x5
|
||||||
|
NL_POLICY_TYPE_ATTR_MIN_LENGTH = 0x6
|
||||||
|
NL_POLICY_TYPE_ATTR_MAX_LENGTH = 0x7
|
||||||
|
NL_POLICY_TYPE_ATTR_POLICY_IDX = 0x8
|
||||||
|
NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE = 0x9
|
||||||
|
NL_POLICY_TYPE_ATTR_BITFIELD32_MASK = 0xa
|
||||||
|
NL_POLICY_TYPE_ATTR_PAD = 0xb
|
||||||
|
NL_POLICY_TYPE_ATTR_MASK = 0xc
|
||||||
|
NL_POLICY_TYPE_ATTR_MAX = 0xc
|
||||||
|
)
|
||||||
|
|
37 vendor/golang.org/x/sys/windows/exec_windows.go generated vendored
@@ -9,8 +9,6 @@ package windows
 import (
 	errorspkg "errors"
 	"unsafe"
-
-	"golang.org/x/sys/internal/unsafeheader"
 )
 
 // EscapeArg rewrites command line argument s as prescribed
@@ -147,8 +145,12 @@ func NewProcThreadAttributeList(maxAttrCount uint32) (*ProcThreadAttributeListCo
 		}
 		return nil, err
 	}
+	alloc, err := LocalAlloc(LMEM_FIXED, uint32(size))
+	if err != nil {
+		return nil, err
+	}
 	// size is guaranteed to be ≥1 by InitializeProcThreadAttributeList.
-	al := &ProcThreadAttributeListContainer{data: (*ProcThreadAttributeList)(unsafe.Pointer(&make([]byte, size)[0]))}
+	al := &ProcThreadAttributeListContainer{data: (*ProcThreadAttributeList)(unsafe.Pointer(alloc))}
 	err = initializeProcThreadAttributeList(al.data, maxAttrCount, 0, &size)
 	if err != nil {
 		return nil, err
@@ -157,36 +159,17 @@ func NewProcThreadAttributeList(maxAttrCount uint32) (*ProcThreadAttributeListCo
 }
 
 // Update modifies the ProcThreadAttributeList using UpdateProcThreadAttribute.
-// Note that the value passed to this function will be copied into memory
-// allocated by LocalAlloc, the contents of which should not contain any
-// Go-managed pointers, even if the passed value itself is a Go-managed
-// pointer.
 func (al *ProcThreadAttributeListContainer) Update(attribute uintptr, value unsafe.Pointer, size uintptr) error {
-	alloc, err := LocalAlloc(LMEM_FIXED, uint32(size))
-	if err != nil {
-		return err
-	}
-	var src, dst []byte
-	hdr := (*unsafeheader.Slice)(unsafe.Pointer(&src))
-	hdr.Data = value
-	hdr.Cap = int(size)
-	hdr.Len = int(size)
-	hdr = (*unsafeheader.Slice)(unsafe.Pointer(&dst))
-	hdr.Data = unsafe.Pointer(alloc)
-	hdr.Cap = int(size)
-	hdr.Len = int(size)
-	copy(dst, src)
-	al.heapAllocations = append(al.heapAllocations, alloc)
-	return updateProcThreadAttribute(al.data, 0, attribute, unsafe.Pointer(alloc), size, nil, nil)
+	al.pointers = append(al.pointers, value)
+	return updateProcThreadAttribute(al.data, 0, attribute, value, size, nil, nil)
 }
 
 // Delete frees ProcThreadAttributeList's resources.
 func (al *ProcThreadAttributeListContainer) Delete() {
 	deleteProcThreadAttributeList(al.data)
-	for i := range al.heapAllocations {
-		LocalFree(Handle(al.heapAllocations[i]))
-	}
-	al.heapAllocations = nil
+	LocalFree(Handle(unsafe.Pointer(al.data)))
+	al.data = nil
+	al.pointers = nil
 }
 
 // List returns the actual ProcThreadAttributeList to be passed to StartupInfoEx.
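
The hunks above (together with the types_windows.go change that follows) drop the old copy-into-LocalAlloc behavior of Update: the attribute list itself is now the only LocalAlloc allocation, Update just records the caller's pointer in al.pointers, and Delete frees the list directly. A rough sketch of how the container is typically used, to show where that matters (illustrative only, not part of this commit; assumes the usual x/sys/windows and unsafe imports, and the parent-process attribute is just one example):

// Illustrative sketch (not part of the diff): the value handed to Update must
// now stay reachable in Go for as long as the attribute list is alive, since
// Update stores the pointer rather than copying the bytes.
func useAttrList(parent windows.Handle) error {
	al, err := windows.NewProcThreadAttributeList(1)
	if err != nil {
		return err
	}
	defer al.Delete() // frees the LocalAlloc'd list and drops the kept pointers

	if err := al.Update(
		windows.PROC_THREAD_ATTRIBUTE_PARENT_PROCESS,
		unsafe.Pointer(&parent),
		unsafe.Sizeof(parent),
	); err != nil {
		return err
	}
	// al.List() would then go into a StartupInfoEx before CreateProcess.
	return nil
}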
39 vendor/golang.org/x/sys/windows/types_windows.go generated vendored
@@ -939,7 +939,7 @@ type ProcThreadAttributeList struct{}
 
 type ProcThreadAttributeListContainer struct {
 	data *ProcThreadAttributeList
-	heapAllocations []uintptr
+	pointers []unsafe.Pointer
 }
 
 type ProcessInformation struct {
@@ -2749,6 +2749,43 @@ type PROCESS_BASIC_INFORMATION struct {
|
||||||
InheritedFromUniqueProcessId uintptr
|
InheritedFromUniqueProcessId uintptr
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type SYSTEM_PROCESS_INFORMATION struct {
|
||||||
|
NextEntryOffset uint32
|
||||||
|
NumberOfThreads uint32
|
||||||
|
WorkingSetPrivateSize int64
|
||||||
|
HardFaultCount uint32
|
||||||
|
NumberOfThreadsHighWatermark uint32
|
||||||
|
CycleTime uint64
|
||||||
|
CreateTime int64
|
||||||
|
UserTime int64
|
||||||
|
KernelTime int64
|
||||||
|
ImageName NTUnicodeString
|
||||||
|
BasePriority int32
|
||||||
|
UniqueProcessID uintptr
|
||||||
|
InheritedFromUniqueProcessID uintptr
|
||||||
|
HandleCount uint32
|
||||||
|
SessionID uint32
|
||||||
|
UniqueProcessKey *uint32
|
||||||
|
PeakVirtualSize uintptr
|
||||||
|
VirtualSize uintptr
|
||||||
|
PageFaultCount uint32
|
||||||
|
PeakWorkingSetSize uintptr
|
||||||
|
WorkingSetSize uintptr
|
||||||
|
QuotaPeakPagedPoolUsage uintptr
|
||||||
|
QuotaPagedPoolUsage uintptr
|
||||||
|
QuotaPeakNonPagedPoolUsage uintptr
|
||||||
|
QuotaNonPagedPoolUsage uintptr
|
||||||
|
PagefileUsage uintptr
|
||||||
|
PeakPagefileUsage uintptr
|
||||||
|
PrivatePageCount uintptr
|
||||||
|
ReadOperationCount int64
|
||||||
|
WriteOperationCount int64
|
||||||
|
OtherOperationCount int64
|
||||||
|
ReadTransferCount int64
|
||||||
|
WriteTransferCount int64
|
||||||
|
OtherTransferCount int64
|
||||||
|
}
|
||||||
|
|
||||||
// SystemInformationClasses for NtQuerySystemInformation and NtSetSystemInformation
|
// SystemInformationClasses for NtQuerySystemInformation and NtSetSystemInformation
|
||||||
const (
|
const (
|
||||||
SystemBasicInformation = iota
|
SystemBasicInformation = iota
|
||||||
|
|
4 vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-api.json generated vendored
@@ -226,14 +226,14 @@
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"revision": "20210813",
|
"revision": "20211203",
|
||||||
"rootUrl": "https://iamcredentials.googleapis.com/",
|
"rootUrl": "https://iamcredentials.googleapis.com/",
|
||||||
"schemas": {
|
"schemas": {
|
||||||
"GenerateAccessTokenRequest": {
|
"GenerateAccessTokenRequest": {
|
||||||
"id": "GenerateAccessTokenRequest",
|
"id": "GenerateAccessTokenRequest",
|
||||||
"properties": {
|
"properties": {
|
||||||
"delegates": {
|
"delegates": {
|
||||||
"description": "The sequence of service accounts in a delegation chain. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.",
|
"description": "The sequence of service accounts in a delegation chain. This field is required for [delegated requests](https://cloud.google.com/iam/help/credentials/delegated-request). For [direct requests](https://cloud.google.com/iam/help/credentials/direct-request), which are more common, do not specify this field. Each service account must be granted the `roles/iam.serviceAccountTokenCreator` role on its next service account in the chain. The last service account in the chain must be granted the `roles/iam.serviceAccountTokenCreator` role on the service account that is specified in the `name` field of the request. The delegates must have the following format: `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-` wildcard character is required; replacing it with a project ID is invalid.",
|
||||||
"items": {
|
"items": {
|
||||||
"type": "string"
|
"type": "string"
|
||||||
},
|
},
|
||||||
|
|
24 vendor/google.golang.org/api/iamcredentials/v1/iamcredentials-gen.go generated vendored
@@ -159,12 +159,16 @@ type ProjectsServiceAccountsService struct {
|
||||||
|
|
||||||
type GenerateAccessTokenRequest struct {
|
type GenerateAccessTokenRequest struct {
|
||||||
// Delegates: The sequence of service accounts in a delegation chain.
|
// Delegates: The sequence of service accounts in a delegation chain.
|
||||||
// Each service account must be granted the
|
// This field is required for delegated requests
|
||||||
// `roles/iam.serviceAccountTokenCreator` role on its next service
|
// (https://cloud.google.com/iam/help/credentials/delegated-request).
|
||||||
// account in the chain. The last service account in the chain must be
|
// For direct requests
|
||||||
// granted the `roles/iam.serviceAccountTokenCreator` role on the
|
// (https://cloud.google.com/iam/help/credentials/direct-request), which
|
||||||
// service account that is specified in the `name` field of the request.
|
// are more common, do not specify this field. Each service account must
|
||||||
// The delegates must have the following format:
|
// be granted the `roles/iam.serviceAccountTokenCreator` role on its
|
||||||
|
// next service account in the chain. The last service account in the
|
||||||
|
// chain must be granted the `roles/iam.serviceAccountTokenCreator` role
|
||||||
|
// on the service account that is specified in the `name` field of the
|
||||||
|
// request. The delegates must have the following format:
|
||||||
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
|
// `projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}`. The `-`
|
||||||
// wildcard character is required; replacing it with a project ID is
|
// wildcard character is required; replacing it with a project ID is
|
||||||
// invalid.
|
// invalid.
|
||||||
|
@ -553,7 +557,7 @@ func (c *ProjectsServiceAccountsGenerateAccessTokenCall) Header() http.Header {
|
||||||
|
|
||||||
func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) (*http.Response, error) {
|
func (c *ProjectsServiceAccountsGenerateAccessTokenCall) doRequest(alt string) (*http.Response, error) {
|
||||||
reqHeaders := make(http.Header)
|
reqHeaders := make(http.Header)
|
||||||
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
|
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
|
||||||
for k, v := range c.header_ {
|
for k, v := range c.header_ {
|
||||||
reqHeaders[k] = v
|
reqHeaders[k] = v
|
||||||
}
|
}
|
||||||
|
@ -700,7 +704,7 @@ func (c *ProjectsServiceAccountsGenerateIdTokenCall) Header() http.Header {
|
||||||
|
|
||||||
func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*http.Response, error) {
|
func (c *ProjectsServiceAccountsGenerateIdTokenCall) doRequest(alt string) (*http.Response, error) {
|
||||||
reqHeaders := make(http.Header)
|
reqHeaders := make(http.Header)
|
||||||
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
|
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
|
||||||
for k, v := range c.header_ {
|
for k, v := range c.header_ {
|
||||||
reqHeaders[k] = v
|
reqHeaders[k] = v
|
||||||
}
|
}
|
||||||
|
@ -847,7 +851,7 @@ func (c *ProjectsServiceAccountsSignBlobCall) Header() http.Header {
|
||||||
|
|
||||||
func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Response, error) {
|
func (c *ProjectsServiceAccountsSignBlobCall) doRequest(alt string) (*http.Response, error) {
|
||||||
reqHeaders := make(http.Header)
|
reqHeaders := make(http.Header)
|
||||||
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
|
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
|
||||||
for k, v := range c.header_ {
|
for k, v := range c.header_ {
|
||||||
reqHeaders[k] = v
|
reqHeaders[k] = v
|
||||||
}
|
}
|
||||||
|
@ -994,7 +998,7 @@ func (c *ProjectsServiceAccountsSignJwtCall) Header() http.Header {
|
||||||
|
|
||||||
func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Response, error) {
|
func (c *ProjectsServiceAccountsSignJwtCall) doRequest(alt string) (*http.Response, error) {
|
||||||
reqHeaders := make(http.Header)
|
reqHeaders := make(http.Header)
|
||||||
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
|
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
|
||||||
for k, v := range c.header_ {
|
for k, v := range c.header_ {
|
||||||
reqHeaders[k] = v
|
reqHeaders[k] = v
|
||||||
}
|
}
|
||||||
|
|
67 vendor/google.golang.org/api/internal/gensupport/resumable.go generated vendored
@@ -12,32 +12,6 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
gax "github.com/googleapis/gax-go/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Backoff is an interface around gax.Backoff's Pause method, allowing tests to provide their
|
|
||||||
// own implementation.
|
|
||||||
type Backoff interface {
|
|
||||||
Pause() time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
// These are declared as global variables so that tests can overwrite them.
|
|
||||||
var (
|
|
||||||
retryDeadline = 32 * time.Second
|
|
||||||
backoff = func() Backoff {
|
|
||||||
return &gax.Backoff{Initial: 100 * time.Millisecond}
|
|
||||||
}
|
|
||||||
// isRetryable is a platform-specific hook, specified in retryable_linux.go
|
|
||||||
syscallRetryable func(error) bool = func(err error) bool { return false }
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// statusTooManyRequests is returned by the storage API if the
|
|
||||||
// per-project limits have been temporarily exceeded. The request
|
|
||||||
// should be retried.
|
|
||||||
// https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes
|
|
||||||
statusTooManyRequests = 429
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// ResumableUpload is used by the generated APIs to provide resumable uploads.
|
// ResumableUpload is used by the generated APIs to provide resumable uploads.
|
||||||
|
@ -57,6 +31,9 @@ type ResumableUpload struct {
|
||||||
|
|
||||||
// Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded.
|
// Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded.
|
||||||
Callback func(int64)
|
Callback func(int64)
|
||||||
|
|
||||||
|
// Retry optionally configures retries for requests made against the upload.
|
||||||
|
Retry *RetryConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
// Progress returns the number of bytes uploaded at this point.
|
// Progress returns the number of bytes uploaded at this point.
|
||||||
|
@ -176,13 +153,15 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err
|
||||||
}
|
}
|
||||||
return resp, nil
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
// Configure retryable error criteria.
|
||||||
|
errorFunc := rx.Retry.errorFunc()
|
||||||
|
|
||||||
// Send all chunks.
|
// Send all chunks.
|
||||||
for {
|
for {
|
||||||
var pause time.Duration
|
var pause time.Duration
|
||||||
|
|
||||||
// Each chunk gets its own initialized-at-zero retry.
|
// Each chunk gets its own initialized-at-zero backoff.
|
||||||
bo := backoff()
|
bo := rx.Retry.backoff()
|
||||||
quitAfter := time.After(retryDeadline)
|
quitAfter := time.After(retryDeadline)
|
||||||
|
|
||||||
// Retry loop for a single chunk.
|
// Retry loop for a single chunk.
|
||||||
|
@ -206,7 +185,7 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if we should retry the request.
|
// Check if we should retry the request.
|
||||||
if !shouldRetry(status, err) {
|
if !errorFunc(status, err) {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -226,33 +205,3 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err
|
||||||
return prepareReturn(resp, err)
|
return prepareReturn(resp, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// shouldRetry indicates whether an error is retryable for the purposes of this
|
|
||||||
// package, following guidance from
|
|
||||||
// https://cloud.google.com/storage/docs/exponential-backoff .
|
|
||||||
func shouldRetry(status int, err error) bool {
|
|
||||||
if 500 <= status && status <= 599 {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if status == statusTooManyRequests {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if err == io.ErrUnexpectedEOF {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// Transient network errors should be retried.
|
|
||||||
if syscallRetryable(err) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if err, ok := err.(interface{ Temporary() bool }); ok {
|
|
||||||
if err.Temporary() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// If Go 1.13 error unwrapping is available, use this to examine wrapped
|
|
||||||
// errors.
|
|
||||||
if err, ok := err.(interface{ Unwrap() error }); ok {
|
|
||||||
return shouldRetry(status, err.Unwrap())
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
106 vendor/google.golang.org/api/internal/gensupport/retry.go generated vendored Normal file
@@ -0,0 +1,106 @@
|
||||||
|
// Copyright 2021 Google LLC.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package gensupport
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/googleapis/gax-go/v2"
|
||||||
|
"google.golang.org/api/googleapi"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Backoff is an interface around gax.Backoff's Pause method, allowing tests to provide their
|
||||||
|
// own implementation.
|
||||||
|
type Backoff interface {
|
||||||
|
Pause() time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// These are declared as global variables so that tests can overwrite them.
|
||||||
|
var (
|
||||||
|
// Per-chunk deadline for resumable uploads.
|
||||||
|
retryDeadline = 32 * time.Second
|
||||||
|
// Default backoff timer.
|
||||||
|
backoff = func() Backoff {
|
||||||
|
return &gax.Backoff{Initial: 100 * time.Millisecond}
|
||||||
|
}
|
||||||
|
// syscallRetryable is a platform-specific hook, specified in retryable_linux.go
|
||||||
|
syscallRetryable func(error) bool = func(err error) bool { return false }
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// statusTooManyRequests is returned by the storage API if the
|
||||||
|
// per-project limits have been temporarily exceeded. The request
|
||||||
|
// should be retried.
|
||||||
|
// https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes
|
||||||
|
statusTooManyRequests = 429
|
||||||
|
)
|
||||||
|
|
||||||
|
// shouldRetry indicates whether an error is retryable for the purposes of this
|
||||||
|
// package, unless a ShouldRetry func is specified by the RetryConfig instead.
|
||||||
|
// It follows guidance from
|
||||||
|
// https://cloud.google.com/storage/docs/exponential-backoff .
|
||||||
|
func shouldRetry(status int, err error) bool {
|
||||||
|
if 500 <= status && status <= 599 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if status == statusTooManyRequests {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if err == io.ErrUnexpectedEOF {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// Transient network errors should be retried.
|
||||||
|
if syscallRetryable(err) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if err, ok := err.(interface{ Temporary() bool }); ok {
|
||||||
|
if err.Temporary() {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// If Go 1.13 error unwrapping is available, use this to examine wrapped
|
||||||
|
// errors.
|
||||||
|
if err, ok := err.(interface{ Unwrap() error }); ok {
|
||||||
|
return shouldRetry(status, err.Unwrap())
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// RetryConfig allows configuration of backoff timing and retryable errors.
|
||||||
|
type RetryConfig struct {
|
||||||
|
Backoff *gax.Backoff
|
||||||
|
ShouldRetry func(err error) bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get a new backoff object based on the configured values.
|
||||||
|
func (r *RetryConfig) backoff() Backoff {
|
||||||
|
if r == nil || r.Backoff == nil {
|
||||||
|
return backoff()
|
||||||
|
}
|
||||||
|
return &gax.Backoff{
|
||||||
|
Initial: r.Backoff.Initial,
|
||||||
|
Max: r.Backoff.Max,
|
||||||
|
Multiplier: r.Backoff.Multiplier,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is kind of hacky; it is necessary because ShouldRetry expects to
|
||||||
|
// handle HTTP errors via googleapi.Error, but the error has not yet been
|
||||||
|
// wrapped with a googleapi.Error at this layer, and the ErrorFunc type
|
||||||
|
// in the manual layer does not pass in a status explicitly as it does
|
||||||
|
// here. So, we must wrap error status codes in a googleapi.Error so that
|
||||||
|
// ShouldRetry can parse this correctly.
|
||||||
|
func (r *RetryConfig) errorFunc() func(status int, err error) bool {
|
||||||
|
if r == nil || r.ShouldRetry == nil {
|
||||||
|
return shouldRetry
|
||||||
|
}
|
||||||
|
return func(status int, err error) bool {
|
||||||
|
if status >= 400 {
|
||||||
|
return r.ShouldRetry(&googleapi.Error{Code: status})
|
||||||
|
}
|
||||||
|
return r.ShouldRetry(err)
|
||||||
|
}
|
||||||
|
}
|
23 vendor/google.golang.org/api/internal/gensupport/send.go generated vendored
@@ -10,6 +10,8 @@ import (
 	"errors"
 	"net/http"
 	"time"
+
+	"github.com/googleapis/gax-go/v2"
 )
 
 // SendRequest sends a single HTTP request using the given client.
@@ -50,7 +52,7 @@ func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Re
 // If ctx is non-nil, it calls all hooks, then sends the request with
 // req.WithContext, then calls any functions returned by the hooks in
 // reverse order.
-func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request, retry *RetryConfig) (*http.Response, error) {
 	// Disallow Accept-Encoding because it interferes with the automatic gzip handling
 	// done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219.
 	if _, ok := req.Header["Accept-Encoding"]; ok {
@@ -59,10 +61,10 @@ func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Re
 	if ctx == nil {
 		return client.Do(req)
 	}
-	return sendAndRetry(ctx, client, req)
+	return sendAndRetry(ctx, client, req, retry)
 }
 
-func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request, retry *RetryConfig) (*http.Response, error) {
 	if client == nil {
 		client = http.DefaultClient
 	}
@@ -72,7 +74,18 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request) (
 
 	// Loop to retry the request, up to the context deadline.
 	var pause time.Duration
-	bo := backoff()
+	var bo Backoff
+	if retry != nil && retry.Backoff != nil {
+		bo = &gax.Backoff{
+			Initial:    retry.Backoff.Initial,
+			Max:        retry.Backoff.Max,
+			Multiplier: retry.Backoff.Multiplier,
+		}
+	} else {
+		bo = backoff()
+	}
+
+	var errorFunc = retry.errorFunc()
 
 	for {
 		select {
@@ -96,7 +109,7 @@ func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request) (
 		// Check if we can retry the request. A retry can only be done if the error
 		// is retryable and the request body can be re-created using GetBody (this
 		// will not be possible if the body was unbuffered).
-		if req.GetBody == nil || !shouldRetry(status, err) {
+		if req.GetBody == nil || !errorFunc(status, err) {
 			break
 		}
 		var errBody error
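
The send.go change above is what threads the new RetryConfig (defined in the retry.go file added earlier in this diff) into every request: a caller-supplied gax.Backoff replaces the package-default backoff, and RetryConfig.errorFunc() replaces the hard-coded shouldRetry check. A rough sketch of what a custom policy passed through this path might look like (illustrative only; gensupport is an internal package, so real callers reach it through the public option layers rather than importing it directly):

// Illustrative sketch (not part of the diff).
retry := &gensupport.RetryConfig{
	Backoff: &gax.Backoff{
		Initial:    250 * time.Millisecond,
		Max:        10 * time.Second,
		Multiplier: 2,
	},
	// ShouldRetry sees either a transport error or a *googleapi.Error that
	// errorFunc() wraps around the HTTP status code.
	ShouldRetry: func(err error) bool {
		var gerr *googleapi.Error
		if errors.As(err, &gerr) {
			return gerr.Code == 429 || gerr.Code >= 500
		}
		return err == io.ErrUnexpectedEOF
	},
}
resp, err := gensupport.SendRequestWithRetry(ctx, client, req, retry)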
135 vendor/google.golang.org/api/storage/v1/storage-gen.go generated vendored
@@ -55,6 +55,7 @@ import (
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/googleapis/gax-go/v2"
|
||||||
googleapi "google.golang.org/api/googleapi"
|
googleapi "google.golang.org/api/googleapi"
|
||||||
gensupport "google.golang.org/api/internal/gensupport"
|
gensupport "google.golang.org/api/internal/gensupport"
|
||||||
option "google.golang.org/api/option"
|
option "google.golang.org/api/option"
|
||||||
|
@@ -2520,7 +2521,7 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -2673,7 +2674,7 @@ func (c *BucketAccessControlsGetCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -2842,7 +2843,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -3017,7 +3018,7 @@ func (c *BucketAccessControlsListCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -3183,7 +3184,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -3362,7 +3363,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -3550,7 +3551,7 @@ func (c *BucketsDeleteCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -3731,7 +3732,7 @@ func (c *BucketsGetCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -3939,7 +3940,7 @@ func (c *BucketsGetIamPolicyCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -4158,7 +4159,7 @@ func (c *BucketsInsertCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -4417,7 +4418,7 @@ func (c *BucketsListCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -4631,7 +4632,7 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -4868,7 +4869,7 @@ func (c *BucketsPatchCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -5099,7 +5100,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -5277,7 +5278,7 @@ func (c *BucketsTestIamPermissionsCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -5519,7 +5520,7 @@ func (c *BucketsUpdateCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -5731,7 +5732,7 @@ func (c *ChannelsStopCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -5853,7 +5854,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -6006,7 +6007,7 @@ func (c *DefaultObjectAccessControlsGetCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -6176,7 +6177,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -6368,7 +6369,7 @@ func (c *DefaultObjectAccessControlsListCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -6546,7 +6547,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -6725,7 +6726,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -6900,7 +6901,7 @@ func (c *NotificationsDeleteCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -7051,7 +7052,7 @@ func (c *NotificationsGetCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -7223,7 +7224,7 @@ func (c *NotificationsInsertCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -7400,7 +7401,7 @@ func (c *NotificationsListCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -7580,7 +7581,7 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -7759,7 +7760,7 @@ func (c *ObjectAccessControlsGetCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -7954,7 +7955,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -8155,7 +8156,7 @@ func (c *ObjectAccessControlsListCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -8347,7 +8348,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -8552,7 +8553,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -8795,7 +8796,7 @@ func (c *ObjectsComposeCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -9151,7 +9152,7 @@ func (c *ObjectsCopyCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -9483,7 +9484,7 @@ func (c *ObjectsDeleteCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -9720,7 +9721,7 @@ func (c *ObjectsGetCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -9974,7 +9975,7 @@ func (c *ObjectsGetIamPolicyCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -10099,6 +10100,7 @@ type ObjectsInsertCall struct {
 	mediaInfo_ *gensupport.MediaInfo
 	ctx_       context.Context
 	header_    http.Header
+	retry      *gensupport.RetryConfig
 }

 // Insert: Stores a new object and metadata.
@@ -10265,6 +10267,29 @@ func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *Objec
 	return c
 }

+// WithRetry causes the library to retry the initial request of the upload
+// (for resumable uploads) or the entire upload (for multipart uploads) if
+// a transient error occurs. This is contingent on ChunkSize being > 0 (so
+// that the input data may be buffered). The backoff argument will be used to
+// determine exponential backoff timing, and the errorFunc is used to determine
+// which errors are considered retryable. By default, exponential backoff will be
+// applied using gax defaults, and the following errors are retried:
+//
+// - HTTP responses with codes 429, 502, 503, and 504.
+//
+// - Transient network errors such as connection reset and io.ErrUnexpectedEOF.
+//
+// - Errors which are considered transient using the Temporary() interface.
+//
+// - Wrapped versions of these errors.
+func (c *ObjectsInsertCall) WithRetry(bo *gax.Backoff, errorFunc func(err error) bool) *ObjectsInsertCall {
+	c.retry = &gensupport.RetryConfig{
+		Backoff:     bo,
+		ShouldRetry: errorFunc,
+	}
+	return c
+}
+
 // Fields allows partial responses to be retrieved. See
 // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
 // for more information.
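The WithRetry documentation in the hunk above describes the new retry knob for uploads. A minimal usage sketch follows; the bucket name, object name, source file and backoff values are illustrative assumptions, not taken from this change, and the nil errorFunc is assumed to fall back to the default retry predicate listed above.

package main

import (
	"context"
	"log"
	"os"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	ctx := context.Background()
	svc, err := storage.NewService(ctx) // uses Application Default Credentials
	if err != nil {
		log.Fatal(err)
	}
	src, err := os.Open("data.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	// Buffered upload (ChunkSize > 0) with custom backoff; the nil errorFunc is
	// assumed to keep the default set of retryable errors described above.
	obj, err := svc.Objects.Insert("my-bucket", &storage.Object{Name: "data.bin"}).
		Media(src, googleapi.ChunkSize(8*1024*1024)).
		WithRetry(&gax.Backoff{
			Initial:    time.Second,      // delay before the first retry
			Max:        30 * time.Second, // upper bound between attempts
			Multiplier: 2,                // exponential growth factor
		}, nil).
		Context(ctx).
		Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded %s (%d bytes)", obj.Name, obj.Size)
}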
@@ -10294,7 +10319,7 @@ func (c *ObjectsInsertCall) Header() http.Header {
 func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) {
 	reqHeaders := make(http.Header)
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
 	for k, v := range c.header_ {
 		reqHeaders[k] = v
 	}
@@ -10328,7 +10353,10 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) {
 	googleapi.Expand(req.URL, map[string]string{
 		"bucket": c.bucket,
 	})
-	return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req)
+	if c.retry != nil {
+		return gensupport.SendRequestWithRetry(c.ctx_, c.s.client, req, c.retry)
+	}
+	return gensupport.SendRequest(c.ctx_, c.s.client, req)
 }

 // Do executes the "storage.objects.insert" call.
@@ -10361,6 +10389,7 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) {
 	if rx != nil {
 		rx.Client = c.s.client
 		rx.UserAgent = c.s.userAgent()
+		rx.Retry = c.retry
 		ctx := c.ctx_
 		if ctx == nil {
 			ctx = context.TODO()
@@ -10669,7 +10698,7 @@ func (c *ObjectsListCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -10990,7 +11019,7 @@ func (c *ObjectsPatchCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -11395,7 +11424,7 @@ func (c *ObjectsRewriteCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -11702,7 +11731,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -11907,7 +11936,7 @@ func (c *ObjectsTestIamPermissionsCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -12172,7 +12201,7 @@ func (c *ObjectsUpdateCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -12492,7 +12521,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -12711,7 +12740,7 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -12864,7 +12893,7 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -13003,7 +13032,7 @@ func (c *ProjectsHmacKeysGetCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -13205,7 +13234,7 @@ func (c *ProjectsHmacKeysListCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -13404,7 +13433,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
@@ -13583,7 +13612,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header {
-	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211207")
+	reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20211212")
29 vendor/google.golang.org/grpc/balancer/balancer.go generated vendored
@@ -174,25 +174,32 @@ type ClientConn interface {

 // BuildOptions contains additional information for Build.
 type BuildOptions struct {
-	// DialCreds is the transport credential the Balancer implementation can
-	// use to dial to a remote load balancer server. The Balancer implementations
-	// can ignore this if it does not need to talk to another party securely.
+	// DialCreds is the transport credentials to use when communicating with a
+	// remote load balancer server. Balancer implementations which do not
+	// communicate with a remote load balancer server can ignore this field.
 	DialCreds credentials.TransportCredentials
-	// CredsBundle is the credentials bundle that the Balancer can use.
+	// CredsBundle is the credentials bundle to use when communicating with a
+	// remote load balancer server. Balancer implementations which do not
+	// communicate with a remote load balancer server can ignore this field.
 	CredsBundle credentials.Bundle
-	// Dialer is the custom dialer the Balancer implementation can use to dial
-	// to a remote load balancer server. The Balancer implementations
-	// can ignore this if it doesn't need to talk to remote balancer.
+	// Dialer is the custom dialer to use when communicating with a remote load
+	// balancer server. Balancer implementations which do not communicate with a
+	// remote load balancer server can ignore this field.
 	Dialer func(context.Context, string) (net.Conn, error)
-	// ChannelzParentID is the entity parent's channelz unique identification number.
+	// Authority is the server name to use as part of the authentication
+	// handshake when communicating with a remote load balancer server. Balancer
+	// implementations which do not communicate with a remote load balancer
+	// server can ignore this field.
+	Authority string
+	// ChannelzParentID is the parent ClientConn's channelz ID.
 	ChannelzParentID int64
 	// CustomUserAgent is the custom user agent set on the parent ClientConn.
 	// The balancer should set the same custom user agent if it creates a
 	// ClientConn.
 	CustomUserAgent string
-	// Target contains the parsed address info of the dial target. It is the same resolver.Target as
-	// passed to the resolver.
-	// See the documentation for the resolver.Target type for details about what it contains.
+	// Target contains the parsed address info of the dial target. It is the
+	// same resolver.Target as passed to the resolver. See the documentation for
+	// the resolver.Target type for details about what it contains.
 	Target resolver.Target
 }
@@ -17,6 +17,8 @@
  */

 // Package weightedtarget implements the weighted_target balancer.
+//
+// All APIs in this package are experimental.
 package weightedtarget

 import (
@@ -24,14 +26,14 @@ import (
 	"fmt"

 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/weightedtarget/weightedaggregator"
+	"google.golang.org/grpc/internal/balancergroup"
 	"google.golang.org/grpc/internal/grpclog"
 	"google.golang.org/grpc/internal/hierarchy"
 	"google.golang.org/grpc/internal/pretty"
 	"google.golang.org/grpc/internal/wrr"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/serviceconfig"
-	"google.golang.org/grpc/xds/internal/balancer/balancergroup"
-	"google.golang.org/grpc/xds/internal/balancer/weightedtarget/weightedaggregator"
 )

 // Name is the name of the weighted_target balancer.
@@ -52,7 +54,7 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba
 	b.logger = prefixLogger(b)
 	b.stateAggregator = weightedaggregator.New(cc, b.logger, NewRandomWRR)
 	b.stateAggregator.Start()
-	b.bg = balancergroup.New(cc, bOpts, b.stateAggregator, nil, b.logger)
+	b.bg = balancergroup.New(cc, bOpts, b.stateAggregator, b.logger)
 	b.bg.Start()
 	b.logger.Infof("Created")
 	return b
@@ -69,11 +71,6 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err
 type weightedTargetBalancer struct {
 	logger *grpclog.PrefixLogger

-	// TODO: Make this package not dependent on any xds specific code.
-	// BalancerGroup uses xdsinternal.LocalityID as the key in the map of child
-	// policies that it maintains and reports load using LRS. Once these two
-	// dependencies are removed from the balancerGroup, this package will not
-	// have any dependencies on xds code.
 	bg *balancergroup.BalancerGroup
 	stateAggregator *weightedaggregator.Aggregator
29 vendor/google.golang.org/grpc/clientconn.go generated vendored
@@ -83,13 +83,13 @@ var (
 	// errTransportCredsAndBundle indicates that creds bundle is used together
 	// with other individual Transport Credentials.
 	errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
-	// errTransportCredentialsMissing indicates that users want to transmit security
-	// information (e.g., OAuth2 token) which requires secure connection on an insecure
-	// connection.
+	// errNoTransportCredsInBundle indicated that the configured creds bundle
+	// returned a transport credentials which was nil.
+	errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials")
+	// errTransportCredentialsMissing indicates that users want to transmit
+	// security information (e.g., OAuth2 token) which requires secure
+	// connection on an insecure connection.
 	errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
-	// errCredentialsConflict indicates that grpc.WithTransportCredentials()
-	// and grpc.WithInsecure() are both called for a connection.
-	errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
 )

 const (
@@ -177,17 +177,20 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 		cc.csMgr.channelzID = cc.channelzID
 	}

-	if !cc.dopts.insecure {
 	if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
 		return nil, errNoTransportSecurity
 	}
 	if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
 		return nil, errTransportCredsAndBundle
 	}
-	} else {
-		if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil {
-			return nil, errCredentialsConflict
+	if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
+		return nil, errNoTransportCredsInBundle
 	}
+	transportCreds := cc.dopts.copts.TransportCredentials
+	if transportCreds == nil {
+		transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
+	}
+	if transportCreds.Info().SecurityProtocol == "insecure" {
 	for _, cd := range cc.dopts.copts.PerRPCCredentials {
 		if cd.RequireTransportSecurity() {
 			return nil, errTransportCredentialsMissing
@@ -282,6 +285,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
 		DialCreds:        credsClone,
 		CredsBundle:      cc.dopts.copts.CredsBundle,
 		Dialer:           cc.dopts.copts.Dialer,
+		Authority:        cc.authority,
 		CustomUserAgent:  cc.dopts.copts.UserAgent,
 		ChannelzParentID: cc.channelzID,
 		Target:           cc.parsedTarget,
@@ -629,7 +633,10 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
 	}

 	var ret error
-	if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
+	if cc.dopts.disableServiceConfig {
+		channelz.Infof(logger, cc.channelzID, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig)
+		cc.maybeApplyDefaultServiceConfig(s.Addresses)
+	} else if s.ServiceConfig == nil {
 		cc.maybeApplyDefaultServiceConfig(s.Addresses)
 		// TODO: do we need to apply a failing LB policy if there is no
 		// default, per the error handling design?
10 vendor/google.golang.org/grpc/credentials/credentials.go generated vendored
@@ -178,8 +178,18 @@ type TransportCredentials interface {
 //
 // This API is experimental.
 type Bundle interface {
+	// TransportCredentials returns the transport credentials from the Bundle.
+	//
+	// Implementations must return non-nil transport credentials. If transport
+	// security is not needed by the Bundle, implementations may choose to
+	// return insecure.NewCredentials().
 	TransportCredentials() TransportCredentials
+
+	// PerRPCCredentials returns the per-RPC credentials from the Bundle.
+	//
+	// May be nil if per-RPC credentials are not needed.
 	PerRPCCredentials() PerRPCCredentials
+
 	// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
 	// existing Bundle may cause races.
 	//
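The expanded Bundle documentation above requires TransportCredentials() to never return nil. A minimal sketch of a conforming implementation is shown below; the package, type and field names are illustrative assumptions, not part of this change.

package bundleexample

import (
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
)

// tokenBundle is a hypothetical Bundle that carries only per-RPC credentials.
type tokenBundle struct {
	perRPC credentials.PerRPCCredentials
}

// TransportCredentials never returns nil: when no transport security is
// needed, it returns insecure credentials, as the updated contract suggests.
func (b tokenBundle) TransportCredentials() credentials.TransportCredentials {
	return insecure.NewCredentials()
}

// PerRPCCredentials may be nil if per-RPC credentials are not needed.
func (b tokenBundle) PerRPCCredentials() credentials.PerRPCCredentials {
	return b.perRPC
}

// NewWithMode returns a copy; this sketch has no modes to switch between.
func (b tokenBundle) NewWithMode(mode string) (credentials.Bundle, error) {
	return tokenBundle{perRPC: b.perRPC}, nil
}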
3 vendor/google.golang.org/grpc/credentials/insecure/insecure.go generated vendored
@@ -33,6 +33,9 @@ import (
 )

 // NewCredentials returns a credentials which disables transport security.
+//
+// Note that using this credentials with per-RPC credentials which require
+// transport security is incompatible and will cause grpc.Dial() to fail.
 func NewCredentials() credentials.TransportCredentials {
 	return insecureTC{}
 }
24 vendor/google.golang.org/grpc/dialoptions.go generated vendored
@@ -27,9 +27,9 @@ import (
 	"google.golang.org/grpc/backoff"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
 	"google.golang.org/grpc/internal"
 	internalbackoff "google.golang.org/grpc/internal/backoff"
-	"google.golang.org/grpc/internal/envconfig"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/resolver"
@@ -50,7 +50,6 @@ type dialOptions struct {
 	bs              internalbackoff.Strategy
 	block           bool
 	returnLastError bool
-	insecure        bool
 	timeout         time.Duration
 	scChan          <-chan ServiceConfig
 	authority       string
@@ -228,18 +227,14 @@ func WithServiceConfig(c <-chan ServiceConfig) DialOption {
 	})
 }

-// WithConnectParams configures the dialer to use the provided ConnectParams.
+// WithConnectParams configures the ClientConn to use the provided ConnectParams
+// for creating and maintaining connections to servers.
 //
 // The backoff configuration specified as part of the ConnectParams overrides
 // all defaults specified in
 // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
 // using the backoff.DefaultConfig as a base, in cases where you want to
 // override only a subset of the backoff configuration.
-//
-// Experimental
-//
-// Notice: This API is EXPERIMENTAL and may be changed or removed in a
-// later release.
 func WithConnectParams(p ConnectParams) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.bs = internalbackoff.Exponential{Config: p.Backoff}
@@ -303,11 +298,17 @@ func WithReturnConnectionError() DialOption {
 }

 // WithInsecure returns a DialOption which disables transport security for this
-// ClientConn. Note that transport security is required unless WithInsecure is
-// set.
+// ClientConn. Under the hood, it uses insecure.NewCredentials().
+//
+// Note that using this DialOption with per-RPC credentials (through
+// WithCredentialsBundle or WithPerRPCCredentials) which require transport
+// security is incompatible and will cause grpc.Dial() to fail.
+//
+// Deprecated: use insecure.NewCredentials() instead.
+// Will be supported throughout 1.x.
 func WithInsecure() DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
-		o.insecure = true
+		o.copts.TransportCredentials = insecure.NewCredentials()
 	})
 }
@@ -580,7 +581,6 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption {

 func defaultDialOptions() dialOptions {
 	return dialOptions{
-		disableRetry:    !envconfig.Retry,
 		healthCheckFunc: internal.HealthCheckFunc,
 		copts: transport.ConnectOptions{
 			WriteBufferSize: defaultWriteBufSize,
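The dialoptions.go hunks above deprecate WithInsecure() in favor of insecure.NewCredentials(). A short sketch of the replacement call site follows; the target address is an assumption.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Previously: grpc.Dial("localhost:50051", grpc.WithInsecure())
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
	log.Printf("connection state: %v", conn.GetState())
}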
@@ -23,9 +23,6 @@ import (
 	"sync"
 	"time"

-	orcapb "github.com/cncf/xds/go/xds/data/orca/v3"
-	"google.golang.org/grpc/xds/internal/xdsclient/load"
-
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/internal/cache"

@@ -178,7 +175,6 @@ func (sbc *subBalancerWrapper) stopBalancer() {
 //
 // Updates from ClientConn are forwarded to sub-balancers
 //  - service config update
-//     - Not implemented
 //  - address update
 //  - subConn state change
 //     - find the corresponding balancer and forward

@@ -199,7 +195,6 @@ type BalancerGroup struct {
 	cc        balancer.ClientConn
 	buildOpts balancer.BuildOptions
 	logger    *grpclog.PrefixLogger
-	loadStore load.PerClusterReporter // TODO: delete this, no longer needed. It was used by EDS.

 	// stateAggregator is where the state/picker updates will be sent to. It's
 	// provided by the parent balancer, to build a picker with all the

@@ -254,15 +249,11 @@ var DefaultSubBalancerCloseTimeout = 15 * time.Minute

 // New creates a new BalancerGroup. Note that the BalancerGroup
 // needs to be started to work.
-//
-// TODO(easwars): Pass an options struct instead of N args.
-func New(cc balancer.ClientConn, bOpts balancer.BuildOptions, stateAggregator BalancerStateAggregator, loadStore load.PerClusterReporter, logger *grpclog.PrefixLogger) *BalancerGroup {
+func New(cc balancer.ClientConn, bOpts balancer.BuildOptions, stateAggregator BalancerStateAggregator, logger *grpclog.PrefixLogger) *BalancerGroup {
 	return &BalancerGroup{
 		cc:        cc,
 		buildOpts: bOpts,
 		logger:    logger,
-		loadStore: loadStore,

 		stateAggregator: stateAggregator,

 		idToBalancerConfig: make(map[string]*subBalancerWrapper),

@@ -467,10 +458,6 @@ func (bg *BalancerGroup) newSubConn(config *subBalancerWrapper, addrs []resolver
 // state, then forward to ClientConn.
 func (bg *BalancerGroup) updateBalancerState(id string, state balancer.State) {
 	bg.logger.Infof("Balancer state update from locality %v, new state: %+v", id, state)
-	if bg.loadStore != nil {
-		// Only wrap the picker to do load reporting if loadStore was set.
-		state.Picker = newLoadReportPicker(state.Picker, id, bg.loadStore)
-	}

 	// Send new state to the aggregator, without holding the incomingMu.
 	// incomingMu is to protect all calls to the parent ClientConn, this update

@@ -520,52 +507,12 @@ func (bg *BalancerGroup) ExitIdle() {
 	bg.outgoingMu.Unlock()
 }

-const (
-	serverLoadCPUName    = "cpu_utilization"
-	serverLoadMemoryName = "mem_utilization"
-)
-
-type loadReportPicker struct {
-	p balancer.Picker
-
-	locality  string
-	loadStore load.PerClusterReporter
-}
-
-func newLoadReportPicker(p balancer.Picker, id string, loadStore load.PerClusterReporter) *loadReportPicker {
-	return &loadReportPicker{
-		p:         p,
-		locality:  id,
-		loadStore: loadStore,
-	}
-}
-
-func (lrp *loadReportPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
-	res, err := lrp.p.Pick(info)
-	if err != nil {
-		return res, err
-	}
-
-	lrp.loadStore.CallStarted(lrp.locality)
-	oldDone := res.Done
-	res.Done = func(info balancer.DoneInfo) {
-		if oldDone != nil {
-			oldDone(info)
-		}
-		lrp.loadStore.CallFinished(lrp.locality, info.Err)
-
-		load, ok := info.ServerLoad.(*orcapb.OrcaLoadReport)
-		if !ok {
-			return
-		}
-		lrp.loadStore.CallServerLoad(lrp.locality, serverLoadCPUName, load.CpuUtilization)
-		lrp.loadStore.CallServerLoad(lrp.locality, serverLoadMemoryName, load.MemUtilization)
-		for n, d := range load.RequestCost {
-			lrp.loadStore.CallServerLoad(lrp.locality, n, d)
-		}
-		for n, d := range load.Utilization {
-			lrp.loadStore.CallServerLoad(lrp.locality, n, d)
-		}
-	}
-	return res, err
-}
+// ExitIdleOne instructs the sub-balancer `id` to exit IDLE state, if
+// appropriate and possible.
+func (bg *BalancerGroup) ExitIdleOne(id string) {
+	bg.outgoingMu.Lock()
+	if config := bg.idToBalancerConfig[id]; config != nil {
+		config.exitIdle()
+	}
+	bg.outgoingMu.Unlock()
+}
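The removed loadReportPicker wrapped the picker returned by a sub-balancer so that per-call load could be reported from the Done callback. For reference, a standalone sketch of that wrapping pattern, built only on the public balancer package and a hypothetical metrics sink (recordStart/recordFinish are not gRPC APIs), is:

package pickerwrap

import "google.golang.org/grpc/balancer"

// callCountingPicker delegates Pick to an inner picker and chains the Done
// callback, mirroring the pattern used by the removed loadReportPicker.
type callCountingPicker struct {
	delegate     balancer.Picker
	recordStart  func()          // hypothetical metrics hook
	recordFinish func(err error) // hypothetical metrics hook
}

func (p *callCountingPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
	res, err := p.delegate.Pick(info)
	if err != nil {
		return res, err
	}
	p.recordStart()
	oldDone := res.Done
	res.Done = func(di balancer.DoneInfo) {
		if oldDone != nil {
			oldDone(di) // preserve the original completion callback
		}
		p.recordFinish(di.Err)
	}
	return res, err
}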
20  vendor/google.golang.org/grpc/internal/channelz/funcs.go  generated vendored

@@ -204,9 +204,9 @@ func RegisterChannel(c Channel, pid int64, ref string) int64 {
 		trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
 	}
 	if pid == 0 {
-		db.get().addChannel(id, cn, true, pid, ref)
+		db.get().addChannel(id, cn, true, pid)
 	} else {
-		db.get().addChannel(id, cn, false, pid, ref)
+		db.get().addChannel(id, cn, false, pid)
 	}
 	return id
 }

@@ -228,7 +228,7 @@ func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
 		pid:   pid,
 		trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
 	}
-	db.get().addSubChannel(id, sc, pid, ref)
+	db.get().addSubChannel(id, sc, pid)
 	return id
 }

@@ -258,7 +258,7 @@ func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
 	}
 	id := idGen.genID()
 	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addListenSocket(id, ls, pid, ref)
+	db.get().addListenSocket(id, ls, pid)
 	return id
 }

@@ -273,11 +273,11 @@ func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
 	}
 	id := idGen.genID()
 	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addNormalSocket(id, ns, pid, ref)
+	db.get().addNormalSocket(id, ns, pid)
 	return id
 }

-// RemoveEntry removes an entry with unique channelz trakcing id to be id from
+// RemoveEntry removes an entry with unique channelz tracking id to be id from
 // channelz database.
 func RemoveEntry(id int64) {
 	db.get().removeEntry(id)

@@ -333,7 +333,7 @@ func (c *channelMap) addServer(id int64, s *server) {
 	c.mu.Unlock()
 }

-func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
+func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64) {
 	c.mu.Lock()
 	cn.cm = c
 	cn.trace.cm = c

@@ -346,7 +346,7 @@ func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid in
 	c.mu.Unlock()
 }

-func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
+func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64) {
 	c.mu.Lock()
 	sc.cm = c
 	sc.trace.cm = c

@@ -355,7 +355,7 @@ func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref stri
 	c.mu.Unlock()
 }

-func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
+func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64) {
 	c.mu.Lock()
 	ls.cm = c
 	c.listenSockets[id] = ls

@@ -363,7 +363,7 @@ func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref
 	c.mu.Unlock()
 }

-func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
+func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64) {
 	c.mu.Lock()
 	ns.cm = c
 	c.normalSockets[id] = ns

6  vendor/google.golang.org/grpc/internal/envconfig/envconfig.go  generated vendored

@@ -22,20 +22,14 @@ package envconfig
 import (
 	"os"
 	"strings"
-
-	xdsenv "google.golang.org/grpc/internal/xds/env"
 )

 const (
 	prefix          = "GRPC_GO_"
-	retryStr        = prefix + "RETRY"
 	txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS"
 )

 var (
-	// Retry is enabled unless explicitly disabled via "GRPC_GO_RETRY=off" or
-	// if XDS retry support is explicitly disabled.
-	Retry = !strings.EqualFold(os.Getenv(retryStr), "off") && xdsenv.RetrySupport
 	// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
 	TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
 )
@@ -16,9 +16,7 @@
  *
  */

-// Package env acts a single source of definition for all environment variables
-// related to the xDS implementation in gRPC.
-package env
+package envconfig

 import (
 	"os"

@@ -26,72 +24,67 @@ import (
 )

 const (
-	// BootstrapFileNameEnv is the env variable to set bootstrap file name.
+	// XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
 	// Do not use this and read from env directly. Its value is read and kept in
 	// variable BootstrapFileName.
 	//
 	// When both bootstrap FileName and FileContent are set, FileName is used.
-	BootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
-	// BootstrapFileContentEnv is the env variable to set bootstrapp file
+	XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
+	// XDSBootstrapFileContentEnv is the env variable to set bootstrapp file
 	// content. Do not use this and read from env directly. Its value is read
 	// and kept in variable BootstrapFileName.
 	//
 	// When both bootstrap FileName and FileContent are set, FileName is used.
-	BootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
+	XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"

 	ringHashSupportEnv           = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH"
 	clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
 	aggregateAndDNSSupportEnv    = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
-	retrySupportEnv              = "GRPC_XDS_EXPERIMENTAL_ENABLE_RETRY"
 	rbacSupportEnv               = "GRPC_XDS_EXPERIMENTAL_RBAC"
+	federationEnv                = "GRPC_EXPERIMENTAL_XDS_FEDERATION"

-	c2pResolverSupportEnv                    = "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER"
 	c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI"
 )

 var (
-	// BootstrapFileName holds the name of the file which contains xDS bootstrap
-	// configuration. Users can specify the location of the bootstrap file by
-	// setting the environment variable "GRPC_XDS_BOOTSTRAP".
+	// XDSBootstrapFileName holds the name of the file which contains xDS
+	// bootstrap configuration. Users can specify the location of the bootstrap
+	// file by setting the environment variable "GRPC_XDS_BOOTSTRAP".
 	//
 	// When both bootstrap FileName and FileContent are set, FileName is used.
-	BootstrapFileName = os.Getenv(BootstrapFileNameEnv)
-	// BootstrapFileContent holds the content of the xDS bootstrap
-	// configuration. Users can specify the bootstrap config by
-	// setting the environment variable "GRPC_XDS_BOOTSTRAP_CONFIG".
+	XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv)
+	// XDSBootstrapFileContent holds the content of the xDS bootstrap
+	// configuration. Users can specify the bootstrap config by setting the
+	// environment variable "GRPC_XDS_BOOTSTRAP_CONFIG".
 	//
 	// When both bootstrap FileName and FileContent are set, FileName is used.
-	BootstrapFileContent = os.Getenv(BootstrapFileContentEnv)
-	// RingHashSupport indicates whether ring hash support is enabled, which can
-	// be disabled by setting the environment variable
+	XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
+	// XDSRingHash indicates whether ring hash support is enabled, which can be
+	// disabled by setting the environment variable
 	// "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false".
-	RingHashSupport = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false")
-	// ClientSideSecuritySupport is used to control processing of security
+	XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false")
+	// XDSClientSideSecurity is used to control processing of security
 	// configuration on the client-side.
 	//
 	// Note that there is no env var protection for the server-side because we
 	// have a brand new API on the server-side and users explicitly need to use
 	// the new API to get security integration on the server.
-	ClientSideSecuritySupport = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false")
-	// AggregateAndDNSSupportEnv indicates whether processing of aggregated
-	// cluster and DNS cluster is enabled, which can be enabled by setting the
+	XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false")
+	// XDSAggregateAndDNS indicates whether processing of aggregated cluster
+	// and DNS cluster is enabled, which can be enabled by setting the
 	// environment variable
 	// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
 	// "true".
-	AggregateAndDNSSupportEnv = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true")
+	XDSAggregateAndDNS = strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "true")

-	// RetrySupport indicates whether xDS retry is enabled.
-	RetrySupport = !strings.EqualFold(os.Getenv(retrySupportEnv), "false")
-
-	// RBACSupport indicates whether xDS configured RBAC HTTP Filter is enabled,
+	// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
 	// which can be disabled by setting the environment variable
 	// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
-	RBACSupport = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")
+	XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")

+	// XDSFederation indicates whether federation support is enabled.
+	XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")

-	// C2PResolverSupport indicates whether support for C2P resolver is enabled.
-	// This can be enabled by setting the environment variable
-	// "GRPC_EXPERIMENTAL_GOOGLE_C2P_RESOLVER" to "true".
-	C2PResolverSupport = strings.EqualFold(os.Getenv(c2pResolverSupportEnv), "true")
 	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
 	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv)
 )
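The boolean flags above all follow the same parse-once-at-init convention: default ON unless the variable is exactly "false" (or default OFF unless it is "true"). A minimal, self-contained sketch of that convention, with a hypothetical variable name, is:

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Same convention as XDSRingHash / XDSRBAC above: enabled unless the env
	// var is explicitly "false". MY_FEATURE_FLAG is an illustrative name only.
	enabled := !strings.EqualFold(os.Getenv("MY_FEATURE_FLAG"), "false")
	fmt.Println("feature enabled:", enabled)
}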
4  vendor/google.golang.org/grpc/internal/transport/flowcontrol.go  generated vendored

@@ -136,12 +136,10 @@ type inFlow struct {

 // newLimit updates the inflow window to a new value n.
 // It assumes that n is always greater than the old limit.
-func (f *inFlow) newLimit(n uint32) uint32 {
+func (f *inFlow) newLimit(n uint32) {
 	f.mu.Lock()
-	d := n - f.limit
 	f.limit = n
 	f.mu.Unlock()
-	return d
 }

 func (f *inFlow) maybeAdjust(n uint32) uint32 {

15  vendor/google.golang.org/grpc/internal/transport/http2_client.go  generated vendored

@@ -201,6 +201,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
 		}
 	}()

+	// gRPC, resolver, balancer etc. can specify arbitrary data in the
+	// Attributes field of resolver.Address, which is shoved into connectCtx
+	// and passed to the dialer and credential handshaker. This makes it possible for
+	// address specific arbitrary data to reach custom dialers and credential handshakers.
+	connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
+
 	conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent)
 	if err != nil {
 		if opts.FailOnNonTempDialError {

@@ -245,11 +251,6 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
 		}
 	}
 	if transportCreds != nil {
-		// gRPC, resolver, balancer etc. can specify arbitrary data in the
-		// Attributes field of resolver.Address, which is shoved into connectCtx
-		// and passed to the credential handshaker. This makes it possible for
-		// address specific arbitrary data to reach the credential handshaker.
-		connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
 		rawConn := conn
 		// Pull the deadline from the connectCtx, which will be used for
 		// timeouts in the authentication protocol handshake. Can ignore the

@@ -587,7 +588,7 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s
 			return nil, err
 		}

-		return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err)
+		return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
 	}
 	for k, v := range data {
 		// Capital header names are illegal in HTTP/2.

@@ -1556,7 +1557,7 @@ func minTime(a, b time.Duration) time.Duration {
 	return b
 }

-// keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
+// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
 func (t *http2Client) keepalive() {
 	p := &ping{data: [8]byte{}}
 	// True iff a ping has been sent, and no data has been received since then.
36  vendor/google.golang.org/grpc/internal/transport/http2_server.go  generated vendored

@@ -73,7 +73,6 @@ type http2Server struct {
 	writerDone  chan struct{} // sync point to enable testing.
 	remoteAddr  net.Addr
 	localAddr   net.Addr
-	maxStreamID uint32               // max stream ID ever seen
 	authInfo    credentials.AuthInfo // auth info about the connection
 	inTapHandle tap.ServerInHandle
 	framer      *framer

@@ -123,6 +122,11 @@ type http2Server struct {
 	bufferPool *bufferPool

 	connectionID uint64
+
+	// maxStreamMu guards the maximum stream ID
+	// This lock may not be taken if mu is already held.
+	maxStreamMu sync.Mutex
+	maxStreamID uint32 // max stream ID ever seen
 }

 // NewServerTransport creates a http2 transport with conn and configuration

@@ -334,6 +338,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,

 // operateHeader takes action on the decoded headers.
 func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
+	// Acquire max stream ID lock for entire duration
+	t.maxStreamMu.Lock()
+	defer t.maxStreamMu.Unlock()
+
 	streamID := frame.Header().StreamID

 	// frame.Truncated is set to true when framer detects that the current header

@@ -348,6 +356,15 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 		return false
 	}

+	if streamID%2 != 1 || streamID <= t.maxStreamID {
+		// illegal gRPC stream id.
+		if logger.V(logLevel) {
+			logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
+		}
+		return true
+	}
+	t.maxStreamID = streamID
+
 	buf := newRecvBuffer()
 	s := &Stream{
 		id: streamID,

@@ -355,7 +372,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 		buf: buf,
 		fc:  &inFlow{limit: uint32(t.initialWindowSize)},
 	}
-
 	var (
 		// If a gRPC Response-Headers has already been received, then it means
 		// that the peer is speaking gRPC and we are in gRPC mode.

@@ -498,16 +514,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(
 		s.cancel()
 		return false
 	}
-	if streamID%2 != 1 || streamID <= t.maxStreamID {
-		t.mu.Unlock()
-		// illegal gRPC stream id.
-		if logger.V(logLevel) {
-			logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
-		}
-		s.cancel()
-		return true
-	}
-	t.maxStreamID = streamID
 	if httpMethod != http.MethodPost {
 		t.mu.Unlock()
 		if logger.V(logLevel) {

@@ -1293,20 +1299,23 @@ var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
 // Handles outgoing GoAway and returns true if loopy needs to put itself
 // in draining mode.
 func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
+	t.maxStreamMu.Lock()
 	t.mu.Lock()
 	if t.state == closing { // TODO(mmukhi): This seems unnecessary.
 		t.mu.Unlock()
+		t.maxStreamMu.Unlock()
 		// The transport is closing.
 		return false, ErrConnClosing
 	}
-	sid := t.maxStreamID
 	if !g.headsUp {
 		// Stop accepting more streams now.
 		t.state = draining
+		sid := t.maxStreamID
 		if len(t.activeStreams) == 0 {
 			g.closeConn = true
 		}
 		t.mu.Unlock()
+		t.maxStreamMu.Unlock()
 		if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
 			return false, err
 		}

@@ -1319,6 +1328,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
 		return true, nil
 	}
 	t.mu.Unlock()
+	t.maxStreamMu.Unlock()
 	// For a graceful close, send out a GoAway with stream ID of MaxUInt32,
 	// Follow that with a ping and wait for the ack to come back or a timer
 	// to expire. During this time accept new streams since they might have
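The new maxStreamMu narrows locking around the maximum stream ID and documents an ordering rule relative to mu. A standalone sketch of that pattern, with illustrative names rather than the transport's real fields, is:

package main

import (
	"fmt"
	"sync"
)

type server struct {
	mu    sync.Mutex // guards most of the struct's state
	state string

	// maxStreamMu guards only maxStreamID. Following the rule stated in the
	// diff above, it is never acquired while mu is already held, which keeps
	// the lock ordering fixed and avoids deadlock.
	maxStreamMu sync.Mutex
	maxStreamID uint32
}

// onNewStream accepts a stream id only if it is strictly increasing.
func (s *server) onNewStream(id uint32) bool {
	s.maxStreamMu.Lock()
	defer s.maxStreamMu.Unlock()
	if id <= s.maxStreamID {
		return false // stale or illegal stream id
	}
	s.maxStreamID = id
	return true
}

func main() {
	s := &server{}
	fmt.Println(s.onNewStream(1), s.onNewStream(1)) // true false
}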
4  vendor/google.golang.org/grpc/internal/transport/proxy.go  generated vendored

@@ -37,7 +37,7 @@ var (
 	httpProxyFromEnvironment = http.ProxyFromEnvironment
 )

-func mapAddress(ctx context.Context, address string) (*url.URL, error) {
+func mapAddress(address string) (*url.URL, error) {
 	req := &http.Request{
 		URL: &url.URL{
 			Scheme: "https",

@@ -114,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri
 // connection.
 func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) {
 	newAddr := addr
-	proxyURL, err := mapAddress(ctx, addr)
+	proxyURL, err := mapAddress(addr)
 	if err != nil {
 		return nil, err
 	}

6  vendor/google.golang.org/grpc/internal/xds/rbac/rbac_engine.go  generated vendored

@@ -108,13 +108,13 @@ type engine struct {
 // newEngine creates an RBAC Engine based on the contents of policy. Returns a
 // non-nil error if the policy is invalid.
 func newEngine(config *v3rbacpb.RBAC) (*engine, error) {
-	a := *config.Action.Enum()
+	a := config.GetAction()
 	if a != v3rbacpb.RBAC_ALLOW && a != v3rbacpb.RBAC_DENY {
 		return nil, fmt.Errorf("unsupported action %s", config.Action)
 	}

-	policies := make(map[string]*policyMatcher, len(config.Policies))
-	for name, policy := range config.Policies {
+	policies := make(map[string]*policyMatcher, len(config.GetPolicies()))
+	for name, policy := range config.GetPolicies() {
 		matcher, err := newPolicyMatcher(policy)
 		if err != nil {
 			return nil, err

2  vendor/google.golang.org/grpc/pickfirst.go  generated vendored

@@ -125,7 +125,7 @@ func (b *pickfirstBalancer) Close() {
 }

 func (b *pickfirstBalancer) ExitIdle() {
-	if b.state == connectivity.Idle {
+	if b.sc != nil && b.state == connectivity.Idle {
 		b.sc.Connect()
 	}
 }

4  vendor/google.golang.org/grpc/regenerate.sh  generated vendored

@@ -102,8 +102,8 @@ done

 # The go_package option in grpc/lookup/v1/rls.proto doesn't match the
 # current location. Move it into the right place.
-mkdir -p ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1
-mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/balancer/rls/internal/proto/grpc_lookup_v1
+mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
+mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1

 # grpc_testingv3/testv3.pb.go is not re-generated because it was
 # intentionally generated by an older version of protoc-gen-go.

6  vendor/google.golang.org/grpc/rpc_util.go  generated vendored

@@ -712,13 +712,11 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei
 		if err != nil {
 			return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
 		}
-	} else {
-		size = len(d)
-	}
 		if size > maxReceiveMessageSize {
 			// TODO: Revisit the error code. Currently keep it consistent with java
 			// implementation.
-			return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+			return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+		}
 	}
 	return d, nil
 }
19  vendor/google.golang.org/grpc/status/status.go  generated vendored

@@ -29,6 +29,7 @@ package status

 import (
 	"context"
+	"errors"
 	"fmt"

 	spb "google.golang.org/genproto/googleapis/rpc/status"

@@ -117,18 +118,18 @@ func Code(err error) codes.Code {
 	return codes.Unknown
 }

-// FromContextError converts a context error into a Status. It returns a
-// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
-// non-nil and not a context error.
+// FromContextError converts a context error or wrapped context error into a
+// Status. It returns a Status with codes.OK if err is nil, or a Status with
+// codes.Unknown if err is non-nil and not a context error.
 func FromContextError(err error) *Status {
-	switch err {
-	case nil:
+	if err == nil {
 		return nil
-	case context.DeadlineExceeded:
+	}
+	if errors.Is(err, context.DeadlineExceeded) {
 		return New(codes.DeadlineExceeded, err.Error())
-	case context.Canceled:
+	}
+	if errors.Is(err, context.Canceled) {
 		return New(codes.Canceled, err.Error())
-	default:
-		return New(codes.Unknown, err.Error())
 	}
+	return New(codes.Unknown, err.Error())
 }
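With the switch to errors.Is, wrapped context errors are now classified correctly instead of falling through to codes.Unknown. A quick check of the new behavior, assuming the grpc-go v1.43.0 vendored here, could be:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/status"
)

func main() {
	// A context error wrapped with %w is now recognized by FromContextError.
	wrapped := fmt.Errorf("operation timed out: %w", context.DeadlineExceeded)
	st := status.FromContextError(wrapped)
	fmt.Println(st.Code()) // DeadlineExceeded with grpc-go >= 1.43; Unknown before
}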
2  vendor/google.golang.org/grpc/version.go  generated vendored

@@ -19,4 +19,4 @@
 package grpc

 // Version is the current grpc version.
-const Version = "1.42.0"
+const Version = "1.43.0"

23  vendor/google.golang.org/grpc/xds/csds/csds.go  generated vendored

@@ -37,10 +37,11 @@ import (
 	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/status"
 	"google.golang.org/grpc/xds/internal/xdsclient"
+	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
 	"google.golang.org/protobuf/types/known/timestamppb"

-	_ "google.golang.org/grpc/xds/internal/xdsclient/v2" // Register v2 xds_client.
-	_ "google.golang.org/grpc/xds/internal/xdsclient/v3" // Register v3 xds_client.
+	_ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2" // Register v2 xds_client.
+	_ "google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3" // Register v3 xds_client.
 )

 var (

@@ -127,7 +128,7 @@ func (s *ClientStatusDiscoveryServer) buildClientStatusRespForReq(req *v3statusp
 	ret := &v3statuspb.ClientStatusResponse{
 		Config: []*v3statuspb.ClientConfig{
 			{
-				Node:              nodeProtoToV3(s.xdsClient.BootstrapConfig().NodeProto),
+				Node:              nodeProtoToV3(s.xdsClient.BootstrapConfig().XDSServer.NodeProto),
 				GenericXdsConfigs: configs,
 			},
 		},

@@ -173,8 +174,8 @@ func nodeProtoToV3(n proto.Message) *v3corepb.Node {
 	return node
 }

-func dumpToGenericXdsConfig(typeURL string, dumpF func() (string, map[string]xdsclient.UpdateWithMD)) []*v3statuspb.ClientConfig_GenericXdsConfig {
-	_, dump := dumpF()
+func dumpToGenericXdsConfig(typeURL string, dumpF func() map[string]xdsresource.UpdateWithMD) []*v3statuspb.ClientConfig_GenericXdsConfig {
+	dump := dumpF()
 	ret := make([]*v3statuspb.ClientConfig_GenericXdsConfig, 0, len(dump))
 	for name, d := range dump {
 		config := &v3statuspb.ClientConfig_GenericXdsConfig{

@@ -197,17 +198,17 @@ func dumpToGenericXdsConfig(typeURL string, dumpF func() (string, map[string]xds
 	return ret
 }

-func serviceStatusToProto(serviceStatus xdsclient.ServiceStatus) v3adminpb.ClientResourceStatus {
+func serviceStatusToProto(serviceStatus xdsresource.ServiceStatus) v3adminpb.ClientResourceStatus {
 	switch serviceStatus {
-	case xdsclient.ServiceStatusUnknown:
+	case xdsresource.ServiceStatusUnknown:
 		return v3adminpb.ClientResourceStatus_UNKNOWN
-	case xdsclient.ServiceStatusRequested:
+	case xdsresource.ServiceStatusRequested:
 		return v3adminpb.ClientResourceStatus_REQUESTED
-	case xdsclient.ServiceStatusNotExist:
+	case xdsresource.ServiceStatusNotExist:
 		return v3adminpb.ClientResourceStatus_DOES_NOT_EXIST
-	case xdsclient.ServiceStatusACKed:
+	case xdsresource.ServiceStatusACKed:
 		return v3adminpb.ClientResourceStatus_ACKED
-	case xdsclient.ServiceStatusNACKed:
+	case xdsresource.ServiceStatusNACKed:
 		return v3adminpb.ClientResourceStatus_NACKED
 	default:
 		return v3adminpb.ClientResourceStatus_UNKNOWN
22  vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go  generated vendored

@@ -29,26 +29,27 @@ import (
 	"fmt"
 	"time"

-	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials/google"
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/envconfig"
 	"google.golang.org/grpc/internal/googlecloud"
 	internalgrpclog "google.golang.org/grpc/internal/grpclog"
 	"google.golang.org/grpc/internal/grpcrand"
-	"google.golang.org/grpc/internal/xds/env"
 	"google.golang.org/grpc/resolver"
 	_ "google.golang.org/grpc/xds" // To register xds resolvers and balancers.
-	"google.golang.org/grpc/xds/internal/version"
 	"google.golang.org/grpc/xds/internal/xdsclient"
 	"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
+	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version"
 	"google.golang.org/protobuf/types/known/structpb"
+
+	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
 )

 const (
-	c2pScheme = "google-c2p"
+	c2pScheme = "google-c2p-experimental"

-	tdURL          = "directpath-pa.googleapis.com"
+	tdURL          = "dns:///directpath-pa.googleapis.com"
 	httpReqTimeout = 10 * time.Second
 	zoneURL        = "http://metadata.google.internal/computeMetadata/v1/instance/zone"
 	ipv6URL        = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ipv6s"

@@ -74,10 +75,8 @@ var (
 )

 func init() {
-	if env.C2PResolverSupport {
 	resolver.Register(c2pResolverBuilder{})
-	}
 }

 type c2pResolverBuilder struct{}

@@ -98,15 +97,18 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts
 	go func() { zoneCh <- getZone(httpReqTimeout) }()
 	go func() { ipv6CapableCh <- getIPv6Capable(httpReqTimeout) }()

-	balancerName := env.C2PResolverTestOnlyTrafficDirectorURI
+	balancerName := envconfig.C2PResolverTestOnlyTrafficDirectorURI
 	if balancerName == "" {
 		balancerName = tdURL
 	}
 	config := &bootstrap.Config{
-		BalancerName: balancerName,
+		XDSServer: &bootstrap.ServerConfig{
+			ServerURI:    balancerName,
 			Creds:        grpc.WithCredentialsBundle(google.NewDefaultCredentials()),
 			TransportAPI: version.TransportV3,
 			NodeProto:    newNode(<-zoneCh, <-ipv6CapableCh),
+		},
+		ClientDefaultListenerResourceNameTemplate: "%s",
 	}

 	// Create singleton xds client with this config. The xds client will be

@@ -174,5 +176,5 @@ func newNode(zone string, ipv6Capable bool) *v3corepb.Node {
 // direct path is enabled if this client is running on GCE, and the normal xDS
 // is not used (bootstrap env vars are not set).
 func runDirectPath() bool {
-	return env.BootstrapFileName == "" && env.BootstrapFileContent == "" && onGCE()
+	return envconfig.XDSBootstrapFileName == "" && envconfig.XDSBootstrapFileContent == "" && onGCE()
 }
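runDirectPath() above only enables direct path when neither xDS bootstrap variable is set. A tiny standalone sketch of that gate, with a hypothetical bootstrap file path, is:

package main

import (
	"fmt"
	"os"
)

// bootstrapConfigured mirrors the condition used by runDirectPath() above:
// ordinary xDS wins over direct path whenever a bootstrap source is present.
func bootstrapConfigured() bool {
	return os.Getenv("GRPC_XDS_BOOTSTRAP") != "" || os.Getenv("GRPC_XDS_BOOTSTRAP_CONFIG") != ""
}

func main() {
	os.Setenv("GRPC_XDS_BOOTSTRAP", "/etc/xds/bootstrap.json") // hypothetical path
	fmt.Println("use direct path:", !bootstrapConfigured())    // false once bootstrap is set
}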
2
vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go
generated
vendored
2
vendor/google.golang.org/grpc/xds/internal/balancer/balancer.go
generated
vendored
|
@ -20,10 +20,10 @@
|
||||||
package balancer
|
package balancer
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
_ "google.golang.org/grpc/balancer/weightedtarget" // Register the weighted_target balancer
|
||||||
_ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer
|
_ "google.golang.org/grpc/xds/internal/balancer/cdsbalancer" // Register the CDS balancer
|
||||||
_ "google.golang.org/grpc/xds/internal/balancer/clusterimpl" // Register the xds_cluster_impl balancer
|
_ "google.golang.org/grpc/xds/internal/balancer/clusterimpl" // Register the xds_cluster_impl balancer
|
||||||
_ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer
|
_ "google.golang.org/grpc/xds/internal/balancer/clustermanager" // Register the xds_cluster_manager balancer
|
||||||
_ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the xds_cluster_resolver balancer
|
_ "google.golang.org/grpc/xds/internal/balancer/clusterresolver" // Register the xds_cluster_resolver balancer
|
||||||
_ "google.golang.org/grpc/xds/internal/balancer/priority" // Register the priority balancer
|
_ "google.golang.org/grpc/xds/internal/balancer/priority" // Register the priority balancer
|
||||||
_ "google.golang.org/grpc/xds/internal/balancer/weightedtarget" // Register the weighted_target balancer
|
|
||||||
)
|
)
|
||||||
|
|
11
vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go
generated
vendored
11
vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go
generated
vendored
|
@ -38,6 +38,7 @@ import (
|
||||||
"google.golang.org/grpc/xds/internal/balancer/clusterresolver"
|
"google.golang.org/grpc/xds/internal/balancer/clusterresolver"
|
||||||
"google.golang.org/grpc/xds/internal/balancer/ringhash"
|
"google.golang.org/grpc/xds/internal/balancer/ringhash"
|
||||||
"google.golang.org/grpc/xds/internal/xdsclient"
|
"google.golang.org/grpc/xds/internal/xdsclient"
|
||||||
|
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -185,7 +186,7 @@ func (b *cdsBalancer) handleClientConnUpdate(update *ccUpdate) {
|
||||||
// management server, creates appropriate certificate provider plugins, and
|
// management server, creates appropriate certificate provider plugins, and
|
||||||
// updates the HandhakeInfo which is added as an address attribute in
|
// updates the HandhakeInfo which is added as an address attribute in
|
||||||
// NewSubConn() calls.
|
// NewSubConn() calls.
|
||||||
func (b *cdsBalancer) handleSecurityConfig(config *xdsclient.SecurityConfig) error {
|
func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) error {
|
||||||
// If xdsCredentials are not in use, i.e, the user did not want to get
|
// If xdsCredentials are not in use, i.e, the user did not want to get
|
||||||
// security configuration from an xDS server, we should not be acting on the
|
// security configuration from an xDS server, we should not be acting on the
|
||||||
// received security config here. Doing so poses a security threat.
|
// received security config here. Doing so poses a security threat.
|
||||||
|
@ -310,7 +311,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) {
|
||||||
dms := make([]clusterresolver.DiscoveryMechanism, len(update.updates))
|
dms := make([]clusterresolver.DiscoveryMechanism, len(update.updates))
|
||||||
for i, cu := range update.updates {
|
for i, cu := range update.updates {
|
||||||
switch cu.ClusterType {
|
switch cu.ClusterType {
|
||||||
case xdsclient.ClusterTypeEDS:
|
case xdsresource.ClusterTypeEDS:
|
||||||
dms[i] = clusterresolver.DiscoveryMechanism{
|
dms[i] = clusterresolver.DiscoveryMechanism{
|
||||||
Type: clusterresolver.DiscoveryMechanismTypeEDS,
|
Type: clusterresolver.DiscoveryMechanismTypeEDS,
|
||||||
Cluster: cu.ClusterName,
|
Cluster: cu.ClusterName,
|
||||||
|
@ -324,7 +325,7 @@ func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) {
|
||||||
dms[i].LoadReportingServerName = new(string)
|
dms[i].LoadReportingServerName = new(string)
|
||||||
|
|
||||||
}
|
}
|
||||||
case xdsclient.ClusterTypeLogicalDNS:
|
case xdsresource.ClusterTypeLogicalDNS:
|
||||||
dms[i] = clusterresolver.DiscoveryMechanism{
|
dms[i] = clusterresolver.DiscoveryMechanism{
|
||||||
Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS,
|
Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS,
|
||||||
DNSHostname: cu.DNSHostName,
|
DNSHostname: cu.DNSHostName,
|
||||||
|
@ -430,11 +431,11 @@ func (b *cdsBalancer) run() {
|
||||||
func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) {
|
func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) {
|
||||||
// This is not necessary today, because xds client never sends connection
|
// This is not necessary today, because xds client never sends connection
|
||||||
// errors.
|
// errors.
|
||||||
if fromParent && xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound {
|
if fromParent && xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound {
|
||||||
b.clusterHandler.close()
|
b.clusterHandler.close()
|
||||||
}
|
}
|
||||||
if b.childLB != nil {
|
if b.childLB != nil {
|
||||||
if xdsclient.ErrType(err) != xdsclient.ErrorTypeConnection {
|
if xdsresource.ErrType(err) != xdsresource.ErrorTypeConnection {
|
||||||
// Connection errors will be sent to the child balancers directly.
|
// Connection errors will be sent to the child balancers directly.
|
||||||
// There's no need to forward them.
|
// There's no need to forward them.
|
||||||
b.childLB.ResolverError(err)
|
b.childLB.ResolverError(err)
|
||||||
|
|
21
vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go
generated
vendored
21
vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go
generated
vendored
|
@ -21,6 +21,7 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"google.golang.org/grpc/xds/internal/xdsclient"
|
"google.golang.org/grpc/xds/internal/xdsclient"
|
||||||
|
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
|
||||||
)
|
)
|
||||||
|
|
||||||
var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update")
|
var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update")
|
||||||
|
@ -31,17 +32,17 @@ var errNotReceivedUpdate = errors.New("tried to construct a cluster update on a
|
||||||
// (if one doesn't already exist) and pushing the update to it.
|
// (if one doesn't already exist) and pushing the update to it.
|
||||||
type clusterHandlerUpdate struct {
|
type clusterHandlerUpdate struct {
|
||||||
// securityCfg is the Security Config from the top (root) cluster.
|
// securityCfg is the Security Config from the top (root) cluster.
|
||||||
securityCfg *xdsclient.SecurityConfig
|
securityCfg *xdsresource.SecurityConfig
|
||||||
// lbPolicy is the lb policy from the top (root) cluster.
|
// lbPolicy is the lb policy from the top (root) cluster.
|
||||||
//
|
//
|
||||||
// Currently, we only support roundrobin or ringhash, and since roundrobin
|
// Currently, we only support roundrobin or ringhash, and since roundrobin
|
||||||
// does need configs, this is only set to the ringhash config, if the policy
|
// does need configs, this is only set to the ringhash config, if the policy
|
||||||
// is ringhash. In the future, if we support more policies, we can make this
|
// is ringhash. In the future, if we support more policies, we can make this
|
||||||
// an interface, and set it to config of the other policies.
|
// an interface, and set it to config of the other policies.
|
||||||
lbPolicy *xdsclient.ClusterLBPolicyRingHash
|
lbPolicy *xdsresource.ClusterLBPolicyRingHash
|
||||||
|
|
||||||
// updates is a list of ClusterUpdates from all the leaf clusters.
|
// updates is a list of ClusterUpdates from all the leaf clusters.
|
||||||
updates []xdsclient.ClusterUpdate
|
updates []xdsresource.ClusterUpdate
|
||||||
err error
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -139,7 +140,7 @@ type clusterNode struct {
 
 	// A ClusterUpdate in order to build a list of cluster updates for CDS to
 	// send down to child XdsClusterResolverLoadBalancingPolicy.
-	clusterUpdate xdsclient.ClusterUpdate
+	clusterUpdate xdsresource.ClusterUpdate
 
 	// This boolean determines whether this Node has received an update or not.
 	// This isn't the best practice, but this will protect a list of Cluster
@@ -176,7 +177,7 @@ func (c *clusterNode) delete() {
 }
 
 // Construct cluster update (potentially a list of ClusterUpdates) for a node.
-func (c *clusterNode) constructClusterUpdate() ([]xdsclient.ClusterUpdate, error) {
+func (c *clusterNode) constructClusterUpdate() ([]xdsresource.ClusterUpdate, error) {
 	// If the cluster has not yet received an update, the cluster update is not
 	// yet ready.
 	if !c.receivedUpdate {
@@ -185,13 +186,13 @@ func (c *clusterNode) constructClusterUpdate() ([]xdsclient.ClusterUpdate, error
 
 	// Base case - LogicalDNS or EDS. Both of these cluster types will be tied
 	// to a single ClusterUpdate.
-	if c.clusterUpdate.ClusterType != xdsclient.ClusterTypeAggregate {
-		return []xdsclient.ClusterUpdate{c.clusterUpdate}, nil
+	if c.clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate {
+		return []xdsresource.ClusterUpdate{c.clusterUpdate}, nil
 	}
 
 	// If an aggregate construct a list by recursively calling down to all of
 	// it's children.
-	var childrenUpdates []xdsclient.ClusterUpdate
+	var childrenUpdates []xdsresource.ClusterUpdate
 	for _, child := range c.children {
 		childUpdateList, err := child.constructClusterUpdate()
 		if err != nil {
@@ -206,7 +207,7 @@ func (c *clusterNode) constructClusterUpdate() ([]xdsclient.ClusterUpdate, error
 // also handles any logic with regards to any child state that may have changed.
 // At the end of the handleResp(), the clusterUpdate will be pinged in certain
 // situations to try and construct an update to send back to CDS.
-func (c *clusterNode) handleResp(clusterUpdate xdsclient.ClusterUpdate, err error) {
+func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err error) {
 	c.clusterHandler.clusterMutex.Lock()
 	defer c.clusterHandler.clusterMutex.Unlock()
 	if err != nil { // Write this error for run() to pick up in CDS LB policy.
@@ -230,7 +231,7 @@ func (c *clusterNode) handleResp(clusterUpdate xdsclient.ClusterUpdate, err erro
 	// handler to return. Also, if there was any children from previously,
 	// delete the children, as the cluster type is no longer an aggregate
 	// cluster.
-	if clusterUpdate.ClusterType != xdsclient.ClusterTypeAggregate {
+	if clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate {
 		for _, child := range c.children {
 			child.delete()
 		}
4 vendor/google.golang.org/grpc/xds/internal/balancer/clustermanager/clustermanager.go generated vendored
@@ -25,12 +25,12 @@ import (
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/balancergroup"
 	internalgrpclog "google.golang.org/grpc/internal/grpclog"
 	"google.golang.org/grpc/internal/hierarchy"
 	"google.golang.org/grpc/internal/pretty"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/serviceconfig"
-	"google.golang.org/grpc/xds/internal/balancer/balancergroup"
 )
 
 const balancerName = "xds_cluster_manager_experimental"
@@ -46,7 +46,7 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal
 	b.logger = prefixLogger(b)
 	b.stateAggregator = newBalancerStateAggregator(cc, b.logger)
 	b.stateAggregator.start()
-	b.bg = balancergroup.New(cc, opts, b.stateAggregator, nil, b.logger)
+	b.bg = balancergroup.New(cc, opts, b.stateAggregator, b.logger)
 	b.bg.Start()
 	b.logger.Infof("Created")
 	return b
vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/clusterresolver.go generated vendored
@@ -36,6 +36,7 @@ import (
 	"google.golang.org/grpc/serviceconfig"
 	"google.golang.org/grpc/xds/internal/balancer/priority"
 	"google.golang.org/grpc/xds/internal/xdsclient"
+	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
 )
 
 // Name is the name of the cluster_resolver balancer.
@@ -244,7 +245,7 @@ func (b *clusterResolverBalancer) updateChildConfig() error {
 // In both cases, the sub-balancers will be receive the error.
 func (b *clusterResolverBalancer) handleErrorFromUpdate(err error, fromParent bool) {
 	b.logger.Warningf("Received error: %v", err)
-	if fromParent && xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound {
+	if fromParent && xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound {
 		// This is an error from the parent ClientConn (can be the parent CDS
 		// balancer), and is a resource-not-found error. This means the resource
 		// (can be either LDS or CDS) was removed. Stop the EDS watch.
22 vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/configbuilder.go generated vendored
@@ -25,6 +25,7 @@ import (
 
 	"google.golang.org/grpc/balancer/roundrobin"
 	"google.golang.org/grpc/balancer/weightedroundrobin"
+	"google.golang.org/grpc/balancer/weightedtarget"
 	"google.golang.org/grpc/internal/hierarchy"
 	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
 	"google.golang.org/grpc/resolver"
@@ -32,8 +33,7 @@ import (
 	"google.golang.org/grpc/xds/internal/balancer/clusterimpl"
 	"google.golang.org/grpc/xds/internal/balancer/priority"
 	"google.golang.org/grpc/xds/internal/balancer/ringhash"
-	"google.golang.org/grpc/xds/internal/balancer/weightedtarget"
-	"google.golang.org/grpc/xds/internal/xdsclient"
+	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
 )
 
 const million = 1000000
@@ -48,7 +48,7 @@ const million = 1000000
 type priorityConfig struct {
 	mechanism DiscoveryMechanism
 	// edsResp is set only if type is EDS.
-	edsResp xdsclient.EndpointsUpdate
+	edsResp xdsresource.EndpointsUpdate
 	// addresses is set only if type is DNS.
 	addresses []string
 }
@@ -169,7 +169,7 @@ func buildClusterImplConfigForDNS(parentPriority int, addrStrs []string) (string
 // - map{"p0":p0_config, "p1":p1_config}
 // - [p0_address_0, p0_address_1, p1_address_0, p1_address_1]
 // - p0 addresses' hierarchy attributes are set to p0
-func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsclient.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) {
+func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsresource.EndpointsUpdate, mechanism DiscoveryMechanism, xdsLBPolicy *internalserviceconfig.BalancerConfig) ([]string, map[string]*clusterimpl.LBConfig, []resolver.Address, error) {
 	drops := make([]clusterimpl.DropConfig, 0, len(edsResp.Drops))
 	for _, d := range edsResp.Drops {
 		drops = append(drops, clusterimpl.DropConfig{
@@ -205,9 +205,9 @@ func buildClusterImplConfigForEDS(parentPriority int, edsResp xdsclient.Endpoint
 // For example, for L0-p0, L1-p0, L2-p1, results will be
 // - ["p0", "p1"]
 // - map{"p0":[L0, L1], "p1":[L2]}
-func groupLocalitiesByPriority(localities []xdsclient.Locality) ([]string, map[string][]xdsclient.Locality) {
+func groupLocalitiesByPriority(localities []xdsresource.Locality) ([]string, map[string][]xdsresource.Locality) {
 	var priorityIntSlice []int
-	priorities := make(map[string][]xdsclient.Locality)
+	priorities := make(map[string][]xdsresource.Locality)
 	for _, locality := range localities {
 		if locality.Weight == 0 {
 			continue
@@ -252,7 +252,7 @@ var rrBalancerConfig = &internalserviceconfig.BalancerConfig{Name: roundrobin.Na
 // priorityLocalitiesToClusterImpl takes a list of localities (with the same
 // priority), and generates a cluster impl policy config, and a list of
 // addresses.
-func priorityLocalitiesToClusterImpl(localities []xdsclient.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) {
+func priorityLocalitiesToClusterImpl(localities []xdsresource.Locality, priorityName string, mechanism DiscoveryMechanism, drops []clusterimpl.DropConfig, xdsLBPolicy *internalserviceconfig.BalancerConfig) (*clusterimpl.LBConfig, []resolver.Address, error) {
 	clusterImplCfg := &clusterimpl.LBConfig{
 		Cluster:        mechanism.Cluster,
 		EDSServiceName: mechanism.EDSServiceName,
@@ -293,7 +293,7 @@ func priorityLocalitiesToClusterImpl(localities []xdsclient.Locality, priorityNa
 //
 // The addresses have path hierarchy set to [priority-name], so priority knows
 // which child policy they are for.
-func localitiesToRingHash(localities []xdsclient.Locality, priorityName string) []resolver.Address {
+func localitiesToRingHash(localities []xdsresource.Locality, priorityName string) []resolver.Address {
 	var addrs []resolver.Address
 	for _, locality := range localities {
 		var lw uint32 = 1
@@ -308,7 +308,7 @@ func localitiesToRingHash(localities []xdsclient.Locality, priorityName string)
 			// Filter out all "unhealthy" endpoints (unknown and healthy are
 			// both considered to be healthy:
 			// https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus).
-			if endpoint.HealthStatus != xdsclient.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsclient.EndpointHealthStatusUnknown {
+			if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown {
 				continue
 			}
 
@@ -333,7 +333,7 @@ func localitiesToRingHash(localities []xdsclient.Locality, priorityName string)
 //
 // The addresses have path hierarchy set to [priority-name, locality-name], so
 // priority and weighted target know which child policy they are for.
-func localitiesToWeightedTarget(localities []xdsclient.Locality, priorityName string, childPolicy *internalserviceconfig.BalancerConfig) (*weightedtarget.LBConfig, []resolver.Address) {
+func localitiesToWeightedTarget(localities []xdsresource.Locality, priorityName string, childPolicy *internalserviceconfig.BalancerConfig) (*weightedtarget.LBConfig, []resolver.Address) {
 	weightedTargets := make(map[string]weightedtarget.Target)
 	var addrs []resolver.Address
 	for _, locality := range localities {
@@ -346,7 +346,7 @@ func localitiesToWeightedTarget(localities []xdsclient.Locality, priorityName st
 			// Filter out all "unhealthy" endpoints (unknown and healthy are
 			// both considered to be healthy:
 			// https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/core/health_check.proto#envoy-api-enum-core-healthstatus).
-			if endpoint.HealthStatus != xdsclient.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsclient.EndpointHealthStatusUnknown {
+			if endpoint.HealthStatus != xdsresource.EndpointHealthStatusHealthy && endpoint.HealthStatus != xdsresource.EndpointHealthStatusUnknown {
 				continue
 			}
 
vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go generated vendored
@@ -22,6 +22,7 @@ import (
 	"sync"
 
 	"google.golang.org/grpc/xds/internal/xdsclient"
+	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
 )
 
 // resourceUpdate is a combined update from all the resources, in the order of
@@ -186,7 +187,7 @@ func (rr *resourceResolver) generate() {
 			return
 		}
 		switch uu := u.(type) {
-		case xdsclient.EndpointsUpdate:
+		case xdsresource.EndpointsUpdate:
 			ret = append(ret, priorityConfig{mechanism: rDM.dm, edsResp: uu})
 		case []string:
 			ret = append(ret, priorityConfig{mechanism: rDM.dm, addresses: uu})
@@ -202,7 +203,7 @@ func (rr *resourceResolver) generate() {
 type edsDiscoveryMechanism struct {
 	cancel func()
 
-	update         xdsclient.EndpointsUpdate
+	update         xdsresource.EndpointsUpdate
 	updateReceived bool
 }
 
@@ -224,7 +225,7 @@ func (er *edsDiscoveryMechanism) stop() {
 func newEDSResolver(nameToWatch string, xdsc xdsclient.XDSClient, topLevelResolver *resourceResolver) *edsDiscoveryMechanism {
 	ret := &edsDiscoveryMechanism{}
 	topLevelResolver.parent.logger.Infof("EDS watch started on %v", nameToWatch)
-	cancel := xdsc.WatchEndpoints(nameToWatch, func(update xdsclient.EndpointsUpdate, err error) {
+	cancel := xdsc.WatchEndpoints(nameToWatch, func(update xdsresource.EndpointsUpdate, err error) {
 		topLevelResolver.mu.Lock()
 		defer topLevelResolver.mu.Unlock()
 		if err != nil {
4 vendor/google.golang.org/grpc/xds/internal/balancer/priority/balancer.go generated vendored
@@ -30,6 +30,7 @@ import (
 	"time"
 
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/internal/balancergroup"
 	"google.golang.org/grpc/internal/buffer"
 	"google.golang.org/grpc/internal/grpclog"
 	"google.golang.org/grpc/internal/grpcsync"
@@ -37,7 +38,6 @@ import (
 	"google.golang.org/grpc/internal/pretty"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/serviceconfig"
-	"google.golang.org/grpc/xds/internal/balancer/balancergroup"
 )
 
 // Name is the name of the priority balancer.
@@ -59,7 +59,7 @@ func (bb) Build(cc balancer.ClientConn, bOpts balancer.BuildOptions) balancer.Ba
 	}
 
 	b.logger = prefixLogger(b)
-	b.bg = balancergroup.New(cc, bOpts, b, nil, b.logger)
+	b.bg = balancergroup.New(cc, bOpts, b, b.logger)
 	b.bg.Start()
 	go b.run()
 	b.logger.Infof("Created")
67 vendor/google.golang.org/grpc/xds/internal/clusterspecifier/cluster_specifier.go generated vendored Normal file
@@ -0,0 +1,67 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package clusterspecifier contains the ClusterSpecifier interface and a registry for
+// storing and retrieving their implementations.
+package clusterspecifier
+
+import (
+	"github.com/golang/protobuf/proto"
+)
+
+// BalancerConfig is the Go Native JSON representation of a balancer
+// configuration.
+type BalancerConfig []map[string]interface{}
+
+// ClusterSpecifier defines the parsing functionality of a Cluster Specifier.
+type ClusterSpecifier interface {
+	// TypeURLs are the proto message types supported by this
+	// ClusterSpecifierPlugin. A ClusterSpecifierPlugin will be registered by
+	// each of its supported message types.
+	TypeURLs() []string
+	// ParseClusterSpecifierConfig parses the provided configuration
+	// proto.Message from the top level RDS configuration. The resulting
+	// BalancerConfig will be used as configuration for a child LB Policy of the
+	// Cluster Manager LB Policy.
+	ParseClusterSpecifierConfig(proto.Message) (BalancerConfig, error)
+}
+
+var (
+	// m is a map from scheme to filter.
+	m = make(map[string]ClusterSpecifier)
+)
+
+// Register registers the ClusterSpecifierPlugin to the ClusterSpecifier map.
+// cs.TypeURLs() will be used as the types for this ClusterSpecifierPlugin.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple cluster specifier
+// plugins are registered with the same type URL, the one registered last will
+// take effect.
+func Register(cs ClusterSpecifier) {
+	for _, u := range cs.TypeURLs() {
+		m[u] = cs
+	}
+}
+
+// Get returns the ClusterSpecifier registered with typeURL.
+//
+// If no cluster specifier is registered with typeURL, nil will be returned.
+func Get(typeURL string) ClusterSpecifier {
+	return m[typeURL]
+}
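Aside: the new registry above follows gRPC's usual register-at-init pattern (the same shape as the httpfilter registry). The sketch below shows how a hypothetical plugin might hook into it; the type name, type URL, and child-policy name are made up for illustration, and the package is internal to grpc-go, so user code cannot actually import it.

package exampleplugin

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	"google.golang.org/grpc/xds/internal/clusterspecifier"
)

// examplePlugin is a hypothetical cluster specifier plugin.
type examplePlugin struct{}

// TypeURLs lists the (illustrative) proto message types this plugin handles.
func (examplePlugin) TypeURLs() []string {
	return []string{"type.googleapis.com/example.ExampleClusterSpecifier"}
}

// ParseClusterSpecifierConfig converts the RDS-provided proto.Message into the
// JSON-like configuration of a child LB policy of the cluster manager.
func (examplePlugin) ParseClusterSpecifierConfig(cfg proto.Message) (clusterspecifier.BalancerConfig, error) {
	if cfg == nil {
		return nil, fmt.Errorf("nil configuration message")
	}
	// A real plugin would derive the child policy config from cfg.
	return clusterspecifier.BalancerConfig{{"example_lb_policy": map[string]interface{}{}}}, nil
}

func init() {
	// Must run at init time: the registry map is not synchronized.
	clusterspecifier.Register(examplePlugin{})
}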
88 vendor/google.golang.org/grpc/xds/internal/httpfilter/rbac/rbac.go generated vendored
@@ -27,8 +27,8 @@ import (
 
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/ptypes"
+	"google.golang.org/grpc/internal/envconfig"
 	"google.golang.org/grpc/internal/resolver"
-	"google.golang.org/grpc/internal/xds/env"
 	"google.golang.org/grpc/internal/xds/rbac"
 	"google.golang.org/grpc/xds/internal/httpfilter"
 	"google.golang.org/protobuf/types/known/anypb"
@@ -38,7 +38,7 @@ import (
 )
 
 func init() {
-	if env.RBACSupport {
+	if envconfig.XDSRBAC {
 		httpfilter.Register(builder{})
 	}
 }
@@ -64,7 +64,7 @@ type builder struct {
 
 type config struct {
 	httpfilter.FilterConfig
-	config *rpb.RBAC
+	chainEngine *rbac.ChainEngine
 }
 
 func (builder) TypeURLs() []string {
@@ -90,23 +90,57 @@ func parseConfig(rbacCfg *rpb.RBAC) (httpfilter.FilterConfig, error) {
 		// "It is also a validation failure if Permission or Principal has a
 		// header matcher for a grpc- prefixed header name or :scheme." - A41
 		for _, principal := range policy.Principals {
-			if principal.GetHeader() != nil {
-				name := principal.GetHeader().GetName()
-				if name == ":scheme" || strings.HasPrefix(name, "grpc-") {
-					return nil, fmt.Errorf("rbac: principal header matcher for %v is :scheme or starts with grpc", name)
-				}
+			name := principal.GetHeader().GetName()
+			if name == ":scheme" || strings.HasPrefix(name, "grpc-") {
+				return nil, fmt.Errorf("rbac: principal header matcher for %v is :scheme or starts with grpc", name)
 			}
 		}
 		for _, permission := range policy.Permissions {
-			if permission.GetHeader() != nil {
-				name := permission.GetHeader().GetName()
-				if name == ":scheme" || strings.HasPrefix(name, "grpc-") {
-					return nil, fmt.Errorf("rbac: permission header matcher for %v is :scheme or starts with grpc", name)
-				}
+			name := permission.GetHeader().GetName()
+			if name == ":scheme" || strings.HasPrefix(name, "grpc-") {
+				return nil, fmt.Errorf("rbac: permission header matcher for %v is :scheme or starts with grpc", name)
 			}
 		}
 	}
-	return config{config: rbacCfg}, nil
+
+	// "Envoy aliases :authority and Host in its header map implementation, so
+	// they should be treated equivalent for the RBAC matchers; there must be no
+	// behavior change depending on which of the two header names is used in the
+	// RBAC policy." - A41. Loop through config's principals and policies, change
+	// any header matcher with value "host" to :authority", as that is what
+	// grpc-go shifts both headers to in transport layer.
+	for _, policy := range rbacCfg.GetRules().GetPolicies() {
+		for _, principal := range policy.Principals {
+			if principal.GetHeader().GetName() == "host" {
+				principal.GetHeader().Name = ":authority"
+			}
+		}
+		for _, permission := range policy.Permissions {
+			if permission.GetHeader().GetName() == "host" {
+				permission.GetHeader().Name = ":authority"
+			}
+		}
+	}
+
+	// Two cases where this HTTP Filter is a no op:
+	// "If absent, no enforcing RBAC policy will be applied" - RBAC
+	// Documentation for Rules field.
+	// "At this time, if the RBAC.action is Action.LOG then the policy will be
+	// completely ignored, as if RBAC was not configurated." - A41
+	if rbacCfg.Rules == nil || rbacCfg.GetRules().GetAction() == v3rbacpb.RBAC_LOG {
+		return config{}, nil
+	}
+
+	ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{rbacCfg.GetRules()})
+	if err != nil {
+		// "At this time, if the RBAC.action is Action.LOG then the policy will be
+		// completely ignored, as if RBAC was not configurated." - A41
+		if rbacCfg.GetRules().GetAction() != v3rbacpb.RBAC_LOG {
+			return nil, fmt.Errorf("rbac: error constructing matching engine: %v", err)
+		}
+	}
+
+	return config{chainEngine: ce}, nil
 }
 
 func (builder) ParseFilterConfig(cfg proto.Message) (httpfilter.FilterConfig, error) {
@@ -166,49 +200,15 @@ func (builder) BuildServerInterceptor(cfg httpfilter.FilterConfig, override http
 		}
 	}
 
-	icfg := c.config
+	// RBAC HTTP Filter is a no op from one of these two cases:
 	// "If absent, no enforcing RBAC policy will be applied" - RBAC
 	// Documentation for Rules field.
-	if icfg.Rules == nil {
-		return nil, nil
-	}
-
 	// "At this time, if the RBAC.action is Action.LOG then the policy will be
 	// completely ignored, as if RBAC was not configurated." - A41
-	if icfg.Rules.Action == v3rbacpb.RBAC_LOG {
+	if c.chainEngine == nil {
 		return nil, nil
 	}
-
-	// "Envoy aliases :authority and Host in its header map implementation, so
-	// they should be treated equivalent for the RBAC matchers; there must be no
-	// behavior change depending on which of the two header names is used in the
-	// RBAC policy." - A41. Loop through config's principals and policies, change
-	// any header matcher with value "host" to :authority", as that is what
-	// grpc-go shifts both headers to in transport layer.
-	for _, policy := range icfg.Rules.GetPolicies() {
-		for _, principal := range policy.Principals {
-			if principal.GetHeader() != nil {
-				name := principal.GetHeader().GetName()
-				if name == "host" {
-					principal.GetHeader().Name = ":authority"
-				}
-			}
-		}
-		for _, permission := range policy.Permissions {
-			if permission.GetHeader() != nil {
-				name := permission.GetHeader().GetName()
-				if name == "host" {
-					permission.GetHeader().Name = ":authority"
-				}
-			}
-		}
-	}
-
-	ce, err := rbac.NewChainEngine([]*v3rbacpb.RBAC{icfg.Rules})
-	if err != nil {
-		return nil, fmt.Errorf("error constructing matching engine: %v", err)
-	}
-	return &interceptor{chainEngine: ce}, nil
+	return &interceptor{chainEngine: c.chainEngine}, nil
 }
 
 type interceptor struct {
78 vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go generated vendored
@@ -29,23 +29,25 @@ import (
 
 	xxhash "github.com/cespare/xxhash/v2"
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/internal/envconfig"
 	"google.golang.org/grpc/internal/grpcrand"
 	iresolver "google.golang.org/grpc/internal/resolver"
 	"google.golang.org/grpc/internal/serviceconfig"
 	"google.golang.org/grpc/internal/wrr"
-	"google.golang.org/grpc/internal/xds/env"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/status"
 	"google.golang.org/grpc/xds/internal/balancer/clustermanager"
 	"google.golang.org/grpc/xds/internal/balancer/ringhash"
 	"google.golang.org/grpc/xds/internal/httpfilter"
 	"google.golang.org/grpc/xds/internal/httpfilter/router"
-	"google.golang.org/grpc/xds/internal/xdsclient"
+	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
 )
 
 const (
 	cdsName               = "cds_experimental"
 	xdsClusterManagerName = "xds_cluster_manager_experimental"
+	clusterPrefix                = "cluster:"
+	clusterSpecifierPluginPrefix = "cluster_specifier_plugin:"
 )
 
 type serviceConfig struct {
@@ -86,10 +88,8 @@ func (r *xdsResolver) pruneActiveClusters() {
 func serviceConfigJSON(activeClusters map[string]*clusterInfo) ([]byte, error) {
 	// Generate children (all entries in activeClusters).
 	children := make(map[string]xdsChildConfig)
-	for cluster := range activeClusters {
-		children[cluster] = xdsChildConfig{
-			ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster}),
-		}
+	for cluster, ci := range activeClusters {
+		children[cluster] = ci.cfg
 	}
 
 	sc := serviceConfig{
@@ -109,7 +109,7 @@ type virtualHost struct {
 	// map from filter name to its config
 	httpFilterConfigOverride map[string]httpfilter.FilterConfig
 	// retry policy present in virtual host
-	retryConfig *xdsclient.RetryConfig
+	retryConfig *xdsresource.RetryConfig
 }
 
 // routeCluster holds information about a cluster as referenced by a route.
@@ -120,13 +120,13 @@ type routeCluster struct {
 }
 
 type route struct {
-	m                 *xdsclient.CompositeMatcher // converted from route matchers
+	m                 *xdsresource.CompositeMatcher // converted from route matchers
 	clusters          wrr.WRR // holds *routeCluster entries
 	maxStreamDuration time.Duration
 	// map from filter name to its config
 	httpFilterConfigOverride map[string]httpfilter.FilterConfig
-	retryConfig              *xdsclient.RetryConfig
-	hashPolicies             []*xdsclient.HashPolicy
+	retryConfig              *xdsresource.RetryConfig
+	hashPolicies             []*xdsresource.HashPolicy
 }
 
 func (r route) String() string {
@@ -138,7 +138,7 @@ type configSelector struct {
 	virtualHost      virtualHost
 	routes           []route
 	clusters         map[string]*clusterInfo
-	httpFilterConfig []xdsclient.HTTPFilter
+	httpFilterConfig []xdsresource.HTTPFilter
 }
 
 var errNoMatchedRouteFound = status.Errorf(codes.Unavailable, "no matched route was found")
@@ -158,10 +158,12 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP
 	if rt == nil || rt.clusters == nil {
 		return nil, errNoMatchedRouteFound
 	}
+
 	cluster, ok := rt.clusters.Next().(*routeCluster)
 	if !ok {
 		return nil, status.Errorf(codes.Internal, "error retrieving cluster for match: %v (%T)", cluster, cluster)
 	}
+
 	// Add a ref to the selected cluster, as this RPC needs this cluster until
 	// it is committed.
 	ref := &cs.clusters[cluster.name].refCount
@@ -174,7 +176,7 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP
 
 	lbCtx := clustermanager.SetPickedCluster(rpcInfo.Context, cluster.name)
 	// Request Hashes are only applicable for a Ring Hash LB.
-	if env.RingHashSupport {
+	if envconfig.XDSRingHash {
 		lbCtx = ringhash.SetRequestHash(lbCtx, cs.generateHash(rpcInfo, rt.hashPolicies))
 	}
 
@@ -208,7 +210,7 @@ func (cs *configSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RP
 	return config, nil
 }
 
-func retryConfigToPolicy(config *xdsclient.RetryConfig) *serviceconfig.RetryPolicy {
+func retryConfigToPolicy(config *xdsresource.RetryConfig) *serviceconfig.RetryPolicy {
 	return &serviceconfig.RetryPolicy{
 		MaxAttempts:    int(config.NumRetries) + 1,
 		InitialBackoff: config.RetryBackoff.BaseInterval,
@@ -218,14 +220,14 @@ func retryConfigToPolicy(config *xdsclient.RetryConfig) *serviceconfig.RetryPoli
 	}
 }
 
-func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsclient.HashPolicy) uint64 {
+func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsresource.HashPolicy) uint64 {
 	var hash uint64
 	var generatedHash bool
 	for _, policy := range hashPolicies {
 		var policyHash uint64
 		var generatedPolicyHash bool
 		switch policy.HashPolicyType {
-		case xdsclient.HashPolicyTypeHeader:
+		case xdsresource.HashPolicyTypeHeader:
 			md, ok := metadata.FromOutgoingContext(rpcInfo.Context)
 			if !ok {
 				continue
@@ -242,7 +244,7 @@ func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies [
 			policyHash = xxhash.Sum64String(joinedValues)
 			generatedHash = true
 			generatedPolicyHash = true
-		case xdsclient.HashPolicyTypeChannelID:
+		case xdsresource.HashPolicyTypeChannelID:
 			// Hash the ClientConn pointer which logically uniquely
 			// identifies the client.
 			policyHash = xxhash.Sum64String(fmt.Sprintf("%p", &cs.r.cc))
@@ -353,26 +355,30 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro
 
 	for i, rt := range su.virtualHost.Routes {
 		clusters := newWRR()
-		for cluster, wc := range rt.WeightedClusters {
+		if rt.ClusterSpecifierPlugin != "" {
+			clusterName := clusterSpecifierPluginPrefix + rt.ClusterSpecifierPlugin
 			clusters.Add(&routeCluster{
-				name:                     cluster,
-				httpFilterConfigOverride: wc.HTTPFilterConfigOverride,
-			}, int64(wc.Weight))
-
-			// Initialize entries in cs.clusters map, creating entries in
-			// r.activeClusters as necessary. Set to zero as they will be
-			// incremented by incRefs.
-			ci := r.activeClusters[cluster]
-			if ci == nil {
-				ci = &clusterInfo{refCount: 0}
-				r.activeClusters[cluster] = ci
+				name: clusterName,
+			}, 1)
+			cs.initializeCluster(clusterName, xdsChildConfig{
+				ChildPolicy: balancerConfig(su.clusterSpecifierPlugins[rt.ClusterSpecifierPlugin]),
+			})
+		} else {
+			for cluster, wc := range rt.WeightedClusters {
+				clusterName := clusterPrefix + cluster
+				clusters.Add(&routeCluster{
+					name:                     clusterName,
+					httpFilterConfigOverride: wc.HTTPFilterConfigOverride,
+				}, int64(wc.Weight))
+				cs.initializeCluster(clusterName, xdsChildConfig{
+					ChildPolicy: newBalancerConfig(cdsName, cdsBalancerConfig{Cluster: cluster}),
+				})
 			}
-			cs.clusters[cluster] = ci
 		}
 		cs.routes[i].clusters = clusters
 
 		var err error
-		cs.routes[i].m, err = xdsclient.RouteToMatcher(rt)
+		cs.routes[i].m, err = xdsresource.RouteToMatcher(rt)
 		if err != nil {
 			return nil, err
 		}
@@ -397,9 +403,25 @@ func (r *xdsResolver) newConfigSelector(su serviceUpdate) (*configSelector, erro
 	return cs, nil
 }
 
+// initializeCluster initializes entries in cs.clusters map, creating entries in
+// r.activeClusters as necessary. Any created entries will have a ref count set
+// to zero as their ref count will be incremented by incRefs.
+func (cs *configSelector) initializeCluster(clusterName string, cfg xdsChildConfig) {
+	ci := cs.r.activeClusters[clusterName]
+	if ci == nil {
+		ci = &clusterInfo{refCount: 0}
+		cs.r.activeClusters[clusterName] = ci
+	}
+	cs.clusters[clusterName] = ci
+	cs.clusters[clusterName].cfg = cfg
+}
+
 type clusterInfo struct {
 	// number of references to this cluster; accessed atomically
 	refCount int32
+	// cfg is the child configuration for this cluster, containing either the
+	// csp config or the cds cluster config.
+	cfg xdsChildConfig
 }
 
 type interceptorList struct {
24 vendor/google.golang.org/grpc/xds/internal/resolver/watch_service.go generated vendored
@@ -25,7 +25,9 @@ import (
 
 	"google.golang.org/grpc/internal/grpclog"
 	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/xds/internal/clusterspecifier"
 	"google.golang.org/grpc/xds/internal/xdsclient"
+	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
 )
 
 // serviceUpdate contains information received from the LDS/RDS responses which
@@ -33,7 +35,10 @@ import (
 // making a LDS to get the RouteConfig name.
 type serviceUpdate struct {
 	// virtualHost contains routes and other configuration to route RPCs.
-	virtualHost *xdsclient.VirtualHost
+	virtualHost *xdsresource.VirtualHost
+	// clusterSpecifierPlugins contains the configurations for any cluster
+	// specifier plugins emitted by the xdsclient.
+	clusterSpecifierPlugins map[string]clusterspecifier.BalancerConfig
 	// ldsConfig contains configuration that applies to all routes.
 	ldsConfig ldsConfig
 }
@@ -44,7 +49,7 @@ type ldsConfig struct {
 	// maxStreamDuration is from the HTTP connection manager's
 	// common_http_protocol_options field.
 	maxStreamDuration time.Duration
-	httpFilterConfig  []xdsclient.HTTPFilter
+	httpFilterConfig  []xdsresource.HTTPFilter
 }
 
 // watchService uses LDS and RDS to discover information about the provided
@@ -81,7 +86,7 @@ type serviceUpdateWatcher struct {
 	rdsCancel func()
 }
 
-func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, err error) {
+func (w *serviceUpdateWatcher) handleLDSResp(update xdsresource.ListenerUpdate, err error) {
 	w.logger.Infof("received LDS update: %+v, err: %v", pretty.ToJSON(update), err)
 	w.mu.Lock()
 	defer w.mu.Unlock()
@@ -93,7 +98,7 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, er
 		// type we check is ResourceNotFound, which indicates the LDS resource
 		// was removed, and besides sending the error to callback, we also
 		// cancel the RDS watch.
-		if xdsclient.ErrType(err) == xdsclient.ErrorTypeResourceNotFound && w.rdsCancel != nil {
+		if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound && w.rdsCancel != nil {
 			w.rdsCancel()
 			w.rdsName = ""
 			w.rdsCancel = nil
@@ -119,7 +124,7 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, er
 		}
 
 		// Handle the inline RDS update as if it's from an RDS watch.
-		w.updateVirtualHostsFromRDS(*update.InlineRouteConfig)
+		w.applyRouteConfigUpdate(*update.InlineRouteConfig)
 		return
 	}
 
@@ -150,8 +155,8 @@ func (w *serviceUpdateWatcher) handleLDSResp(update xdsclient.ListenerUpdate, er
 	w.rdsCancel = w.c.WatchRouteConfig(update.RouteConfigName, w.handleRDSResp)
 }
 
-func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsclient.RouteConfigUpdate) {
-	matchVh := xdsclient.FindBestMatchingVirtualHost(w.serviceName, update.VirtualHosts)
+func (w *serviceUpdateWatcher) applyRouteConfigUpdate(update xdsresource.RouteConfigUpdate) {
+	matchVh := xdsresource.FindBestMatchingVirtualHost(w.serviceName, update.VirtualHosts)
 	if matchVh == nil {
 		// No matching virtual host found.
 		w.serviceCb(serviceUpdate{}, fmt.Errorf("no matching virtual host found for %q", w.serviceName))
@@ -159,10 +164,11 @@ func (w *serviceUpdateWatcher) updateVirtualHostsFromRDS(update xdsclient.RouteC
 	}
 
 	w.lastUpdate.virtualHost = matchVh
+	w.lastUpdate.clusterSpecifierPlugins = update.ClusterSpecifierPlugins
 	w.serviceCb(w.lastUpdate, nil)
 }
 
-func (w *serviceUpdateWatcher) handleRDSResp(update xdsclient.RouteConfigUpdate, err error) {
+func (w *serviceUpdateWatcher) handleRDSResp(update xdsresource.RouteConfigUpdate, err error) {
 	w.logger.Infof("received RDS update: %+v, err: %v", pretty.ToJSON(update), err)
 	w.mu.Lock()
 	defer w.mu.Unlock()
@@ -178,7 +184,7 @@ func (w *serviceUpdateWatcher) handleRDSResp(update xdsclient.RouteConfigUpdate,
 		w.serviceCb(serviceUpdate{}, err)
 		return
 	}
-	w.updateVirtualHostsFromRDS(update)
+	w.applyRouteConfigUpdate(update)
 }
 
 func (w *serviceUpdateWatcher) close() {
54 vendor/google.golang.org/grpc/xds/internal/resolver/xds_resolver.go generated vendored
@@ -22,6 +22,7 @@ package resolver
 import (
 	"errors"
 	"fmt"
+	"strings"
 
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/internal/grpclog"
@@ -30,6 +31,8 @@ import (
 	iresolver "google.golang.org/grpc/internal/resolver"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/xds/internal/xdsclient"
+	"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
+	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
 )
 
 const xdsScheme = "xds"
@@ -60,7 +63,7 @@ type xdsResolverBuilder struct {
 //
 // The xds bootstrap process is performed (and a new xds client is built) every
 // time an xds resolver is built.
-func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (_ resolver.Resolver, retErr error) {
 	r := &xdsResolver{
 		target: t,
 		cc:     cc,
@@ -68,7 +71,12 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op
 		updateCh:       make(chan suWithError, 1),
 		activeClusters: make(map[string]*clusterInfo),
 	}
-	r.logger = prefixLogger((r))
+	defer func() {
+		if retErr != nil {
+			r.Close()
+		}
+	}()
+	r.logger = prefixLogger(r)
 	r.logger.Infof("Creating resolver for target: %+v", t)
 
 	newXDSClient := newXDSClient
@@ -81,6 +89,10 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op
 		return nil, fmt.Errorf("xds: failed to create xds-client: %v", err)
 	}
 	r.client = client
+	bootstrapConfig := client.BootstrapConfig()
+	if bootstrapConfig == nil {
+		return nil, errors.New("bootstrap configuration is empty")
+	}
 
 	// If xds credentials were specified by the user, but bootstrap configs do
 	// not contain any certificate provider configuration, it is better to fail
@@ -94,14 +106,36 @@ func (b *xdsResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, op
 		creds = opts.CredsBundle.TransportCredentials()
 	}
 	if xc, ok := creds.(interface{ UsesXDS() bool }); ok && xc.UsesXDS() {
-		bc := client.BootstrapConfig()
-		if len(bc.CertProviderConfigs) == 0 {
+		if len(bootstrapConfig.CertProviderConfigs) == 0 {
 			return nil, errors.New("xds: xdsCreds specified but certificate_providers config missing in bootstrap file")
 		}
 	}
 
+	// Find the client listener template to use from the bootstrap config:
+	// - If authority is not set in the target, use the top level template
+	// - If authority is set, use the template from the authority map.
+	template := bootstrapConfig.ClientDefaultListenerResourceNameTemplate
+	if authority := r.target.URL.Host; authority != "" {
+		a := bootstrapConfig.Authorities[authority]
+		if a == nil {
+			return nil, fmt.Errorf("xds: authority %q is not found in the bootstrap file", authority)
+		}
+		if a.ClientListenerResourceNameTemplate != "" {
+			// This check will never be false, because
+			// ClientListenerResourceNameTemplate is required to start with
+			// xdstp://, and has a default value (not an empty string) if unset.
+			template = a.ClientListenerResourceNameTemplate
+		}
+	}
+	endpoint := r.target.URL.Path
+	if endpoint == "" {
+		endpoint = r.target.URL.Opaque
+	}
+	endpoint = strings.TrimPrefix(endpoint, "/")
+	resourceName := bootstrap.PopulateResourceTemplate(template, endpoint)
+
 	// Register a watch on the xdsClient for the user's dial target.
-	cancelWatch := watchService(r.client, r.target.Endpoint, r.handleServiceUpdate, r.logger)
+	cancelWatch := watchService(r.client, resourceName, r.handleServiceUpdate, r.logger)
 	r.logger.Infof("Watch started on resource name %v with xds-client %p", r.target.Endpoint, r.client)
 	r.cancelWatch = func() {
 		cancelWatch()
@@ -171,7 +205,6 @@ func (r *xdsResolver) sendNewServiceConfig(cs *configSelector) bool {
 		return true
 	}
 
-	// Produce the service config.
 	sc, err := serviceConfigJSON(r.activeClusters)
 	if err != nil {
 		// JSON marshal error; should never happen.
@@ -199,7 +232,7 @@ func (r *xdsResolver) run() {
 		case update := <-r.updateCh:
 			if update.err != nil {
 				r.logger.Warningf("Watch error on resource %v from xds-client %p, %v", r.target.Endpoint, r.client, update.err)
-				if xdsclient.ErrType(update.err) == xdsclient.ErrorTypeResourceNotFound {
+				if xdsresource.ErrType(update.err) == xdsresource.ErrorTypeResourceNotFound {
 					// If error is resource-not-found, it means the LDS
 					// resource was removed. Ultimately send an empty service
 					// config, which picks pick-first, with no address, and
@@ -268,8 +301,15 @@ func (*xdsResolver) ResolveNow(o resolver.ResolveNowOptions) {}
 
 // Close closes the resolver, and also closes the underlying xdsClient.
 func (r *xdsResolver) Close() {
+	// Note that Close needs to check for nils even if some of them are always
+	// set in the constructor. This is because the constructor defers Close() in
+	// error cases, and the fields might not be set when the error happens.
+	if r.cancelWatch != nil {
 		r.cancelWatch()
+	}
+	if r.client != nil {
 		r.client.Close()
+	}
 	r.closed.Fire()
 	r.logger.Infof("Shutdown")
 }
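For readability, the resource-name derivation added to Build above can be read as the standalone helper sketched below. This is a simplified restatement for illustration only, not the vendored implementation, and it relies on the internal bootstrap package, so it is not callable from user code.

package resolversketch

import (
	"fmt"
	"strings"

	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
)

// listenerResourceName mirrors the lookup performed in Build above: pick the
// per-authority listener template when the target names an authority,
// otherwise use the top-level default, then substitute the dial target into it.
func listenerResourceName(cfg *bootstrap.Config, target resolver.Target) (string, error) {
	template := cfg.ClientDefaultListenerResourceNameTemplate
	if authority := target.URL.Host; authority != "" {
		a := cfg.Authorities[authority]
		if a == nil {
			return "", fmt.Errorf("xds: authority %q is not found in the bootstrap file", authority)
		}
		if a.ClientListenerResourceNameTemplate != "" {
			template = a.ClientListenerResourceNameTemplate
		}
	}
	// The dial target may appear in either URL.Path or URL.Opaque depending on
	// how the "xds:" target was written.
	endpoint := target.URL.Path
	if endpoint == "" {
		endpoint = target.URL.Opaque
	}
	endpoint = strings.TrimPrefix(endpoint, "/")
	return bootstrap.PopulateResourceTemplate(template, endpoint), nil
}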
8
vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go
generated
vendored
8
vendor/google.golang.org/grpc/xds/internal/server/conn_wrapper.go
generated
vendored
|
@ -27,7 +27,7 @@ import (
|
||||||
|
|
||||||
"google.golang.org/grpc/credentials/tls/certprovider"
|
"google.golang.org/grpc/credentials/tls/certprovider"
|
||||||
xdsinternal "google.golang.org/grpc/internal/credentials/xds"
|
xdsinternal "google.golang.org/grpc/internal/credentials/xds"
|
||||||
"google.golang.org/grpc/xds/internal/xdsclient"
|
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
|
||||||
)
|
)
|
||||||
|
|
||||||
// connWrapper is a thin wrapper around a net.Conn returned by Accept(). It
|
// connWrapper is a thin wrapper around a net.Conn returned by Accept(). It
|
||||||
|
@ -43,7 +43,7 @@ type connWrapper struct {
|
||||||
net.Conn
|
net.Conn
|
||||||
|
|
||||||
// The specific filter chain picked for handling this connection.
|
// The specific filter chain picked for handling this connection.
|
||||||
filterChain *xdsclient.FilterChain
|
filterChain *xdsresource.FilterChain
|
||||||
|
|
||||||
// A reference to the listenerWrapper on which this connection was accepted.
|
// A reference to the listenerWrapper on which this connection was accepted.
|
||||||
parent *listenerWrapper
|
parent *listenerWrapper
|
||||||
|
@ -61,11 +61,11 @@ type connWrapper struct {
|
||||||
|
|
||||||
// The virtual hosts with matchable routes and instantiated HTTP Filters per
|
// The virtual hosts with matchable routes and instantiated HTTP Filters per
|
||||||
// route.
|
// route.
|
||||||
virtualHosts []xdsclient.VirtualHostWithInterceptors
|
virtualHosts []xdsresource.VirtualHostWithInterceptors
|
||||||
}
|
}
|
||||||
|
|
||||||
// VirtualHosts returns the virtual hosts to be used for server side routing.
|
// VirtualHosts returns the virtual hosts to be used for server side routing.
|
||||||
func (c *connWrapper) VirtualHosts() []xdsclient.VirtualHostWithInterceptors {
|
func (c *connWrapper) VirtualHosts() []xdsresource.VirtualHostWithInterceptors {
|
||||||
return c.virtualHosts
|
return c.virtualHosts
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
30 vendor/google.golang.org/grpc/xds/internal/server/listener_wrapper.go generated vendored
|
@ -33,11 +33,11 @@ import (
|
||||||
"google.golang.org/grpc/connectivity"
|
"google.golang.org/grpc/connectivity"
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
internalbackoff "google.golang.org/grpc/internal/backoff"
|
internalbackoff "google.golang.org/grpc/internal/backoff"
|
||||||
|
"google.golang.org/grpc/internal/envconfig"
|
||||||
internalgrpclog "google.golang.org/grpc/internal/grpclog"
|
internalgrpclog "google.golang.org/grpc/internal/grpclog"
|
||||||
"google.golang.org/grpc/internal/grpcsync"
|
"google.golang.org/grpc/internal/grpcsync"
|
||||||
"google.golang.org/grpc/internal/xds/env"
|
|
||||||
"google.golang.org/grpc/xds/internal/xdsclient"
|
|
||||||
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
|
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
|
||||||
|
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -73,8 +73,8 @@ func prefixLogger(p *listenerWrapper) *internalgrpclog.PrefixLogger {
|
||||||
// XDSClient wraps the methods on the XDSClient which are required by
|
// XDSClient wraps the methods on the XDSClient which are required by
|
||||||
// the listenerWrapper.
|
// the listenerWrapper.
|
||||||
type XDSClient interface {
|
type XDSClient interface {
|
||||||
WatchListener(string, func(xdsclient.ListenerUpdate, error)) func()
|
WatchListener(string, func(xdsresource.ListenerUpdate, error)) func()
|
||||||
WatchRouteConfig(string, func(xdsclient.RouteConfigUpdate, error)) func()
|
WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func()
|
||||||
BootstrapConfig() *bootstrap.Config
|
BootstrapConfig() *bootstrap.Config
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -136,7 +136,7 @@ func NewListenerWrapper(params ListenerWrapperParams) (net.Listener, <-chan stru
|
||||||
}
|
}
|
||||||
|
|
||||||
type ldsUpdateWithError struct {
|
type ldsUpdateWithError struct {
|
||||||
update xdsclient.ListenerUpdate
|
update xdsresource.ListenerUpdate
|
||||||
err error
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -182,7 +182,7 @@ type listenerWrapper struct {
|
||||||
// Current serving mode.
|
// Current serving mode.
|
||||||
mode connectivity.ServingMode
|
mode connectivity.ServingMode
|
||||||
// Filter chains received as part of the last good update.
|
// Filter chains received as part of the last good update.
|
||||||
filterChains *xdsclient.FilterChainManager
|
filterChains *xdsresource.FilterChainManager
|
||||||
|
|
||||||
// rdsHandler is used for any dynamic RDS resources specified in a LDS
|
// rdsHandler is used for any dynamic RDS resources specified in a LDS
|
||||||
// update.
|
// update.
|
||||||
|
@ -250,7 +250,7 @@ func (l *listenerWrapper) Accept() (net.Conn, error) {
|
||||||
conn.Close()
|
conn.Close()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
fc, err := l.filterChains.Lookup(xdsclient.FilterChainLookupParams{
|
fc, err := l.filterChains.Lookup(xdsresource.FilterChainLookupParams{
|
||||||
IsUnspecifiedListener: l.isUnspecifiedAddr,
|
IsUnspecifiedListener: l.isUnspecifiedAddr,
|
||||||
DestAddr: destAddr.IP,
|
DestAddr: destAddr.IP,
|
||||||
SourceAddr: srcAddr.IP,
|
SourceAddr: srcAddr.IP,
|
||||||
|
@ -273,15 +273,15 @@ func (l *listenerWrapper) Accept() (net.Conn, error) {
|
||||||
conn.Close()
|
conn.Close()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if !env.RBACSupport {
|
if !envconfig.XDSRBAC {
|
||||||
return &connWrapper{Conn: conn, filterChain: fc, parent: l}, nil
|
return &connWrapper{Conn: conn, filterChain: fc, parent: l}, nil
|
||||||
}
|
}
|
||||||
var rc xdsclient.RouteConfigUpdate
|
var rc xdsresource.RouteConfigUpdate
|
||||||
if fc.InlineRouteConfig != nil {
|
if fc.InlineRouteConfig != nil {
|
||||||
rc = *fc.InlineRouteConfig
|
rc = *fc.InlineRouteConfig
|
||||||
} else {
|
} else {
|
||||||
rcPtr := atomic.LoadPointer(&l.rdsUpdates)
|
rcPtr := atomic.LoadPointer(&l.rdsUpdates)
|
||||||
rcuPtr := (*map[string]xdsclient.RouteConfigUpdate)(rcPtr)
|
rcuPtr := (*map[string]xdsresource.RouteConfigUpdate)(rcPtr)
|
||||||
// This shouldn't happen, but this error protects against a panic.
|
// This shouldn't happen, but this error protects against a panic.
|
||||||
if rcuPtr == nil {
|
if rcuPtr == nil {
|
||||||
return nil, errors.New("route configuration pointer is nil")
|
return nil, errors.New("route configuration pointer is nil")
|
||||||
|
@ -340,7 +340,7 @@ func (l *listenerWrapper) run() {
|
||||||
// handleLDSUpdate is the callback which handles LDS Updates. It writes the
|
// handleLDSUpdate is the callback which handles LDS Updates. It writes the
|
||||||
// received update to the update channel, which is picked up by the run
|
// received update to the update channel, which is picked up by the run
|
||||||
// goroutine.
|
// goroutine.
|
||||||
func (l *listenerWrapper) handleListenerUpdate(update xdsclient.ListenerUpdate, err error) {
|
func (l *listenerWrapper) handleListenerUpdate(update xdsresource.ListenerUpdate, err error) {
|
||||||
if l.closed.HasFired() {
|
if l.closed.HasFired() {
|
||||||
l.logger.Warningf("Resource %q received update: %v with error: %v, after listener was closed", l.name, update, err)
|
l.logger.Warningf("Resource %q received update: %v with error: %v, after listener was closed", l.name, update, err)
|
||||||
return
|
return
|
||||||
|
@ -364,7 +364,7 @@ func (l *listenerWrapper) handleRDSUpdate(update rdsHandlerUpdate) {
|
||||||
}
|
}
|
||||||
if update.err != nil {
|
if update.err != nil {
|
||||||
l.logger.Warningf("Received error for rds names specified in resource %q: %+v", l.name, update.err)
|
l.logger.Warningf("Received error for rds names specified in resource %q: %+v", l.name, update.err)
|
||||||
if xdsclient.ErrType(update.err) == xdsclient.ErrorTypeResourceNotFound {
|
if xdsresource.ErrType(update.err) == xdsresource.ErrorTypeResourceNotFound {
|
||||||
l.switchMode(nil, connectivity.ServingModeNotServing, update.err)
|
l.switchMode(nil, connectivity.ServingModeNotServing, update.err)
|
||||||
}
|
}
|
||||||
// For errors which are anything other than "resource-not-found", we
|
// For errors which are anything other than "resource-not-found", we
|
||||||
|
@ -380,7 +380,7 @@ func (l *listenerWrapper) handleRDSUpdate(update rdsHandlerUpdate) {
|
||||||
func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) {
|
func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) {
|
||||||
if update.err != nil {
|
if update.err != nil {
|
||||||
l.logger.Warningf("Received error for resource %q: %+v", l.name, update.err)
|
l.logger.Warningf("Received error for resource %q: %+v", l.name, update.err)
|
||||||
if xdsclient.ErrType(update.err) == xdsclient.ErrorTypeResourceNotFound {
|
if xdsresource.ErrType(update.err) == xdsresource.ErrorTypeResourceNotFound {
|
||||||
l.switchMode(nil, connectivity.ServingModeNotServing, update.err)
|
l.switchMode(nil, connectivity.ServingModeNotServing, update.err)
|
||||||
}
|
}
|
||||||
// For errors which are anything other than "resource-not-found", we
|
// For errors which are anything other than "resource-not-found", we
|
||||||
|
@ -414,7 +414,7 @@ func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) {
|
||||||
// Server's state to ServingModeNotServing. That prevents new connections
|
// Server's state to ServingModeNotServing. That prevents new connections
|
||||||
// from being accepted, whereas here we simply want the clients to reconnect
|
// from being accepted, whereas here we simply want the clients to reconnect
|
||||||
// to get the updated configuration.
|
// to get the updated configuration.
|
||||||
if env.RBACSupport {
|
if envconfig.XDSRBAC {
|
||||||
if l.drainCallback != nil {
|
if l.drainCallback != nil {
|
||||||
l.drainCallback(l.Listener.Addr())
|
l.drainCallback(l.Listener.Addr())
|
||||||
}
|
}
|
||||||
|
@ -429,7 +429,7 @@ func (l *listenerWrapper) handleLDSUpdate(update ldsUpdateWithError) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (l *listenerWrapper) switchMode(fcs *xdsclient.FilterChainManager, newMode connectivity.ServingMode, err error) {
|
func (l *listenerWrapper) switchMode(fcs *xdsresource.FilterChainManager, newMode connectivity.ServingMode, err error) {
|
||||||
l.mu.Lock()
|
l.mu.Lock()
|
||||||
defer l.mu.Unlock()
|
defer l.mu.Unlock()
|
||||||
|
|
||||||
|
|
12 vendor/google.golang.org/grpc/xds/internal/server/rds_handler.go generated vendored
|
@ -21,13 +21,13 @@ package server
|
||||||
import (
|
import (
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
"google.golang.org/grpc/xds/internal/xdsclient"
|
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
|
||||||
)
|
)
|
||||||
|
|
||||||
// rdsHandlerUpdate wraps the full set of RouteConfigUpdates that are dynamically
|
// rdsHandlerUpdate wraps the full set of RouteConfigUpdates that are dynamically
|
||||||
// queried for a given server side listener.
|
// queried for a given server side listener.
|
||||||
type rdsHandlerUpdate struct {
|
type rdsHandlerUpdate struct {
|
||||||
updates map[string]xdsclient.RouteConfigUpdate
|
updates map[string]xdsresource.RouteConfigUpdate
|
||||||
err error
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -37,7 +37,7 @@ type rdsHandler struct {
|
||||||
xdsC XDSClient
|
xdsC XDSClient
|
||||||
|
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
updates map[string]xdsclient.RouteConfigUpdate
|
updates map[string]xdsresource.RouteConfigUpdate
|
||||||
cancels map[string]func()
|
cancels map[string]func()
|
||||||
|
|
||||||
// For a rdsHandler update, the only update wrapped listener cares about is
|
// For a rdsHandler update, the only update wrapped listener cares about is
|
||||||
|
@ -53,7 +53,7 @@ func newRDSHandler(xdsC XDSClient, ch chan rdsHandlerUpdate) *rdsHandler {
|
||||||
return &rdsHandler{
|
return &rdsHandler{
|
||||||
xdsC: xdsC,
|
xdsC: xdsC,
|
||||||
updateChannel: ch,
|
updateChannel: ch,
|
||||||
updates: make(map[string]xdsclient.RouteConfigUpdate),
|
updates: make(map[string]xdsresource.RouteConfigUpdate),
|
||||||
cancels: make(map[string]func()),
|
cancels: make(map[string]func()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -70,7 +70,7 @@ func (rh *rdsHandler) updateRouteNamesToWatch(routeNamesToWatch map[string]bool)
|
||||||
for routeName := range routeNamesToWatch {
|
for routeName := range routeNamesToWatch {
|
||||||
if _, ok := rh.cancels[routeName]; !ok {
|
if _, ok := rh.cancels[routeName]; !ok {
|
||||||
func(routeName string) {
|
func(routeName string) {
|
||||||
rh.cancels[routeName] = rh.xdsC.WatchRouteConfig(routeName, func(update xdsclient.RouteConfigUpdate, err error) {
|
rh.cancels[routeName] = rh.xdsC.WatchRouteConfig(routeName, func(update xdsresource.RouteConfigUpdate, err error) {
|
||||||
rh.handleRouteUpdate(routeName, update, err)
|
rh.handleRouteUpdate(routeName, update, err)
|
||||||
})
|
})
|
||||||
}(routeName)
|
}(routeName)
|
||||||
|
@ -97,7 +97,7 @@ func (rh *rdsHandler) updateRouteNamesToWatch(routeNamesToWatch map[string]bool)
|
||||||
// handleRouteUpdate persists the route config for a given route name, and also
|
// handleRouteUpdate persists the route config for a given route name, and also
|
||||||
// sends an update to the Listener Wrapper on an error received or if the rds
|
// sends an update to the Listener Wrapper on an error received or if the rds
|
||||||
// handler has a full collection of updates.
|
// handler has a full collection of updates.
|
||||||
func (rh *rdsHandler) handleRouteUpdate(routeName string, update xdsclient.RouteConfigUpdate, err error) {
|
func (rh *rdsHandler) handleRouteUpdate(routeName string, update xdsresource.RouteConfigUpdate, err error) {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
drainAndPush(rh.updateChannel, rdsHandlerUpdate{err: err})
|
drainAndPush(rh.updateChannel, rdsHandlerUpdate{err: err})
|
||||||
return
|
return
|
||||||
|
|
17 vendor/google.golang.org/grpc/xds/internal/xdsclient/attributes.go generated vendored
|
@ -21,6 +21,7 @@ import (
|
||||||
"google.golang.org/grpc/resolver"
|
"google.golang.org/grpc/resolver"
|
||||||
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
|
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
|
||||||
"google.golang.org/grpc/xds/internal/xdsclient/load"
|
"google.golang.org/grpc/xds/internal/xdsclient/load"
|
||||||
|
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
|
||||||
)
|
)
|
||||||
|
|
||||||
type clientKeyType string
|
type clientKeyType string
|
||||||
|
@ -31,16 +32,16 @@ const clientKey = clientKeyType("grpc.xds.internal.client.Client")
|
||||||
// (collectively termed as xDS) on a remote management server, to discover
|
// (collectively termed as xDS) on a remote management server, to discover
|
||||||
// various dynamic resources.
|
// various dynamic resources.
|
||||||
type XDSClient interface {
|
type XDSClient interface {
|
||||||
WatchListener(string, func(ListenerUpdate, error)) func()
|
WatchListener(string, func(xdsresource.ListenerUpdate, error)) func()
|
||||||
WatchRouteConfig(string, func(RouteConfigUpdate, error)) func()
|
WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func()
|
||||||
WatchCluster(string, func(ClusterUpdate, error)) func()
|
WatchCluster(string, func(xdsresource.ClusterUpdate, error)) func()
|
||||||
WatchEndpoints(clusterName string, edsCb func(EndpointsUpdate, error)) (cancel func())
|
WatchEndpoints(clusterName string, edsCb func(xdsresource.EndpointsUpdate, error)) (cancel func())
|
||||||
ReportLoad(server string) (*load.Store, func())
|
ReportLoad(server string) (*load.Store, func())
|
||||||
|
|
||||||
DumpLDS() (string, map[string]UpdateWithMD)
|
DumpLDS() map[string]xdsresource.UpdateWithMD
|
||||||
DumpRDS() (string, map[string]UpdateWithMD)
|
DumpRDS() map[string]xdsresource.UpdateWithMD
|
||||||
DumpCDS() (string, map[string]UpdateWithMD)
|
DumpCDS() map[string]xdsresource.UpdateWithMD
|
||||||
DumpEDS() (string, map[string]UpdateWithMD)
|
DumpEDS() map[string]xdsresource.UpdateWithMD
|
||||||
|
|
||||||
BootstrapConfig() *bootstrap.Config
|
BootstrapConfig() *bootstrap.Config
|
||||||
Close()
|
Close()
|
||||||
|
|
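The XDSClient interface above is watch-based: each WatchX call registers a callback for a resource name and returns a cancel func. A small, self-contained sketch of consuming that shape of API (the client and update types here are hypothetical fakes that only mimic the watch/cancel contract):

package main

import "fmt"

// ListenerUpdate is a stand-in for xdsresource.ListenerUpdate; hypothetical.
type ListenerUpdate struct{ RouteConfigName string }

// fakeClient mimics the watch/cancel shape of the XDSClient interface.
type fakeClient struct {
	watchers map[string]func(ListenerUpdate, error)
}

// WatchListener registers a callback for a resource name and returns a cancel func.
func (c *fakeClient) WatchListener(name string, cb func(ListenerUpdate, error)) func() {
	c.watchers[name] = cb
	return func() { delete(c.watchers, name) }
}

// push simulates the management server sending an update for a resource.
func (c *fakeClient) push(name string, u ListenerUpdate) {
	if cb, ok := c.watchers[name]; ok {
		cb(u, nil)
	}
}

func main() {
	c := &fakeClient{watchers: map[string]func(ListenerUpdate, error){}}
	cancel := c.WatchListener("example.com:443", func(u ListenerUpdate, err error) {
		fmt.Println("update:", u.RouteConfigName, "err:", err)
	})
	c.push("example.com:443", ListenerUpdate{RouteConfigName: "route-A"})
	cancel()
	c.push("example.com:443", ListenerUpdate{RouteConfigName: "route-B"}) // no output: watch cancelled
}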
359 vendor/google.golang.org/grpc/xds/internal/xdsclient/bootstrap/bootstrap.go generated vendored
|
@ -25,9 +25,8 @@ import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"strings"
|
||||||
|
|
||||||
v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
|
|
||||||
v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
|
|
||||||
"github.com/golang/protobuf/jsonpb"
|
"github.com/golang/protobuf/jsonpb"
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
|
@ -35,9 +34,12 @@ import (
|
||||||
"google.golang.org/grpc/credentials/insecure"
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
"google.golang.org/grpc/credentials/tls/certprovider"
|
"google.golang.org/grpc/credentials/tls/certprovider"
|
||||||
"google.golang.org/grpc/internal"
|
"google.golang.org/grpc/internal"
|
||||||
|
"google.golang.org/grpc/internal/envconfig"
|
||||||
"google.golang.org/grpc/internal/pretty"
|
"google.golang.org/grpc/internal/pretty"
|
||||||
"google.golang.org/grpc/internal/xds/env"
|
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version"
|
||||||
"google.golang.org/grpc/xds/internal/version"
|
|
||||||
|
v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
|
||||||
|
v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -58,34 +60,184 @@ var gRPCVersion = fmt.Sprintf("%s %s", gRPCUserAgentName, grpc.Version)
|
||||||
// For overriding in unit tests.
|
// For overriding in unit tests.
|
||||||
var bootstrapFileReadFunc = ioutil.ReadFile
|
var bootstrapFileReadFunc = ioutil.ReadFile
|
||||||
|
|
||||||
// Config provides the xDS client with several key bits of information that it
|
// ServerConfig contains the configuration to connect to a server, including
|
||||||
// requires in its interaction with the management server. The Config is
|
// URI, creds, and transport API version (e.g. v2 or v3).
|
||||||
// initialized from the bootstrap file.
|
type ServerConfig struct {
|
||||||
type Config struct {
|
// ServerURI is the management server to connect to.
|
||||||
// BalancerName is the name of the management server to connect to.
|
|
||||||
//
|
//
|
||||||
// The bootstrap file contains a list of servers (with name+creds), but we
|
// The bootstrap file contains an ordered list of xDS servers to contact for
|
||||||
// pick the first one.
|
// this authority. The first one is picked.
|
||||||
BalancerName string
|
ServerURI string
|
||||||
// Creds contains the credentials to be used while talking to the xDS
|
// Creds contains the credentials to be used while talking to the xDS
|
||||||
// server, as a grpc.DialOption.
|
// server, as a grpc.DialOption.
|
||||||
Creds grpc.DialOption
|
Creds grpc.DialOption
|
||||||
|
// CredsType is the type of the creds. It will be used to dedup servers.
|
||||||
|
CredsType string
|
||||||
// TransportAPI indicates the API version of xDS transport protocol to use.
|
// TransportAPI indicates the API version of xDS transport protocol to use.
|
||||||
// This describes the xDS gRPC endpoint and version of
|
// This describes the xDS gRPC endpoint and version of
|
||||||
// DiscoveryRequest/Response used on the wire.
|
// DiscoveryRequest/Response used on the wire.
|
||||||
TransportAPI version.TransportAPI
|
TransportAPI version.TransportAPI
|
||||||
// NodeProto contains the Node proto to be used in xDS requests. The actual
|
// NodeProto contains the Node proto to be used in xDS requests. The actual
|
||||||
// type depends on the transport protocol version used.
|
// type depends on the transport protocol version used.
|
||||||
|
//
|
||||||
|
// Note that it's specified in the bootstrap globally for all the servers,
|
||||||
|
// but we keep it in each server config so that its type (e.g. *v2pb.Node or
|
||||||
|
// *v3pb.Node) is consistent with the transport API version.
|
||||||
NodeProto proto.Message
|
NodeProto proto.Message
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns the string representation of the ServerConfig.
|
||||||
|
//
|
||||||
|
// This string representation will be used as map keys in federation
|
||||||
|
// (`map[ServerConfig]authority`), so that the xDS ClientConn and stream will be
|
||||||
|
// shared by authorities with different names but the same server config.
|
||||||
|
//
|
||||||
|
// It covers (almost) all the fields so the string can represent the config
|
||||||
|
// content. It doesn't cover NodeProto because NodeProto isn't used by
|
||||||
|
// federation.
|
||||||
|
func (sc *ServerConfig) String() string {
|
||||||
|
var ver string
|
||||||
|
switch sc.TransportAPI {
|
||||||
|
case version.TransportV3:
|
||||||
|
ver = "xDSv3"
|
||||||
|
case version.TransportV2:
|
||||||
|
ver = "xDSv2"
|
||||||
|
}
|
||||||
|
return strings.Join([]string{sc.ServerURI, sc.CredsType, ver}, "-")
|
||||||
|
}
|
||||||
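Because the string form covers URI, creds type and transport version, two authorities that point at the same server collapse to one map entry. A rough, self-contained sketch of that dedup idea (the local serverConfig type only mirrors the fields String covers and is not the bootstrap type itself):

package main

import (
	"fmt"
	"strings"
)

// serverConfig mirrors just the fields that ServerConfig.String covers; hypothetical.
type serverConfig struct {
	ServerURI string
	CredsType string
	Version   string
}

func (sc serverConfig) String() string {
	return strings.Join([]string{sc.ServerURI, sc.CredsType, sc.Version}, "-")
}

func main() {
	a := serverConfig{"xds.example.org:443", "google_default", "xDSv3"}
	b := serverConfig{"xds.example.org:443", "google_default", "xDSv3"}

	// Deduplicate by the string key: both authorities share one entry.
	conns := map[string][]string{}
	conns[a.String()] = append(conns[a.String()], "authority-1")
	conns[b.String()] = append(conns[b.String()], "authority-2")
	fmt.Println(len(conns), conns[a.String()]) // 1 [authority-1 authority-2]
}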
|
|
||||||
|
// UnmarshalJSON takes the json data (a list of servers) and unmarshals the
|
||||||
|
// first one in the list.
|
||||||
|
func (sc *ServerConfig) UnmarshalJSON(data []byte) error {
|
||||||
|
var servers []*xdsServer
|
||||||
|
if err := json.Unmarshal(data, &servers); err != nil {
|
||||||
|
return fmt.Errorf("xds: json.Unmarshal(data) for field xds_servers failed during bootstrap: %v", err)
|
||||||
|
}
|
||||||
|
if len(servers) < 1 {
|
||||||
|
return fmt.Errorf("xds: bootstrap file parsing failed during bootstrap: file doesn't contain any management server to connect to")
|
||||||
|
}
|
||||||
|
xs := servers[0]
|
||||||
|
sc.ServerURI = xs.ServerURI
|
||||||
|
for _, cc := range xs.ChannelCreds {
|
||||||
|
// We stop at the first credential type that we support.
|
||||||
|
sc.CredsType = cc.Type
|
||||||
|
if cc.Type == credsGoogleDefault {
|
||||||
|
sc.Creds = grpc.WithCredentialsBundle(google.NewDefaultCredentials())
|
||||||
|
break
|
||||||
|
} else if cc.Type == credsInsecure {
|
||||||
|
sc.Creds = grpc.WithTransportCredentials(insecure.NewCredentials())
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, f := range xs.ServerFeatures {
|
||||||
|
if f == serverFeaturesV3 {
|
||||||
|
sc.TransportAPI = version.TransportV3
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
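The unmarshaller above accepts the bootstrap's list form but keeps only the first entry. A self-contained sketch of the same "list in, first element out" decoding, assuming simplified field names that only echo the bootstrap keys used here:

package main

import (
	"encoding/json"
	"fmt"
)

// server mirrors the subset of an xds_servers entry used in this sketch; hypothetical.
type server struct {
	ServerURI      string   `json:"server_uri"`
	ServerFeatures []string `json:"server_features"`
}

// firstServer decodes a JSON list of servers and keeps only the first one,
// the same shape of logic as ServerConfig.UnmarshalJSON above.
type firstServer struct {
	URI        string
	SupportsV3 bool
}

func (f *firstServer) UnmarshalJSON(data []byte) error {
	var servers []server
	if err := json.Unmarshal(data, &servers); err != nil {
		return err
	}
	if len(servers) < 1 {
		return fmt.Errorf("no management server to connect to")
	}
	f.URI = servers[0].ServerURI
	for _, feat := range servers[0].ServerFeatures {
		if feat == "xds_v3" {
			f.SupportsV3 = true
		}
	}
	return nil
}

func main() {
	raw := []byte(`[{"server_uri":"first:443","server_features":["xds_v3"]},{"server_uri":"second:443"}]`)
	var f firstServer
	if err := json.Unmarshal(raw, &f); err != nil {
		panic(err)
	}
	fmt.Println(f.URI, f.SupportsV3) // first:443 true
}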
|
|
||||||
|
// Authority contains configuration for an Authority for an xDS control plane
|
||||||
|
// server. See the Authorities field in the Config struct for how it's used.
|
||||||
|
type Authority struct {
|
||||||
|
// ClientListenerResourceNameTemplate is the template for the name of the
|
||||||
|
// Listener resource to subscribe to for a gRPC client channel. Used only
|
||||||
|
// when the channel is created using an "xds:" URI with this authority name.
|
||||||
|
//
|
||||||
|
// The token "%s", if present in this string, will be replaced
|
||||||
|
// with %-encoded service authority (i.e., the path part of the target
|
||||||
|
// URI used to create the gRPC channel).
|
||||||
|
//
|
||||||
|
// Must start with "xdstp://<authority_name>/". If it does not,
|
||||||
|
// that is considered a bootstrap file parsing error.
|
||||||
|
//
|
||||||
|
// If not present in the bootstrap file, defaults to
|
||||||
|
// "xdstp://<authority_name>/envoy.config.listener.v3.Listener/%s".
|
||||||
|
ClientListenerResourceNameTemplate string
|
||||||
|
// XDSServer contains the management server and config to connect to for
|
||||||
|
// this authority.
|
||||||
|
XDSServer *ServerConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalJSON implements the json.Unmarshaler interface.
|
||||||
|
func (a *Authority) UnmarshalJSON(data []byte) error {
|
||||||
|
var jsonData map[string]json.RawMessage
|
||||||
|
if err := json.Unmarshal(data, &jsonData); err != nil {
|
||||||
|
return fmt.Errorf("xds: failed to parse authority: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for k, v := range jsonData {
|
||||||
|
switch k {
|
||||||
|
case "xds_servers":
|
||||||
|
if err := json.Unmarshal(v, &a.XDSServer); err != nil {
|
||||||
|
return fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
|
||||||
|
}
|
||||||
|
case "client_listener_resource_name_template":
|
||||||
|
if err := json.Unmarshal(v, &a.ClientListenerResourceNameTemplate); err != nil {
|
||||||
|
return fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Config provides the xDS client with several key bits of information that it
|
||||||
|
// requires in its interaction with the management server. The Config is
|
||||||
|
// initialized from the bootstrap file.
|
||||||
|
type Config struct {
|
||||||
|
// XDSServer is the management server to connect to.
|
||||||
|
//
|
||||||
|
// The bootstrap file contains a list of servers (with name+creds), but we
|
||||||
|
// pick the first one.
|
||||||
|
XDSServer *ServerConfig
|
||||||
// CertProviderConfigs contains a mapping from certificate provider plugin
|
// CertProviderConfigs contains a mapping from certificate provider plugin
|
||||||
// instance names to parsed buildable configs.
|
// instance names to parsed buildable configs.
|
||||||
CertProviderConfigs map[string]*certprovider.BuildableConfig
|
CertProviderConfigs map[string]*certprovider.BuildableConfig
|
||||||
// ServerListenerResourceNameTemplate is a template for the name of the
|
// ServerListenerResourceNameTemplate is a template for the name of the
|
||||||
// Listener resource to subscribe to for a gRPC server. If the token `%s` is
|
// Listener resource to subscribe to for a gRPC server.
|
||||||
// present in the string, it will be replaced with the server's listening
|
//
|
||||||
// "IP:port" (e.g., "0.0.0.0:8080", "[::]:8080"). For example, a value of
|
// If starts with "xdstp:", will be interpreted as a new-style name,
|
||||||
// "example/resource/%s" could become "example/resource/0.0.0.0:8080".
|
// in which case the authority of the URI will be used to select the
|
||||||
|
// relevant configuration in the "authorities" map.
|
||||||
|
//
|
||||||
|
// The token "%s", if present in this string, will be replaced with the IP
|
||||||
|
// and port on which the server is listening. (e.g., "0.0.0.0:8080",
|
||||||
|
// "[::]:8080"). For example, a value of "example/resource/%s" could become
|
||||||
|
// "example/resource/0.0.0.0:8080". If the template starts with "xdstp:",
|
||||||
|
// the replaced string will be %-encoded.
|
||||||
|
//
|
||||||
|
// There is no default; if unset, xDS-based server creation fails.
|
||||||
ServerListenerResourceNameTemplate string
|
ServerListenerResourceNameTemplate string
|
||||||
|
// A template for the name of the Listener resource to subscribe to
|
||||||
|
// for a gRPC client channel. Used only when the channel is created
|
||||||
|
// with an "xds:" URI with no authority.
|
||||||
|
//
|
||||||
|
// If starts with "xdstp:", will be interpreted as a new-style name,
|
||||||
|
// in which case the authority of the URI will be used to select the
|
||||||
|
// relevant configuration in the "authorities" map.
|
||||||
|
//
|
||||||
|
// The token "%s", if present in this string, will be replaced with
|
||||||
|
// the service authority (i.e., the path part of the target URI
|
||||||
|
// used to create the gRPC channel). If the template starts with
|
||||||
|
// "xdstp:", the replaced string will be %-encoded.
|
||||||
|
//
|
||||||
|
// Defaults to "%s".
|
||||||
|
ClientDefaultListenerResourceNameTemplate string
|
||||||
|
|
||||||
|
// Authorities is a map of authority name to corresponding configuration.
|
||||||
|
//
|
||||||
|
// This is used in the following cases:
|
||||||
|
// - A gRPC client channel is created using an "xds:" URI that includes
|
||||||
|
// an authority.
|
||||||
|
// - A gRPC client channel is created using an "xds:" URI with no
|
||||||
|
// authority, but the "client_default_listener_resource_name_template"
|
||||||
|
// field above turns it into an "xdstp:" URI.
|
||||||
|
// - A gRPC server is created and the
|
||||||
|
// "server_listener_resource_name_template" field is an "xdstp:" URI.
|
||||||
|
//
|
||||||
|
// In any of those cases, it is an error if the specified authority is
|
||||||
|
// not present in this map.
|
||||||
|
Authorities map[string]*Authority
|
||||||
}
|
}
|
||||||
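Putting the fields above together, a bootstrap file that exercises the new federation-related fields could look roughly like the sketch below. The server URIs and authority names are placeholders, not defaults; only the key names come from the struct documentation above.

{
  "xds_servers": [
    {
      "server_uri": "xds.example.org:443",
      "channel_creds": [{ "type": "google_default" }],
      "server_features": ["xds_v3"]
    }
  ],
  "client_default_listener_resource_name_template": "%s",
  "server_listener_resource_name_template": "grpc/server?xds.resource.listening_address=%s",
  "authorities": {
    "authority-a.example.org": {
      "xds_servers": [
        { "server_uri": "xds-a.example.org:443", "channel_creds": [{ "type": "insecure" }] }
      ],
      "client_listener_resource_name_template": "xdstp://authority-a.example.org/envoy.config.listener.v3.Listener/%s"
    }
  }
}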
|
|
||||||
type channelCreds struct {
|
type channelCreds struct {
|
||||||
|
@ -100,8 +252,8 @@ type xdsServer struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func bootstrapConfigFromEnvVariable() ([]byte, error) {
|
func bootstrapConfigFromEnvVariable() ([]byte, error) {
|
||||||
fName := env.BootstrapFileName
|
fName := envconfig.XDSBootstrapFileName
|
||||||
fContent := env.BootstrapFileContent
|
fContent := envconfig.XDSBootstrapFileContent
|
||||||
|
|
||||||
// Bootstrap file name has higher priority than bootstrap content.
|
// Bootstrap file name has higher priority than bootstrap content.
|
||||||
if fName != "" {
|
if fName != "" {
|
||||||
|
@ -119,40 +271,13 @@ func bootstrapConfigFromEnvVariable() ([]byte, error) {
|
||||||
return []byte(fContent), nil
|
return []byte(fContent), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, fmt.Errorf("none of the bootstrap environment variables (%q or %q) defined", env.BootstrapFileNameEnv, env.BootstrapFileContentEnv)
|
return nil, fmt.Errorf("none of the bootstrap environment variables (%q or %q) defined",
|
||||||
|
envconfig.XDSBootstrapFileNameEnv, envconfig.XDSBootstrapFileContentEnv)
|
||||||
}
|
}
|
||||||
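A minimal sketch of the same "file name wins over inline content" lookup. GRPC_XDS_BOOTSTRAP is the variable named in the NewConfig documentation below; the content variable name GRPC_XDS_BOOTSTRAP_CONFIG is assumed here for illustration.

package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

// loadBootstrap mirrors the priority in bootstrapConfigFromEnvVariable: a file
// named by GRPC_XDS_BOOTSTRAP wins over inline JSON content.
func loadBootstrap() ([]byte, error) {
	if fName := os.Getenv("GRPC_XDS_BOOTSTRAP"); fName != "" {
		return ioutil.ReadFile(fName)
	}
	if fContent := os.Getenv("GRPC_XDS_BOOTSTRAP_CONFIG"); fContent != "" {
		return []byte(fContent), nil
	}
	return nil, fmt.Errorf("neither bootstrap env variable is set")
}

func main() {
	data, err := loadBootstrap()
	if err != nil {
		fmt.Println("bootstrap not configured:", err)
		return
	}
	fmt.Printf("loaded %d bytes of bootstrap config\n", len(data))
}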
|
|
||||||
// NewConfig returns a new instance of Config initialized by reading the
|
// NewConfig returns a new instance of Config initialized by reading the
|
||||||
// bootstrap file found at ${GRPC_XDS_BOOTSTRAP}.
|
// bootstrap file found at ${GRPC_XDS_BOOTSTRAP}.
|
||||||
//
|
//
|
||||||
// The format of the bootstrap file will be as follows:
|
|
||||||
// {
|
|
||||||
// "xds_servers": [
|
|
||||||
// {
|
|
||||||
// "server_uri": <string containing URI of management server>,
|
|
||||||
// "channel_creds": [
|
|
||||||
// {
|
|
||||||
// "type": <string containing channel cred type>,
|
|
||||||
// "config": <JSON object containing config for the type>
|
|
||||||
// }
|
|
||||||
// ],
|
|
||||||
// "server_features": [ ... ],
|
|
||||||
// }
|
|
||||||
// ],
|
|
||||||
// "node": <JSON form of Node proto>,
|
|
||||||
// "certificate_providers" : {
|
|
||||||
// "default": {
|
|
||||||
// "plugin_name": "default-plugin-name",
|
|
||||||
// "config": { default plugin config in JSON }
|
|
||||||
// },
|
|
||||||
// "foo": {
|
|
||||||
// "plugin_name": "foo",
|
|
||||||
// "config": { foo plugin config in JSON }
|
|
||||||
// }
|
|
||||||
// },
|
|
||||||
// "server_listener_resource_name_template": "grpc/server?xds.resource.listening_address=%s"
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// Currently, we support exactly one type of credential, which is
|
// Currently, we support exactly one type of credential, which is
|
||||||
// "google_default", where we use the host's default certs for transport
|
// "google_default", where we use the host's default certs for transport
|
||||||
// credentials and a Google oauth token for call credentials.
|
// credentials and a Google oauth token for call credentials.
|
||||||
|
@ -162,6 +287,8 @@ func bootstrapConfigFromEnvVariable() ([]byte, error) {
|
||||||
// fields left unspecified, in which case the caller should use some sane
|
// fields left unspecified, in which case the caller should use some sane
|
||||||
// defaults.
|
// defaults.
|
||||||
func NewConfig() (*Config, error) {
|
func NewConfig() (*Config, error) {
|
||||||
|
// Examples of the bootstrap json can be found in the generator tests
|
||||||
|
// https://github.com/GoogleCloudPlatform/traffic-director-grpc-bootstrap/blob/master/main_test.go.
|
||||||
data, err := bootstrapConfigFromEnvVariable()
|
data, err := bootstrapConfigFromEnvVariable()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("xds: Failed to read bootstrap config: %v", err)
|
return nil, fmt.Errorf("xds: Failed to read bootstrap config: %v", err)
|
||||||
|
@ -181,7 +308,7 @@ func NewConfigFromContents(data []byte) (*Config, error) {
|
||||||
return nil, fmt.Errorf("xds: Failed to parse bootstrap config: %v", err)
|
return nil, fmt.Errorf("xds: Failed to parse bootstrap config: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
serverSupportsV3 := false
|
var node *v3corepb.Node
|
||||||
m := jsonpb.Unmarshaler{AllowUnknownFields: true}
|
m := jsonpb.Unmarshaler{AllowUnknownFields: true}
|
||||||
for k, v := range jsonData {
|
for k, v := range jsonData {
|
||||||
switch k {
|
switch k {
|
||||||
|
@ -192,37 +319,14 @@ func NewConfigFromContents(data []byte) (*Config, error) {
|
||||||
// "build_version" field. In any case, the unmarshal will succeed
|
// "build_version" field. In any case, the unmarshal will succeed
|
||||||
// because we have set the `AllowUnknownFields` option on the
|
// because we have set the `AllowUnknownFields` option on the
|
||||||
// unmarshaler.
|
// unmarshaler.
|
||||||
n := &v3corepb.Node{}
|
node = &v3corepb.Node{}
|
||||||
if err := m.Unmarshal(bytes.NewReader(v), n); err != nil {
|
if err := m.Unmarshal(bytes.NewReader(v), node); err != nil {
|
||||||
return nil, fmt.Errorf("xds: jsonpb.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
|
return nil, fmt.Errorf("xds: jsonpb.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
|
||||||
}
|
}
|
||||||
config.NodeProto = n
|
|
||||||
case "xds_servers":
|
case "xds_servers":
|
||||||
var servers []*xdsServer
|
if err := json.Unmarshal(v, &config.XDSServer); err != nil {
|
||||||
if err := json.Unmarshal(v, &servers); err != nil {
|
|
||||||
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
|
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
|
||||||
}
|
}
|
||||||
if len(servers) < 1 {
|
|
||||||
return nil, fmt.Errorf("xds: bootstrap file parsing failed during bootstrap: file doesn't contain any management server to connect to")
|
|
||||||
}
|
|
||||||
xs := servers[0]
|
|
||||||
config.BalancerName = xs.ServerURI
|
|
||||||
for _, cc := range xs.ChannelCreds {
|
|
||||||
// We stop at the first credential type that we support.
|
|
||||||
if cc.Type == credsGoogleDefault {
|
|
||||||
config.Creds = grpc.WithCredentialsBundle(google.NewDefaultCredentials())
|
|
||||||
break
|
|
||||||
} else if cc.Type == credsInsecure {
|
|
||||||
config.Creds = grpc.WithTransportCredentials(insecure.NewCredentials())
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, f := range xs.ServerFeatures {
|
|
||||||
switch f {
|
|
||||||
case serverFeaturesV3:
|
|
||||||
serverSupportsV3 = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "certificate_providers":
|
case "certificate_providers":
|
||||||
var providerInstances map[string]json.RawMessage
|
var providerInstances map[string]json.RawMessage
|
||||||
if err := json.Unmarshal(v, &providerInstances); err != nil {
|
if err := json.Unmarshal(v, &providerInstances); err != nil {
|
||||||
|
@ -256,27 +360,58 @@ func NewConfigFromContents(data []byte) (*Config, error) {
|
||||||
if err := json.Unmarshal(v, &config.ServerListenerResourceNameTemplate); err != nil {
|
if err := json.Unmarshal(v, &config.ServerListenerResourceNameTemplate); err != nil {
|
||||||
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
|
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
|
||||||
}
|
}
|
||||||
|
case "client_default_listener_resource_name_template":
|
||||||
|
if !envconfig.XDSFederation {
|
||||||
|
logger.Warningf("xds: bootstrap field %v is not supported when Federation is disabled", k)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(v, &config.ClientDefaultListenerResourceNameTemplate); err != nil {
|
||||||
|
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
|
||||||
|
}
|
||||||
|
case "authorities":
|
||||||
|
if !envconfig.XDSFederation {
|
||||||
|
logger.Warningf("xds: bootstrap field %v is not supported when Federation is disabled", k)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(v, &config.Authorities); err != nil {
|
||||||
|
return nil, fmt.Errorf("xds: json.Unmarshal(%v) for field %q failed during bootstrap: %v", string(v), k, err)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
logger.Warningf("Bootstrap content has unknown field: %s", k)
|
||||||
}
|
}
|
||||||
// Do not fail the xDS bootstrap when an unknown field is seen. This can
|
// Do not fail the xDS bootstrap when an unknown field is seen. This can
|
||||||
// happen when an older version client reads a newer version bootstrap
|
// happen when an older version client reads a newer version bootstrap
|
||||||
// file with new fields.
|
// file with new fields.
|
||||||
}
|
}
|
||||||
|
|
||||||
if config.BalancerName == "" {
|
if config.ClientDefaultListenerResourceNameTemplate == "" {
|
||||||
|
// Default value of the default client listener name template is "%s".
|
||||||
|
config.ClientDefaultListenerResourceNameTemplate = "%s"
|
||||||
|
}
|
||||||
|
if config.XDSServer == nil {
|
||||||
|
return nil, fmt.Errorf("xds: Required field %q not found in bootstrap %s", "xds_servers", jsonData["xds_servers"])
|
||||||
|
}
|
||||||
|
if config.XDSServer.ServerURI == "" {
|
||||||
return nil, fmt.Errorf("xds: Required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"])
|
return nil, fmt.Errorf("xds: Required field %q not found in bootstrap %s", "xds_servers.server_uri", jsonData["xds_servers"])
|
||||||
}
|
}
|
||||||
if config.Creds == nil {
|
if config.XDSServer.Creds == nil {
|
||||||
return nil, fmt.Errorf("xds: Required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"])
|
return nil, fmt.Errorf("xds: Required field %q doesn't contain valid value in bootstrap %s", "xds_servers.channel_creds", jsonData["xds_servers"])
|
||||||
}
|
}
|
||||||
|
// Post-process the authorities' client listener resource template field:
|
||||||
// We end up using v3 transport protocol version only if the server supports
|
// - if set, it must start with "xdstp://<authority_name>/"
|
||||||
// v3, indicated by the presence of "xds_v3" in server_features. The default
|
// - if not set, it defaults to "xdstp://<authority_name>/envoy.config.listener.v3.Listener/%s"
|
||||||
// value of the enum type "version.TransportAPI" is v2.
|
for name, authority := range config.Authorities {
|
||||||
if serverSupportsV3 {
|
prefix := fmt.Sprintf("xdstp://%s", name)
|
||||||
config.TransportAPI = version.TransportV3
|
if authority.ClientListenerResourceNameTemplate == "" {
|
||||||
|
authority.ClientListenerResourceNameTemplate = prefix + "/envoy.config.listener.v3.Listener/%s"
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !strings.HasPrefix(authority.ClientListenerResourceNameTemplate, prefix) {
|
||||||
|
return nil, fmt.Errorf("xds: field ClientListenerResourceNameTemplate %q of authority %q doesn't start with prefix %q", authority.ClientListenerResourceNameTemplate, name, prefix)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
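The post-processing loop above can be exercised on its own: unset templates get the xdstp default for their authority, and templates that do not share the authority's prefix are rejected. A self-contained sketch of that check (operating on a plain map rather than the bootstrap Config):

package main

import (
	"fmt"
	"strings"
)

// normalizeTemplates applies the same two rules as the loop above: default the
// template to "xdstp://<name>/envoy.config.listener.v3.Listener/%s" when empty,
// and reject templates that do not start with "xdstp://<name>".
func normalizeTemplates(templates map[string]string) error {
	for name, tmpl := range templates {
		prefix := fmt.Sprintf("xdstp://%s", name)
		if tmpl == "" {
			templates[name] = prefix + "/envoy.config.listener.v3.Listener/%s"
			continue
		}
		if !strings.HasPrefix(tmpl, prefix) {
			return fmt.Errorf("template %q of authority %q doesn't start with prefix %q", tmpl, name, prefix)
		}
	}
	return nil
}

func main() {
	templates := map[string]string{
		"authority-a.example.org": "",
		"authority-b.example.org": "xdstp://authority-b.example.org/envoy.config.listener.v3.Listener/%s",
	}
	if err := normalizeTemplates(templates); err != nil {
		fmt.Println("bootstrap error:", err)
		return
	}
	for name, tmpl := range templates {
		fmt.Println(name, "->", tmpl)
	}
}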
|
||||||
if err := config.updateNodeProto(); err != nil {
|
if err := config.updateNodeProto(node); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
logger.Infof("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config))
|
logger.Infof("Bootstrap config for creating xds-client: %v", pretty.ToJSON(config))
|
||||||
|
@ -285,47 +420,57 @@ func NewConfigFromContents(data []byte) (*Config, error) {
|
||||||
|
|
||||||
// updateNodeProto updates the node proto read from the bootstrap file.
|
// updateNodeProto updates the node proto read from the bootstrap file.
|
||||||
//
|
//
|
||||||
// Node proto in Config contains a v3.Node protobuf message corresponding to the
|
// The input node is a v3.Node protobuf message corresponding to the JSON
|
||||||
// JSON contents found in the bootstrap file. This method performs some post
|
// contents found in the bootstrap file. This method performs some post
|
||||||
// processing on it:
|
// processing on it:
|
||||||
// 1. If we don't find a nodeProto in the bootstrap file, we create an empty one
|
// 1. If the node is nil, we create an empty one here. That way, callers of this
|
||||||
// here. That way, callers of this function can always expect that the NodeProto
|
// function can always expect that the NodeProto field is non-nil.
|
||||||
// field is non-nil.
|
// 2. Some additional fields which are not expected to be set in the bootstrap
|
||||||
// 2. If the transport protocol version to be used is not v3, we convert the
|
|
||||||
// current v3.Node proto in a v2.Node proto.
|
|
||||||
// 3. Some additional fields which are not expected to be set in the bootstrap
|
|
||||||
// file are populated here.
|
// file are populated here.
|
||||||
func (c *Config) updateNodeProto() error {
|
// 3. For each server config (both top level and in each authority), we set its
|
||||||
if c.TransportAPI == version.TransportV3 {
|
// node field to the v3.Node, or a v2.Node with the same content, depending on
|
||||||
v3, _ := c.NodeProto.(*v3corepb.Node)
|
// the server's transport API version.
|
||||||
|
func (c *Config) updateNodeProto(node *v3corepb.Node) error {
|
||||||
|
v3 := node
|
||||||
if v3 == nil {
|
if v3 == nil {
|
||||||
v3 = &v3corepb.Node{}
|
v3 = &v3corepb.Node{}
|
||||||
}
|
}
|
||||||
v3.UserAgentName = gRPCUserAgentName
|
v3.UserAgentName = gRPCUserAgentName
|
||||||
v3.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}
|
v3.UserAgentVersionType = &v3corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}
|
||||||
v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning)
|
v3.ClientFeatures = append(v3.ClientFeatures, clientFeatureNoOverprovisioning)
|
||||||
c.NodeProto = v3
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
v2 := &v2corepb.Node{}
|
v2 := &v2corepb.Node{}
|
||||||
if c.NodeProto != nil {
|
v3bytes, err := proto.Marshal(v3)
|
||||||
v3, err := proto.Marshal(c.NodeProto)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("xds: proto.Marshal(%v): %v", c.NodeProto, err)
|
return fmt.Errorf("xds: proto.Marshal(%v): %v", v3, err)
|
||||||
}
|
}
|
||||||
if err := proto.Unmarshal(v3, v2); err != nil {
|
if err := proto.Unmarshal(v3bytes, v2); err != nil {
|
||||||
return fmt.Errorf("xds: proto.Unmarshal(%v): %v", v3, err)
|
return fmt.Errorf("xds: proto.Unmarshal(%v): %v", v3bytes, err)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
c.NodeProto = v2
|
|
||||||
|
|
||||||
// BuildVersion is deprecated, and is replaced by user_agent_name and
|
// BuildVersion is deprecated, and is replaced by user_agent_name and
|
||||||
// user_agent_version. But the management servers are still using the old
|
// user_agent_version. But the management servers are still using the old
|
||||||
// field, so we will keep both set.
|
// field, so we will keep both set.
|
||||||
v2.BuildVersion = gRPCVersion
|
v2.BuildVersion = gRPCVersion
|
||||||
v2.UserAgentName = gRPCUserAgentName
|
|
||||||
v2.UserAgentVersionType = &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}
|
v2.UserAgentVersionType = &v2corepb.Node_UserAgentVersion{UserAgentVersion: grpc.Version}
|
||||||
v2.ClientFeatures = append(v2.ClientFeatures, clientFeatureNoOverprovisioning)
|
|
||||||
|
switch c.XDSServer.TransportAPI {
|
||||||
|
case version.TransportV2:
|
||||||
|
c.XDSServer.NodeProto = v2
|
||||||
|
case version.TransportV3:
|
||||||
|
c.XDSServer.NodeProto = v3
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, a := range c.Authorities {
|
||||||
|
if a.XDSServer == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
switch a.XDSServer.TransportAPI {
|
||||||
|
case version.TransportV2:
|
||||||
|
a.XDSServer.NodeProto = v2
|
||||||
|
case version.TransportV3:
|
||||||
|
a.XDSServer.NodeProto = v3
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
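The v2 node is produced by round-tripping the v3 proto through its wire form, which works because the two Node messages share field numbers. A rough sketch of that conversion step on its own, using the same imports as the file above (go-control-plane module versions may differ in your tree):

package main

import (
	"fmt"

	v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	"github.com/golang/protobuf/proto"
)

func main() {
	v3 := &v3corepb.Node{Id: "example-node", UserAgentName: "gRPC Go"}

	// Marshal the v3 Node and unmarshal the bytes into a v2 Node; the shared
	// field numbers make the wire formats compatible for the fields set here.
	v3bytes, err := proto.Marshal(v3)
	if err != nil {
		panic(err)
	}
	v2 := &v2corepb.Node{}
	if err := proto.Unmarshal(v3bytes, v2); err != nil {
		panic(err)
	}
	fmt.Println(v2.Id, v2.UserAgentName) // example-node gRPC Go
}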
47 vendor/google.golang.org/grpc/xds/internal/xdsclient/bootstrap/template.go generated vendored Normal file
|
@ -0,0 +1,47 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2021 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package bootstrap
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/url"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PopulateResourceTemplate populates the given template using the target
|
||||||
|
// string. "%s", if present in the template, will be replaced with target.
|
||||||
|
//
|
||||||
|
// If the template starts with "xdstp:", the replaced string will be %-encoded.
|
||||||
|
// But note that "/" is not percent encoded.
|
||||||
|
func PopulateResourceTemplate(template, target string) string {
|
||||||
|
if !strings.Contains(template, "%s") {
|
||||||
|
return template
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(template, "xdstp:") {
|
||||||
|
target = percentEncode(target)
|
||||||
|
}
|
||||||
|
return strings.Replace(template, "%s", target, -1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// percentEncode percent-encodes t, except for "/". See the tests for examples.
|
||||||
|
func percentEncode(t string) string {
|
||||||
|
segs := strings.Split(t, "/")
|
||||||
|
for i := range segs {
|
||||||
|
segs[i] = url.PathEscape(segs[i])
|
||||||
|
}
|
||||||
|
return strings.Join(segs, "/")
|
||||||
|
}
|
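To illustrate what the helpers above produce, here is a small standalone sketch that mirrors their behavior. It is reimplemented locally so it does not depend on the internal bootstrap package; the template and target values are examples only.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// populate mirrors PopulateResourceTemplate: replace "%s" with the target,
// percent-encoding the target (except "/") when the template is an xdstp name.
func populate(template, target string) string {
	if !strings.Contains(template, "%s") {
		return template
	}
	if strings.HasPrefix(template, "xdstp:") {
		segs := strings.Split(target, "/")
		for i := range segs {
			segs[i] = url.PathEscape(segs[i])
		}
		target = strings.Join(segs, "/")
	}
	return strings.Replace(template, "%s", target, -1)
}

func main() {
	// Old-style template: the target is substituted verbatim.
	fmt.Println(populate("%s", "example.com:443"))
	// xdstp template: the target is percent-encoded, but "/" is kept as-is.
	fmt.Println(populate("xdstp://authority-a.example.org/envoy.config.listener.v3.Listener/%s", "[::1]:8080"))
}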
290 vendor/google.golang.org/grpc/xds/internal/xdsclient/callback.go generated vendored
|
@ -19,128 +19,16 @@
|
||||||
package xdsclient
|
package xdsclient
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"google.golang.org/grpc/internal/pretty"
|
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
|
||||||
"google.golang.org/protobuf/proto"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type watcherInfoWithUpdate struct {
|
|
||||||
wi *watchInfo
|
|
||||||
update interface{}
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
// scheduleCallback should only be called by methods of watchInfo, which checks
|
|
||||||
// for watcher states and maintain consistency.
|
|
||||||
func (c *clientImpl) scheduleCallback(wi *watchInfo, update interface{}, err error) {
|
|
||||||
c.updateCh.Put(&watcherInfoWithUpdate{
|
|
||||||
wi: wi,
|
|
||||||
update: update,
|
|
||||||
err: err,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *clientImpl) callCallback(wiu *watcherInfoWithUpdate) {
|
|
||||||
c.mu.Lock()
|
|
||||||
// Use a closure to capture the callback and type assertion, to save one
|
|
||||||
// more switch case.
|
|
||||||
//
|
|
||||||
// The callback must be called without c.mu. Otherwise if the callback calls
|
|
||||||
// another watch() inline, it will cause a deadlock. This leaves a small
|
|
||||||
// window that a watcher's callback could be called after the watcher is
|
|
||||||
// canceled, and the user needs to take care of it.
|
|
||||||
var ccb func()
|
|
||||||
switch wiu.wi.rType {
|
|
||||||
case ListenerResource:
|
|
||||||
if s, ok := c.ldsWatchers[wiu.wi.target]; ok && s[wiu.wi] {
|
|
||||||
ccb = func() { wiu.wi.ldsCallback(wiu.update.(ListenerUpdate), wiu.err) }
|
|
||||||
}
|
|
||||||
case RouteConfigResource:
|
|
||||||
if s, ok := c.rdsWatchers[wiu.wi.target]; ok && s[wiu.wi] {
|
|
||||||
ccb = func() { wiu.wi.rdsCallback(wiu.update.(RouteConfigUpdate), wiu.err) }
|
|
||||||
}
|
|
||||||
case ClusterResource:
|
|
||||||
if s, ok := c.cdsWatchers[wiu.wi.target]; ok && s[wiu.wi] {
|
|
||||||
ccb = func() { wiu.wi.cdsCallback(wiu.update.(ClusterUpdate), wiu.err) }
|
|
||||||
}
|
|
||||||
case EndpointsResource:
|
|
||||||
if s, ok := c.edsWatchers[wiu.wi.target]; ok && s[wiu.wi] {
|
|
||||||
ccb = func() { wiu.wi.edsCallback(wiu.update.(EndpointsUpdate), wiu.err) }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.mu.Unlock()
|
|
||||||
|
|
||||||
if ccb != nil {
|
|
||||||
ccb()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewListeners is called by the underlying xdsAPIClient when it receives an
|
// NewListeners is called by the underlying xdsAPIClient when it receives an
|
||||||
// xDS response.
|
// xDS response.
|
||||||
//
|
//
|
||||||
// A response can contain multiple resources. They will be parsed and put in a
|
// A response can contain multiple resources. They will be parsed and put in a
|
||||||
// map from resource name to the resource content.
|
// map from resource name to the resource content.
|
||||||
func (c *clientImpl) NewListeners(updates map[string]ListenerUpdateErrTuple, metadata UpdateMetadata) {
|
func (c *clientImpl) NewListeners(updates map[string]xdsresource.ListenerUpdateErrTuple, metadata xdsresource.UpdateMetadata) {
|
||||||
c.mu.Lock()
|
c.pubsub.NewListeners(updates, metadata)
|
||||||
defer c.mu.Unlock()
|
|
||||||
|
|
||||||
c.ldsVersion = metadata.Version
|
|
||||||
if metadata.ErrState != nil {
|
|
||||||
c.ldsVersion = metadata.ErrState.Version
|
|
||||||
}
|
|
||||||
for name, uErr := range updates {
|
|
||||||
if s, ok := c.ldsWatchers[name]; ok {
|
|
||||||
if uErr.Err != nil {
|
|
||||||
// On error, keep previous version for each resource. But update
|
|
||||||
// status and error.
|
|
||||||
mdCopy := c.ldsMD[name]
|
|
||||||
mdCopy.ErrState = metadata.ErrState
|
|
||||||
mdCopy.Status = metadata.Status
|
|
||||||
c.ldsMD[name] = mdCopy
|
|
||||||
for wi := range s {
|
|
||||||
wi.newError(uErr.Err)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// If we get here, it means that the update is a valid one. Notify
|
|
||||||
// watchers only if this is a first time update or it is different
|
|
||||||
// from the one currently cached.
|
|
||||||
if cur, ok := c.ldsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) {
|
|
||||||
for wi := range s {
|
|
||||||
wi.newUpdate(uErr.Update)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Sync cache.
|
|
||||||
c.logger.Debugf("LDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr))
|
|
||||||
c.ldsCache[name] = uErr.Update
|
|
||||||
// Set status to ACK, and clear error state. The metadata might be a
|
|
||||||
// NACK metadata because some other resources in the same response
|
|
||||||
// are invalid.
|
|
||||||
mdCopy := metadata
|
|
||||||
mdCopy.Status = ServiceStatusACKed
|
|
||||||
mdCopy.ErrState = nil
|
|
||||||
if metadata.ErrState != nil {
|
|
||||||
mdCopy.Version = metadata.ErrState.Version
|
|
||||||
}
|
|
||||||
c.ldsMD[name] = mdCopy
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Resources not in the new update were removed by the server, so delete
|
|
||||||
// them.
|
|
||||||
for name := range c.ldsCache {
|
|
||||||
if _, ok := updates[name]; !ok {
|
|
||||||
// If resource exists in cache, but not in the new update, delete
|
|
||||||
// the resource from cache, and also send an resource not found
|
|
||||||
// error to indicate resource removed.
|
|
||||||
delete(c.ldsCache, name)
|
|
||||||
c.ldsMD[name] = UpdateMetadata{Status: ServiceStatusNotExist}
|
|
||||||
for wi := range c.ldsWatchers[name] {
|
|
||||||
wi.resourceNotFound()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// When LDS resource is removed, we don't delete corresponding RDS cached
|
|
||||||
// data. The RDS watch will be canceled, and cache entry is removed when the
|
|
||||||
// last watch is canceled.
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewRouteConfigs is called by the underlying xdsAPIClient when it receives an
|
// NewRouteConfigs is called by the underlying xdsAPIClient when it receives an
|
||||||
|
@ -148,52 +36,8 @@ func (c *clientImpl) NewListeners(updates map[string]ListenerUpdateErrTuple, met
|
||||||
//
|
//
|
||||||
// A response can contain multiple resources. They will be parsed and put in a
|
// A response can contain multiple resources. They will be parsed and put in a
|
||||||
// map from resource name to the resource content.
|
// map from resource name to the resource content.
|
||||||
func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdateErrTuple, metadata UpdateMetadata) {
|
func (c *clientImpl) NewRouteConfigs(updates map[string]xdsresource.RouteConfigUpdateErrTuple, metadata xdsresource.UpdateMetadata) {
|
||||||
c.mu.Lock()
|
c.pubsub.NewRouteConfigs(updates, metadata)
|
||||||
defer c.mu.Unlock()
|
|
||||||
|
|
||||||
// If no error received, the status is ACK.
|
|
||||||
c.rdsVersion = metadata.Version
|
|
||||||
if metadata.ErrState != nil {
|
|
||||||
c.rdsVersion = metadata.ErrState.Version
|
|
||||||
}
|
|
||||||
for name, uErr := range updates {
|
|
||||||
if s, ok := c.rdsWatchers[name]; ok {
|
|
||||||
if uErr.Err != nil {
|
|
||||||
// On error, keep previous version for each resource. But update
|
|
||||||
// status and error.
|
|
||||||
mdCopy := c.rdsMD[name]
|
|
||||||
mdCopy.ErrState = metadata.ErrState
|
|
||||||
mdCopy.Status = metadata.Status
|
|
||||||
c.rdsMD[name] = mdCopy
|
|
||||||
for wi := range s {
|
|
||||||
wi.newError(uErr.Err)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// If we get here, it means that the update is a valid one. Notify
|
|
||||||
// watchers only if this is a first time update or it is different
|
|
||||||
// from the one currently cached.
|
|
||||||
if cur, ok := c.rdsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) {
|
|
||||||
for wi := range s {
|
|
||||||
wi.newUpdate(uErr.Update)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Sync cache.
|
|
||||||
c.logger.Debugf("RDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr))
|
|
||||||
c.rdsCache[name] = uErr.Update
|
|
||||||
// Set status to ACK, and clear error state. The metadata might be a
|
|
||||||
// NACK metadata because some other resources in the same response
|
|
||||||
// are invalid.
|
|
||||||
mdCopy := metadata
|
|
||||||
mdCopy.Status = ServiceStatusACKed
|
|
||||||
mdCopy.ErrState = nil
|
|
||||||
if metadata.ErrState != nil {
|
|
||||||
mdCopy.Version = metadata.ErrState.Version
|
|
||||||
}
|
|
||||||
c.rdsMD[name] = mdCopy
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClusters is called by the underlying xdsAPIClient when it receives an xDS
|
// NewClusters is called by the underlying xdsAPIClient when it receives an xDS
|
||||||
|
@ -201,70 +45,8 @@ func (c *clientImpl) NewRouteConfigs(updates map[string]RouteConfigUpdateErrTupl
|
||||||
//
|
//
|
||||||
// A response can contain multiple resources. They will be parsed and put in a
|
// A response can contain multiple resources. They will be parsed and put in a
|
||||||
// map from resource name to the resource content.
|
// map from resource name to the resource content.
|
||||||
func (c *clientImpl) NewClusters(updates map[string]ClusterUpdateErrTuple, metadata UpdateMetadata) {
|
func (c *clientImpl) NewClusters(updates map[string]xdsresource.ClusterUpdateErrTuple, metadata xdsresource.UpdateMetadata) {
|
||||||
c.mu.Lock()
|
c.pubsub.NewClusters(updates, metadata)
|
||||||
defer c.mu.Unlock()
|
|
||||||
|
|
||||||
c.cdsVersion = metadata.Version
|
|
||||||
if metadata.ErrState != nil {
|
|
||||||
c.cdsVersion = metadata.ErrState.Version
|
|
||||||
}
|
|
||||||
for name, uErr := range updates {
|
|
||||||
if s, ok := c.cdsWatchers[name]; ok {
|
|
||||||
if uErr.Err != nil {
|
|
||||||
// On error, keep previous version for each resource. But update
|
|
||||||
// status and error.
|
|
||||||
mdCopy := c.cdsMD[name]
|
|
||||||
mdCopy.ErrState = metadata.ErrState
|
|
||||||
mdCopy.Status = metadata.Status
|
|
||||||
c.cdsMD[name] = mdCopy
|
|
||||||
for wi := range s {
|
|
||||||
// Send the watcher the individual error, instead of the
|
|
||||||
// overall combined error from the metadata.ErrState.
|
|
||||||
wi.newError(uErr.Err)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// If we get here, it means that the update is a valid one. Notify
|
|
||||||
// watchers only if this is a first time update or it is different
|
|
||||||
// from the one currently cached.
|
|
||||||
if cur, ok := c.cdsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) {
|
|
||||||
for wi := range s {
|
|
||||||
wi.newUpdate(uErr.Update)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Sync cache.
|
|
||||||
c.logger.Debugf("CDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr))
|
|
||||||
c.cdsCache[name] = uErr.Update
|
|
||||||
// Set status to ACK, and clear error state. The metadata might be a
|
|
||||||
// NACK metadata because some other resources in the same response
|
|
||||||
// are invalid.
|
|
||||||
mdCopy := metadata
|
|
||||||
mdCopy.Status = ServiceStatusACKed
|
|
||||||
mdCopy.ErrState = nil
|
|
||||||
if metadata.ErrState != nil {
|
|
||||||
mdCopy.Version = metadata.ErrState.Version
|
|
||||||
}
|
|
||||||
c.cdsMD[name] = mdCopy
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Resources not in the new update were removed by the server, so delete
|
|
||||||
// them.
|
|
||||||
for name := range c.cdsCache {
|
|
||||||
if _, ok := updates[name]; !ok {
|
|
||||||
// If resource exists in cache, but not in the new update, delete it
|
|
||||||
// from cache, and also send a resource-not-found error to indicate the
|
|
||||||
// resource was removed.
|
|
||||||
delete(c.cdsCache, name)
|
|
||||||
c.ldsMD[name] = UpdateMetadata{Status: ServiceStatusNotExist}
|
|
||||||
for wi := range c.cdsWatchers[name] {
|
|
||||||
wi.resourceNotFound()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// When CDS resource is removed, we don't delete corresponding EDS cached
|
|
||||||
// data. The EDS watch will be canceled, and the cache entry is removed when the
|
|
||||||
// last watch is canceled.
|
|
||||||
}
|
}
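The removed CDS branch above applies state-of-the-world semantics: any cached resource that is absent from the latest update is treated as deleted by the server, and its watchers are told the resource no longer exists. A minimal, self-contained sketch of that deletion pass follows; the names sotwDelete, cache, and updates are illustrative and not part of the vendored package.

package main

import "fmt"

// sotwDelete removes every cached entry that is absent from the latest
// state-of-the-world update and reports which names were deleted. It mirrors
// the loop above in spirit only; the real client also updates per-resource
// metadata and notifies each registered watcher with resourceNotFound().
func sotwDelete(cache map[string]string, updates map[string]string) []string {
	var removed []string
	for name := range cache {
		if _, ok := updates[name]; !ok {
			delete(cache, name)
			removed = append(removed, name)
		}
	}
	return removed
}

func main() {
	cache := map[string]string{"cluster-a": "v1", "cluster-b": "v1"}
	updates := map[string]string{"cluster-a": "v2"} // cluster-b was removed by the server
	fmt.Println(sotwDelete(cache, updates))         // [cluster-b]
}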
|
||||||
|
|
||||||
// NewEndpoints is called by the underlying xdsAPIClient when it receives an
|
// NewEndpoints is called by the underlying xdsAPIClient when it receives an
|
||||||
|
@@ -272,64 +54,12 @@ func (c *clientImpl) NewClusters(updates map[string]ClusterUpdateErrTuple, metad
|
||||||
//
|
//
|
||||||
// A response can contain multiple resources. They will be parsed and put in a
|
// A response can contain multiple resources. They will be parsed and put in a
|
||||||
// map from resource name to the resource content.
|
// map from resource name to the resource content.
|
||||||
func (c *clientImpl) NewEndpoints(updates map[string]EndpointsUpdateErrTuple, metadata UpdateMetadata) {
|
func (c *clientImpl) NewEndpoints(updates map[string]xdsresource.EndpointsUpdateErrTuple, metadata xdsresource.UpdateMetadata) {
|
||||||
c.mu.Lock()
|
c.pubsub.NewEndpoints(updates, metadata)
|
||||||
defer c.mu.Unlock()
|
|
||||||
|
|
||||||
c.edsVersion = metadata.Version
|
|
||||||
if metadata.ErrState != nil {
|
|
||||||
c.edsVersion = metadata.ErrState.Version
|
|
||||||
}
|
|
||||||
for name, uErr := range updates {
|
|
||||||
if s, ok := c.edsWatchers[name]; ok {
|
|
||||||
if uErr.Err != nil {
|
|
||||||
// On error, keep previous version for each resource. But update
|
|
||||||
// status and error.
|
|
||||||
mdCopy := c.edsMD[name]
|
|
||||||
mdCopy.ErrState = metadata.ErrState
|
|
||||||
mdCopy.Status = metadata.Status
|
|
||||||
c.edsMD[name] = mdCopy
|
|
||||||
for wi := range s {
|
|
||||||
// Send the watcher the individual error, instead of the
|
|
||||||
// overall combined error from the metadata.ErrState.
|
|
||||||
wi.newError(uErr.Err)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// If we get here, it means that the update is a valid one. Notify
|
|
||||||
// watchers only if this is a first time update or it is different
|
|
||||||
// from the one currently cached.
|
|
||||||
if cur, ok := c.edsCache[name]; !ok || !proto.Equal(cur.Raw, uErr.Update.Raw) {
|
|
||||||
for wi := range s {
|
|
||||||
wi.newUpdate(uErr.Update)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Sync cache.
|
|
||||||
c.logger.Debugf("EDS resource with name %v, value %+v added to cache", name, pretty.ToJSON(uErr))
|
|
||||||
c.edsCache[name] = uErr.Update
|
|
||||||
// Set status to ACK, and clear error state. The metadata might be a
|
|
||||||
// NACK metadata because some other resources in the same response
|
|
||||||
// are invalid.
|
|
||||||
mdCopy := metadata
|
|
||||||
mdCopy.Status = ServiceStatusACKed
|
|
||||||
mdCopy.ErrState = nil
|
|
||||||
if metadata.ErrState != nil {
|
|
||||||
mdCopy.Version = metadata.ErrState.Version
|
|
||||||
}
|
|
||||||
c.edsMD[name] = mdCopy
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewConnectionError is called by the underlying xdsAPIClient when it receives
|
// NewConnectionError is called by the underlying xdsAPIClient when it receives
|
||||||
// a connection error. The error will be forwarded to all the resource watchers.
|
// a connection error. The error will be forwarded to all the resource watchers.
|
||||||
func (c *clientImpl) NewConnectionError(err error) {
|
func (c *clientImpl) NewConnectionError(err error) {
|
||||||
c.mu.Lock()
|
c.pubsub.NewConnectionError(err)
|
||||||
defer c.mu.Unlock()
|
|
||||||
|
|
||||||
for _, s := range c.edsWatchers {
|
|
||||||
for wi := range s {
|
|
||||||
wi.newError(NewErrorf(ErrorTypeConnection, "xds: error received from xDS stream: %v", err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
787 vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go generated vendored
|
@@ -21,567 +21,16 @@
|
||||||
package xdsclient
|
package xdsclient
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"regexp"
|
|
||||||
"sync"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
|
|
||||||
v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
|
|
||||||
"github.com/golang/protobuf/proto"
|
|
||||||
"google.golang.org/protobuf/types/known/anypb"
|
|
||||||
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/internal/xds/matcher"
|
|
||||||
"google.golang.org/grpc/xds/internal/httpfilter"
|
|
||||||
"google.golang.org/grpc/xds/internal/xdsclient/load"
|
|
||||||
|
|
||||||
"google.golang.org/grpc"
|
|
||||||
"google.golang.org/grpc/internal/backoff"
|
|
||||||
"google.golang.org/grpc/internal/buffer"
|
|
||||||
"google.golang.org/grpc/internal/grpclog"
|
"google.golang.org/grpc/internal/grpclog"
|
||||||
"google.golang.org/grpc/internal/grpcsync"
|
"google.golang.org/grpc/internal/grpcsync"
|
||||||
"google.golang.org/grpc/keepalive"
|
|
||||||
"google.golang.org/grpc/xds/internal"
|
|
||||||
"google.golang.org/grpc/xds/internal/version"
|
|
||||||
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
|
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
|
||||||
|
"google.golang.org/grpc/xds/internal/xdsclient/pubsub"
|
||||||
|
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
m = make(map[version.TransportAPI]APIClientBuilder)
|
|
||||||
)
|
|
||||||
|
|
||||||
// RegisterAPIClientBuilder registers a client builder for xDS transport protocol
|
|
||||||
// version specified by b.Version().
|
|
||||||
//
|
|
||||||
// NOTE: this function must only be called during initialization time (i.e. in
|
|
||||||
// an init() function), and is not thread-safe. If multiple builders are
|
|
||||||
// registered for the same version, the one registered last will take effect.
|
|
||||||
func RegisterAPIClientBuilder(b APIClientBuilder) {
|
|
||||||
m[b.Version()] = b
|
|
||||||
}
|
|
||||||
|
|
||||||
// getAPIClientBuilder returns the client builder registered for the provided
|
|
||||||
// xDS transport API version.
|
|
||||||
func getAPIClientBuilder(version version.TransportAPI) APIClientBuilder {
|
|
||||||
if b, ok := m[version]; ok {
|
|
||||||
return b
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateValidatorFunc performs validations on update structs using
|
|
||||||
// context/logic available at the xdsClient layer. Since these validations are
|
|
||||||
// performed on internal update structs, they can be shared between different
|
|
||||||
// API clients.
|
|
||||||
type UpdateValidatorFunc func(interface{}) error
|
|
||||||
|
|
||||||
// BuildOptions contains options to be passed to client builders.
|
|
||||||
type BuildOptions struct {
|
|
||||||
// Parent is a top-level xDS client which has the intelligence to take
|
|
||||||
// appropriate action based on xDS responses received from the management
|
|
||||||
// server.
|
|
||||||
Parent UpdateHandler
|
|
||||||
// Validator performs post unmarshal validation checks.
|
|
||||||
Validator UpdateValidatorFunc
|
|
||||||
// NodeProto contains the Node proto to be used in xDS requests. The actual
|
|
||||||
// type depends on the transport protocol version used.
|
|
||||||
NodeProto proto.Message
|
|
||||||
// Backoff returns the amount of time to backoff before retrying broken
|
|
||||||
// streams.
|
|
||||||
Backoff func(int) time.Duration
|
|
||||||
// Logger provides enhanced logging capabilities.
|
|
||||||
Logger *grpclog.PrefixLogger
|
|
||||||
}
|
|
||||||
|
|
||||||
// APIClientBuilder creates an xDS client for a specific xDS transport protocol
|
|
||||||
// version.
|
|
||||||
type APIClientBuilder interface {
|
|
||||||
// Build builds a transport protocol specific implementation of the xDS
|
|
||||||
// client based on the provided clientConn to the management server and the
|
|
||||||
// provided options.
|
|
||||||
Build(*grpc.ClientConn, BuildOptions) (APIClient, error)
|
|
||||||
// Version returns the xDS transport protocol version used by clients built
|
|
||||||
// using this builder.
|
|
||||||
Version() version.TransportAPI
|
|
||||||
}
|
|
||||||
|
|
||||||
// APIClient represents the functionality provided by transport protocol
|
|
||||||
// version specific implementations of the xDS client.
|
|
||||||
//
|
|
||||||
// TODO: unexport this interface and all the methods after the PR to make
|
|
||||||
// xdsClient sharable by clients. AddWatch and RemoveWatch are exported for
|
|
||||||
// v2/v3 to override because they need to keep track of LDS name for RDS to use.
|
|
||||||
// After the shared-xdsClient change, that's no longer necessary. After that, we
|
|
||||||
// will still keep this interface for testing purposes.
|
|
||||||
type APIClient interface {
|
|
||||||
// AddWatch adds a watch for an xDS resource given its type and name.
|
|
||||||
AddWatch(ResourceType, string)
|
|
||||||
|
|
||||||
// RemoveWatch cancels an already registered watch for an xDS resource
|
|
||||||
// given its type and name.
|
|
||||||
RemoveWatch(ResourceType, string)
|
|
||||||
|
|
||||||
// reportLoad starts an LRS stream to periodically report load using the
|
|
||||||
// provided ClientConn, which represent a connection to the management
|
|
||||||
// server.
|
|
||||||
reportLoad(ctx context.Context, cc *grpc.ClientConn, opts loadReportingOptions)
|
|
||||||
|
|
||||||
// Close cleans up resources allocated by the API client.
|
|
||||||
Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// loadReportingOptions contains configuration knobs for reporting load data.
|
|
||||||
type loadReportingOptions struct {
|
|
||||||
loadStore *load.Store
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateHandler receives and processes (by taking appropriate actions) xDS
|
|
||||||
// resource updates from an APIClient for a specific version.
|
|
||||||
type UpdateHandler interface {
|
|
||||||
// NewListeners handles updates to xDS listener resources.
|
|
||||||
NewListeners(map[string]ListenerUpdateErrTuple, UpdateMetadata)
|
|
||||||
// NewRouteConfigs handles updates to xDS RouteConfiguration resources.
|
|
||||||
NewRouteConfigs(map[string]RouteConfigUpdateErrTuple, UpdateMetadata)
|
|
||||||
// NewClusters handles updates to xDS Cluster resources.
|
|
||||||
NewClusters(map[string]ClusterUpdateErrTuple, UpdateMetadata)
|
|
||||||
// NewEndpoints handles updates to xDS ClusterLoadAssignment (or tersely
|
|
||||||
// referred to as Endpoints) resources.
|
|
||||||
NewEndpoints(map[string]EndpointsUpdateErrTuple, UpdateMetadata)
|
|
||||||
// NewConnectionError handles connection errors from the xDS stream. The
|
|
||||||
// error will be reported to all the resource watchers.
|
|
||||||
NewConnectionError(err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServiceStatus is the status of the update.
|
|
||||||
type ServiceStatus int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ServiceStatusUnknown is the default state, before a watch is started for
|
|
||||||
// the resource.
|
|
||||||
ServiceStatusUnknown ServiceStatus = iota
|
|
||||||
// ServiceStatusRequested is when the watch is started, but before any
|
|
||||||
// response is received.
|
|
||||||
ServiceStatusRequested
|
|
||||||
// ServiceStatusNotExist is when the resource doesn't exist in
|
|
||||||
// state-of-the-world responses (e.g. LDS and CDS), which means the resource
|
|
||||||
// is removed by the management server.
|
|
||||||
ServiceStatusNotExist // Resource is removed in the server, in LDS/CDS.
|
|
||||||
// ServiceStatusACKed is when the resource is ACKed.
|
|
||||||
ServiceStatusACKed
|
|
||||||
// ServiceStatusNACKed is when the resource is NACKed.
|
|
||||||
ServiceStatusNACKed
|
|
||||||
)
|
|
||||||
|
|
||||||
// UpdateErrorMetadata is part of UpdateMetadata. It contains the error state
|
|
||||||
// when a response is NACKed.
|
|
||||||
type UpdateErrorMetadata struct {
|
|
||||||
// Version is the version of the NACKed response.
|
|
||||||
Version string
|
|
||||||
// Err contains why the response was NACKed.
|
|
||||||
Err error
|
|
||||||
// Timestamp is when the NACKed response was received.
|
|
||||||
Timestamp time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
// UpdateMetadata contains the metadata for each update, including timestamp,
|
|
||||||
// raw message, and so on.
|
|
||||||
type UpdateMetadata struct {
|
|
||||||
// Status is the status of this resource, e.g. ACKed, NACKed, or
|
|
||||||
// Not_exist(removed).
|
|
||||||
Status ServiceStatus
|
|
||||||
// Version is the version of the xds response. Note that this is the version
|
|
||||||
// of the resource in use (the previously ACKed one). If a response is NACKed, the
|
|
||||||
// NACKed version is in ErrState.
|
|
||||||
Version string
|
|
||||||
// Timestamp is when the response is received.
|
|
||||||
Timestamp time.Time
|
|
||||||
// ErrState is set when the update is NACKed.
|
|
||||||
ErrState *UpdateErrorMetadata
|
|
||||||
}
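UpdateMetadata carries the in-use version, while a NACKed response stores the rejected version in ErrState. The handlers above repeatedly apply the same rule when recording versions for CSDS. A hedged sketch of that bookkeeping, using stand-in types rather than the vendored ones:

package main

import "fmt"

// Stand-ins for the UpdateMetadata/UpdateErrorMetadata fields used here.
type errState struct{ Version string }

type updateMetadata struct {
	Version  string
	ErrState *errState
}

// effectiveVersion returns the version to record: the NACKed version when the
// update carries an error state, otherwise the ACKed version, matching the
// pattern used by the handlers above.
func effectiveVersion(md updateMetadata) string {
	if md.ErrState != nil {
		return md.ErrState.Version
	}
	return md.Version
}

func main() {
	fmt.Println(effectiveVersion(updateMetadata{Version: "41"}))                            // 41
	fmt.Println(effectiveVersion(updateMetadata{Version: "41", ErrState: &errState{"42"}})) // 42
}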
|
|
||||||
|
|
||||||
// ListenerUpdate contains information received in an LDS response, which is of
|
|
||||||
// interest to the registered LDS watcher.
|
|
||||||
type ListenerUpdate struct {
|
|
||||||
// RouteConfigName is the route configuration name corresponding to the
|
|
||||||
// target which is being watched through LDS.
|
|
||||||
//
|
|
||||||
// Only one of RouteConfigName and InlineRouteConfig is set.
|
|
||||||
RouteConfigName string
|
|
||||||
// InlineRouteConfig is the inline route configuration (RDS response)
|
|
||||||
// returned inside LDS.
|
|
||||||
//
|
|
||||||
// Only one of RouteConfigName and InlineRouteConfig is set.
|
|
||||||
InlineRouteConfig *RouteConfigUpdate
|
|
||||||
|
|
||||||
// MaxStreamDuration contains the HTTP connection manager's
|
|
||||||
// common_http_protocol_options.max_stream_duration field, or zero if
|
|
||||||
// unset.
|
|
||||||
MaxStreamDuration time.Duration
|
|
||||||
// HTTPFilters is a list of HTTP filters (name, config) from the LDS
|
|
||||||
// response.
|
|
||||||
HTTPFilters []HTTPFilter
|
|
||||||
// InboundListenerCfg contains inbound listener configuration.
|
|
||||||
InboundListenerCfg *InboundListenerConfig
|
|
||||||
|
|
||||||
// Raw is the resource from the xds response.
|
|
||||||
Raw *anypb.Any
|
|
||||||
}
|
|
||||||
|
|
||||||
// HTTPFilter represents one HTTP filter from an LDS response's HTTP connection
|
|
||||||
// manager field.
|
|
||||||
type HTTPFilter struct {
|
|
||||||
// Name is an arbitrary name of the filter. Used for applying override
|
|
||||||
// settings in virtual host / route / weighted cluster configuration (not
|
|
||||||
// yet supported).
|
|
||||||
Name string
|
|
||||||
// Filter is the HTTP filter found in the registry for the config type.
|
|
||||||
Filter httpfilter.Filter
|
|
||||||
// Config contains the filter's configuration
|
|
||||||
Config httpfilter.FilterConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// InboundListenerConfig contains information about the inbound listener, i.e.
|
|
||||||
// the server-side listener.
|
|
||||||
type InboundListenerConfig struct {
|
|
||||||
// Address is the local address on which the inbound listener is expected to
|
|
||||||
// accept incoming connections.
|
|
||||||
Address string
|
|
||||||
// Port is the local port on which the inbound listener is expected to
|
|
||||||
// accept incoming connections.
|
|
||||||
Port string
|
|
||||||
// FilterChains is the list of filter chains associated with this listener.
|
|
||||||
FilterChains *FilterChainManager
|
|
||||||
}
|
|
||||||
|
|
||||||
// RouteConfigUpdate contains information received in an RDS response, which is
|
|
||||||
// of interest to the registered RDS watcher.
|
|
||||||
type RouteConfigUpdate struct {
|
|
||||||
VirtualHosts []*VirtualHost
|
|
||||||
// Raw is the resource from the xds response.
|
|
||||||
Raw *anypb.Any
|
|
||||||
}
|
|
||||||
|
|
||||||
// VirtualHost contains the routes for a list of Domains.
|
|
||||||
//
|
|
||||||
// Note that the domains in this slice can be a wildcard, not an exact string.
|
|
||||||
// The consumer of this struct needs to find the best match for its hostname.
|
|
||||||
type VirtualHost struct {
|
|
||||||
Domains []string
|
|
||||||
// Routes contains a list of routes, each containing matchers and
|
|
||||||
// corresponding action.
|
|
||||||
Routes []*Route
|
|
||||||
// HTTPFilterConfigOverride contains any HTTP filter config overrides for
|
|
||||||
// the virtual host which may be present. An individual filter's override
|
|
||||||
// may be unused if the matching Route contains an override for that
|
|
||||||
// filter.
|
|
||||||
HTTPFilterConfigOverride map[string]httpfilter.FilterConfig
|
|
||||||
RetryConfig *RetryConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// RetryConfig contains all retry-related configuration in either a VirtualHost
|
|
||||||
// or Route.
|
|
||||||
type RetryConfig struct {
|
|
||||||
// RetryOn is a set of status codes on which to retry. Only Canceled,
|
|
||||||
// DeadlineExceeded, Internal, ResourceExhausted, and Unavailable are
|
|
||||||
// supported; any other values will be omitted.
|
|
||||||
RetryOn map[codes.Code]bool
|
|
||||||
NumRetries uint32 // maximum number of retry attempts
|
|
||||||
RetryBackoff RetryBackoff // retry backoff policy
|
|
||||||
}
|
|
||||||
|
|
||||||
// RetryBackoff describes the backoff policy for retries.
|
|
||||||
type RetryBackoff struct {
|
|
||||||
BaseInterval time.Duration // initial backoff duration between attempts
|
|
||||||
MaxInterval time.Duration // maximum backoff duration
|
|
||||||
}
|
|
||||||
|
|
||||||
// HashPolicyType specifies the type of HashPolicy from a received RDS Response.
|
|
||||||
type HashPolicyType int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// HashPolicyTypeHeader specifies to hash a Header in the incoming request.
|
|
||||||
HashPolicyTypeHeader HashPolicyType = iota
|
|
||||||
// HashPolicyTypeChannelID specifies to hash a unique Identifier of the
|
|
||||||
// Channel. In grpc-go, this will be done using the ClientConn pointer.
|
|
||||||
HashPolicyTypeChannelID
|
|
||||||
)
|
|
||||||
|
|
||||||
// HashPolicy specifies the HashPolicy if the upstream cluster uses a hashing
|
|
||||||
// load balancer.
|
|
||||||
type HashPolicy struct {
|
|
||||||
HashPolicyType HashPolicyType
|
|
||||||
Terminal bool
|
|
||||||
// Fields used for type HEADER.
|
|
||||||
HeaderName string
|
|
||||||
Regex *regexp.Regexp
|
|
||||||
RegexSubstitution string
|
|
||||||
}
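The header-related HashPolicy fields are meant to be consumed together: the header value is optionally rewritten via Regex and RegexSubstitution before it is hashed for ring_hash load balancing. The sketch below is illustrative only; the choice of FNV-1a and the helper name headerHash are assumptions, and the real hash function and policy-combining rules live elsewhere in gRPC, not in this diff.

package main

import (
	"fmt"
	"hash/fnv"
	"regexp"
)

// headerHash is an illustrative take on the HeaderName/Regex/RegexSubstitution
// fields: rewrite the header value when a regex is configured, then hash the
// result. FNV-1a is an assumption made for this sketch only.
func headerHash(value string, re *regexp.Regexp, substitution string) uint64 {
	if re != nil {
		value = re.ReplaceAllString(value, substitution)
	}
	h := fnv.New64a()
	h.Write([]byte(value))
	return h.Sum64()
}

func main() {
	re := regexp.MustCompile(`^user-(\d+)$`)
	// Requests whose rewritten key is identical hash to the same ring position.
	fmt.Println(headerHash("user-42", re, "$1"))
	fmt.Println(headerHash("user-42", nil, ""))
}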
|
|
||||||
|
|
||||||
// RouteAction is the action of the route from a received RDS response.
|
|
||||||
type RouteAction int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// RouteActionUnsupported are routing types currently unsupported by grpc.
|
|
||||||
// According to A36, "A Route with an inappropriate action causes RPCs
|
|
||||||
// matching that route to fail."
|
|
||||||
RouteActionUnsupported RouteAction = iota
|
|
||||||
// RouteActionRoute is the expected route type on the client side. Route
|
|
||||||
// represents routing a request to some upstream cluster. On the client
|
|
||||||
// side, if an RPC matches to a route that is not RouteActionRoute, the RPC
|
|
||||||
// will fail according to A36.
|
|
||||||
RouteActionRoute
|
|
||||||
// RouteActionNonForwardingAction is the expected route type on the server
|
|
||||||
// side. NonForwardingAction represents when a route will generate a
|
|
||||||
// response directly, without forwarding to an upstream host.
|
|
||||||
RouteActionNonForwardingAction
|
|
||||||
)
|
|
||||||
|
|
||||||
// Route is both a specification of how to match a request as well as an
|
|
||||||
// indication of the action to take upon match.
|
|
||||||
type Route struct {
|
|
||||||
Path *string
|
|
||||||
Prefix *string
|
|
||||||
Regex *regexp.Regexp
|
|
||||||
// Indicates if prefix/path matching should be case insensitive. The default
|
|
||||||
// is false (case sensitive).
|
|
||||||
CaseInsensitive bool
|
|
||||||
Headers []*HeaderMatcher
|
|
||||||
Fraction *uint32
|
|
||||||
|
|
||||||
HashPolicies []*HashPolicy
|
|
||||||
|
|
||||||
// If the matchers above indicate a match, the below configuration is used.
|
|
||||||
WeightedClusters map[string]WeightedCluster
|
|
||||||
// If MaxStreamDuration is nil, it indicates neither of the route action's
|
|
||||||
// max_stream_duration fields (grpc_timeout_header_max nor
|
|
||||||
// max_stream_duration) were set. In this case, the ListenerUpdate's
|
|
||||||
// MaxStreamDuration field should be used. If MaxStreamDuration is set to
|
|
||||||
// an explicit zero duration, the application's deadline should be used.
|
|
||||||
MaxStreamDuration *time.Duration
|
|
||||||
// HTTPFilterConfigOverride contains any HTTP filter config overrides for
|
|
||||||
// the route which may be present. An individual filter's override may be
|
|
||||||
// unused if the matching WeightedCluster contains an override for that
|
|
||||||
// filter.
|
|
||||||
HTTPFilterConfigOverride map[string]httpfilter.FilterConfig
|
|
||||||
RetryConfig *RetryConfig
|
|
||||||
|
|
||||||
RouteAction RouteAction
|
|
||||||
}
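The MaxStreamDuration comment in the struct above encodes a three-way rule: nil falls back to the listener-level value, an explicit zero defers to the application's own deadline, and any other value caps the stream. A small helper expressing that rule as a sketch; the function name and signature are invented for illustration.

package main

import (
	"fmt"
	"time"
)

// streamTimeout returns the timeout to apply to an RPC and whether any timeout
// should be applied at all, following the rule documented on
// Route.MaxStreamDuration: nil -> listener default, explicit 0 -> rely on the
// application's own deadline, otherwise -> the route's value.
func streamTimeout(routeMax *time.Duration, listenerMax time.Duration) (time.Duration, bool) {
	if routeMax == nil {
		return listenerMax, listenerMax != 0
	}
	if *routeMax == 0 {
		return 0, false // defer entirely to the application's deadline
	}
	return *routeMax, true
}

func main() {
	five := 5 * time.Second
	zero := time.Duration(0)
	fmt.Println(streamTimeout(nil, 30*time.Second))   // 30s true
	fmt.Println(streamTimeout(&zero, 30*time.Second)) // 0s false
	fmt.Println(streamTimeout(&five, 30*time.Second)) // 5s true
}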
|
|
||||||
|
|
||||||
// WeightedCluster contains settings for an xds RouteAction.WeightedCluster.
|
|
||||||
type WeightedCluster struct {
|
|
||||||
// Weight is the relative weight of the cluster. It will never be zero.
|
|
||||||
Weight uint32
|
|
||||||
// HTTPFilterConfigOverride contains any HTTP filter config overrides for
|
|
||||||
// the weighted cluster which may be present.
|
|
||||||
HTTPFilterConfigOverride map[string]httpfilter.FilterConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// HeaderMatcher represents header matchers.
|
|
||||||
type HeaderMatcher struct {
|
|
||||||
Name string
|
|
||||||
InvertMatch *bool
|
|
||||||
ExactMatch *string
|
|
||||||
RegexMatch *regexp.Regexp
|
|
||||||
PrefixMatch *string
|
|
||||||
SuffixMatch *string
|
|
||||||
RangeMatch *Int64Range
|
|
||||||
PresentMatch *bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Int64Range is a range for header range match.
|
|
||||||
type Int64Range struct {
|
|
||||||
Start int64
|
|
||||||
End int64
|
|
||||||
}
|
|
||||||
|
|
||||||
// SecurityConfig contains the security configuration received as part of the
|
|
||||||
// Cluster resource on the client-side, and as part of the Listener resource on
|
|
||||||
// the server-side.
|
|
||||||
type SecurityConfig struct {
|
|
||||||
// RootInstanceName identifies the certProvider plugin to be used to fetch
|
|
||||||
// root certificates. This instance name will be resolved to the plugin name
|
|
||||||
// and its associated configuration from the certificate_providers field of
|
|
||||||
// the bootstrap file.
|
|
||||||
RootInstanceName string
|
|
||||||
// RootCertName is the certificate name to be passed to the plugin (looked
|
|
||||||
// up from the bootstrap file) while fetching root certificates.
|
|
||||||
RootCertName string
|
|
||||||
// IdentityInstanceName identifies the certProvider plugin to be used to
|
|
||||||
// fetch identity certificates. This instance name will be resolved to the
|
|
||||||
// plugin name and its associated configuration from the
|
|
||||||
// certificate_providers field of the bootstrap file.
|
|
||||||
IdentityInstanceName string
|
|
||||||
// IdentityCertName is the certificate name to be passed to the plugin
|
|
||||||
// (looked up from the bootstrap file) while fetching identity certificates.
|
|
||||||
IdentityCertName string
|
|
||||||
// SubjectAltNameMatchers is an optional list of match criteria for SANs
|
|
||||||
// specified on the peer certificate. Used only on the client-side.
|
|
||||||
//
|
|
||||||
// Some intricacies:
|
|
||||||
// - If this field is empty, then any peer certificate is accepted.
|
|
||||||
// - If the peer certificate contains a wildcard DNS SAN, and an `exact`
|
|
||||||
// matcher is configured, a wildcard DNS match is performed instead of a
|
|
||||||
// regular string comparison.
|
|
||||||
SubjectAltNameMatchers []matcher.StringMatcher
|
|
||||||
// RequireClientCert indicates if the server handshake process expects the
|
|
||||||
// client to present a certificate. Set to true when performing mTLS. Used
|
|
||||||
// only on the server-side.
|
|
||||||
RequireClientCert bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Equal returns true if sc is equal to other.
|
|
||||||
func (sc *SecurityConfig) Equal(other *SecurityConfig) bool {
|
|
||||||
switch {
|
|
||||||
case sc == nil && other == nil:
|
|
||||||
return true
|
|
||||||
case (sc != nil) != (other != nil):
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
switch {
|
|
||||||
case sc.RootInstanceName != other.RootInstanceName:
|
|
||||||
return false
|
|
||||||
case sc.RootCertName != other.RootCertName:
|
|
||||||
return false
|
|
||||||
case sc.IdentityInstanceName != other.IdentityInstanceName:
|
|
||||||
return false
|
|
||||||
case sc.IdentityCertName != other.IdentityCertName:
|
|
||||||
return false
|
|
||||||
case sc.RequireClientCert != other.RequireClientCert:
|
|
||||||
return false
|
|
||||||
default:
|
|
||||||
if len(sc.SubjectAltNameMatchers) != len(other.SubjectAltNameMatchers) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for i := 0; i < len(sc.SubjectAltNameMatchers); i++ {
|
|
||||||
if !sc.SubjectAltNameMatchers[i].Equal(other.SubjectAltNameMatchers[i]) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClusterType is the type of cluster from a received CDS response.
|
|
||||||
type ClusterType int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ClusterTypeEDS represents the EDS cluster type, which will delegate endpoint
|
|
||||||
// discovery to the management server.
|
|
||||||
ClusterTypeEDS ClusterType = iota
|
|
||||||
// ClusterTypeLogicalDNS represents the Logical DNS cluster type, which essentially
|
|
||||||
// maps to the gRPC behavior of using the DNS resolver with pick_first LB policy.
|
|
||||||
ClusterTypeLogicalDNS
|
|
||||||
// ClusterTypeAggregate represents the Aggregate Cluster type, which provides a
|
|
||||||
// prioritized list of clusters to use. It is used for failover between clusters
|
|
||||||
// with a different configuration.
|
|
||||||
ClusterTypeAggregate
|
|
||||||
)
|
|
||||||
|
|
||||||
// ClusterLBPolicyRingHash represents ring_hash lb policy, and also contains its
|
|
||||||
// config.
|
|
||||||
type ClusterLBPolicyRingHash struct {
|
|
||||||
MinimumRingSize uint64
|
|
||||||
MaximumRingSize uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClusterUpdate contains information from a received CDS response, which is of
|
|
||||||
// interest to the registered CDS watcher.
|
|
||||||
type ClusterUpdate struct {
|
|
||||||
ClusterType ClusterType
|
|
||||||
// ClusterName is the clusterName being watched for through CDS.
|
|
||||||
ClusterName string
|
|
||||||
// EDSServiceName is an optional name for EDS. If it's not set, the balancer
|
|
||||||
// should watch ClusterName for the EDS resources.
|
|
||||||
EDSServiceName string
|
|
||||||
// EnableLRS indicates whether or not load should be reported through LRS.
|
|
||||||
EnableLRS bool
|
|
||||||
// SecurityCfg contains security configuration sent by the control plane.
|
|
||||||
SecurityCfg *SecurityConfig
|
|
||||||
// MaxRequests for circuit breaking, if any (otherwise nil).
|
|
||||||
MaxRequests *uint32
|
|
||||||
// DNSHostName is used only for cluster type DNS. It's the DNS name to
|
|
||||||
// resolve in "host:port" form
|
|
||||||
DNSHostName string
|
|
||||||
// PrioritizedClusterNames is used only for cluster type aggregate. It represents
|
|
||||||
// a prioritized list of cluster names.
|
|
||||||
PrioritizedClusterNames []string
|
|
||||||
|
|
||||||
// LBPolicy is the lb policy for this cluster.
|
|
||||||
//
|
|
||||||
// This only supports round_robin and ring_hash.
|
|
||||||
// - if it's nil, the lb policy is round_robin
|
|
||||||
// - if it's not nil, the lb policy is ring_hash, and this field has the config.
|
|
||||||
//
|
|
||||||
// When we add more support policies, this can be made an interface, and
|
|
||||||
// will be set to different types based on the policy type.
|
|
||||||
LBPolicy *ClusterLBPolicyRingHash
|
|
||||||
|
|
||||||
// Raw is the resource from the xds response.
|
|
||||||
Raw *anypb.Any
|
|
||||||
}
|
|
||||||
|
|
||||||
// OverloadDropConfig contains the config to drop overloads.
|
|
||||||
type OverloadDropConfig struct {
|
|
||||||
Category string
|
|
||||||
Numerator uint32
|
|
||||||
Denominator uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
// EndpointHealthStatus represents the health status of an endpoint.
|
|
||||||
type EndpointHealthStatus int32
|
|
||||||
|
|
||||||
const (
|
|
||||||
// EndpointHealthStatusUnknown represents HealthStatus UNKNOWN.
|
|
||||||
EndpointHealthStatusUnknown EndpointHealthStatus = iota
|
|
||||||
// EndpointHealthStatusHealthy represents HealthStatus HEALTHY.
|
|
||||||
EndpointHealthStatusHealthy
|
|
||||||
// EndpointHealthStatusUnhealthy represents HealthStatus UNHEALTHY.
|
|
||||||
EndpointHealthStatusUnhealthy
|
|
||||||
// EndpointHealthStatusDraining represents HealthStatus DRAINING.
|
|
||||||
EndpointHealthStatusDraining
|
|
||||||
// EndpointHealthStatusTimeout represents HealthStatus TIMEOUT.
|
|
||||||
EndpointHealthStatusTimeout
|
|
||||||
// EndpointHealthStatusDegraded represents HealthStatus DEGRADED.
|
|
||||||
EndpointHealthStatusDegraded
|
|
||||||
)
|
|
||||||
|
|
||||||
// Endpoint contains information of an endpoint.
|
|
||||||
type Endpoint struct {
|
|
||||||
Address string
|
|
||||||
HealthStatus EndpointHealthStatus
|
|
||||||
Weight uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
// Locality contains information of a locality.
|
|
||||||
type Locality struct {
|
|
||||||
Endpoints []Endpoint
|
|
||||||
ID internal.LocalityID
|
|
||||||
Priority uint32
|
|
||||||
Weight uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
// EndpointsUpdate contains an EDS update.
|
|
||||||
type EndpointsUpdate struct {
|
|
||||||
Drops []OverloadDropConfig
|
|
||||||
Localities []Locality
|
|
||||||
|
|
||||||
// Raw is the resource from the xds response.
|
|
||||||
Raw *anypb.Any
|
|
||||||
}
|
|
||||||
|
|
||||||
// Function to be overridden in tests.
|
|
||||||
var newAPIClient = func(apiVersion version.TransportAPI, cc *grpc.ClientConn, opts BuildOptions) (APIClient, error) {
|
|
||||||
cb := getAPIClientBuilder(apiVersion)
|
|
||||||
if cb == nil {
|
|
||||||
return nil, fmt.Errorf("no client builder for xDS API version: %v", apiVersion)
|
|
||||||
}
|
|
||||||
return cb.Build(cc, opts)
|
|
||||||
}
|
|
||||||
|
|
||||||
// clientImpl is the real implementation of the xds client. The exported Client
|
// clientImpl is the real implementation of the xds client. The exported Client
|
||||||
// is a wrapper of this struct with a ref count.
|
// is a wrapper of this struct with a ref count.
|
||||||
//
|
//
|
||||||
|
@@ -592,113 +41,38 @@ var newAPIClient = func(apiVersion version.TransportAPI, cc *grpc.ClientConn, op
|
||||||
type clientImpl struct {
|
type clientImpl struct {
|
||||||
done *grpcsync.Event
|
done *grpcsync.Event
|
||||||
config *bootstrap.Config
|
config *bootstrap.Config
|
||||||
cc *grpc.ClientConn // Connection to the management server.
|
|
||||||
apiClient APIClient
|
controller controllerInterface
|
||||||
watchExpiryTimeout time.Duration
|
|
||||||
|
|
||||||
logger *grpclog.PrefixLogger
|
logger *grpclog.PrefixLogger
|
||||||
|
pubsub *pubsub.Pubsub
|
||||||
updateCh *buffer.Unbounded // chan *watcherInfoWithUpdate
|
|
||||||
// All the following maps are to keep the updates/metadata in a cache.
|
|
||||||
// TODO: move them to a separate struct/package, to cleanup the xds_client.
|
|
||||||
// And CSDS handler can be implemented directly by the cache.
|
|
||||||
mu sync.Mutex
|
|
||||||
ldsWatchers map[string]map[*watchInfo]bool
|
|
||||||
ldsVersion string // Only used in CSDS.
|
|
||||||
ldsCache map[string]ListenerUpdate
|
|
||||||
ldsMD map[string]UpdateMetadata
|
|
||||||
rdsWatchers map[string]map[*watchInfo]bool
|
|
||||||
rdsVersion string // Only used in CSDS.
|
|
||||||
rdsCache map[string]RouteConfigUpdate
|
|
||||||
rdsMD map[string]UpdateMetadata
|
|
||||||
cdsWatchers map[string]map[*watchInfo]bool
|
|
||||||
cdsVersion string // Only used in CSDS.
|
|
||||||
cdsCache map[string]ClusterUpdate
|
|
||||||
cdsMD map[string]UpdateMetadata
|
|
||||||
edsWatchers map[string]map[*watchInfo]bool
|
|
||||||
edsVersion string // Only used in CSDS.
|
|
||||||
edsCache map[string]EndpointsUpdate
|
|
||||||
edsMD map[string]UpdateMetadata
|
|
||||||
|
|
||||||
// Changes to map lrsClients and the lrsClient inside the map need to be
|
|
||||||
// protected by lrsMu.
|
|
||||||
lrsMu sync.Mutex
|
|
||||||
lrsClients map[string]*lrsClient
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// newWithConfig returns a new xdsClient with the given config.
|
// newWithConfig returns a new xdsClient with the given config.
|
||||||
func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) (*clientImpl, error) {
|
func newWithConfig(config *bootstrap.Config, watchExpiryTimeout time.Duration) (_ *clientImpl, retErr error) {
|
||||||
switch {
|
|
||||||
case config.BalancerName == "":
|
|
||||||
return nil, errors.New("xds: no xds_server name provided in options")
|
|
||||||
case config.Creds == nil:
|
|
||||||
return nil, errors.New("xds: no credentials provided in options")
|
|
||||||
case config.NodeProto == nil:
|
|
||||||
return nil, errors.New("xds: no node_proto provided in options")
|
|
||||||
}
|
|
||||||
|
|
||||||
switch config.TransportAPI {
|
|
||||||
case version.TransportV2:
|
|
||||||
if _, ok := config.NodeProto.(*v2corepb.Node); !ok {
|
|
||||||
return nil, fmt.Errorf("xds: Node proto type (%T) does not match API version: %v", config.NodeProto, config.TransportAPI)
|
|
||||||
}
|
|
||||||
case version.TransportV3:
|
|
||||||
if _, ok := config.NodeProto.(*v3corepb.Node); !ok {
|
|
||||||
return nil, fmt.Errorf("xds: Node proto type (%T) does not match API version: %v", config.NodeProto, config.TransportAPI)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
dopts := []grpc.DialOption{
|
|
||||||
config.Creds,
|
|
||||||
grpc.WithKeepaliveParams(keepalive.ClientParameters{
|
|
||||||
Time: 5 * time.Minute,
|
|
||||||
Timeout: 20 * time.Second,
|
|
||||||
}),
|
|
||||||
}
|
|
||||||
|
|
||||||
c := &clientImpl{
|
c := &clientImpl{
|
||||||
done: grpcsync.NewEvent(),
|
done: grpcsync.NewEvent(),
|
||||||
config: config,
|
config: config,
|
||||||
watchExpiryTimeout: watchExpiryTimeout,
|
|
||||||
|
|
||||||
updateCh: buffer.NewUnbounded(),
|
|
||||||
ldsWatchers: make(map[string]map[*watchInfo]bool),
|
|
||||||
ldsCache: make(map[string]ListenerUpdate),
|
|
||||||
ldsMD: make(map[string]UpdateMetadata),
|
|
||||||
rdsWatchers: make(map[string]map[*watchInfo]bool),
|
|
||||||
rdsCache: make(map[string]RouteConfigUpdate),
|
|
||||||
rdsMD: make(map[string]UpdateMetadata),
|
|
||||||
cdsWatchers: make(map[string]map[*watchInfo]bool),
|
|
||||||
cdsCache: make(map[string]ClusterUpdate),
|
|
||||||
cdsMD: make(map[string]UpdateMetadata),
|
|
||||||
edsWatchers: make(map[string]map[*watchInfo]bool),
|
|
||||||
edsCache: make(map[string]EndpointsUpdate),
|
|
||||||
edsMD: make(map[string]UpdateMetadata),
|
|
||||||
lrsClients: make(map[string]*lrsClient),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
cc, err := grpc.Dial(config.BalancerName, dopts...)
|
defer func() {
|
||||||
|
if retErr != nil {
|
||||||
|
c.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
c.logger = prefixLogger(c)
|
||||||
|
c.logger.Infof("Created ClientConn to xDS management server: %s", config.XDSServer)
|
||||||
|
|
||||||
|
c.pubsub = pubsub.New(watchExpiryTimeout, c.logger)
|
||||||
|
|
||||||
|
controller, err := newController(config.XDSServer, c.pubsub, c.updateValidator, c.logger)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// An error from a non-blocking dial indicates something serious.
|
return nil, fmt.Errorf("xds: failed to connect to the control plane: %v", err)
|
||||||
return nil, fmt.Errorf("xds: failed to dial balancer {%s}: %v", config.BalancerName, err)
|
|
||||||
}
|
}
|
||||||
c.cc = cc
|
c.controller = controller
|
||||||
c.logger = prefixLogger((c))
|
|
||||||
c.logger.Infof("Created ClientConn to xDS management server: %s", config.BalancerName)
|
|
||||||
|
|
||||||
apiClient, err := newAPIClient(config.TransportAPI, cc, BuildOptions{
|
|
||||||
Parent: c,
|
|
||||||
Validator: c.updateValidator,
|
|
||||||
NodeProto: config.NodeProto,
|
|
||||||
Backoff: backoff.DefaultExponential.Backoff,
|
|
||||||
Logger: c.logger,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
c.apiClient = apiClient
|
|
||||||
c.logger.Infof("Created")
|
c.logger.Infof("Created")
|
||||||
go c.run()
|
|
||||||
return c, nil
|
return c, nil
|
||||||
}
|
}
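The rewritten constructor uses a named error return plus a deferred Close so that any failure after partial initialization tears down what was already built, which is also why Close must tolerate nil fields. A generic, self-contained sketch of that pattern with invented types:

package main

import (
	"errors"
	"fmt"
)

type conn struct{ open bool }

func (c *conn) Close() { c.open = false }

type client struct{ cc *conn }

// Close tolerates nil fields because the constructor below may fail before
// every field is set, mirroring the note in the vendored Close method.
func (c *client) Close() {
	if c.cc != nil {
		c.cc.Close()
	}
}

// newClient mirrors the defer-cleanup-on-error shape of newWithConfig: any
// error returned after this point automatically tears down what was built.
func newClient(failLate bool) (_ *client, retErr error) {
	c := &client{}
	defer func() {
		if retErr != nil {
			c.Close()
		}
	}()

	c.cc = &conn{open: true}
	if failLate {
		return nil, errors.New("later initialization step failed")
	}
	return c, nil
}

func main() {
	if _, err := newClient(true); err != nil {
		fmt.Println("constructor failed and cleaned up:", err)
	}
}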
|
||||||
|
|
||||||
|
@@ -708,27 +82,6 @@ func (c *clientRefCounted) BootstrapConfig() *bootstrap.Config {
|
||||||
return c.config
|
return c.config
|
||||||
}
|
}
|
||||||
|
|
||||||
// run is a goroutine for all the callbacks.
|
|
||||||
//
|
|
||||||
// Callback can be called in watch(), if an item is found in cache. Without this
|
|
||||||
// goroutine, the callback will be called inline, which might cause a deadlock
|
|
||||||
// in user's code. Callbacks also cannot be simple `go callback()` because the
|
|
||||||
// order matters.
|
|
||||||
func (c *clientImpl) run() {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case t := <-c.updateCh.Get():
|
|
||||||
c.updateCh.Load()
|
|
||||||
if c.done.HasFired() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c.callCallback(t.(*watcherInfoWithUpdate))
|
|
||||||
case <-c.done.Done():
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the gRPC connection to the management server.
|
// Close closes the gRPC connection to the management server.
|
||||||
func (c *clientImpl) Close() {
|
func (c *clientImpl) Close() {
|
||||||
if c.done.HasFired() {
|
if c.done.HasFired() {
|
||||||
|
@@ -737,19 +90,27 @@ func (c *clientImpl) Close() {
|
||||||
c.done.Fire()
|
c.done.Fire()
|
||||||
// TODO: Should we invoke the registered callbacks here with an error that
|
// TODO: Should we invoke the registered callbacks here with an error that
|
||||||
// the client is closed?
|
// the client is closed?
|
||||||
c.apiClient.Close()
|
|
||||||
c.cc.Close()
|
// Note that Close needs to check for nils even if some of them are always
|
||||||
|
// set in the constructor. This is because the constructor defers Close() in
|
||||||
|
// error cases, and the fields might not be set when the error happens.
|
||||||
|
if c.controller != nil {
|
||||||
|
c.controller.Close()
|
||||||
|
}
|
||||||
|
if c.pubsub != nil {
|
||||||
|
c.pubsub.Close()
|
||||||
|
}
|
||||||
c.logger.Infof("Shutdown")
|
c.logger.Infof("Shutdown")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *clientImpl) filterChainUpdateValidator(fc *FilterChain) error {
|
func (c *clientImpl) filterChainUpdateValidator(fc *xdsresource.FilterChain) error {
|
||||||
if fc == nil {
|
if fc == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return c.securityConfigUpdateValidator(fc.SecurityCfg)
|
return c.securityConfigUpdateValidator(fc.SecurityCfg)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *clientImpl) securityConfigUpdateValidator(sc *SecurityConfig) error {
|
func (c *clientImpl) securityConfigUpdateValidator(sc *xdsresource.SecurityConfig) error {
|
||||||
if sc == nil {
|
if sc == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@@ -768,28 +129,12 @@ func (c *clientImpl) securityConfigUpdateValidator(sc *SecurityConfig) error {
|
||||||
|
|
||||||
func (c *clientImpl) updateValidator(u interface{}) error {
|
func (c *clientImpl) updateValidator(u interface{}) error {
|
||||||
switch update := u.(type) {
|
switch update := u.(type) {
|
||||||
case ListenerUpdate:
|
case xdsresource.ListenerUpdate:
|
||||||
if update.InboundListenerCfg == nil || update.InboundListenerCfg.FilterChains == nil {
|
if update.InboundListenerCfg == nil || update.InboundListenerCfg.FilterChains == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
return update.InboundListenerCfg.FilterChains.Validate(c.filterChainUpdateValidator)
|
||||||
fcm := update.InboundListenerCfg.FilterChains
|
case xdsresource.ClusterUpdate:
|
||||||
for _, dst := range fcm.dstPrefixMap {
|
|
||||||
for _, srcType := range dst.srcTypeArr {
|
|
||||||
if srcType == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, src := range srcType.srcPrefixMap {
|
|
||||||
for _, fc := range src.srcPortMap {
|
|
||||||
if err := c.filterChainUpdateValidator(fc); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return c.filterChainUpdateValidator(fcm.def)
|
|
||||||
case ClusterUpdate:
|
|
||||||
return c.securityConfigUpdateValidator(update.SecurityCfg)
|
return c.securityConfigUpdateValidator(update.SecurityCfg)
|
||||||
default:
|
default:
|
||||||
// We currently invoke this update validation function only for LDS and
|
// We currently invoke this update validation function only for LDS and
|
||||||
|
@@ -799,65 +144,3 @@ func (c *clientImpl) updateValidator(u interface{}) error {
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ResourceType identifies resources in a transport protocol agnostic way. These
|
|
||||||
// will be used in transport version agnostic code, while the versioned API
|
|
||||||
// clients will map these to appropriate version URLs.
|
|
||||||
type ResourceType int
|
|
||||||
|
|
||||||
// Version agnostic resource type constants.
|
|
||||||
const (
|
|
||||||
UnknownResource ResourceType = iota
|
|
||||||
ListenerResource
|
|
||||||
HTTPConnManagerResource
|
|
||||||
RouteConfigResource
|
|
||||||
ClusterResource
|
|
||||||
EndpointsResource
|
|
||||||
)
|
|
||||||
|
|
||||||
func (r ResourceType) String() string {
|
|
||||||
switch r {
|
|
||||||
case ListenerResource:
|
|
||||||
return "ListenerResource"
|
|
||||||
case HTTPConnManagerResource:
|
|
||||||
return "HTTPConnManagerResource"
|
|
||||||
case RouteConfigResource:
|
|
||||||
return "RouteConfigResource"
|
|
||||||
case ClusterResource:
|
|
||||||
return "ClusterResource"
|
|
||||||
case EndpointsResource:
|
|
||||||
return "EndpointsResource"
|
|
||||||
default:
|
|
||||||
return "UnknownResource"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsListenerResource returns true if the provider URL corresponds to an xDS
|
|
||||||
// Listener resource.
|
|
||||||
func IsListenerResource(url string) bool {
|
|
||||||
return url == version.V2ListenerURL || url == version.V3ListenerURL
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsHTTPConnManagerResource returns true if the provider URL corresponds to an xDS
|
|
||||||
// HTTPConnManager resource.
|
|
||||||
func IsHTTPConnManagerResource(url string) bool {
|
|
||||||
return url == version.V2HTTPConnManagerURL || url == version.V3HTTPConnManagerURL
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsRouteConfigResource returns true if the provider URL corresponds to an xDS
|
|
||||||
// RouteConfig resource.
|
|
||||||
func IsRouteConfigResource(url string) bool {
|
|
||||||
return url == version.V2RouteConfigURL || url == version.V3RouteConfigURL
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsClusterResource returns true if the provider URL corresponds to an xDS
|
|
||||||
// Cluster resource.
|
|
||||||
func IsClusterResource(url string) bool {
|
|
||||||
return url == version.V2ClusterURL || url == version.V3ClusterURL
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsEndpointsResource returns true if the provider URL corresponds to an xDS
|
|
||||||
// Endpoints resource.
|
|
||||||
func IsEndpointsResource(url string) bool {
|
|
||||||
return url == version.V2EndpointsURL || url == version.V3EndpointsURL
|
|
||||||
}
|
|
||||||
|
|
38 vendor/google.golang.org/grpc/xds/internal/xdsclient/controller.go generated vendored Normal file
|
@@ -0,0 +1,38 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2021 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package xdsclient
|
||||||
|
|
||||||
|
import (
|
||||||
|
"google.golang.org/grpc/internal/grpclog"
|
||||||
|
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
|
||||||
|
"google.golang.org/grpc/xds/internal/xdsclient/controller"
|
||||||
|
"google.golang.org/grpc/xds/internal/xdsclient/load"
|
||||||
|
"google.golang.org/grpc/xds/internal/xdsclient/pubsub"
|
||||||
|
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
|
||||||
|
)
|
||||||
|
|
||||||
|
type controllerInterface interface {
|
||||||
|
AddWatch(resourceType xdsresource.ResourceType, resourceName string)
|
||||||
|
RemoveWatch(resourceType xdsresource.ResourceType, resourceName string)
|
||||||
|
ReportLoad(server string) (*load.Store, func())
|
||||||
|
Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
var newController = func(config *bootstrap.ServerConfig, pubsub *pubsub.Pubsub, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger) (controllerInterface, error) {
|
||||||
|
return controller.New(config, pubsub, validator, logger)
|
||||||
|
}
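Because newController is a package-level function variable, tests can presumably swap in a fake that satisfies controllerInterface without dialing a real management server. The sketch below mirrors that override pattern with local stand-in types; the interface and fake here are simplified stand-ins, not the vendored ones.

package main

import "fmt"

// controllerLike is a local stand-in that mirrors only the shape of the
// controllerInterface shown above; the real interface uses xdsresource and
// load types from the gRPC tree.
type controllerLike interface {
	AddWatch(resourceType int, resourceName string)
	RemoveWatch(resourceType int, resourceName string)
	Close()
}

// newControllerFn mimics the package-level `newController` variable: tests
// can reassign it to avoid connecting to a real control plane.
var newControllerFn = func(serverURI string) (controllerLike, error) {
	return nil, fmt.Errorf("real controller not available in this sketch (%s)", serverURI)
}

type fakeController struct{ watches []string }

func (f *fakeController) AddWatch(_ int, name string)    { f.watches = append(f.watches, name) }
func (f *fakeController) RemoveWatch(_ int, name string) {}
func (f *fakeController) Close()                         {}

func main() {
	// A test would restore the original value afterwards; omitted for brevity.
	newControllerFn = func(string) (controllerLike, error) { return &fakeController{}, nil }

	ctrl, err := newControllerFn("example-management-server:443")
	if err != nil {
		panic(err)
	}
	ctrl.AddWatch(0, "example-listener")
	fmt.Printf("watches registered on fake: %v\n", ctrl.(*fakeController).watches)
}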
|
168 vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/controller.go generated vendored Normal file
|
@@ -0,0 +1,168 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2021 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package controller contains the implementation used to connect to the control plane,
|
||||||
|
// including starting the ClientConn, starting the xDS stream, and
|
||||||
|
// sending/receiving messages.
|
||||||
|
//
|
||||||
|
// All the messages are parsed by the resource package (e.g.
|
||||||
|
// UnmarshalListener()) and sent to the Pubsub watchers.
|
||||||
|
package controller
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/internal/backoff"
|
||||||
|
"google.golang.org/grpc/internal/buffer"
|
||||||
|
"google.golang.org/grpc/internal/grpclog"
|
||||||
|
"google.golang.org/grpc/keepalive"
|
||||||
|
"google.golang.org/grpc/xds/internal/xdsclient/bootstrap"
|
||||||
|
"google.golang.org/grpc/xds/internal/xdsclient/controller/version"
|
||||||
|
"google.golang.org/grpc/xds/internal/xdsclient/pubsub"
|
||||||
|
"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Controller manages the connection and stream to the control plane.
|
||||||
|
//
|
||||||
|
// It keeps track of what resources are being watched, and sends new requests
|
||||||
|
// when new watches are added.
|
||||||
|
//
|
||||||
|
// It takes a pubsub (as an interface) as input. When a response is received,
|
||||||
|
// it's parsed, and the updates are sent to the pubsub.
|
||||||
|
type Controller struct {
|
||||||
|
config *bootstrap.ServerConfig
|
||||||
|
updateHandler pubsub.UpdateHandler
|
||||||
|
updateValidator xdsresource.UpdateValidatorFunc
|
||||||
|
logger *grpclog.PrefixLogger
|
||||||
|
|
||||||
|
cc *grpc.ClientConn // Connection to the management server.
|
||||||
|
vClient version.VersionedClient
|
||||||
|
stopRunGoroutine context.CancelFunc
|
||||||
|
|
||||||
|
backoff func(int) time.Duration
|
||||||
|
streamCh chan grpc.ClientStream
|
||||||
|
sendCh *buffer.Unbounded
|
||||||
|
|
||||||
|
mu sync.Mutex
|
||||||
|
// Message specific watch infos, protected by the above mutex. These are
|
||||||
|
// written to, after successfully reading from the update channel, and are
|
||||||
|
// read from when recovering from a broken stream to resend the xDS
|
||||||
|
// messages. When the user of this client object cancels a watch call,
|
||||||
|
// these are set to nil. All accesses to the map and any value
|
||||||
|
// inside the map should be protected with the above mutex.
|
||||||
|
watchMap map[xdsresource.ResourceType]map[string]bool
|
||||||
|
// versionMap contains the version that was acked (the version in the ack
|
||||||
|
// request that was sent on wire). The key is rType, the value is the
|
||||||
|
// version string, because the versions for different resource types should
|
||||||
|
// be independent.
|
||||||
|
versionMap map[xdsresource.ResourceType]string
|
||||||
|
// nonceMap contains the nonce from the most recent received response.
|
||||||
|
nonceMap map[xdsresource.ResourceType]string
|
||||||
|
|
||||||
|
// Changes to map lrsClients and the lrsClient inside the map need to be
|
||||||
|
// protected by lrsMu.
|
||||||
|
//
|
||||||
|
// TODO: after LRS refactoring, each controller should only manage the LRS
|
||||||
|
// stream to its server. LRS streams to other servers should be managed by
|
||||||
|
// other controllers.
|
||||||
|
lrsMu sync.Mutex
|
||||||
|
lrsClients map[string]*lrsClient
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new controller.
|
||||||
|
func New(config *bootstrap.ServerConfig, updateHandler pubsub.UpdateHandler, validator xdsresource.UpdateValidatorFunc, logger *grpclog.PrefixLogger) (_ *Controller, retErr error) {
|
||||||
|
switch {
|
||||||
|
case config == nil:
|
||||||
|
return nil, errors.New("xds: no xds_server provided")
|
||||||
|
case config.ServerURI == "":
|
||||||
|
return nil, errors.New("xds: no xds_server name provided in options")
|
||||||
|
case config.Creds == nil:
|
||||||
|
return nil, errors.New("xds: no credentials provided in options")
|
||||||
|
case config.NodeProto == nil:
|
||||||
|
return nil, errors.New("xds: no node_proto provided in options")
|
||||||
|
}
|
||||||
|
|
||||||
|
dopts := []grpc.DialOption{
|
||||||
|
config.Creds,
|
||||||
|
grpc.WithKeepaliveParams(keepalive.ClientParameters{
|
||||||
|
Time: 5 * time.Minute,
|
||||||
|
Timeout: 20 * time.Second,
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
|
||||||
|
ret := &Controller{
|
||||||
|
config: config,
|
||||||
|
updateValidator: validator,
|
||||||
|
updateHandler: updateHandler,
|
||||||
|
|
||||||
|
backoff: backoff.DefaultExponential.Backoff, // TODO: should this be configurable?
|
||||||
|
streamCh: make(chan grpc.ClientStream, 1),
|
||||||
|
sendCh: buffer.NewUnbounded(),
|
||||||
|
watchMap: make(map[xdsresource.ResourceType]map[string]bool),
|
||||||
|
versionMap: make(map[xdsresource.ResourceType]string),
|
||||||
|
nonceMap: make(map[xdsresource.ResourceType]string),
|
||||||
|
|
||||||
|
lrsClients: make(map[string]*lrsClient),
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if retErr != nil {
|
||||||
|
ret.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
cc, err := grpc.Dial(config.ServerURI, dopts...)
|
||||||
|
if err != nil {
|
||||||
|
// An error from a non-blocking dial indicates something serious.
|
||||||
|
return nil, fmt.Errorf("xds: failed to dial control plane {%s}: %v", config.ServerURI, err)
|
||||||
|
}
|
||||||
|
ret.cc = cc
|
||||||
|
|
||||||
|
builder := version.GetAPIClientBuilder(config.TransportAPI)
|
||||||
|
if builder == nil {
|
||||||
|
return nil, fmt.Errorf("no client builder for xDS API version: %v", config.TransportAPI)
|
||||||
|
}
|
||||||
|
apiClient, err := builder(version.BuildOptions{NodeProto: config.NodeProto, Logger: logger})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ret.vClient = apiClient
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
ret.stopRunGoroutine = cancel
|
||||||
|
go ret.run(ctx)
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes the controller.
|
||||||
|
func (t *Controller) Close() {
|
||||||
|
// Note that Close needs to check for nils even if some of them are always
|
||||||
|
// set in the constructor. This is because the constructor defers Close() in
|
||||||
|
// error cases, and the fields might not be set when the error happens.
|
||||||
|
if t.stopRunGoroutine != nil {
|
||||||
|
t.stopRunGoroutine()
|
||||||
|
}
|
||||||
|
if t.cc != nil {
|
||||||
|
t.cc.Close()
|
||||||
|
}
|
||||||
|
}
|
144 vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/loadreport.go generated vendored Normal file
|
@@ -0,0 +1,144 @@
|
||||||
|
/*
 *
 * Copyright 2021 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package controller

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/xds/internal/xdsclient/controller/version"
	"google.golang.org/grpc/xds/internal/xdsclient/load"
)

// ReportLoad starts a load reporting stream to the given server. If the server
// is not an empty string, and is different from the management server, a new
// ClientConn will be created.
//
// The same options used for creating the Client will be used (including
// NodeProto, and dial options if necessary).
//
// It returns a Store for the user to report loads, and a function to cancel
// the load reporting stream.
//
// TODO: LRS refactor; maybe a new controller should be created for a separate
// server, so that the same stream can be shared by different reporters to the
// same server, even if they originate from different Controllers.
func (c *Controller) ReportLoad(server string) (*load.Store, func()) {
	c.lrsMu.Lock()
	defer c.lrsMu.Unlock()

	// If there's already a client to this server, use it. Otherwise, create
	// one.
	lrsC, ok := c.lrsClients[server]
	if !ok {
		lrsC = newLRSClient(c, server)
		c.lrsClients[server] = lrsC
	}

	store := lrsC.ref()
	return store, func() {
		// This is a callback, need to hold lrsMu.
		c.lrsMu.Lock()
		defer c.lrsMu.Unlock()
		if lrsC.unRef() {
			// Delete the lrsClient from map if this is the last reference.
			delete(c.lrsClients, server)
		}
	}
}

// lrsClient maps to one lrsServer. It contains:
// - a ClientConn to this server (only if it's different from the management
//   server)
// - a load.Store that contains loads only for this server
type lrsClient struct {
	parent *Controller
	server string

	cc           *grpc.ClientConn // nil if the server is same as the management server
	refCount     int
	cancelStream func()
	loadStore    *load.Store
}

// newLRSClient creates a new LRS stream to the server.
func newLRSClient(parent *Controller, server string) *lrsClient {
	return &lrsClient{
		parent:   parent,
		server:   server,
		refCount: 0,
	}
}

// ref increments the refCount. If this is the first ref, it starts the LRS stream.
//
// Not thread-safe, caller needs to synchronize.
func (lrsC *lrsClient) ref() *load.Store {
	lrsC.refCount++
	if lrsC.refCount == 1 {
		lrsC.startStream()
	}
	return lrsC.loadStore
}

// unRef decrements the refCount, and closes the stream if refCount reaches 0
// (and closes the cc if cc is not the xDS cc). It returns whether refCount
// reached 0 after this call.
//
// Not thread-safe, caller needs to synchronize.
func (lrsC *lrsClient) unRef() (closed bool) {
	lrsC.refCount--
	if lrsC.refCount != 0 {
		return false
	}
	lrsC.parent.logger.Infof("Stopping load report to server: %s", lrsC.server)
	lrsC.cancelStream()
	if lrsC.cc != nil {
		lrsC.cc.Close()
	}
	return true
}

// startStream starts the LRS stream to the server. If server is not the same
// management server from the parent, it also creates a ClientConn.
func (lrsC *lrsClient) startStream() {
	var cc *grpc.ClientConn

	lrsC.parent.logger.Infof("Starting load report to server: %s", lrsC.server)
	if lrsC.server == "" || lrsC.server == lrsC.parent.config.ServerURI {
		// Reuse the xDS client if server is the same.
		cc = lrsC.parent.cc
	} else {
		lrsC.parent.logger.Infof("LRS server is different from management server, starting a new ClientConn")
		ccNew, err := grpc.Dial(lrsC.server, lrsC.parent.config.Creds)
		if err != nil {
			// An error from a non-blocking dial indicates something serious.
			lrsC.parent.logger.Infof("xds: failed to dial load report server {%s}: %v", lrsC.server, err)
			return
		}
		cc = ccNew
		lrsC.cc = ccNew
	}

	var ctx context.Context
	ctx, lrsC.cancelStream = context.WithCancel(context.Background())

	// Create the store and stream.
	lrsC.loadStore = load.NewStore()
	go lrsC.parent.reportLoad(ctx, cc, version.LoadReportingOptions{LoadStore: lrsC.loadStore})
}
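The ref()/unRef() pair above gates the lifetime of the LRS stream on a reference count: the first ref() starts the stream, and the unRef() that drops the count to zero stops it (and closes the extra ClientConn, if any). Below is a minimal, self-contained sketch of that pattern; the names are illustrative stand-ins, not part of the vendored package.

package main

import "fmt"

// refCounted mirrors the shape of lrsClient above: the first ref() starts the
// underlying resource, and the unRef() that brings the count back to zero
// stops it. Callers are expected to synchronize, just like lrsClient.
type refCounted struct {
	refCount int
	active   bool
}

func (r *refCounted) ref() {
	r.refCount++
	if r.refCount == 1 {
		r.active = true // stand-in for startStream()
		fmt.Println("stream started")
	}
}

func (r *refCounted) unRef() (stopped bool) {
	r.refCount--
	if r.refCount != 0 {
		return false
	}
	r.active = false // stand-in for cancelStream() and cc.Close()
	fmt.Println("stream stopped")
	return true
}

func main() {
	var r refCounted
	r.ref()   // first caller starts the stream
	r.ref()   // second caller shares it
	r.unRef() // still one reference left, stream keeps running
	r.unRef() // last reference released: stream is stopped
}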
@@ -16,139 +16,23 @@
  *
  */
 
-package xdsclient
+package controller
 
 import (
 	"context"
-	"sync"
+	"fmt"
 	"time"
 
 	"github.com/golang/protobuf/proto"
-	"google.golang.org/grpc/xds/internal/xdsclient/load"
 
 	"google.golang.org/grpc"
-	"google.golang.org/grpc/internal/buffer"
-	"google.golang.org/grpc/internal/grpclog"
+	controllerversion "google.golang.org/grpc/xds/internal/xdsclient/controller/version"
+	xdsresourceversion "google.golang.org/grpc/xds/internal/xdsclient/controller/version"
+	"google.golang.org/grpc/xds/internal/xdsclient/load"
+	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
 )
-
-// ErrResourceTypeUnsupported is an error used to indicate an unsupported xDS
-// resource type. The wrapped ErrStr contains the details.
-type ErrResourceTypeUnsupported struct {
-	ErrStr string
-}
-
-// Error helps implements the error interface.
-func (e ErrResourceTypeUnsupported) Error() string {
-	return e.ErrStr
-}
-
-// VersionedClient is the interface to be provided by the transport protocol
-// specific client implementations. This mainly deals with the actual sending
-// and receiving of messages.
-type VersionedClient interface {
-	// NewStream returns a new xDS client stream specific to the underlying
-	// transport protocol version.
-	NewStream(ctx context.Context) (grpc.ClientStream, error)
-
-	// SendRequest constructs and sends out a DiscoveryRequest message specific
-	// to the underlying transport protocol version.
-	SendRequest(s grpc.ClientStream, resourceNames []string, rType ResourceType, version, nonce, errMsg string) error
-
-	// RecvResponse uses the provided stream to receive a response specific to
-	// the underlying transport protocol version.
-	RecvResponse(s grpc.ClientStream) (proto.Message, error)
-
-	// HandleResponse parses and validates the received response and notifies
-	// the top-level client which in turn notifies the registered watchers.
-	//
-	// Return values are: resourceType, version, nonce, error.
-	// If the provided protobuf message contains a resource type which is not
-	// supported, implementations must return an error of type
-	// ErrResourceTypeUnsupported.
-	HandleResponse(proto.Message) (ResourceType, string, string, error)
-
-	// NewLoadStatsStream returns a new LRS client stream specific to the underlying
-	// transport protocol version.
-	NewLoadStatsStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error)
-
-	// SendFirstLoadStatsRequest constructs and sends the first request on the
-	// LRS stream.
-	SendFirstLoadStatsRequest(s grpc.ClientStream) error
-
-	// HandleLoadStatsResponse receives the first response from the server which
-	// contains the load reporting interval and the clusters for which the
-	// server asks the client to report load for.
-	//
-	// If the response sets SendAllClusters to true, the returned clusters is
-	// nil.
-	HandleLoadStatsResponse(s grpc.ClientStream) (clusters []string, _ time.Duration, _ error)
-
-	// SendLoadStatsRequest will be invoked at regular intervals to send load
-	// report with load data reported since the last time this method was
-	// invoked.
-	SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) error
-}
-
-// TransportHelper contains all xDS transport protocol related functionality
-// which is common across different versioned client implementations.
-//
-// TransportHelper takes care of sending and receiving xDS requests and
-// responses on an ADS stream. It also takes care of ACK/NACK handling. It
-// delegates to the actual versioned client implementations wherever
-// appropriate.
-//
-// Implements the APIClient interface which makes it possible for versioned
-// client implementations to embed this type, and thereby satisfy the interface
-// requirements.
-type TransportHelper struct {
-	cancelCtx context.CancelFunc
-
-	vClient  VersionedClient
-	logger   *grpclog.PrefixLogger
-	backoff  func(int) time.Duration
-	streamCh chan grpc.ClientStream
-	sendCh   *buffer.Unbounded
-
-	mu sync.Mutex
-	// Message specific watch infos, protected by the above mutex. These are
-	// written to, after successfully reading from the update channel, and are
-	// read from when recovering from a broken stream to resend the xDS
-	// messages. When the user of this client object cancels a watch call,
-	// these are set to nil. All accesses to the map protected and any value
-	// inside the map should be protected with the above mutex.
-	watchMap map[ResourceType]map[string]bool
-	// versionMap contains the version that was acked (the version in the ack
-	// request that was sent on wire). The key is rType, the value is the
-	// version string, becaues the versions for different resource types should
-	// be independent.
-	versionMap map[ResourceType]string
-	// nonceMap contains the nonce from the most recent received response.
-	nonceMap map[ResourceType]string
-}
-
-// NewTransportHelper creates a new transport helper to be used by versioned
-// client implementations.
-func NewTransportHelper(vc VersionedClient, logger *grpclog.PrefixLogger, backoff func(int) time.Duration) *TransportHelper {
-	ctx, cancelCtx := context.WithCancel(context.Background())
-	t := &TransportHelper{
-		cancelCtx: cancelCtx,
-		vClient:   vc,
-		logger:    logger,
-		backoff:   backoff,
-
-		streamCh:   make(chan grpc.ClientStream, 1),
-		sendCh:     buffer.NewUnbounded(),
-		watchMap:   make(map[ResourceType]map[string]bool),
-		versionMap: make(map[ResourceType]string),
-		nonceMap:   make(map[ResourceType]string),
-	}
-
-	go t.run(ctx)
-	return t
-}
-
 // AddWatch adds a watch for an xDS resource given its type and name.
-func (t *TransportHelper) AddWatch(rType ResourceType, resourceName string) {
+func (t *Controller) AddWatch(rType xdsresource.ResourceType, resourceName string) {
 	t.sendCh.Put(&watchAction{
 		rType:  rType,
 		remove: false,
@@ -158,7 +42,7 @@ func (t *TransportHelper) AddWatch(rType ResourceType, resourceName string) {
 
 // RemoveWatch cancels an already registered watch for an xDS resource
 // given its type and name.
-func (t *TransportHelper) RemoveWatch(rType ResourceType, resourceName string) {
+func (t *Controller) RemoveWatch(rType xdsresource.ResourceType, resourceName string) {
 	t.sendCh.Put(&watchAction{
 		rType:  rType,
 		remove: true,
@@ -166,15 +50,10 @@ func (t *TransportHelper) RemoveWatch(rType ResourceType, resourceName string) {
 	})
 }
 
-// Close closes the transport helper.
-func (t *TransportHelper) Close() {
-	t.cancelCtx()
-}
-
 // run starts an ADS stream (and backs off exponentially, if the previous
 // stream failed without receiving a single reply) and runs the sender and
 // receiver routines to send and receive data from the stream respectively.
-func (t *TransportHelper) run(ctx context.Context) {
+func (t *Controller) run(ctx context.Context) {
 	go t.send(ctx)
 	// TODO: start a goroutine monitoring ClientConn's connectivity state, and
 	// report error (and log) when stats is transient failure.
@@ -200,8 +79,9 @@ func (t *TransportHelper) run(ctx context.Context) {
 		}
 
 		retries++
-		stream, err := t.vClient.NewStream(ctx)
+		stream, err := t.vClient.NewStream(ctx, t.cc)
 		if err != nil {
+			t.updateHandler.NewConnectionError(err)
 			t.logger.Warningf("xds: ADS stream creation failed: %v", err)
 			continue
 		}
@@ -234,7 +114,7 @@ func (t *TransportHelper) run(ctx context.Context) {
 // Note that this goroutine doesn't do anything to the old stream when there's a
 // new one. In fact, there should be only one stream in progress, and new one
 // should only be created when the old one fails (recv returns an error).
-func (t *TransportHelper) send(ctx context.Context) {
+func (t *Controller) send(ctx context.Context) {
 	var stream grpc.ClientStream
 	for {
 		select {
@@ -250,7 +130,7 @@ func (t *TransportHelper) send(ctx context.Context) {
 
 			var (
 				target                 []string
-				rType                  ResourceType
+				rType                  xdsresource.ResourceType
 				version, nonce, errMsg string
 				send                   bool
 			)
@@ -287,13 +167,13 @@ func (t *TransportHelper) send(ctx context.Context) {
 // that here because the stream has just started and Send() usually returns
 // quickly (once it pushes the message onto the transport layer) and is only
 // ever blocked if we don't have enough flow control quota.
-func (t *TransportHelper) sendExisting(stream grpc.ClientStream) bool {
+func (t *Controller) sendExisting(stream grpc.ClientStream) bool {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 
 	// Reset the ack versions when the stream restarts.
-	t.versionMap = make(map[ResourceType]string)
-	t.nonceMap = make(map[ResourceType]string)
+	t.versionMap = make(map[xdsresource.ResourceType]string)
+	t.nonceMap = make(map[xdsresource.ResourceType]string)
 
 	for rType, s := range t.watchMap {
 		if err := t.vClient.SendRequest(stream, mapToSlice(s), rType, "", "", ""); err != nil {
@@ -307,16 +187,19 @@ func (t *TransportHelper) sendExisting(stream grpc.ClientStream) bool {
 
 // recv receives xDS responses on the provided ADS stream and branches out to
 // message specific handlers.
-func (t *TransportHelper) recv(stream grpc.ClientStream) bool {
+func (t *Controller) recv(stream grpc.ClientStream) bool {
 	success := false
 	for {
 		resp, err := t.vClient.RecvResponse(stream)
 		if err != nil {
+			t.updateHandler.NewConnectionError(err)
 			t.logger.Warningf("ADS stream is closed with error: %v", err)
 			return success
 		}
-		rType, version, nonce, err := t.vClient.HandleResponse(resp)
-		if e, ok := err.(ErrResourceTypeUnsupported); ok {
+		rType, version, nonce, err := t.handleResponse(resp)
+
+		if e, ok := err.(xdsresourceversion.ErrResourceTypeUnsupported); ok {
 			t.logger.Warningf("%s", e.ErrStr)
 			continue
 		}
@@ -342,6 +225,43 @@ func (t *TransportHelper) recv(stream grpc.ClientStream) bool {
 		}
 	}
 
+func (t *Controller) handleResponse(resp proto.Message) (xdsresource.ResourceType, string, string, error) {
+	rType, resource, version, nonce, err := t.vClient.ParseResponse(resp)
+	if err != nil {
+		return rType, version, nonce, err
+	}
+	opts := &xdsresource.UnmarshalOptions{
+		Version:         version,
+		Resources:       resource,
+		Logger:          t.logger,
+		UpdateValidator: t.updateValidator,
+	}
+	var md xdsresource.UpdateMetadata
+	switch rType {
+	case xdsresource.ListenerResource:
+		var update map[string]xdsresource.ListenerUpdateErrTuple
+		update, md, err = xdsresource.UnmarshalListener(opts)
+		t.updateHandler.NewListeners(update, md)
+	case xdsresource.RouteConfigResource:
+		var update map[string]xdsresource.RouteConfigUpdateErrTuple
+		update, md, err = xdsresource.UnmarshalRouteConfig(opts)
+		t.updateHandler.NewRouteConfigs(update, md)
+	case xdsresource.ClusterResource:
+		var update map[string]xdsresource.ClusterUpdateErrTuple
+		update, md, err = xdsresource.UnmarshalCluster(opts)
+		t.updateHandler.NewClusters(update, md)
+	case xdsresource.EndpointsResource:
+		var update map[string]xdsresource.EndpointsUpdateErrTuple
+		update, md, err = xdsresource.UnmarshalEndpoints(opts)
+		t.updateHandler.NewEndpoints(update, md)
+	default:
+		return rType, "", "", xdsresourceversion.ErrResourceTypeUnsupported{
+			ErrStr: fmt.Sprintf("Resource type %v unknown in response from server", rType),
+		}
+	}
+	return rType, version, nonce, err
+}
+
 func mapToSlice(m map[string]bool) []string {
 	ret := make([]string, 0, len(m))
 	for i := range m {
@@ -351,7 +271,7 @@ func mapToSlice(m map[string]bool) []string {
 }
 
 type watchAction struct {
-	rType    ResourceType
+	rType    xdsresource.ResourceType
 	remove   bool // Whether this is to remove watch for the resource.
 	resource string
 }
@@ -359,7 +279,7 @@ type watchAction struct {
 // processWatchInfo pulls the fields needed by the request from a watchAction.
 //
 // It also updates the watch map.
-func (t *TransportHelper) processWatchInfo(w *watchAction) (target []string, rType ResourceType, ver, nonce string) {
+func (t *Controller) processWatchInfo(w *watchAction) (target []string, rType xdsresource.ResourceType, ver, nonce string) {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 
@@ -390,7 +310,7 @@ func (t *TransportHelper) processWatchInfo(w *watchAction) (target []string, rTy
 }
 
 type ackAction struct {
-	rType   ResourceType
+	rType   xdsresource.ResourceType
 	version string // NACK if version is an empty string.
 	nonce   string
 	errMsg  string // Empty unless it's a NACK.
@@ -403,13 +323,13 @@ type ackAction struct {
 // processAckInfo pulls the fields needed by the ack request from a ackAction.
 //
 // If no active watch is found for this ack, it returns false for send.
-func (t *TransportHelper) processAckInfo(ack *ackAction, stream grpc.ClientStream) (target []string, rType ResourceType, version, nonce string, send bool) {
+func (t *Controller) processAckInfo(ack *ackAction, stream grpc.ClientStream) (target []string, rType xdsresource.ResourceType, version, nonce string, send bool) {
 	if ack.stream != stream {
 		// If ACK's stream isn't the current sending stream, this means the ACK
 		// was pushed to queue before the old stream broke, and a new stream has
 		// been started since. Return immediately here so we don't update the
 		// nonce for the new stream.
-		return nil, UnknownResource, "", "", false
+		return nil, xdsresource.UnknownResource, "", "", false
 	}
 	rType = ack.rType
 
@@ -429,7 +349,7 @@ func (t *TransportHelper) processAckInfo(ack *ackAction, stream grpc.ClientStrea
 		// canceled while the ackAction is in queue), because there's no resource
 		// name. And if we send a request with empty resource name list, the
 		// server may treat it as a wild card and send us everything.
-		return nil, UnknownResource, "", "", false
+		return nil, xdsresource.UnknownResource, "", "", false
 	}
 	send = true
 	target = mapToSlice(s)
@@ -449,7 +369,7 @@ func (t *TransportHelper) processAckInfo(ack *ackAction, stream grpc.ClientStrea
 
 // reportLoad starts an LRS stream to report load data to the management server.
 // It blocks until the context is cancelled.
-func (t *TransportHelper) reportLoad(ctx context.Context, cc *grpc.ClientConn, opts loadReportingOptions) {
+func (t *Controller) reportLoad(ctx context.Context, cc *grpc.ClientConn, opts controllerversion.LoadReportingOptions) {
 	retries := 0
 	for {
 		if ctx.Err() != nil {
@@ -471,28 +391,28 @@ func (t *TransportHelper) reportLoad(ctx context.Context, cc *grpc.ClientConn, o
 		retries++
 		stream, err := t.vClient.NewLoadStatsStream(ctx, cc)
 		if err != nil {
-			logger.Warningf("lrs: failed to create stream: %v", err)
+			t.logger.Warningf("lrs: failed to create stream: %v", err)
 			continue
 		}
-		logger.Infof("lrs: created LRS stream")
+		t.logger.Infof("lrs: created LRS stream")
 
 		if err := t.vClient.SendFirstLoadStatsRequest(stream); err != nil {
-			logger.Warningf("lrs: failed to send first request: %v", err)
+			t.logger.Warningf("lrs: failed to send first request: %v", err)
 			continue
 		}
 
 		clusters, interval, err := t.vClient.HandleLoadStatsResponse(stream)
 		if err != nil {
-			logger.Warning(err)
+			t.logger.Warningf("%v", err)
 			continue
 		}
 
 		retries = 0
-		t.sendLoads(ctx, stream, opts.loadStore, clusters, interval)
+		t.sendLoads(ctx, stream, opts.LoadStore, clusters, interval)
 	}
 }
 
-func (t *TransportHelper) sendLoads(ctx context.Context, stream grpc.ClientStream, store *load.Store, clusterNames []string, interval time.Duration) {
+func (t *Controller) sendLoads(ctx context.Context, stream grpc.ClientStream, store *load.Store, clusterNames []string, interval time.Duration) {
 	tick := time.NewTicker(interval)
 	defer tick.Stop()
 	for {
@@ -502,7 +422,7 @@ func (t *TransportHelper) sendLoads(ctx context.Context, stream grpc.ClientStrea
 			return
 		}
 		if err := t.vClient.SendLoadStatsRequest(stream, store.Stats(clusterNames)); err != nil {
-			logger.Warning(err)
+			t.logger.Warningf("%v", err)
 			return
 		}
 	}
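run() and reportLoad() above share the same retry shape: attempt to create a stream, and on failure wait backoff(retries) before the next attempt, bailing out only when the context is cancelled. A minimal sketch of that loop follows; the capped exponential schedule is an illustrative assumption standing in for grpc's internal backoff.DefaultExponential, not the vendored implementation.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryWithBackoff mirrors the retry loop used by Controller.run and
// reportLoad: sleep backoff(retries) between failed attempts, and stop as
// soon as the context is cancelled.
func retryWithBackoff(ctx context.Context, connect func(context.Context) error) {
	backoff := func(retries int) time.Duration {
		const base, maxDelay = 100 * time.Millisecond, 10 * time.Second
		d := base << uint(retries)
		if d <= 0 || d > maxDelay { // guard against overflow and cap the delay
			return maxDelay
		}
		return d
	}

	retries := 0
	for {
		if ctx.Err() != nil {
			return
		}
		if retries != 0 {
			timer := time.NewTimer(backoff(retries))
			select {
			case <-timer.C:
			case <-ctx.Done():
				timer.Stop()
				return
			}
		}
		retries++
		if err := connect(ctx); err != nil {
			fmt.Printf("attempt %d failed: %v\n", retries, err)
			continue
		}
		// A successful attempt would reset retries and run the send/recv
		// goroutines until the stream breaks; the sketch just stops here.
		return
	}
}

func main() {
	attempts := 0
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	retryWithBackoff(ctx, func(context.Context) error {
		attempts++
		if attempts < 3 {
			return errors.New("stream creation failed")
		}
		fmt.Println("stream established")
		return nil
	})
}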
155 vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/version/v2/client.go (generated, vendored, new file)
@@ -0,0 +1,155 @@
/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package v2 provides xDS v2 transport protocol specific functionality.
package v2

import (
	"context"
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/internal/grpclog"
	"google.golang.org/grpc/internal/pretty"
	controllerversion "google.golang.org/grpc/xds/internal/xdsclient/controller/version"
	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
	xdsresourceversion "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version"
	"google.golang.org/protobuf/types/known/anypb"

	v2xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
	v2corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
	v2adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v2"
	statuspb "google.golang.org/genproto/googleapis/rpc/status"
)

func init() {
	controllerversion.RegisterAPIClientBuilder(xdsresourceversion.TransportV2, newClient)
}

var (
	resourceTypeToURL = map[xdsresource.ResourceType]string{
		xdsresource.ListenerResource:    xdsresourceversion.V2ListenerURL,
		xdsresource.RouteConfigResource: xdsresourceversion.V2RouteConfigURL,
		xdsresource.ClusterResource:     xdsresourceversion.V2ClusterURL,
		xdsresource.EndpointsResource:   xdsresourceversion.V2EndpointsURL,
	}
)

func newClient(opts controllerversion.BuildOptions) (controllerversion.VersionedClient, error) {
	nodeProto, ok := opts.NodeProto.(*v2corepb.Node)
	if !ok {
		return nil, fmt.Errorf("xds: unsupported Node proto type: %T, want %T", opts.NodeProto, (*v2corepb.Node)(nil))
	}
	v2c := &client{nodeProto: nodeProto, logger: opts.Logger}
	return v2c, nil
}

type adsStream v2adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient

// client performs the actual xDS RPCs using the xDS v2 API. It creates a
// single ADS stream on which the different types of xDS requests and responses
// are multiplexed.
type client struct {
	nodeProto *v2corepb.Node
	logger    *grpclog.PrefixLogger
}

func (v2c *client) NewStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) {
	return v2adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx, grpc.WaitForReady(true))
}

// SendRequest sends out a DiscoveryRequest for the given resourceNames, of type
// rType, on the provided stream.
//
// version is the ack version to be sent with the request
// - If this is the new request (not an ack/nack), version will be empty.
// - If this is an ack, version will be the version from the response.
// - If this is a nack, version will be the previous acked version (from
//   versionMap). If there was no ack before, it will be empty.
func (v2c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsresource.ResourceType, version, nonce, errMsg string) error {
	stream, ok := s.(adsStream)
	if !ok {
		return fmt.Errorf("xds: Attempt to send request on unsupported stream type: %T", s)
	}
	req := &v2xdspb.DiscoveryRequest{
		Node:          v2c.nodeProto,
		TypeUrl:       resourceTypeToURL[rType],
		ResourceNames: resourceNames,
		VersionInfo:   version,
		ResponseNonce: nonce,
	}
	if errMsg != "" {
		req.ErrorDetail = &statuspb.Status{
			Code: int32(codes.InvalidArgument), Message: errMsg,
		}
	}
	if err := stream.Send(req); err != nil {
		return fmt.Errorf("xds: stream.Send(%+v) failed: %v", req, err)
	}
	v2c.logger.Debugf("ADS request sent: %v", pretty.ToJSON(req))
	return nil
}

// RecvResponse blocks on the receipt of one response message on the provided
// stream.
func (v2c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) {
	stream, ok := s.(adsStream)
	if !ok {
		return nil, fmt.Errorf("xds: Attempt to receive response on unsupported stream type: %T", s)
	}

	resp, err := stream.Recv()
	if err != nil {
		return nil, fmt.Errorf("xds: stream.Recv() failed: %v", err)
	}
	v2c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl())
	v2c.logger.Debugf("ADS response received: %v", pretty.ToJSON(resp))
	return resp, nil
}

func (v2c *client) ParseResponse(r proto.Message) (xdsresource.ResourceType, []*anypb.Any, string, string, error) {
	rType := xdsresource.UnknownResource
	resp, ok := r.(*v2xdspb.DiscoveryResponse)
	if !ok {
		return rType, nil, "", "", fmt.Errorf("xds: unsupported message type: %T", resp)
	}

	// Note that the xDS transport protocol is versioned independently of
	// the resource types, and it is supported to transfer older versions
	// of resource types using new versions of the transport protocol, or
	// vice-versa. Hence we need to handle v3 type_urls as well here.
	var err error
	url := resp.GetTypeUrl()
	switch {
	case xdsresource.IsListenerResource(url):
		rType = xdsresource.ListenerResource
	case xdsresource.IsRouteConfigResource(url):
		rType = xdsresource.RouteConfigResource
	case xdsresource.IsClusterResource(url):
		rType = xdsresource.ClusterResource
	case xdsresource.IsEndpointsResource(url):
		rType = xdsresource.EndpointsResource
	default:
		return rType, nil, "", "", controllerversion.ErrResourceTypeUnsupported{
			ErrStr: fmt.Sprintf("Resource type %v unknown in response from server", resp.GetTypeUrl()),
		}
	}
	return rType, resp.GetResources(), resp.GetVersionInfo(), resp.GetNonce(), err
}
157 vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/version/v3/client.go (generated, vendored, new file)
@@ -0,0 +1,157 @@
/*
 *
 * Copyright 2020 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package v3 provides xDS v3 transport protocol specific functionality.
package v3

import (
	"context"
	"fmt"

	"github.com/golang/protobuf/proto"
	statuspb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/internal/grpclog"
	"google.golang.org/grpc/internal/pretty"
	controllerversion "google.golang.org/grpc/xds/internal/xdsclient/controller/version"
	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
	xdsresourceversion "google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version"
	"google.golang.org/protobuf/types/known/anypb"

	v3corepb "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
	v3adsgrpc "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
	v3discoverypb "github.com/envoyproxy/go-control-plane/envoy/service/discovery/v3"
)

func init() {
	controllerversion.RegisterAPIClientBuilder(xdsresourceversion.TransportV3, newClient)
}

var (
	resourceTypeToURL = map[xdsresource.ResourceType]string{
		xdsresource.ListenerResource:    xdsresourceversion.V3ListenerURL,
		xdsresource.RouteConfigResource: xdsresourceversion.V3RouteConfigURL,
		xdsresource.ClusterResource:     xdsresourceversion.V3ClusterURL,
		xdsresource.EndpointsResource:   xdsresourceversion.V3EndpointsURL,
	}
)

func newClient(opts controllerversion.BuildOptions) (controllerversion.VersionedClient, error) {
	nodeProto, ok := opts.NodeProto.(*v3corepb.Node)
	if !ok {
		return nil, fmt.Errorf("xds: unsupported Node proto type: %T, want %T", opts.NodeProto, v3corepb.Node{})
	}
	v3c := &client{
		nodeProto: nodeProto, logger: opts.Logger,
	}
	return v3c, nil
}

type adsStream v3adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClient

// client performs the actual xDS RPCs using the xDS v3 API. It creates a
// single ADS stream on which the different types of xDS requests and responses
// are multiplexed.
type client struct {
	nodeProto *v3corepb.Node
	logger    *grpclog.PrefixLogger
}

func (v3c *client) NewStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error) {
	return v3adsgrpc.NewAggregatedDiscoveryServiceClient(cc).StreamAggregatedResources(ctx, grpc.WaitForReady(true))
}

// SendRequest sends out a DiscoveryRequest for the given resourceNames, of type
// rType, on the provided stream.
//
// version is the ack version to be sent with the request
// - If this is the new request (not an ack/nack), version will be empty.
// - If this is an ack, version will be the version from the response.
// - If this is a nack, version will be the previous acked version (from
//   versionMap). If there was no ack before, it will be empty.
func (v3c *client) SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsresource.ResourceType, version, nonce, errMsg string) error {
	stream, ok := s.(adsStream)
	if !ok {
		return fmt.Errorf("xds: Attempt to send request on unsupported stream type: %T", s)
	}
	req := &v3discoverypb.DiscoveryRequest{
		Node:          v3c.nodeProto,
		TypeUrl:       resourceTypeToURL[rType],
		ResourceNames: resourceNames,
		VersionInfo:   version,
		ResponseNonce: nonce,
	}
	if errMsg != "" {
		req.ErrorDetail = &statuspb.Status{
			Code: int32(codes.InvalidArgument), Message: errMsg,
		}
	}
	if err := stream.Send(req); err != nil {
		return fmt.Errorf("xds: stream.Send(%+v) failed: %v", req, err)
	}
	v3c.logger.Debugf("ADS request sent: %v", pretty.ToJSON(req))
	return nil
}

// RecvResponse blocks on the receipt of one response message on the provided
// stream.
func (v3c *client) RecvResponse(s grpc.ClientStream) (proto.Message, error) {
	stream, ok := s.(adsStream)
	if !ok {
		return nil, fmt.Errorf("xds: Attempt to receive response on unsupported stream type: %T", s)
	}

	resp, err := stream.Recv()
	if err != nil {
		return nil, fmt.Errorf("xds: stream.Recv() failed: %v", err)
	}
	v3c.logger.Infof("ADS response received, type: %v", resp.GetTypeUrl())
	v3c.logger.Debugf("ADS response received: %+v", pretty.ToJSON(resp))
	return resp, nil
}

func (v3c *client) ParseResponse(r proto.Message) (xdsresource.ResourceType, []*anypb.Any, string, string, error) {
	rType := xdsresource.UnknownResource
	resp, ok := r.(*v3discoverypb.DiscoveryResponse)
	if !ok {
		return rType, nil, "", "", fmt.Errorf("xds: unsupported message type: %T", resp)
	}

	// Note that the xDS transport protocol is versioned independently of
	// the resource types, and it is supported to transfer older versions
	// of resource types using new versions of the transport protocol, or
	// vice-versa. Hence we need to handle v3 type_urls as well here.
	var err error
	url := resp.GetTypeUrl()
	switch {
	case xdsresource.IsListenerResource(url):
		rType = xdsresource.ListenerResource
	case xdsresource.IsRouteConfigResource(url):
		rType = xdsresource.RouteConfigResource
	case xdsresource.IsClusterResource(url):
		rType = xdsresource.ClusterResource
	case xdsresource.IsEndpointsResource(url):
		rType = xdsresource.EndpointsResource
	default:
		return rType, nil, "", "", controllerversion.ErrResourceTypeUnsupported{
			ErrStr: fmt.Sprintf("Resource type %v unknown in response from server", resp.GetTypeUrl()),
		}
	}
	return rType, resp.GetResources(), resp.GetVersionInfo(), resp.GetNonce(), err
}
123 vendor/google.golang.org/grpc/xds/internal/xdsclient/controller/version/version.go (generated, vendored, new file)
@@ -0,0 +1,123 @@
/*
 *
 * Copyright 2021 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package version defines APIs to deal with different versions of xDS.
package version

import (
	"context"
	"time"

	"github.com/golang/protobuf/proto"
	"google.golang.org/grpc"
	"google.golang.org/grpc/internal/grpclog"
	"google.golang.org/grpc/xds/internal/xdsclient/load"
	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource"
	"google.golang.org/grpc/xds/internal/xdsclient/xdsresource/version"
	"google.golang.org/protobuf/types/known/anypb"
)

var (
	m = make(map[version.TransportAPI]func(opts BuildOptions) (VersionedClient, error))
)

// RegisterAPIClientBuilder registers a client builder for xDS transport protocol
// version specified by b.Version().
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple builders are
// registered for the same version, the one registered last will take effect.
func RegisterAPIClientBuilder(v version.TransportAPI, f func(opts BuildOptions) (VersionedClient, error)) {
	m[v] = f
}

// GetAPIClientBuilder returns the client builder registered for the provided
// xDS transport API version.
func GetAPIClientBuilder(version version.TransportAPI) func(opts BuildOptions) (VersionedClient, error) {
	if f, ok := m[version]; ok {
		return f
	}
	return nil
}

// BuildOptions contains options to be passed to client builders.
type BuildOptions struct {
	// NodeProto contains the Node proto to be used in xDS requests. The actual
	// type depends on the transport protocol version used.
	NodeProto proto.Message
	// // Backoff returns the amount of time to backoff before retrying broken
	// // streams.
	// Backoff func(int) time.Duration
	// Logger provides enhanced logging capabilities.
	Logger *grpclog.PrefixLogger
}

// LoadReportingOptions contains configuration knobs for reporting load data.
type LoadReportingOptions struct {
	LoadStore *load.Store
}

// ErrResourceTypeUnsupported is an error used to indicate an unsupported xDS
// resource type. The wrapped ErrStr contains the details.
type ErrResourceTypeUnsupported struct {
	ErrStr string
}

// Error helps implement the error interface.
func (e ErrResourceTypeUnsupported) Error() string {
	return e.ErrStr
}

// VersionedClient is the interface to version specific operations of the
// client.
//
// It mainly deals with the type assertion from proto.Message to the real v2/v3
// types, and grpc.Stream to the versioned stream types.
type VersionedClient interface {
	// NewStream returns a new xDS client stream specific to the underlying
	// transport protocol version.
	NewStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error)
	// SendRequest constructs and sends out a DiscoveryRequest message specific
	// to the underlying transport protocol version.
	SendRequest(s grpc.ClientStream, resourceNames []string, rType xdsresource.ResourceType, version, nonce, errMsg string) error
	// RecvResponse uses the provided stream to receive a response specific to
	// the underlying transport protocol version.
	RecvResponse(s grpc.ClientStream) (proto.Message, error)
	// ParseResponse type asserts message to the versioned response, and
	// retrieves the fields.
	ParseResponse(r proto.Message) (xdsresource.ResourceType, []*anypb.Any, string, string, error)

	// The following are LRS methods.

	// NewLoadStatsStream returns a new LRS client stream specific to the
	// underlying transport protocol version.
	NewLoadStatsStream(ctx context.Context, cc *grpc.ClientConn) (grpc.ClientStream, error)
	// SendFirstLoadStatsRequest constructs and sends the first request on the
	// LRS stream.
	SendFirstLoadStatsRequest(s grpc.ClientStream) error
	// HandleLoadStatsResponse receives the first response from the server which
	// contains the load reporting interval and the clusters for which the
	// server asks the client to report load for.
	//
	// If the response sets SendAllClusters to true, the returned clusters is
	// nil.
	HandleLoadStatsResponse(s grpc.ClientStream) (clusters []string, _ time.Duration, _ error)
	// SendLoadStatsRequest will be invoked at regular intervals to send load
	// report with load data reported since the last time this method was
	// invoked.
	SendLoadStatsRequest(s grpc.ClientStream, loads []*load.Data) error
}
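RegisterAPIClientBuilder and GetAPIClientBuilder above form a small init-time registry keyed by transport version: the v2 and v3 client packages register their constructors in init(), and the Controller constructor looks the builder up by config.TransportAPI. A stripped-down, self-contained sketch of that registry pattern follows; every name in it is an illustrative stand-in, not the vendored API.

package main

import (
	"errors"
	"fmt"
)

// A minimal builder registry in the style of the version package above.
type transportAPI int

const (
	transportV2 transportAPI = iota + 1
	transportV3
)

type versionedClient interface{ Version() string }

type buildFn func() (versionedClient, error)

var builders = make(map[transportAPI]buildFn)

// registerBuilder is meant to be called only from init() functions, mirroring
// RegisterAPIClientBuilder's contract.
func registerBuilder(v transportAPI, f buildFn) { builders[v] = f }

func getBuilder(v transportAPI) buildFn { return builders[v] }

type v3Client struct{}

func (v3Client) Version() string { return "v3" }

func init() {
	// Mirrors what the v2/v3 client packages do in their init() functions.
	registerBuilder(transportV3, func() (versionedClient, error) { return v3Client{}, nil })
}

func main() {
	b := getBuilder(transportV3)
	if b == nil {
		fmt.Println(errors.New("no client builder for this xDS API version"))
		return
	}
	c, err := b()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("built client for transport", c.Version())
}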