Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Aliaksandr Valialkin 2021-09-20 14:55:21 +03:00
commit c68663deee
69 changed files with 5046 additions and 1723 deletions

View file

@ -35,9 +35,9 @@ var (
// InsertHandlerForReader processes remote write for influx line protocol.
//
// See https://github.com/influxdata/telegraf/tree/master/plugins/inputs/socket_listener/
func InsertHandlerForReader(r io.Reader) error {
func InsertHandlerForReader(r io.Reader, isGzipped bool) error {
return writeconcurrencylimiter.Do(func() error {
return parser.ParseStream(r, false, "", "", func(db string, rows []parser.Row) error {
return parser.ParseStream(r, isGzipped, "", "", func(db string, rows []parser.Row) error {
return insertRows(nil, db, rows, nil)
})
})

View file

@ -3,6 +3,7 @@ package main
import (
"flag"
"fmt"
"io"
"net/http"
"os"
"strings"
@ -93,7 +94,9 @@ func main() {
common.StartUnmarshalWorkers()
writeconcurrencylimiter.Init()
if len(*influxListenAddr) > 0 {
influxServer = influxserver.MustStart(*influxListenAddr, influx.InsertHandlerForReader)
influxServer = influxserver.MustStart(*influxListenAddr, func(r io.Reader) error {
return influx.InsertHandlerForReader(r, false)
})
}
if len(*graphiteListenAddr) > 0 {
graphiteServer = graphiteserver.MustStart(*graphiteListenAddr, graphite.InsertHandler)

View file

@ -1,6 +1,7 @@
package prometheusimport
import (
"io"
"net/http"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common"
@ -38,6 +39,15 @@ func InsertHandler(at *auth.Token, req *http.Request) error {
})
}
// InsertHandlerForReader processes metrics from the given reader with optional gzip encoding
func InsertHandlerForReader(r io.Reader, isGzipped bool) error {
return writeconcurrencylimiter.Do(func() error {
return parser.ParseStream(r, 0, isGzipped, func(rows []parser.Row) error {
return insertRows(nil, rows, nil)
}, nil)
})
}
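The new reader-based handler above is what lets callers push Prometheus text-format data that does not arrive over HTTP. A minimal sketch of such a caller is shown below; it assumes the package lives at `github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/prometheusimport` and that vmagent's ingestion machinery (`common.StartUnmarshalWorkers()`, `writeconcurrencylimiter.Init()` and remote write setup) has already been initialized as in `main()`. The file name is illustrative.

```go
package main

import (
	"log"
	"os"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/prometheusimport"
)

func main() {
	// Open a gzip-compressed file with Prometheus text-format metrics.
	// The file name is illustrative.
	f, err := os.Open("metrics.prom.gz")
	if err != nil {
		log.Fatalf("cannot open file: %s", err)
	}
	defer f.Close()
	// isGzipped=true tells ParseStream to wrap the reader with a gzip reader,
	// so the caller doesn't need to decompress the data itself.
	if err := prometheusimport.InsertHandlerForReader(f, true); err != nil {
		log.Fatalf("cannot ingest metrics: %s", err)
	}
}
```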
func insertRows(at *auth.Token, rows []parser.Row, extraLabels []prompbmarshal.Label) error {
ctx := common.GetPushCtx()
defer common.PutPushCtx(ctx)

View file

@ -1,6 +1,7 @@
package promremotewrite
import (
"io"
"net/http"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common"
@ -29,12 +30,21 @@ func InsertHandler(at *auth.Token, req *http.Request) error {
return err
}
return writeconcurrencylimiter.Do(func() error {
return parser.ParseStream(req, func(tss []prompb.TimeSeries) error {
return parser.ParseStream(req.Body, func(tss []prompb.TimeSeries) error {
return insertRows(at, tss, extraLabels)
})
})
}
// InsertHandlerForReader processes metrics from the given reader
func InsertHandlerForReader(r io.Reader) error {
return writeconcurrencylimiter.Do(func() error {
return parser.ParseStream(r, func(tss []prompb.TimeSeries) error {
return insertRows(nil, tss, nil)
})
})
}
func insertRows(at *auth.Token, timeseries []prompb.TimeSeries, extraLabels []prompbmarshal.Label) error {
ctx := common.GetPushCtx()
defer common.PutPushCtx(ctx)

View file

@ -1,6 +1,7 @@
package vmimport
import (
"io"
"net/http"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common"
@ -31,12 +32,22 @@ func InsertHandler(at *auth.Token, req *http.Request) error {
return err
}
return writeconcurrencylimiter.Do(func() error {
return parser.ParseStream(req, func(rows []parser.Row) error {
isGzipped := req.Header.Get("Content-Encoding") == "gzip"
return parser.ParseStream(req.Body, isGzipped, func(rows []parser.Row) error {
return insertRows(at, rows, extraLabels)
})
})
}
// InsertHandlerForReader processes metrics from the given reader
func InsertHandlerForReader(r io.Reader, isGzipped bool) error {
return writeconcurrencylimiter.Do(func() error {
return parser.ParseStream(r, isGzipped, func(rows []parser.Row) error {
return insertRows(nil, rows, nil)
})
})
}
func insertRows(at *auth.Token, rows []parser.Row, extraLabels []prompbmarshal.Label) error {
ctx := common.GetPushCtx()
defer common.PutPushCtx(ctx)

View file

@ -6,6 +6,7 @@ import (
"fmt"
"io/ioutil"
"net/http"
"path"
"strings"
"sync"
"time"
@ -92,6 +93,10 @@ func NewClient(ctx context.Context, cfg Config) (*Client, error) {
if cfg.Transport == nil {
cfg.Transport = http.DefaultTransport.(*http.Transport).Clone()
}
cc := defaultConcurrency
if cfg.Concurrency > 0 {
cc = cfg.Concurrency
}
c := &Client{
c: &http.Client{
Timeout: cfg.WriteTimeout,
@ -106,10 +111,7 @@ func NewClient(ctx context.Context, cfg Config) (*Client, error) {
input: make(chan prompbmarshal.TimeSeries, cfg.MaxQueueSize),
disablePathAppend: cfg.DisablePathAppend,
}
cc := defaultConcurrency
if cfg.Concurrency > 0 {
cc = cfg.Concurrency
}
for i := 0; i < cc; i++ {
c.run(ctx)
}
@ -182,10 +184,11 @@ func (c *Client) run(ctx context.Context) {
}
var (
sentRows = metrics.NewCounter(`vmalert_remotewrite_sent_rows_total`)
sentBytes = metrics.NewCounter(`vmalert_remotewrite_sent_bytes_total`)
droppedRows = metrics.NewCounter(`vmalert_remotewrite_dropped_rows_total`)
droppedBytes = metrics.NewCounter(`vmalert_remotewrite_dropped_bytes_total`)
sentRows = metrics.NewCounter(`vmalert_remotewrite_sent_rows_total`)
sentBytes = metrics.NewCounter(`vmalert_remotewrite_sent_bytes_total`)
droppedRows = metrics.NewCounter(`vmalert_remotewrite_dropped_rows_total`)
droppedBytes = metrics.NewCounter(`vmalert_remotewrite_dropped_bytes_total`)
bufferFlushDuration = metrics.NewHistogram(`vmalert_remotewrite_flush_duration_seconds`)
)
// flush is a blocking function that marshals WriteRequest and sends
@ -196,6 +199,7 @@ func (c *Client) flush(ctx context.Context, wr *prompbmarshal.WriteRequest) {
return
}
defer prompbmarshal.ResetWriteRequest(wr)
defer bufferFlushDuration.UpdateDuration(time.Now())
data, err := wr.Marshal()
if err != nil {
@ -237,7 +241,7 @@ func (c *Client) send(ctx context.Context, data []byte) error {
}
}
if !c.disablePathAppend {
req.URL.Path += writePath
req.URL.Path = path.Join(req.URL.Path, writePath)
}
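The switch to path.Join above avoids malformed URLs when the configured remote write address already carries a path. A standalone sketch of the difference, assuming writePath is the client's "/api/v1/write" suffix:

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

func main() {
	// Compare naive concatenation with path.Join when appending the
	// remote write suffix to user-supplied base URLs.
	const writePath = "/api/v1/write" // assumed suffix
	for _, base := range []string{
		"http://vm:8428",
		"http://vm:8428/",
		"http://vm:8428/prefix/",
	} {
		u, err := url.Parse(base)
		if err != nil {
			panic(err)
		}
		concat := u.Path + writePath           // may yield "//api/v1/write" or "/prefix//api/v1/write"
		joined := path.Join(u.Path, writePath) // always a single clean path
		fmt.Printf("base=%q concat=%q joined=%q\n", base, concat, joined)
	}
}
```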
resp, err := c.c.Do(req.WithContext(ctx))
if err != nil {

View file

@ -25,7 +25,7 @@ func InsertHandler(req *http.Request) error {
return err
}
return writeconcurrencylimiter.Do(func() error {
return parser.ParseStream(req, func(tss []prompb.TimeSeries) error {
return parser.ParseStream(req.Body, func(tss []prompb.TimeSeries) error {
return insertRows(tss, extraLabels)
})
})

View file

@ -30,7 +30,8 @@ func InsertHandler(req *http.Request) error {
return err
}
return writeconcurrencylimiter.Do(func() error {
return parser.ParseStream(req, func(rows []parser.Row) error {
isGzipped := req.Header.Get("Content-Encoding") == "gzip"
return parser.ParseStream(req.Body, isGzipped, func(rows []parser.Row) error {
return insertRows(rows, extraLabels)
})
})
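Since the handler above now detects compression via the Content-Encoding header, clients may gzip the /api/v1/import payload themselves. A hedged client-side sketch (the endpoint URL and sample line are illustrative):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"log"
	"net/http"
)

func main() {
	// Build a gzip-compressed payload in the JSON line format accepted by /api/v1/import.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	line := `{"metric":{"__name__":"foo","job":"test"},"values":[1.23],"timestamps":[1632000000000]}` + "\n"
	if _, err := zw.Write([]byte(line)); err != nil {
		log.Fatalf("cannot write gzipped data: %s", err)
	}
	if err := zw.Close(); err != nil {
		log.Fatalf("cannot finalize gzip stream: %s", err)
	}
	// Content-Encoding: gzip is what the server-side handler checks for.
	req, err := http.NewRequest(http.MethodPost, "http://localhost:8428/api/v1/import", &buf)
	if err != nil {
		log.Fatalf("cannot create request: %s", err)
	}
	req.Header.Set("Content-Encoding", "gzip")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatalf("request failed: %s", err)
	}
	defer resp.Body.Close()
	log.Printf("import status: %s", resp.Status)
}
```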

View file

@ -198,7 +198,12 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
}
return true
}
if strings.HasPrefix(path, "/functions") {
graphiteFunctionsRequests.Inc()
w.Header().Set("Content-Type", "application/json; charset=utf-8")
fmt.Fprintf(w, "%s", `{}`)
return true
}
switch path {
case "/api/v1/query":
queryRequests.Inc()
@ -544,6 +549,8 @@ var (
graphiteTagsDelSeriesRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags/delSeries"}`)
graphiteTagsDelSeriesErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags/delSeries"}`)
graphiteFunctionsRequests = metrics.NewCounter(`vm_http_requests_total{path="/functions"}`)
rulesRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/rules"}`)
alertsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/alerts"}`)
metadataRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/metadata"}`)

View file

@ -98,7 +98,7 @@ func FederateHandler(startTime time.Time, w http.ResponseWriter, r *http.Request
return err
})
if err != nil {
return fmt.Errorf("error during data fetching: %w", err)
return fmt.Errorf("error during sending data to remote client: %w", err)
}
if err := bw.Flush(); err != nil {
return err
@ -175,7 +175,7 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
}
err = <-doneCh
if err != nil {
return fmt.Errorf("error during exporting data to csv: %w", err)
return fmt.Errorf("error during sending the exported csv data to remote client: %w", err)
}
return nil
}
@ -244,10 +244,10 @@ func ExportNativeHandler(startTime time.Time, w http.ResponseWriter, r *http.Req
return err
})
if err != nil {
return err
return fmt.Errorf("error during sending native data to remote client: %w", err)
}
if err := bw.Flush(); err != nil {
return err
return fmt.Errorf("error during flushing native data to remote client: %w", err)
}
return nil
}
@ -414,7 +414,7 @@ func exportHandler(w http.ResponseWriter, matches []string, etf []storage.TagFil
}
err = <-doneCh
if err != nil {
return fmt.Errorf("error during data fetching: %w", err)
return fmt.Errorf("error during sending the data to remote client: %w", err)
}
return nil
}
@ -538,7 +538,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
defer bufferedwriter.Put(bw)
WriteLabelValuesResponse(bw, labelValues)
if err := bw.Flush(); err != nil {
return err
return fmt.Errorf("cannot flush label values to remote client: %w", err)
}
return nil
}
@ -600,7 +600,7 @@ func labelValuesWithMatches(labelName string, matches []string, etf []storage.Ta
return nil
})
if err != nil {
return nil, fmt.Errorf("error when data fetching: %w", err)
return nil, fmt.Errorf("cannot fetch label values from storage: %w", err)
}
}
labelValues := make([]string, 0, len(m))
@ -627,7 +627,7 @@ func LabelsCountHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ
defer bufferedwriter.Put(bw)
WriteLabelsCountResponse(bw, labelEntries)
if err := bw.Flush(); err != nil {
return err
return fmt.Errorf("cannot send labels count response to remote client: %w", err)
}
return nil
}
@ -695,7 +695,7 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
defer bufferedwriter.Put(bw)
WriteTSDBStatusResponse(bw, status)
if err := bw.Flush(); err != nil {
return err
return fmt.Errorf("cannot send tsdb status response to remote client: %w", err)
}
return nil
}
@ -789,7 +789,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
defer bufferedwriter.Put(bw)
WriteLabelsResponse(bw, labels)
if err := bw.Flush(); err != nil {
return err
return fmt.Errorf("cannot send labels response to remote client: %w", err)
}
return nil
}
@ -838,7 +838,7 @@ func labelsWithMatches(matches []string, etf []storage.TagFilter, start, end int
return nil
})
if err != nil {
return nil, fmt.Errorf("error when data fetching: %w", err)
return nil, fmt.Errorf("cannot fetch labels from storage: %w", err)
}
}
labels := make([]string, 0, len(m))
@ -865,7 +865,7 @@ func SeriesCountHandler(startTime time.Time, w http.ResponseWriter, r *http.Requ
defer bufferedwriter.Put(bw)
WriteSeriesCountResponse(bw, n)
if err := bw.Flush(); err != nil {
return err
return fmt.Errorf("cannot send series count response to remote client: %w", err)
}
return nil
}
@ -957,11 +957,11 @@ func SeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
// WriteSeriesResponse must consume all the data from resultsCh.
WriteSeriesResponse(bw, resultsCh)
if err := bw.Flush(); err != nil {
return err
return fmt.Errorf("cannot flush series response to remote client: %w", err)
}
err = <-doneCh
if err != nil {
return fmt.Errorf("error during data fetching: %w", err)
return fmt.Errorf("cannot send series response to remote client: %w", err)
}
return nil
}
@ -1075,7 +1075,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
defer bufferedwriter.Put(bw)
WriteQueryResponse(bw, result)
if err := bw.Flush(); err != nil {
return err
return fmt.Errorf("cannot flush query response to remote client: %w", err)
}
return nil
}
@ -1168,7 +1168,7 @@ func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string,
defer bufferedwriter.Put(bw)
WriteQueryRangeResponse(bw, result)
if err := bw.Flush(); err != nil {
return err
return fmt.Errorf("cannot send query range response to remote client: %w", err)
}
return nil
}
@ -1348,7 +1348,7 @@ func QueryStatsHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
defer bufferedwriter.Put(bw)
querystats.WriteJSONQueryStats(bw, topN, maxLifetime)
if err := bw.Flush(); err != nil {
return err
return fmt.Errorf("cannot send query stats response to client: %w", err)
}
return nil
}

View file

@ -45,6 +45,8 @@ var aggrFuncs = map[string]aggrFunc{
"bottomk_avg": newAggrFuncRangeTopK(avgValue, true),
"bottomk_median": newAggrFuncRangeTopK(medianValue, true),
"any": aggrFuncAny,
"mad": newAggrFunc(aggrFuncMAD),
"outliers_mad": aggrFuncOutliersMAD,
"outliersk": aggrFuncOutliersK,
"mode": newAggrFunc(aggrFuncMode),
"zscore": aggrFuncZScore,
@ -485,7 +487,6 @@ func aggrFuncZScore(afa *aggrFuncArg) ([]*timeseries, error) {
ts.Values[i] = (v - avg) / stddev
}
}
// Remove MetricGroup from all the tss.
for _, ts := range tss {
ts.MetricName.ResetMetricGroup()
@ -811,6 +812,52 @@ func medianValue(values []float64) float64 {
return value
}
func aggrFuncMAD(tss []*timeseries) []*timeseries {
// Calculate medians for each point across tss.
medians := getPerPointMedians(tss)
// Calculate MAD values for each point across tss.
// See https://en.wikipedia.org/wiki/Median_absolute_deviation
mads := getPerPointMADs(tss, medians)
tss[0].Values = append(tss[0].Values[:0], mads...)
return tss[:1]
}
func aggrFuncOutliersMAD(afa *aggrFuncArg) ([]*timeseries, error) {
args := afa.args
if err := expectTransformArgsNum(args, 2); err != nil {
return nil, err
}
tolerances, err := getScalar(args[0], 0)
if err != nil {
return nil, err
}
afe := func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
// Calculate medians for each point across tss.
medians := getPerPointMedians(tss)
// Calculate MAD values multiplied by tolerance for each point across tss.
// See https://en.wikipedia.org/wiki/Median_absolute_deviation
mads := getPerPointMADs(tss, medians)
for n := range mads {
mads[n] *= tolerances[n]
}
// Leave only time series with at least a single peak above the MAD multiplied by tolerance.
tssDst := tss[:0]
for _, ts := range tss {
values := ts.Values
for n, v := range values {
ad := math.Abs(v - medians[n])
mad := mads[n]
if ad > mad {
tssDst = append(tssDst, ts)
break
}
}
}
return tssDst
}
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, afa.ae.Limit, true)
}
func aggrFuncOutliersK(afa *aggrFuncArg) ([]*timeseries, error) {
args := afa.args
if err := expectTransformArgsNum(args, 2); err != nil {
@ -822,20 +869,7 @@ func aggrFuncOutliersK(afa *aggrFuncArg) ([]*timeseries, error) {
}
afe := func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
// Calculate medians for each point across tss.
medians := make([]float64, len(ks))
h := histogram.GetFast()
for n := range ks {
h.Reset()
for j := range tss {
v := tss[j].Values[n]
if !math.IsNaN(v) {
h.Update(v)
}
}
medians[n] = h.Quantile(0.5)
}
histogram.PutFast(h)
medians := getPerPointMedians(tss)
// Return topK time series with the highest variance from median.
f := func(values []float64) float64 {
sum2 := float64(0)
@ -850,6 +884,44 @@ func aggrFuncOutliersK(afa *aggrFuncArg) ([]*timeseries, error) {
return aggrFuncExt(afe, args[1], &afa.ae.Modifier, afa.ae.Limit, true)
}
func getPerPointMedians(tss []*timeseries) []float64 {
if len(tss) == 0 {
logger.Panicf("BUG: expecting non-empty tss")
}
medians := make([]float64, len(tss[0].Values))
h := histogram.GetFast()
for n := range medians {
h.Reset()
for j := range tss {
v := tss[j].Values[n]
if !math.IsNaN(v) {
h.Update(v)
}
}
medians[n] = h.Quantile(0.5)
}
histogram.PutFast(h)
return medians
}
func getPerPointMADs(tss []*timeseries, medians []float64) []float64 {
mads := make([]float64, len(medians))
h := histogram.GetFast()
for n, median := range medians {
h.Reset()
for j := range tss {
v := tss[j].Values[n]
if !math.IsNaN(v) {
ad := math.Abs(v - median)
h.Update(ad)
}
}
mads[n] = h.Quantile(0.5)
}
histogram.PutFast(h)
return mads
}
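The two helpers above reduce mad() and outliers_mad() to "median of absolute deviations from the per-point median". The production code estimates medians with the pooled streaming histogram, so its results are approximate; the sketch below uses exact sorting purely to illustrate the arithmetic, and its numbers line up with the first point of the mad() test added later in this commit (series values 1000, 1500 and 900 give a MAD of 100).

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

// medianExact returns the exact median of values (the helpers above use an
// approximate streaming histogram instead).
func medianExact(values []float64) float64 {
	vs := append([]float64{}, values...)
	sort.Float64s(vs)
	n := len(vs)
	if n%2 == 1 {
		return vs[n/2]
	}
	return (vs[n/2-1] + vs[n/2]) / 2
}

// madExact returns the median absolute deviation: median(|v - median(values)|).
func madExact(values []float64) float64 {
	m := medianExact(values)
	ads := make([]float64, len(values))
	for i, v := range values {
		ads[i] = math.Abs(v - m)
	}
	return medianExact(ads)
}

func main() {
	// One point across three series, e.g. time()=1000, time()*1.5=1500, time()*0.9=900.
	values := []float64{1000, 1500, 900}
	fmt.Println(medianExact(values)) // 1000
	fmt.Println(madExact(values))    // 100
}
```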
func aggrFuncLimitK(afa *aggrFuncArg) ([]*timeseries, error) {
args := afa.args
if err := expectTransformArgsNum(args, 2); err != nil {
@ -894,30 +966,46 @@ func aggrFuncQuantiles(afa *aggrFuncArg) ([]*timeseries, error) {
return nil, fmt.Errorf("cannot obtain dstLabel: %w", err)
}
phiArgs := args[1 : len(args)-1]
argOrig := args[len(args)-1]
var rvs []*timeseries
phis := make([]float64, len(phiArgs))
for i, phiArg := range phiArgs {
phis, err := getScalar(phiArg, i+1)
phisLocal, err := getScalar(phiArg, i+1)
if err != nil {
return nil, err
}
if len(phis) == 0 {
logger.Panicf("BUG: expecting at least a single sample")
}
phi := phis[0]
afe := newAggrQuantileFunc(phis)
arg := copyTimeseries(argOrig)
tss, err := aggrFuncExt(afe, arg, &afa.ae.Modifier, afa.ae.Limit, false)
if err != nil {
return nil, fmt.Errorf("cannot calculate quantile %g: %w", phi, err)
}
for _, ts := range tss {
ts.MetricName.RemoveTag(dstLabel)
ts.MetricName.AddTag(dstLabel, fmt.Sprintf("%g", phi))
}
rvs = append(rvs, tss...)
phis[i] = phisLocal[0]
}
return rvs, nil
argOrig := args[len(args)-1]
afe := func(tss []*timeseries, modifier *metricsql.ModifierExpr) []*timeseries {
tssDst := make([]*timeseries, len(phiArgs))
for j := range tssDst {
ts := &timeseries{}
ts.CopyFromShallowTimestamps(tss[0])
ts.MetricName.RemoveTag(dstLabel)
ts.MetricName.AddTag(dstLabel, fmt.Sprintf("%g", phis[j]))
tssDst[j] = ts
}
h := histogram.GetFast()
defer histogram.PutFast(h)
var qs []float64
for n := range tss[0].Values {
h.Reset()
for j := range tss {
v := tss[j].Values[n]
if !math.IsNaN(v) {
h.Update(v)
}
}
qs = h.Quantiles(qs[:0], phis)
for j := range tssDst {
tssDst[j].Values[n] = qs[j]
}
}
return tssDst
}
return aggrFuncExt(afe, argOrig, &afa.ae.Modifier, afa.ae.Limit, false)
}
func aggrFuncQuantile(afa *aggrFuncArg) ([]*timeseries, error) {

View file

@ -4,6 +4,7 @@ import (
"flag"
"fmt"
"math"
"strings"
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
@ -390,7 +391,7 @@ func tryGetArgRollupFuncWithMetricExpr(ae *metricsql.AggrFuncExpr) (*metricsql.F
if nrf == nil {
return nil, nil
}
rollupArgIdx := getRollupArgIdx(fe.Name)
rollupArgIdx := getRollupArgIdx(fe)
if rollupArgIdx >= len(fe.Args) {
// Incorrect number of args for rollup func.
return nil, nil
@ -430,7 +431,7 @@ func evalExprs(ec *EvalConfig, es []metricsql.Expr) ([][]*timeseries, error) {
func evalRollupFuncArgs(ec *EvalConfig, fe *metricsql.FuncExpr) ([]interface{}, *metricsql.RollupExpr, error) {
var re *metricsql.RollupExpr
rollupArgIdx := getRollupArgIdx(fe.Name)
rollupArgIdx := getRollupArgIdx(fe)
if len(fe.Args) <= rollupArgIdx {
return nil, nil, fmt.Errorf("expecting at least %d args to %q; got %d args; expr: %q", rollupArgIdx+1, fe.Name, len(fe.Args), fe.AppendString(nil))
}
@ -478,7 +479,8 @@ func getRollupExprArg(arg metricsql.Expr) *metricsql.RollupExpr {
return &reNew
}
func evalRollupFunc(ec *EvalConfig, name string, rf rollupFunc, expr metricsql.Expr, re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
func evalRollupFunc(ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr, re *metricsql.RollupExpr, iafc *incrementalAggrFuncContext) ([]*timeseries, error) {
funcName = strings.ToLower(funcName)
ecNew := ec
var offset int64
if re.Offset != nil {
@ -491,7 +493,7 @@ func evalRollupFunc(ec *EvalConfig, name string, rf rollupFunc, expr metricsql.E
// so cache hit rate should be quite good.
// See also https://github.com/VictoriaMetrics/VictoriaMetrics/issues/976
}
if name == "rollup_candlestick" {
if funcName == "rollup_candlestick" {
// Automatically apply `offset -step` to `rollup_candlestick` function
// in order to obtain expected OHLC results.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/309#issuecomment-582113462
@ -504,12 +506,12 @@ func evalRollupFunc(ec *EvalConfig, name string, rf rollupFunc, expr metricsql.E
var rvs []*timeseries
var err error
if me, ok := re.Expr.(*metricsql.MetricExpr); ok {
rvs, err = evalRollupFuncWithMetricExpr(ecNew, name, rf, expr, me, iafc, re.Window)
rvs, err = evalRollupFuncWithMetricExpr(ecNew, funcName, rf, expr, me, iafc, re.Window)
} else {
if iafc != nil {
logger.Panicf("BUG: iafc must be nil for rollup %q over subquery %q", name, re.AppendString(nil))
logger.Panicf("BUG: iafc must be nil for rollup %q over subquery %q", funcName, re.AppendString(nil))
}
rvs, err = evalRollupFuncWithSubquery(ecNew, name, rf, expr, re)
rvs, err = evalRollupFuncWithSubquery(ecNew, funcName, rf, expr, re)
}
if err != nil {
return nil, err
@ -528,7 +530,7 @@ func evalRollupFunc(ec *EvalConfig, name string, rf rollupFunc, expr metricsql.E
return rvs, nil
}
func evalRollupFuncWithSubquery(ec *EvalConfig, name string, rf rollupFunc, expr metricsql.Expr, re *metricsql.RollupExpr) ([]*timeseries, error) {
func evalRollupFuncWithSubquery(ec *EvalConfig, funcName string, rf rollupFunc, expr metricsql.Expr, re *metricsql.RollupExpr) ([]*timeseries, error) {
// TODO: determine whether to use rollupResultCacheV here.
step := re.Step.Duration(ec.Step)
if step == 0 {
@ -550,25 +552,24 @@ func evalRollupFuncWithSubquery(ec *EvalConfig, name string, rf rollupFunc, expr
return nil, err
}
if len(tssSQ) == 0 {
if name == "absent_over_time" {
if funcName == "absent_over_time" {
tss := evalNumber(ec, 1)
return tss, nil
}
return nil, nil
}
sharedTimestamps := getTimestamps(ec.Start, ec.End, ec.Step)
preFunc, rcs, err := getRollupConfigs(name, rf, expr, ec.Start, ec.End, ec.Step, window, ec.LookbackDelta, sharedTimestamps)
preFunc, rcs, err := getRollupConfigs(funcName, rf, expr, ec.Start, ec.End, ec.Step, window, ec.LookbackDelta, sharedTimestamps)
if err != nil {
return nil, err
}
tss := make([]*timeseries, 0, len(tssSQ)*len(rcs))
var tssLock sync.Mutex
removeMetricGroup := !rollupFuncsKeepMetricGroup[name]
doParallel(tssSQ, func(tsSQ *timeseries, values []float64, timestamps []int64) ([]float64, []int64) {
values, timestamps = removeNanValues(values[:0], timestamps[:0], tsSQ.Values, tsSQ.Timestamps)
preFunc(values, timestamps)
for _, rc := range rcs {
if tsm := newTimeseriesMap(name, sharedTimestamps, &tsSQ.MetricName); tsm != nil {
if tsm := newTimeseriesMap(funcName, sharedTimestamps, &tsSQ.MetricName); tsm != nil {
rc.DoTimeseriesMap(tsm, values, timestamps)
tssLock.Lock()
tss = tsm.AppendTimeseriesTo(tss)
@ -576,7 +577,7 @@ func evalRollupFuncWithSubquery(ec *EvalConfig, name string, rf rollupFunc, expr
continue
}
var ts timeseries
doRollupForTimeseries(rc, &ts, &tsSQ.MetricName, values, timestamps, sharedTimestamps, removeMetricGroup)
doRollupForTimeseries(funcName, rc, &ts, &tsSQ.MetricName, values, timestamps, sharedTimestamps)
tssLock.Lock()
tss = append(tss, &ts)
tssLock.Unlock()
@ -642,7 +643,7 @@ var (
rollupResultCacheMiss = metrics.NewCounter(`vm_rollup_result_cache_miss_total`)
)
func evalRollupFuncWithMetricExpr(ec *EvalConfig, name string, rf rollupFunc,
func evalRollupFuncWithMetricExpr(ec *EvalConfig, funcName string, rf rollupFunc,
expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, windowExpr *metricsql.DurationExpr) ([]*timeseries, error) {
if me.IsEmpty() {
return evalNumber(ec, nan), nil
@ -665,7 +666,7 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, name string, rf rollupFunc,
// Obtain rollup configs before fetching data from db,
// so type errors can be caught earlier.
sharedTimestamps := getTimestamps(start, ec.End, ec.Step)
preFunc, rcs, err := getRollupConfigs(name, rf, expr, start, ec.End, ec.Step, window, ec.LookbackDelta, sharedTimestamps)
preFunc, rcs, err := getRollupConfigs(funcName, rf, expr, start, ec.End, ec.Step, window, ec.LookbackDelta, sharedTimestamps)
if err != nil {
return nil, err
}
@ -689,7 +690,7 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, name string, rf rollupFunc,
if rssLen == 0 {
rss.Cancel()
var tss []*timeseries
if name == "absent_over_time" {
if funcName == "absent_over_time" {
tss = getAbsentTimeseries(ec, me)
}
// Add missing points until ec.End.
@ -735,12 +736,11 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, name string, rf rollupFunc,
defer rml.Put(uint64(rollupMemorySize))
// Evaluate rollup
removeMetricGroup := !rollupFuncsKeepMetricGroup[name]
var tss []*timeseries
if iafc != nil {
tss, err = evalRollupWithIncrementalAggregate(name, iafc, rss, rcs, preFunc, sharedTimestamps, removeMetricGroup)
tss, err = evalRollupWithIncrementalAggregate(funcName, iafc, rss, rcs, preFunc, sharedTimestamps)
} else {
tss, err = evalRollupNoIncrementalAggregate(name, rss, rcs, preFunc, sharedTimestamps, removeMetricGroup)
tss, err = evalRollupNoIncrementalAggregate(funcName, rss, rcs, preFunc, sharedTimestamps)
}
if err != nil {
return nil, err
@ -762,15 +762,15 @@ func getRollupMemoryLimiter() *memoryLimiter {
return &rollupMemoryLimiter
}
func evalRollupWithIncrementalAggregate(name string, iafc *incrementalAggrFuncContext, rss *netstorage.Results, rcs []*rollupConfig,
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64, removeMetricGroup bool) ([]*timeseries, error) {
func evalRollupWithIncrementalAggregate(funcName string, iafc *incrementalAggrFuncContext, rss *netstorage.Results, rcs []*rollupConfig,
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64) ([]*timeseries, error) {
err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
rs.Values, rs.Timestamps = dropStaleNaNs(name, rs.Values, rs.Timestamps)
rs.Values, rs.Timestamps = dropStaleNaNs(funcName, rs.Values, rs.Timestamps)
preFunc(rs.Values, rs.Timestamps)
ts := getTimeseries()
defer putTimeseries(ts)
for _, rc := range rcs {
if tsm := newTimeseriesMap(name, sharedTimestamps, &rs.MetricName); tsm != nil {
if tsm := newTimeseriesMap(funcName, sharedTimestamps, &rs.MetricName); tsm != nil {
rc.DoTimeseriesMap(tsm, rs.Values, rs.Timestamps)
for _, ts := range tsm.m {
iafc.updateTimeseries(ts, workerID)
@ -778,7 +778,7 @@ func evalRollupWithIncrementalAggregate(name string, iafc *incrementalAggrFuncCo
continue
}
ts.Reset()
doRollupForTimeseries(rc, ts, &rs.MetricName, rs.Values, rs.Timestamps, sharedTimestamps, removeMetricGroup)
doRollupForTimeseries(funcName, rc, ts, &rs.MetricName, rs.Values, rs.Timestamps, sharedTimestamps)
iafc.updateTimeseries(ts, workerID)
// ts.Timestamps points to sharedTimestamps. Zero it, so it can be re-used.
@ -794,15 +794,15 @@ func evalRollupWithIncrementalAggregate(name string, iafc *incrementalAggrFuncCo
return tss, nil
}
func evalRollupNoIncrementalAggregate(name string, rss *netstorage.Results, rcs []*rollupConfig,
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64, removeMetricGroup bool) ([]*timeseries, error) {
func evalRollupNoIncrementalAggregate(funcName string, rss *netstorage.Results, rcs []*rollupConfig,
preFunc func(values []float64, timestamps []int64), sharedTimestamps []int64) ([]*timeseries, error) {
tss := make([]*timeseries, 0, rss.Len()*len(rcs))
var tssLock sync.Mutex
err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
rs.Values, rs.Timestamps = dropStaleNaNs(name, rs.Values, rs.Timestamps)
rs.Values, rs.Timestamps = dropStaleNaNs(funcName, rs.Values, rs.Timestamps)
preFunc(rs.Values, rs.Timestamps)
for _, rc := range rcs {
if tsm := newTimeseriesMap(name, sharedTimestamps, &rs.MetricName); tsm != nil {
if tsm := newTimeseriesMap(funcName, sharedTimestamps, &rs.MetricName); tsm != nil {
rc.DoTimeseriesMap(tsm, rs.Values, rs.Timestamps)
tssLock.Lock()
tss = tsm.AppendTimeseriesTo(tss)
@ -810,7 +810,7 @@ func evalRollupNoIncrementalAggregate(name string, rss *netstorage.Results, rcs
continue
}
var ts timeseries
doRollupForTimeseries(rc, &ts, &rs.MetricName, rs.Values, rs.Timestamps, sharedTimestamps, removeMetricGroup)
doRollupForTimeseries(funcName, rc, &ts, &rs.MetricName, rs.Values, rs.Timestamps, sharedTimestamps)
tssLock.Lock()
tss = append(tss, &ts)
tssLock.Unlock()
@ -823,13 +823,13 @@ func evalRollupNoIncrementalAggregate(name string, rss *netstorage.Results, rcs
return tss, nil
}
func doRollupForTimeseries(rc *rollupConfig, tsDst *timeseries, mnSrc *storage.MetricName, valuesSrc []float64, timestampsSrc []int64,
sharedTimestamps []int64, removeMetricGroup bool) {
func doRollupForTimeseries(funcName string, rc *rollupConfig, tsDst *timeseries, mnSrc *storage.MetricName, valuesSrc []float64, timestampsSrc []int64,
sharedTimestamps []int64) {
tsDst.MetricName.CopyFrom(mnSrc)
if len(rc.TagValue) > 0 {
tsDst.MetricName.AddTag("rollup", rc.TagValue)
}
if removeMetricGroup {
if !rollupFuncsKeepMetricGroup[funcName] {
tsDst.MetricName.ResetMetricGroup()
}
tsDst.Values = rc.Do(tsDst.Values[:0], valuesSrc, timestampsSrc)
@ -896,8 +896,8 @@ func toTagFilter(dst *storage.TagFilter, src *metricsql.LabelFilter) {
dst.IsNegative = src.IsNegative
}
func dropStaleNaNs(name string, values []float64, timestamps []int64) ([]float64, []int64) {
if *noStaleMarkers || name == "default_rollup" {
func dropStaleNaNs(funcName string, values []float64, timestamps []int64) ([]float64, []int64) {
if *noStaleMarkers || funcName == "default_rollup" {
// Do not drop Prometheus staleness marks (aka stale NaNs) for default_rollup() function,
// since it uses them for Prometheus-style staleness detection.
return values, timestamps

View file

@ -3428,6 +3428,46 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`histogram_quantiles()`, func(t *testing.T) {
t.Parallel()
q := `sort_by_label(histogram_quantiles("phi", 0.2, 0.3,
label_set(0, "foo", "bar", "le", "10")
or label_set(100, "foo", "bar", "le", "30")
or label_set(300, "foo", "bar", "le", "+Inf")
), "phi")`
r1 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{22, 22, 22, 22, 22, 22},
Timestamps: timestampsExpected,
}
r1.MetricName.Tags = []storage.Tag{
{
Key: []byte("foo"),
Value: []byte("bar"),
},
{
Key: []byte("phi"),
Value: []byte("0.2"),
},
}
r2 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{28, 28, 28, 28, 28, 28},
Timestamps: timestampsExpected,
}
r2.MetricName.Tags = []storage.Tag{
{
Key: []byte("foo"),
Value: []byte("bar"),
},
{
Key: []byte("phi"),
Value: []byte("0.3"),
},
}
resultExpected := []netstorage.Result{r1, r2}
f(q, resultExpected)
})
t.Run(`histogram_share(normal-bucket-count)`, func(t *testing.T) {
t.Parallel()
q := `histogram_share(35,
@ -4575,6 +4615,67 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`quantile_over_time`, func(t *testing.T) {
t.Parallel()
q := `quantile_over_time(0.9, label_set(round(rand(0), 0.01), "__name__", "foo", "xx", "yy")[200s:5s])`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{0.89, 0.89, 0.95, 0.87, 0.92, 0.89},
Timestamps: timestampsExpected,
}
r.MetricName.MetricGroup = []byte("foo")
r.MetricName.Tags = []storage.Tag{
{
Key: []byte("xx"),
Value: []byte("yy"),
},
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`quantiles_over_time`, func(t *testing.T) {
t.Parallel()
q := `sort_by_label(
quantiles_over_time("phi", 0.5, 0.9,
label_set(round(rand(0), 0.01), "__name__", "foo", "xx", "yy")[200s:5s]
),
"phi",
)`
r1 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{0.47, 0.57, 0.49, 0.54, 0.56, 0.53},
Timestamps: timestampsExpected,
}
r1.MetricName.MetricGroup = []byte("foo")
r1.MetricName.Tags = []storage.Tag{
{
Key: []byte("phi"),
Value: []byte("0.5"),
},
{
Key: []byte("xx"),
Value: []byte("yy"),
},
}
r2 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{0.89, 0.89, 0.95, 0.87, 0.92, 0.89},
Timestamps: timestampsExpected,
}
r2.MetricName.MetricGroup = []byte("foo")
r2.MetricName.Tags = []storage.Tag{
{
Key: []byte("phi"),
Value: []byte("0.9"),
},
{
Key: []byte("xx"),
Value: []byte("yy"),
},
}
resultExpected := []netstorage.Result{r1, r2}
f(q, resultExpected)
})
t.Run(`histogram_over_time`, func(t *testing.T) {
t.Parallel()
q := `sort_by_label(histogram_over_time(alias(label_set(rand(0)*1.3+1.1, "foo", "bar"), "xxx")[200s:5s]), "vmrange")`
@ -5524,6 +5625,47 @@ func TestExecSuccess(t *testing.T) {
resultExpected := []netstorage.Result{}
f(q, resultExpected)
})
t.Run(`mad()`, func(t *testing.T) {
t.Parallel()
q := `mad(
alias(time(), "metric1"),
alias(time()*1.5, "metric2"),
label_set(time()*0.9, "baz", "sss"),
)`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{100, 120, 140, 160, 180, 200},
Timestamps: timestampsExpected,
}
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`outliers_mad(1)`, func(t *testing.T) {
t.Parallel()
q := `outliers_mad(1, (
alias(time(), "metric1"),
alias(time()*1.5, "metric2"),
label_set(time()*0.9, "baz", "sss"),
))`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{1500, 1800, 2100, 2400, 2700, 3000},
Timestamps: timestampsExpected,
}
r.MetricName.MetricGroup = []byte("metric2")
resultExpected := []netstorage.Result{r}
f(q, resultExpected)
})
t.Run(`outliers_mad(5)`, func(t *testing.T) {
t.Parallel()
q := `outliers_mad(5, (
alias(time(), "metric1"),
alias(time()*1.5, "metric2"),
label_set(time()*0.9, "baz", "sss"),
))`
resultExpected := []netstorage.Result{}
f(q, resultExpected)
})
t.Run(`outliersk(0)`, func(t *testing.T) {
t.Parallel()
q := `outliersk(0, (
@ -6939,6 +7081,7 @@ func TestExecError(t *testing.T) {
f(`timestamp()`)
f(`vector()`)
f(`histogram_quantile()`)
f(`histogram_quantiles()`)
f(`sum()`)
f(`count_values()`)
f(`quantile()`)
@ -7000,6 +7143,9 @@ func TestExecError(t *testing.T) {
f(`hoeffding_bound_upper()`)
f(`hoeffding_bound_upper(1)`)
f(`hoeffding_bound_upper(0.99, foo, 1)`)
f(`mad()`)
f(`outliers_mad()`)
f(`outliers_mad(1)`)
f(`outliersk()`)
f(`outliersk(1)`)
f(`mode_over_time()`)

View file

@ -83,6 +83,7 @@ var rollupFuncs = map[string]newRollupFunc{
"ascent_over_time": newRollupFuncOneArg(rollupAscentOverTime),
"descent_over_time": newRollupFuncOneArg(rollupDescentOverTime),
"zscore_over_time": newRollupFuncOneArg(rollupZScoreOverTime),
"quantiles_over_time": newRollupQuantiles,
// `timestamp` function must return timestamp for the last datapoint on the current window
// in order to properly handle offset and timestamps unaligned to the current step.
@ -156,6 +157,7 @@ var rollupFuncsCannotAdjustWindow = map[string]bool{
"sum_over_time": true,
"count_over_time": true,
"quantile_over_time": true,
"quantiles_over_time": true,
"stddev_over_time": true,
"stdvar_over_time": true,
"absent_over_time": true,
@ -191,6 +193,7 @@ var rollupFuncsKeepMetricGroup = map[string]bool{
"min_over_time": true,
"max_over_time": true,
"quantile_over_time": true,
"quantiles_over_time": true,
"rollup": true,
"geomean_over_time": true,
"hoeffding_bound_lower": true,
@ -250,15 +253,17 @@ func getRollupAggrFuncNames(expr metricsql.Expr) ([]string, error) {
return aggrFuncNames, nil
}
func getRollupArgIdx(funcName string) int {
funcName = strings.ToLower(funcName)
func getRollupArgIdx(fe *metricsql.FuncExpr) int {
funcName := strings.ToLower(fe.Name)
if rollupFuncs[funcName] == nil {
logger.Panicf("BUG: getRollupArgIdx is called for non-rollup func %q", funcName)
logger.Panicf("BUG: getRollupArgIdx is called for non-rollup func %q", fe.Name)
}
switch funcName {
case "quantile_over_time", "aggr_over_time",
"hoeffding_bound_lower", "hoeffding_bound_upper":
return 1
case "quantiles_over_time":
return len(fe.Args) - 1
default:
return 0
}
@ -423,14 +428,16 @@ var (
const maxSilenceInterval = 5 * 60 * 1000
type timeseriesMap struct {
origin *timeseries
labelName string
h metrics.Histogram
m map[string]*timeseries
origin *timeseries
h metrics.Histogram
m map[string]*timeseries
}
func newTimeseriesMap(funcName string, sharedTimestamps []int64, mnSrc *storage.MetricName) *timeseriesMap {
if strings.ToLower(funcName) != "histogram_over_time" {
funcName = strings.ToLower(funcName)
switch funcName {
case "histogram_over_time", "quantiles_over_time":
default:
return nil
}
@ -440,13 +447,14 @@ func newTimeseriesMap(funcName string, sharedTimestamps []int64, mnSrc *storage.
}
var origin timeseries
origin.MetricName.CopyFrom(mnSrc)
origin.MetricName.ResetMetricGroup()
if !rollupFuncsKeepMetricGroup[funcName] {
origin.MetricName.ResetMetricGroup()
}
origin.Timestamps = sharedTimestamps
origin.Values = values
return &timeseriesMap{
origin: &origin,
labelName: "vmrange",
m: make(map[string]*timeseries),
origin: &origin,
m: make(map[string]*timeseries),
}
}
@ -457,15 +465,15 @@ func (tsm *timeseriesMap) AppendTimeseriesTo(dst []*timeseries) []*timeseries {
return dst
}
func (tsm *timeseriesMap) GetOrCreateTimeseries(labelValue string) *timeseries {
func (tsm *timeseriesMap) GetOrCreateTimeseries(labelName, labelValue string) *timeseries {
ts := tsm.m[labelValue]
if ts != nil {
return ts
}
ts = &timeseries{}
ts.CopyFromShallowTimestamps(tsm.origin)
ts.MetricName.RemoveTag(tsm.labelName)
ts.MetricName.AddTag(tsm.labelName, labelValue)
ts.MetricName.RemoveTag(labelName)
ts.MetricName.AddTag(labelName, labelValue)
tsm.m[labelValue] = ts
return ts
}
@ -1024,6 +1032,57 @@ func rollupHoeffdingBoundInternal(rfa *rollupFuncArg, phis []float64) (float64,
return bound, vAvg
}
func newRollupQuantiles(args []interface{}) (rollupFunc, error) {
if len(args) < 3 {
return nil, fmt.Errorf("unexpected number of args: %d; want at least 3 args", len(args))
}
tssPhi, ok := args[0].([]*timeseries)
if !ok {
return nil, fmt.Errorf("unexpected type for phi arg: %T; want string", args[0])
}
phiLabel, err := getString(tssPhi, 0)
if err != nil {
return nil, err
}
phiArgs := args[1 : len(args)-1]
phis := make([]float64, len(phiArgs))
phiStrs := make([]string, len(phiArgs))
for i, phiArg := range phiArgs {
phiValues, err := getScalar(phiArg, i+1)
if err != nil {
return nil, fmt.Errorf("cannot obtain phi from arg #%d: %w", i+1, err)
}
phis[i] = phiValues[0]
phiStrs[i] = fmt.Sprintf("%g", phiValues[0])
}
rf := func(rfa *rollupFuncArg) float64 {
// There is no need in handling NaNs here, since they must be cleaned up
// before calling rollup funcs.
values := rfa.values
if len(values) == 0 {
return rfa.prevValue
}
if len(values) == 1 {
// Fast path - only a single value.
return values[0]
}
hf := histogram.GetFast()
for _, v := range values {
hf.Update(v)
}
qs := hf.Quantiles(nil, phis)
histogram.PutFast(hf)
idx := rfa.idx
tsm := rfa.tsm
for i, phiStr := range phiStrs {
ts := tsm.GetOrCreateTimeseries(phiLabel, phiStr)
ts.Values[idx] = qs[i]
}
return nan
}
return rf, nil
}
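newRollupQuantiles() relies on the same streaming-quantile pattern used by quantiles() and getPerPointMedians(): feed values into a pooled fast histogram and read several quantiles in one call. A standalone sketch of that pattern, assuming the histogram package is github.com/valyala/histogram as imported elsewhere in the promql code:

```go
package main

import (
	"fmt"

	"github.com/valyala/histogram" // assumed import path for the pooled fast histogram
)

func main() {
	h := histogram.GetFast()
	defer histogram.PutFast(h)
	for _, v := range []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} {
		h.Update(v)
	}
	// Quantiles returns one estimate per phi; estimates may be approximate
	// once the sample count exceeds the histogram's internal reservoir size.
	qs := h.Quantiles(nil, []float64{0.5, 0.9})
	fmt.Printf("p50~%g p90~%g\n", qs[0], qs[1])
}
```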
func newRollupQuantile(args []interface{}) (rollupFunc, error) {
if err := expectRollupArgsNum(args, 2); err != nil {
return nil, err
@ -1064,7 +1123,7 @@ func rollupHistogram(rfa *rollupFuncArg) float64 {
}
idx := rfa.idx
tsm.h.VisitNonZeroBuckets(func(vmrange string, count uint64) {
ts := tsm.GetOrCreateTimeseries(vmrange)
ts := tsm.GetOrCreateTimeseries("vmrange", vmrange)
ts.Values[idx] = float64(count)
})
return nan

View file

@ -508,6 +508,7 @@ func TestRollupNewRollupFuncError(t *testing.T) {
f("holt_winters", nil)
f("predict_linear", nil)
f("quantile_over_time", nil)
f("quantiles_over_time", nil)
// Invalid arg type
scalarTs := []*timeseries{{
@ -521,6 +522,7 @@ func TestRollupNewRollupFuncError(t *testing.T) {
f("predict_linear", []interface{}{123, 123})
f("predict_linear", []interface{}{me, 123})
f("quantile_over_time", []interface{}{123, 123})
f("quantiles_over_time", []interface{}{123, 123})
}
func TestRollupNoWindowNoPoints(t *testing.T) {

View file

@ -74,59 +74,60 @@ var transformFuncs = map[string]transformFunc{
"year": newTransformFuncDateTime(transformYear),
// New funcs
"label_set": transformLabelSet,
"label_map": transformLabelMap,
"label_uppercase": transformLabelUppercase,
"label_lowercase": transformLabelLowercase,
"label_del": transformLabelDel,
"label_keep": transformLabelKeep,
"label_copy": transformLabelCopy,
"label_move": transformLabelMove,
"label_transform": transformLabelTransform,
"label_value": transformLabelValue,
"label_match": transformLabelMatch,
"label_mismatch": transformLabelMismatch,
"union": transformUnion,
"": transformUnion, // empty func is a synonym to union
"keep_last_value": transformKeepLastValue,
"keep_next_value": transformKeepNextValue,
"interpolate": transformInterpolate,
"start": newTransformFuncZeroArgs(transformStart),
"end": newTransformFuncZeroArgs(transformEnd),
"step": newTransformFuncZeroArgs(transformStep),
"running_sum": newTransformFuncRunning(runningSum),
"running_max": newTransformFuncRunning(runningMax),
"running_min": newTransformFuncRunning(runningMin),
"running_avg": newTransformFuncRunning(runningAvg),
"range_sum": newTransformFuncRange(runningSum),
"range_max": newTransformFuncRange(runningMax),
"range_min": newTransformFuncRange(runningMin),
"range_avg": newTransformFuncRange(runningAvg),
"range_first": transformRangeFirst,
"range_last": transformRangeLast,
"range_quantile": transformRangeQuantile,
"smooth_exponential": transformSmoothExponential,
"remove_resets": transformRemoveResets,
"rand": newTransformRand(newRandFloat64),
"rand_normal": newTransformRand(newRandNormFloat64),
"rand_exponential": newTransformRand(newRandExpFloat64),
"pi": transformPi,
"sin": newTransformFuncOneArg(transformSin),
"cos": newTransformFuncOneArg(transformCos),
"asin": newTransformFuncOneArg(transformAsin),
"acos": newTransformFuncOneArg(transformAcos),
"prometheus_buckets": transformPrometheusBuckets,
"buckets_limit": transformBucketsLimit,
"histogram_share": transformHistogramShare,
"histogram_avg": transformHistogramAvg,
"histogram_stdvar": transformHistogramStdvar,
"histogram_stddev": transformHistogramStddev,
"sort_by_label": newTransformFuncSortByLabel(false),
"sort_by_label_desc": newTransformFuncSortByLabel(true),
"timezone_offset": transformTimezoneOffset,
"bitmap_and": newTransformBitmap(bitmapAnd),
"bitmap_or": newTransformBitmap(bitmapOr),
"bitmap_xor": newTransformBitmap(bitmapXor),
"label_set": transformLabelSet,
"label_map": transformLabelMap,
"label_uppercase": transformLabelUppercase,
"label_lowercase": transformLabelLowercase,
"label_del": transformLabelDel,
"label_keep": transformLabelKeep,
"label_copy": transformLabelCopy,
"label_move": transformLabelMove,
"label_transform": transformLabelTransform,
"label_value": transformLabelValue,
"label_match": transformLabelMatch,
"label_mismatch": transformLabelMismatch,
"union": transformUnion,
"": transformUnion, // empty func is a synonym to union
"keep_last_value": transformKeepLastValue,
"keep_next_value": transformKeepNextValue,
"interpolate": transformInterpolate,
"start": newTransformFuncZeroArgs(transformStart),
"end": newTransformFuncZeroArgs(transformEnd),
"step": newTransformFuncZeroArgs(transformStep),
"running_sum": newTransformFuncRunning(runningSum),
"running_max": newTransformFuncRunning(runningMax),
"running_min": newTransformFuncRunning(runningMin),
"running_avg": newTransformFuncRunning(runningAvg),
"range_sum": newTransformFuncRange(runningSum),
"range_max": newTransformFuncRange(runningMax),
"range_min": newTransformFuncRange(runningMin),
"range_avg": newTransformFuncRange(runningAvg),
"range_first": transformRangeFirst,
"range_last": transformRangeLast,
"range_quantile": transformRangeQuantile,
"smooth_exponential": transformSmoothExponential,
"remove_resets": transformRemoveResets,
"rand": newTransformRand(newRandFloat64),
"rand_normal": newTransformRand(newRandNormFloat64),
"rand_exponential": newTransformRand(newRandExpFloat64),
"pi": transformPi,
"sin": newTransformFuncOneArg(transformSin),
"cos": newTransformFuncOneArg(transformCos),
"asin": newTransformFuncOneArg(transformAsin),
"acos": newTransformFuncOneArg(transformAcos),
"prometheus_buckets": transformPrometheusBuckets,
"buckets_limit": transformBucketsLimit,
"histogram_share": transformHistogramShare,
"histogram_avg": transformHistogramAvg,
"histogram_stdvar": transformHistogramStdvar,
"histogram_stddev": transformHistogramStddev,
"sort_by_label": newTransformFuncSortByLabel(false),
"sort_by_label_desc": newTransformFuncSortByLabel(true),
"timezone_offset": transformTimezoneOffset,
"bitmap_and": newTransformBitmap(bitmapAnd),
"bitmap_or": newTransformBitmap(bitmapOr),
"bitmap_xor": newTransformBitmap(bitmapXor),
"histogram_quantiles": transformHistogramQuantiles,
}
func getTransformFunc(s string) transformFunc {
@ -789,6 +790,43 @@ func stdvarForLeTimeseries(i int, xss []leTimeseries) float64 {
return stdvar
}
func transformHistogramQuantiles(tfa *transformFuncArg) ([]*timeseries, error) {
args := tfa.args
if len(args) < 3 {
return nil, fmt.Errorf("unexpected number of args: %d; expecting at least 3 args", len(args))
}
dstLabel, err := getString(args[0], 0)
if err != nil {
return nil, fmt.Errorf("cannot obtain dstLabel: %w", err)
}
phiArgs := args[1 : len(args)-1]
tssOrig := args[len(args)-1]
// Calculate quantile individually per each phi.
var rvs []*timeseries
for _, phiArg := range phiArgs {
phiStr := fmt.Sprintf("%g", phiArg[0].Values[0])
tss := copyTimeseries(tssOrig)
tfaTmp := &transformFuncArg{
ec: tfa.ec,
fe: tfa.fe,
args: [][]*timeseries{
phiArg,
tss,
},
}
tssTmp, err := transformHistogramQuantile(tfaTmp)
if err != nil {
return nil, fmt.Errorf("cannot calculate quantile %s: %w", phiStr, err)
}
for _, ts := range tssTmp {
ts.MetricName.RemoveTag(dstLabel)
ts.MetricName.AddTag(dstLabel, phiStr)
}
rvs = append(rvs, tssTmp...)
}
return rvs, nil
}
func transformHistogramQuantile(tfa *transformFuncArg) ([]*timeseries, error) {
args := tfa.args
if len(args) < 2 || len(args) > 3 {

View file

@ -8,6 +8,7 @@ See also [case studies](https://docs.victoriametrics.com/CaseStudies.html).
## Third-party articles and slides about VictoriaMetrics
* [Choosing a Time Series Database for High Cardinality Aggregations](https://abiosgaming.com/press/high-cardinality-aggregations/)
* [Scaling to trillions of metric data points](https://engineering.razorpay.com/scaling-to-trillions-of-metric-data-points-f569a5b654f2)
* [VictoriaMetrics vs. OpenTSDB](https://blg.robot-house.us/posts/tsdbs-grow/)
* [Monitoring of multiple OpenShift clusters with VictoriaMetrics](https://medium.com/ibm-garage/monitoring-of-multiple-openshift-clusters-with-victoriametrics-d4f0979e2544)
@ -45,12 +46,12 @@ See also [case studies](https://docs.victoriametrics.com/CaseStudies.html).
* [Monitoring as Code на базе VictoriaMetrics и Grafana](https://habr.com/ru/post/568090/)
* [Push Prometheus metrics to VictoriaMetrics or other exporters](https://pythonawesome.com/push-prometheus-metrics-to-victoriametrics-or-other-exporters/)
* [Install and configure VictoriaMetrics on Debian](https://www.vultr.com/docs/install-and-configure-victoriametrics-on-debian)
* [Choosing a Time Series Database for High Cardinality Aggregations](https://abiosgaming.com/press/high-cardinality-aggregations/)
## Our articles
### Announcements
* [Open-source strategy at VictoriaMetrics](https://www.youtube.com/watch?v=-DbbIZzFHIY)
* [Open-sourcing VictoriaMetrics](https://blog.usejournal.com/open-sourcing-victoriametrics-f31e34485c2b)
* [VictoriaMetrics — creating the best remote storage for Prometheus](https://faun.pub/victoriametrics-creating-the-best-remote-storage-for-prometheus-5d92d66787ac)
* [Anomaly Detection in VictoriaMetrics](https://victoriametrics.medium.com/anomaly-detection-in-victoriametrics-9528538786a7)

View file

@ -24,6 +24,11 @@ sort: 15
* FEATURE: vmui: use Prometheus-compatible query args, so `vmui` could be accessed from graph editor in Grafana. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1619). Thanks to @Loori-R.
* FEATURE: vmselect: automatically add missing port to `-storageNode` hostnames. For example, `-storageNode=vmstorage1,vmstorage2` is automatically translated to `-storageNode=vmstorage1:8401,vmstorage2:8401`. This simplifies [manual setup of VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#cluster-setup).
* FEATURE: vminsert: automatically add missing port to `-storageNode` hostnames. For example, `-storageNode=vmstorage1,vmstorage2` is automatically translated to `-storageNode=vmstorage1:8400,vmstorage2:8400`. This simplifies [manual setup of VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#cluster-setup).
* FEATURE: add [mad(q)](https://docs.victoriametrics.com/MetricsQL.html#mad) function to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). It calculates [Median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation) for groups of points with identical timestamps across multiple time series.
* FEATURE: add [outliers_mad(tolerance, q)](https://docs.victoriametrics.com/MetricsQL.html#outliers_mad) function to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). It returns time series with peaks outside the [Median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation) multiplied by `tolerance`.
* FEATURE: add `histogram_quantiles("phiLabel", phi1, ..., phiN, buckets)` function to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). It calculates the given `phi*`-quantiles over the given `buckets` and returns time series per each quantile with the corresponding `{phiLabel="phi*"}` label.
* FEATURE: add `quantiles_over_time("phiLabel", phi1, ..., phiN, series_selector[d])` function to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html). It calculates the given `phi*`-quantiles over raw samples selected by `series_selector` on the given lookbehind window `d`. It returns time series per each quantile with the corresponding `{phiLabel="phi*"}` label.
* FEATURE: [enterprise](https://victoriametrics.com/enterprise.html): do not ask for `-eula` flag if `-version` flag is passed to enterprise app. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1621).
* BUGFIX: properly handle queries with multiple filters matching empty labels such as `metric{label1=~"foo|",label2="bar|"}`. This filter must match the following series: `metric`, `metric{label1="foo"}`, `metric{label2="bar"}` and `metric{label1="foo",label2="bar"}`. Previously it was matching only `metric{label1="foo",label2="bar"}`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1601).
* BUGFIX: vmselect: reset connection timeouts after each request to `vmstorage`. This should prevent from `cannot read data in 0.000 seconds: unexpected EOF` warning in logs. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1562). Thanks to @mxlxm .
@ -32,6 +37,7 @@ sort: 15
* BUGFIX: vmagent: properly use `https` scheme for wildcard TLS certificates for `role: ingress` targets in Kubernetes service discovery. See [this issue](https://github.com/prometheus/prometheus/issues/8902).
* BUGFIX: vmagent: support host networking mode for `docker_sd_config`. See [this issue](https://github.com/prometheus/prometheus/issues/9116).
* BUGFIX: fix non-repeatable results from `quantile_over_time()` function when the number of input samples exceeds 1000. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1612).
* BUGFIX: vmagent: fix EC2 zone discovery when `filters` are specified in [ec2_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1626).
## [v1.65.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.65.0)
@ -47,7 +53,7 @@ sort: 15
* FEATURE: update Go builder from v1.16.7 to v1.17.0. This improves data ingestion and query performance by up to 5% according to benchmarks. See [the release post for Go1.17](https://go.dev/blog/go1.17).
* FEATURE: vmagent: expose `promscrape_discovery_http_errors_total` metric, which can be used for monitoring the number of failed discovery attempts per each `http_sd` config.
* FEATURE: do not reset response cache when a sample with old timestamp is ingested into VictoriaMetrics if `-search.disableAutoCacheReset` command-line option is set. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1570).
* FEATURE: add `quantiles("quantileLabel", phi1, ..., phiN, q)` aggregate function to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html), which calculates the given `phi*` quantiles over time series returned by `q`. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1573).
* FEATURE: add `quantiles("phiLabel", phi1, ..., phiN, q)` aggregate function to [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html), which calculates the given `phi*` quantiles over time series returned by `q`. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1573).
* BUGFIX: rename `sign` function to `sgn` in order to be consistent with PromQL. See [this pull request from Prometheus](https://github.com/prometheus/prometheus/pull/8457).
* BUGFIX: vmagent: add `role: endpointslice` in [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config) in order to be consistent with Prometheus. Previously this role was supported with incorrect name: `role: endpointslices`. Now both `endpointslice` and `endpointslices` are supported. See [the corresponding code in Prometheus](https://github.com/prometheus/prometheus/blob/2ec6c7dbb82b72834021e01f1773eb90a67a371f/discovery/kubernetes/kubernetes.go#L99).

View file

@ -684,7 +684,7 @@ Below is the output for `/path/to/vmselect -help`:
Comma-separated addresses of vmselect nodes; usage: -selectNode=vmselect-host1,...,vmselect-hostN
Supports an array of values separated by comma or specified via multiple flags.
-storageNode array
Comma-separated dddresses of vmstorage nodes; usage: -storageNode=vmstorage-host1,...,vmstorage-hostN
Comma-separated addresses of vmstorage nodes; usage: -storageNode=vmstorage-host1,...,vmstorage-hostN
Supports an array of values separated by comma or specified via multiple flags.
-tls
Whether to enable TLS (aka HTTPS) for incoming requests. -tlsCertFile and -tlsKeyFile must be set if -tls is set

View file

@ -4,37 +4,37 @@ sort: 14
# FAQ
### What is the main purpose of VictoriaMetrics?
## What is the main purpose of VictoriaMetrics?
To provide the best monitoring solution.
### Who uses VictoriaMetrics?
## Who uses VictoriaMetrics?
See [case studies](https://docs.victoriametrics.com/CaseStudies.html).
### Which features does VictoriaMetrics have?
## Which features does VictoriaMetrics have?
See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#prominent-features).
### Are there performance comparisons with other solutions?
## Are there performance comparisons with other solutions?
Yes. See [these benchmarks](https://docs.victoriametrics.com/Articles.html#benchmarks).
### How to start using VictoriaMetrics?
## How to start using VictoriaMetrics?
See [these docs](https://docs.victoriametrics.com/Quick-Start.html).
### Does VictoriaMetrics support replication?
## Does VictoriaMetrics support replication?
Yes. See [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#replication-and-data-safety) for details.
### Can I use VictoriaMetrics instead of Prometheus?
## Can I use VictoriaMetrics instead of Prometheus?
Yes in most cases. VictoriaMetrics can substitute Prometheus in the following aspects:
@ -43,7 +43,7 @@ Yes in most cases. VictoriaMetrics can substitute Prometheus in the following as
* Prometheus-compatible querying in Grafana is supported by VictoriaMetrics. See [these docs](https://docs.victoriametrics.com/#grafana-setup).
### What is the difference between vmagent and Prometheus?
## What is the difference between vmagent and Prometheus?
While both [vmagent](https://docs.victoriametrics.com/vmagent.html) and Prometheus may scrape Prometheus targets (aka `/metrics` pages)
according to the provided Prometheus-compatible [scrape configs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config)
@ -66,7 +66,7 @@ and send data to multiple remote storage systems, vmagent has the following addi
- [Prometheus remote_write proxy](https://docs.victoriametrics.com/vmagent.html#prometheus-remote_write-proxy)
### Is it safe to enable [remote write](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage) in Prometheus?
## Is it safe to enable [remote write](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage) in Prometheus?
Yes. Prometheus continues writing data to local storage after enabling remote write, so all the existing local storage data
and new data is available for querying via Prometheus as usual.
@ -75,7 +75,7 @@ It is recommended using [vmagent](https://docs.victoriametrics.com/vmagent.html)
and writing data to VictoriaMetrics.
### How does VictoriaMetrics compare to other remote storage solutions for Prometheus such as [M3 from Uber](https://eng.uber.com/m3/), [Thanos](https://github.com/thanos-io/thanos), [Cortex](https://github.com/cortexproject/cortex), etc.?
## How does VictoriaMetrics compare to other remote storage solutions for Prometheus such as [M3 from Uber](https://eng.uber.com/m3/), [Thanos](https://github.com/thanos-io/thanos), [Cortex](https://github.com/cortexproject/cortex), etc.?
VictoriaMetrics is simpler, faster, more cost-effective and it provides [MetricsQL query language](MetricsQL) based on PromQL. The simplicity is twofold:
- It is simpler to configure and operate. There is no need to configure [sidecars](https://github.com/thanos-io/thanos/blob/master/docs/components/sidecar.md),
@ -90,7 +90,7 @@ and [Remote Write Storage Wars](https://promcon.io/2019-munich/talks/remote-writ
VictoriaMetrics also [uses less RAM than Thanos components](https://github.com/thanos-io/thanos/issues/448).
### What is the difference between VictoriaMetrics and [QuestDB](https://questdb.io/)?
## What is the difference between VictoriaMetrics and [QuestDB](https://questdb.io/)?
- QuestDB needs more than 20x storage space than VictoriaMetrics. This translates to higher storage costs and slower queries over historical data, which must be read from the disk.
- QuestDB is much harder to set up and operate than VictoriaMetrics. Compare [setup instructions for QuestDB](https://questdb.io/docs/get-started/binaries) to [setup instructions for VictoriaMetrics](https://docs.victoriametrics.com/#how-to-start-victoriametrics).
@ -101,7 +101,7 @@ VictoriaMetrics also [uses less RAM than Thanos components](https://github.com/t
- [VictoriaMetrics supports backfilling (e.g. storing historical data) out of the box](https://docs.victoriametrics.com/#backfilling), while QuestDB provides [very limited support for backfilling](https://questdb.io/blog/2021/05/10/questdb-release-6-0-tsbs-benchmark#the-problem-with-out-of-order-data).
### What is the difference between VictoriaMetrics and [Cortex](https://github.com/cortexproject/cortex)?
## What is the difference between VictoriaMetrics and [Cortex](https://github.com/cortexproject/cortex)?
VictoriaMetrics is similar to Cortex in the following aspects:
- Both systems accept data from [vmagent](https://docs.victoriametrics.com/vmagent.html) or Prometheus
@ -129,7 +129,7 @@ The main differences between Cortex and VictoriaMetrics:
See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-time-series-data) for details.
### What is the difference between VictoriaMetrics and [Thanos](https://github.com/thanos-io/thanos)?
## What is the difference between VictoriaMetrics and [Thanos](https://github.com/thanos-io/thanos)?
- Thanos re-uses Prometheus source code, while VictoriaMetrics is written from scratch.
- VictoriaMetrics accepts data via [standard remote_write API for Prometheus](https://prometheus.io/docs/practices/remote_write/),
@ -150,7 +150,7 @@ The main differences between Cortex and VictoriaMetrics:
See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-time-series-data) for details.
### How does VictoriaMetrics compare to [InfluxDB](https://www.influxdata.com/time-series-platform/influxdb/)?
## How does VictoriaMetrics compare to [InfluxDB](https://www.influxdata.com/time-series-platform/influxdb/)?
- VictoriaMetrics requires [10x less RAM](https://medium.com/@valyala/insert-benchmarks-with-inch-influxdb-vs-victoriametrics-e31a41ae2893) and it [works faster](https://medium.com/@valyala/measuring-vertical-scalability-for-time-series-databases-in-google-cloud-92550d78d8ae).
- VictoriaMetrics needs lower amounts of storage space than InfluxDB on production data.
@ -159,21 +159,22 @@ The main differences between Cortex and VictoriaMetrics:
See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-time-series-data) for details.
### How does VictoriaMetrics compare to [TimescaleDB](https://www.timescale.com/)?
## How does VictoriaMetrics compare to [TimescaleDB](https://www.timescale.com/)?
- TimescaleDB insists on using SQL as a query language. While SQL is more powerful than PromQL, this power is rarely required during typical TSDB usage. Real-world queries usually [look clearer and simpler when written in PromQL than in SQL](https://medium.com/@valyala/promql-tutorial-for-beginners-9ab455142085).
- VictoriaMetrics requires [up to 70x less storage space compared to TimescaleDB](https://medium.com/@valyala/when-size-matters-benchmarking-victoriametrics-vs-timescale-and-influxdb-6035811952d4) for storing the same amount of time series data. The gap in storage space usage can be lowered from 70x to 3x if [compression in TimescaleDB is properly configured](https://docs.timescale.com/latest/using-timescaledb/compression) (it isn't an easy task in the general case :)).
- VictoriaMetrics requires up to 10x less CPU and RAM resources than TimescaleDB for processing production data. See [this article](https://abiosgaming.com/press/high-cardinality-aggregations/) for details.
- TimescaleDB is [harder to set up, configure and operate](https://docs.timescale.com/timescaledb/latest/how-to-guides/install-timescaledb/self-hosted/ubuntu/installation-apt-ubuntu/) than VictoriaMetrics (see [how to run VictoriaMetrics](https://docs.victoriametrics.com/#how-to-start-victoriametrics)).
- VictoriaMetrics accepts data in multiple popular data ingestion protocols - InfluxDB, OpenTSDB, Graphite, CSV, while TimescaleDB supports only SQL inserts.
### Does VictoriaMetrics use Prometheus technologies like other clustered TSDBs built on top of Prometheus such as [Thanos](https://github.com/thanos-io/thanos) or [Cortex](https://github.com/cortexproject/cortex)?
## Does VictoriaMetrics use Prometheus technologies like other clustered TSDBs built on top of Prometheus such as [Thanos](https://github.com/thanos-io/thanos) or [Cortex](https://github.com/cortexproject/cortex)?
No. VictoriaMetrics core is written in Go from scratch by [fasthttp](https://github.com/valyala/fasthttp) [author](https://github.com/valyala).
The architecture is [optimized for storing and querying large amounts of time series data with high cardinality](https://medium.com/devopslinks/victoriametrics-creating-the-best-remote-storage-for-prometheus-5d92d66787ac). VictoriaMetrics storage uses [certain ideas from ClickHouse](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282). Special thanks to [Alexey Milovidov](https://github.com/alexey-milovidov).
### What is the pricing for VictoriaMetrics?
## What is the pricing for VictoriaMetrics?
The following versions are open source and free:
* [Single-node version](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html).
@ -188,7 +189,7 @@ The following commercial versions of VictoriaMetrics are planned:
[Contact us](mailto:info@victoriametrics.com) for more information on our plans.
### Why VictoriaMetrics doesn't support [Prometheus remote read API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#%3Cremote_read%3E)?
## Why VictoriaMetrics doesn't support [Prometheus remote read API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#%3Cremote_read%3E)?
The remote read API requires transferring all the raw data for all the requested metrics over the given time range. For instance,
if a query covers 1000 metrics with 10K values each, then the remote read API would have to return `1000*10K`=10M metric values to Prometheus.
@ -199,19 +200,19 @@ So just query VictoriaMetrics directly via [Prometheus Querying API](https://doc
or via [Prometheus datasource in Grafana](https://docs.victoriametrics.com/#grafana-setup).
### Does VictoriaMetrics deduplicate data from Prometheus instances scraping the same targets (aka `HA pairs`)?
## Does VictoriaMetrics deduplicate data from Prometheus instances scraping the same targets (aka `HA pairs`)?
Yes. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#deduplication) for details.
### Where is the source code of VictoriaMetrics?
## Where is the source code of VictoriaMetrics?
Source code for the following versions is available in the following places:
* [Single-node version](https://github.com/VictoriaMetrics/VictoriaMetrics)
* [Cluster version](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/cluster)
### Does VictoriaMetrics fit for data from IoT sensors and industrial sensors?
## Does VictoriaMetrics fit for data from IoT sensors and industrial sensors?
VictoriaMetrics is able to handle data from hundreds of millions of IoT sensors and industrial sensors.
It supports [high cardinality data](https://medium.com/@valyala/high-cardinality-tsdb-benchmarks-victoriametrics-vs-timescaledb-vs-influxdb-13e6ee64dd6b),
@ -219,7 +220,7 @@ perfectly [scales up on a single node](https://medium.com/@valyala/measuring-ver
and scales horizontally to multiple nodes.
### Where can I ask questions about VictoriaMetrics?
## Where can I ask questions about VictoriaMetrics?
Questions about VictoriaMetrics can be asked via the following channels:
@ -228,17 +229,17 @@ Questions about VictoriaMetrics can be asked via the following channels:
- [Google group](https://groups.google.com/forum/#!forum/victorametrics-users)
### Where can I file bugs and feature requests regarding VictoriaMetrics?
## Where can I file bugs and feature requests regarding VictoriaMetrics?
File bugs and feature requests [here](https://github.com/VictoriaMetrics/VictoriaMetrics/issues).
### Where can I find information about multi-tenancy?
## Where can I find information about multi-tenancy?
See [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy). Multitenancy is supported only by [cluster version](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html) of VictoriaMetrics.
### How to set a memory limit for VictoriaMetrics components?
## How to set a memory limit for VictoriaMetrics components?
All the VictoriaMetrics components provide command-line flags to control the size of internal buffers and caches: `-memory.allowedPercent` and `-memory.allowedBytes` (pass `-help` to any VictoriaMetrics component in order to see the description for these flags). These limits don't take into account additional memory, which may be needed for processing incoming queries. Hard limits may be enforced only by the OS via [cgroups](https://en.wikipedia.org/wiki/Cgroups), Docker (see [these docs](https://docs.docker.com/config/containers/resource_constraints)) or Kubernetes (see [these docs](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers)).
@ -250,22 +251,22 @@ Memory usage for VictoriaMetrics components can be tuned according to the follow
* [Troubleshooting for single-node VictoriaMetrics](https://docs.victoriametrics.com/#troubleshooting)
### How can I run VictoriaMetrics on FreeBSD?
## How can I run VictoriaMetrics on FreeBSD?
VictoriaMetrics is included in FreeBSD ports, so just install it from there. See [this link](https://www.freebsd.org/cgi/ports.cgi?query=victoria&stype=all).
### Does VictoriaMetrics support Graphite query language?
## Does VictoriaMetrics support Graphite query language?
Yes. See [these docs](https://docs.victoriametrics.com/#graphite-api-usage).
### What is active time series?
## What is active time series?
A time series is uniquely identified by its name plus a set of its labels. For example, `temperature{city="NY",country="US"}` and `temperature{city="SF",country="US"}` are two distinct series, since they differ by `city` label. A time series is considered active if it receives at least a single new sample during the last hour.
### What is high churn rate?
## What is high churn rate?
If old time series are constantly substituted by new time series at a high rate, then such a state is called `high churn rate`. High churn rate has the following negative consequences:
* Increased total number of time series stored in the database.
@ -273,31 +274,31 @@ If old time series are constantly substituted by new time series at a high rate,
* Slow down of queries over multiple days.
### What is high cardinality?
## What is high cardinality?
High cardinality usually means a high number of [active time series](#what-is-active-time-series). High cardinality may lead to high memory usage and/or to a high percentage of [slow inserts](#what-is-slow-insert). The source of high cardinality is usually a label with a big number of unique values, which is present in a big share of the ingested time series. The solution is to identify and remove the source of high cardinality with the help of the `/api/v1/status/tsdb` page - see [these docs](https://docs.victoriametrics.com/#tsdb-stats).
### What is slow insert?
## What is slow insert?
VictoriaMetrics maintains an in-memory cache for mapping [active time series](#what-is-active-time-series) into internal series ids. The cache size depends on the available memory for VictoriaMetrics in the host system. If the information about all the active time series doesn't fit the cache, then VictoriaMetrics needs to read and unpack the information from disk on every incoming sample for time series missing in the cache. This operation is much slower than the cache lookup, so such an insert is named a `slow insert`. A high percentage of slow inserts on the [official dashboard for VictoriaMetrics](https://docs.victoriametrics.com/#monitoring) indicates memory shortage for the current number of [active time series](#what-is-active-time-series). Such a condition usually leads to a significant slowdown for data ingestion and to significantly increased disk IO and CPU usage. The solution is to add more memory or to reduce the number of [active time series](#what-is-active-time-series). The `/api/v1/status/tsdb` page can be helpful for locating the source of the high number of active time series - see [these docs](https://docs.victoriametrics.com/#tsdb-stats).
### How to migrate data from Prometheus to VictoriaMetrics?
## How to migrate data from Prometheus to VictoriaMetrics?
Please see [these docs](https://docs.victoriametrics.com/vmctl.html#migrating-data-from-prometheus).
### How to migrate data from InfluxDB to VictoriaMetrics?
## How to migrate data from InfluxDB to VictoriaMetrics?
Please see [these docs](https://docs.victoriametrics.com/vmctl.html#migrating-data-from-influxdb-1x).
### How to migrate data from OpenTSDB to VictoriaMetrics?
## How to migrate data from OpenTSDB to VictoriaMetrics?
Please see [these docs](https://docs.victoriametrics.com/vmctl.html#migrating-data-from-opentsdb).
### How to migrate data from Graphite to VictoriaMetrics?
## How to migrate data from Graphite to VictoriaMetrics?
Please use [whisper-to-graphite](https://github.com/bzed/whisper-to-graphite) tool for reading the data from Graphite and pushing it to VictoriaMetrics via [Graphite import API](https://docs.victoriametrics.com/#how-to-send-data-from-graphite-compatible-agents-such-as-statsd).

View file

@ -228,7 +228,11 @@ See also [implicit query conversions](#implicit-query-conversions).
#### quantile_over_time
`quantile_over_time(phi, series_selector[d])` calculates `phi`-quantile over raw samples on the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). The `phi` value must be in the range `[0...1]`. This function is supported by PromQL.
`quantile_over_time(phi, series_selector[d])` calculates `phi`-quantile over raw samples on the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). The `phi` value must be in the range `[0...1]`. This function is supported by PromQL. See also [quantiles_over_time](#quantiles_over_time).
#### quantiles_over_time
`quantiles_over_time("phiLabel", phi1, ..., phiN, series_selector[d])` calculates `phi*`-quantiles over raw samples on the given lookbehind window `d` per each time series returned from the given [series_selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors). The function returns individual series per each `phi*` with `{phiLabel="phi*"}` label. `phi*` values must be in the range `[0...1]`. See also [quantile_over_time](#quantile_over_time).
#### range_over_time
@ -413,7 +417,11 @@ See also [implicit query conversions](#implicit-query-conversions).
#### histogram_quantile
`histogram_quantile(phi, buckets)` calculates `phi`-quantile over the given [histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350). For example, `histogram_quantile(0.5, sum(rate(http_request_duration_seconds_bucket[5m])) by (le))` would return the median request duration for all the requests during the last 5 minutes. It accepts an optional third arg - `boundsLabel`. In this case it returns `lower` and `upper` bounds for the estimated percentile. See [this issue for details](https://github.com/prometheus/prometheus/issues/5706). This function is supported by PromQL (except for the `boundsLabel` arg). See also [histogram_share](#histogram_share).
`histogram_quantile(phi, buckets)` calculates `phi`-quantile over the given [histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350). `phi` must be in the range `[0...1]`. For example, `histogram_quantile(0.5, sum(rate(http_request_duration_seconds_bucket[5m])) by (le))` would return the median request duration for all the requests during the last 5 minutes. It accepts an optional third arg - `boundsLabel`. In this case it returns `lower` and `upper` bounds for the estimated percentile. See [this issue for details](https://github.com/prometheus/prometheus/issues/5706). This function is supported by PromQL (except for the `boundsLabel` arg). See also [histogram_quantiles](#histogram_quantiles) and [histogram_share](#histogram_share).
#### histogram_quantiles
`histogram_quantiles("phiLabel", phi1, ..., phiN, buckets)` calculates the given `phi*`-quantiles over the given [histogram buckets](https://valyala.medium.com/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350). `phi*` must be in the range `[0...1]`. Each calculated quantile is returned in a separate time series with the corresponding `{phiLabel="phi*"}` label. See also [histogram_quantile](#histogram_quantile).
#### histogram_share
@ -509,7 +517,7 @@ See also [implicit query conversions](#implicit-query-conversions).
#### range_quantile
`range_quantile(phi, q)` returns `phi`-quantile across points per each time series returned by `q`.
`range_quantile(phi, q)` returns `phi`-quantile across points per each time series returned by `q`. `phi` must be in the range `[0...1]`.
#### range_sum
@ -748,6 +756,10 @@ See also [implicit query conversions](#implicit-query-conversions).
`limitk(k, q) by (group_labels)` returns up to `k` time series per each `group_labels` out of time series returned by `q`. The returned set of time series can change with each call.
#### mad
`mad(q) by (group_labels)` returns the [Median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation) per each `group_labels` for all the time series returned by `q`. The aggregate is calculated individually per each group of points with the same timestamp. See also [outliers_mad](#outliers_mad) and [stddev](#stddev).
#### max
`max(q) by (group_labels)` returns the maximum value per each `group_labels` for all the time series returned by `q`. The aggregate is calculated individually per each group of points with the same timestamp. This function is supported by PromQL.
@ -764,17 +776,21 @@ See also [implicit query conversions](#implicit-query-conversions).
`mode(q) by (group_labels)` returns [mode](https://en.wikipedia.org/wiki/Mode_(statistics)) per each `group_labels` for all the time series returned by `q`. The aggregate is calculated individually per each group of points with the same timestamp.
#### outliers_mad
`outliers_mad(tolerance, q)` returns time series from `q` with at least a single point outside [Median absolute deviation](https://en.wikipedia.org/wiki/Median_absolute_deviation) (aka MAD) multiplied by `tolerance`. E.g. it returns time series with at least a single point below `median(q) - tolerance*mad(q)` or a single point above `median(q) + tolerance*mad(q)`. See also [outliersk](#outliersk) and [mad](#mad).
#### outliersk
`outliersk(k, q)` returns up to `k` time series with the biggest standard deviation (aka outliers) out of time series returned by `q`.
`outliersk(k, q)` returns up to `k` time series with the biggest standard deviation (aka outliers) out of time series returned by `q`. See also [outliers_mad](#outliers_mad).
#### quantile
`quantile(phi, q) by (group_labels)` calculates `phi`-quantile per each `group_labels` for all the time series returned by `q`. The aggregate is calculated individually per each group of points with the same timestamp. This function is supported by PromQL. See also [quantiles](#quantiles).
`quantile(phi, q) by (group_labels)` calculates `phi`-quantile per each `group_labels` for all the time series returned by `q`. `phi` must be in the range `[0...1]`. The aggregate is calculated individually per each group of points with the same timestamp. This function is supported by PromQL. See also [quantiles](#quantiles).
#### quantiles
`quantiles("quantileLabel", phi1, ..., phiN, q)` calculates `phi*`-quantiles for all the time series returned by `q` and returns them in time series with `{quantileLabel="phi*"}` label. The aggregate is calculated individually per each group of points with the same timestamp. See also [quantile](#quantile).
`quantiles("phiLabel", phi1, ..., phiN, q)` calculates `phi*`-quantiles for all the time series returned by `q` and returns them in time series with `{phiLabel="phi*"}` label. `phi*` must be in the range `[0...1]`. The aggregate is calculated individually per each group of points with the same timestamp. See also [quantile](#quantile).
#### stddev
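To make the new quantile-family functions concrete, here is a minimal Go sketch that parses such expressions with the `github.com/VictoriaMetrics/metricsql` package bumped by this commit (v0.24.0). The metric names and the `"phi"` label name are illustrative, not taken from the docs above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/VictoriaMetrics/metricsql"
)

func main() {
	// Each query returns one series per requested quantile, labeled with the
	// first string argument ({phi="0.5"}, {phi="0.99"}, ...).
	queries := []string{
		`quantiles_over_time("phi", 0.5, 0.99, http_request_duration_seconds[5m])`,
		`histogram_quantiles("phi", 0.5, 0.99, sum(rate(http_request_duration_seconds_bucket[5m])) by (le))`,
	}
	for _, q := range queries {
		expr, err := metricsql.Parse(q)
		if err != nil {
			log.Fatalf("cannot parse %q: %s", q, err)
		}
		fmt.Printf("%T: %s\n", expr, expr.AppendString(nil))
	}
}
```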

go.mod (16 changed lines)
View file

@ -1,7 +1,6 @@
module github.com/VictoriaMetrics/VictoriaMetrics
require (
cloud.google.com/go v0.94.1 // indirect
cloud.google.com/go/storage v1.16.1
github.com/VictoriaMetrics/fastcache v1.7.0
@ -9,9 +8,9 @@ require (
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
github.com/VictoriaMetrics/fasthttp v1.1.0
github.com/VictoriaMetrics/metrics v1.18.0
github.com/VictoriaMetrics/metricsql v0.21.0
github.com/VictoriaMetrics/metricsql v0.24.0
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/aws/aws-sdk-go v1.40.43
github.com/aws/aws-sdk-go v1.40.45
github.com/cespare/xxhash/v2 v2.1.2
github.com/cheggaaa/pb/v3 v3.0.8
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
@ -20,7 +19,8 @@ require (
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.9.3
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
github.com/influxdata/influxdb v1.9.4
github.com/klauspost/compress v1.13.6
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
@ -36,12 +36,12 @@ require (
github.com/valyala/histogram v1.2.0
github.com/valyala/quicktemplate v1.7.0
go.uber.org/atomic v1.9.0 // indirect
golang.org/x/net v0.0.0-20210913180222-943fd674d43e
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f
golang.org/x/sys v0.0.0-20210915083310-ed5796bab164
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678
golang.org/x/text v0.3.7 // indirect
google.golang.org/api v0.56.0
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af // indirect
google.golang.org/api v0.57.0
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 // indirect
gopkg.in/yaml.v2 v2.4.0
)

go.sum (70 changed lines)
View file

@ -50,24 +50,24 @@ cloud.google.com/go/storage v1.16.1 h1:sMEIc4wxvoY3NXG7Rn9iP7jb/2buJgWR1vNXCR/UP
cloud.google.com/go/storage v1.16.1/go.mod h1:LaNorbty3ehnU3rEjXSNV/NRgQA0O8Y+uh6bPe5UOk4=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest v0.10.0/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest v0.10.1/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
github.com/Azure/go-autorest/autorest v0.11.9/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
github.com/Azure/go-autorest/autorest v0.11.11/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM=
github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.3/go.mod h1:4bJZhUhcq8LB20TruwHbAQsmUs2Xh+QR7utuJpLXX3A=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
@ -109,8 +109,8 @@ github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR
github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
github.com/VictoriaMetrics/metrics v1.18.0 h1:vov5NxDHRSXFbdiH4dYLYEjKLoAXXSQ7hcnG8TSD9JQ=
github.com/VictoriaMetrics/metrics v1.18.0/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA=
github.com/VictoriaMetrics/metricsql v0.21.0 h1:wA/IVfRFQaThy4bM1kAmPiCR0BkWv4tEXD9lBF+GPdU=
github.com/VictoriaMetrics/metricsql v0.21.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
github.com/VictoriaMetrics/metricsql v0.24.0 h1:1SOiuEaSgfS2CiQyCAlYQs3WPHzXNMPUSXtE1Zx6qDw=
github.com/VictoriaMetrics/metricsql v0.24.0/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
@ -132,6 +132,7 @@ github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQY
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0=
github.com/apache/arrow/go/arrow v0.0.0-20200923215132-ac86123a3f01/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
@ -153,11 +154,23 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.43 h1:froMtO2//9kCu1sK+dOfAcwxUu91p5KgUP4AL7SDwUQ=
github.com/aws/aws-sdk-go v1.40.43/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go v1.40.45 h1:QN1nsY27ssD/JmW4s83qmSb+uL6DG4GmCDzjmJB4xUI=
github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.3.2/go.mod h1:7OaACgj2SX3XGWnrIjGlJM22h6yD6MEWKvm7levnnM8=
github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4=
github.com/aws/aws-sdk-go-v2/config v1.1.5/go.mod h1:P3F1hku7qzC81txjwXnwOM6Ex6ezkU6+/557Teyb64E=
github.com/aws/aws-sdk-go-v2/credentials v1.1.5/go.mod h1:Ir1R6tPiR1/2y1hes8yOijFMz54hzSmgcmCDo6F45Qc=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6/go.mod h1:0+fWMitrmIpENiY8/1DyhdYPUCAPvd9UNz9mtCsEoLQ=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2/go.mod h1:Azf567f5wBUfUbwpyJJnLM/geFFIzEulGR30L+nQZOE=
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4/go.mod h1:BCfU3Uo2fhKcMZFp9zU5QQGQxqWCOYmZ/27Dju3S/do=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6/go.mod h1:L0KWr0ASo83PRZu9NaZaDsw3koS6PspKv137DMDZjHo=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2/go.mod h1:nnutjMLuna0s3GVY/MAkpLX03thyNER06gXvnMAPj5g=
github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0/go.mod h1:uwA7gs93Qcss43astPUb1eq4RyceNmYWAQjZFDOAMLo=
github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/J0tzWCMXHbw6FZ0j1GkWM=
github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU=
github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps=
@ -217,7 +230,6 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M=
github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
@ -403,6 +415,7 @@ github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5
github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI=
@ -496,8 +509,9 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0 h1:6DWmvNpomjL1+3liNSZbVns3zsYzzCjm6pRBO1tLeso=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1 h1:dp3bWCh+PPO1zjRRiCSczJav13sBvG4UhNyVTa1KqdU=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
@ -569,12 +583,12 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY=
github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY=
github.com/influxdata/flux v0.120.1/go.mod h1:pGSAvyAA5d3et7SSzajaYShWYXmnRnJJq2qWi+WWZ2I=
github.com/influxdata/flux v0.127.3/go.mod h1:Zc0P/HNnJnhBlm4QsmsBbAeAdtccKo4Eu0OfkP3RCk0=
github.com/influxdata/httprouter v1.3.1-0.20191122104820-ee83e2772f69/go.mod h1:pwymjR6SrP3gD3pRj9RJwdl1j5s3doEEV8gS4X9qSzA=
github.com/influxdata/influxdb v1.8.0/go.mod h1:SIzcnsjaHRFpmlxpJ4S3NT64qtEKYweNTUMb/vh0OMQ=
github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI=
github.com/influxdata/influxdb v1.9.3 h1:60F7eqotCxogyuZAjNglNRG9D6WY65KR9mcmugBx6cs=
github.com/influxdata/influxdb v1.9.3/go.mod h1:xD4ZjAgEJQO9/bX3NhFrssKtdNPi+ki1kjrttJRDhGc=
github.com/influxdata/influxdb v1.9.4 h1:hZMq5fd4enVnruYHd7qCHsqG7kWQ/msA6x+kCvGFsRY=
github.com/influxdata/influxdb v1.9.4/go.mod h1:dR0WCHqaHPpJLaqWnRSl/QHsbXJR+QpofbZXyTc8ccw=
github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
@ -583,7 +597,7 @@ github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gH
github.com/influxdata/influxql v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk=
github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE=
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/influxdata/pkg-config v0.2.7/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk=
github.com/influxdata/pkg-config v0.2.8/go.mod h1:EMS7Ll0S4qkzDk53XS3Z72/egBsPInt+BeRxb0WeSwk=
github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8=
github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE=
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
@ -669,6 +683,7 @@ github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
@ -868,10 +883,11 @@ github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/snowflakedb/gosnowflake v1.3.4/go.mod h1:NsRq2QeiMUuoNUJhp5Q6xGC4uBrsS9g6LwZVEkTWgsE=
github.com/snowflakedb/gosnowflake v1.6.1/go.mod h1:1kyg2XEduwti88V11PKRHImhXLK5WpGiayY6lFNYb98=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
@ -1087,6 +1103,7 @@ golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@ -1116,8 +1133,8 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210913180222-943fd674d43e h1:+b/22bPvDYt4NPDcy4xAGCmON713ONAWFeY3Z7I3tR8=
golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf h1:R150MpwJIv1MpS0N/pc+NhTM8ajzvlmxlY5OYsrevXQ=
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1182,6 +1199,7 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1208,6 +1226,7 @@ golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1217,6 +1236,7 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1231,9 +1251,10 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210915083310-ed5796bab164 h1:7ZDGnxgHAMw7thfC5bEos0RDAccZKxioiWBhfIe+tvw=
golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 h1:J27LZFQBFoihqXoegpscI10HpjZ7B5WQLLKL2FZXQKw=
golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1377,8 +1398,9 @@ google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNe
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.56.0 h1:08F9XVYTLOGeSQb3xI9C0gXMuQanhdGed0cWFhDozbI=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.57.0 h1:4t9zuDlHLcIx0ZEhmXEeFVCRsiOgpgn2QOH9N0MNjPI=
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -1447,8 +1469,10 @@ google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEc
google.golang.org/genproto v0.0.0-20210825212027-de86158e7fda/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af h1:aLMMXFYqw01RA6XJim5uaN+afqNNjc9P8HPAbnpnc5s=
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 h1:ysnBoUyeL/H6RCvNRhWHjKoDEmguI+mPU+qHgK8qv/w=
google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=

View file

@ -414,14 +414,14 @@ func getSTSAPIResponse(action, stsEndpoint, roleARN string, reqBuilder func(apiU
}
// getEC2APIResponse performs EC2 API request with given action.
func getEC2APIResponse(cfg *apiConfig, action, nextPageToken string) ([]byte, error) {
func getEC2APIResponse(cfg *apiConfig, action, filters, nextPageToken string) ([]byte, error) {
ac, err := cfg.getFreshAPICredentials()
if err != nil {
return nil, fmt.Errorf("cannot obtain fresh credentials for EC2 API: %w", err)
}
apiURL := fmt.Sprintf("%s?Action=%s", cfg.ec2Endpoint, url.QueryEscape(action))
if len(cfg.filters) > 0 {
apiURL += "&" + cfg.filters
if len(filters) > 0 {
apiURL += "&" + filters
}
if len(nextPageToken) > 0 {
apiURL += fmt.Sprintf("&NextToken=%s", url.QueryEscape(nextPageToken))

View file

@ -30,7 +30,7 @@ func getReservations(cfg *apiConfig) ([]Reservation, error) {
var rs []Reservation
pageToken := ""
for {
data, err := getEC2APIResponse(cfg, "DescribeInstances", pageToken)
data, err := getEC2APIResponse(cfg, "DescribeInstances", cfg.filters, pageToken)
if err != nil {
return nil, fmt.Errorf("cannot obtain instances: %w", err)
}
@ -155,7 +155,7 @@ func getAZMap(cfg *apiConfig) map[string]string {
func getAvailabilityZones(cfg *apiConfig) ([]AvailabilityZone, error) {
// See https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html
data, err := getEC2APIResponse(cfg, "DescribeAvailabilityZones", "")
data, err := getEC2APIResponse(cfg, "DescribeAvailabilityZones", "", "")
if err != nil {
return nil, fmt.Errorf("cannot obtain availability zones: %w", err)
}
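A sketch of how the new `filters` parameter generalizes: the pagination loop from `getReservations` can be factored into a helper that receives the filters per call instead of reading them from `cfg`. The `describeAll` helper below is hypothetical (it is not part of this commit) and assumes it lives in the same package as `getEC2APIResponse`, relying on that package's existing `fmt` import.

```go
// describeAll pages through an EC2 API action, passing the caller-supplied
// filters on every request. parsePage must extract the NextToken from the
// XML response and return an empty string when there are no more pages.
func describeAll(cfg *apiConfig, action, filters string, parsePage func(data []byte) (nextToken string, err error)) error {
	pageToken := ""
	for {
		data, err := getEC2APIResponse(cfg, action, filters, pageToken)
		if err != nil {
			return fmt.Errorf("cannot query %q: %w", action, err)
		}
		nextToken, err := parsePage(data)
		if err != nil {
			return fmt.Errorf("cannot parse %q response: %w", action, err)
		}
		if nextToken == "" {
			return nil
		}
		pageToken = nextToken
	}
}
```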

View file

@ -4,7 +4,6 @@ import (
"bufio"
"fmt"
"io"
"net/http"
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
@ -18,11 +17,11 @@ import (
var maxInsertRequestSize = flagutil.NewBytes("maxInsertRequestSize", 32*1024*1024, "The maximum size in bytes of a single Prometheus remote_write API request")
// ParseStream parses Prometheus remote_write message req and calls callback for the parsed timeseries.
// ParseStream parses Prometheus remote_write message from reader and calls callback for the parsed timeseries.
//
// callback shouldn't hold tss after returning.
func ParseStream(req *http.Request, callback func(tss []prompb.TimeSeries) error) error {
ctx := getPushCtx(req.Body)
func ParseStream(r io.Reader, callback func(tss []prompb.TimeSeries) error) error {
ctx := getPushCtx(r)
defer putPushCtx(ctx)
if err := ctx.Read(); err != nil {
return err
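With the `*http.Request` dependency gone, any `io.Reader` that yields a snappy-compressed remote_write payload can be fed to the parser. A minimal sketch, not part of this commit; the import paths assume this repository's usual `lib/protoparser/...` and `lib/prompb` layout:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/promremotewrite"
)

// countSamples drains r, which must contain a snappy-compressed Prometheus
// remote_write payload, and returns the total number of samples in it.
func countSamples(r io.Reader) (int, error) {
	n := 0
	err := promremotewrite.ParseStream(r, func(tss []prompb.TimeSeries) error {
		// Per the contract above, tss must not be retained after returning.
		for _, ts := range tss {
			n += len(ts.Samples)
		}
		return nil
	})
	return n, err
}

func main() {
	// Usage sketch: pipe a captured remote_write body via stdin.
	n, err := countSamples(os.Stdin)
	if err != nil {
		log.Fatalf("cannot parse remote_write payload: %s", err)
	}
	log.Printf("parsed %d samples", n)
}
```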

View file

@ -4,7 +4,6 @@ import (
"bufio"
"fmt"
"io"
"net/http"
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
@ -19,12 +18,11 @@ var maxLineLen = flagutil.NewBytes("import.maxLineLen", 100*1024*1024, "The maxi
// ParseStream parses /api/v1/import lines from the given reader and calls callback for the parsed rows.
//
// The callback can be called concurrently multiple times for streamed data from req.
// The callback can be called concurrently multiple times for streamed data from reader.
//
// callback shouldn't hold rows after returning.
func ParseStream(req *http.Request, callback func(rows []Row) error) error {
r := req.Body
if req.Header.Get("Content-Encoding") == "gzip" {
func ParseStream(r io.Reader, isGzipped bool, callback func(rows []Row) error) error {
if isGzipped {
zr, err := common.GetGzipReader(r)
if err != nil {
return fmt.Errorf("cannot read gzipped vmimport data: %w", err)

View file

@ -35,6 +35,8 @@ var aggrFuncs = map[string]bool{
"bottomk_avg": true,
"bottomk_median": true,
"any": true,
"mad": true,
"outliers_mad": true,
"outliersk": true,
"mode": true,
"zscore": true,

View file

@ -134,7 +134,7 @@ func scanString(s string) (string, error) {
for {
n := strings.IndexByte(s[i:], quote)
if n < 0 {
return "", fmt.Errorf("cannot find closing quote %ch for the string %q", quote, s)
return "", fmt.Errorf("cannot find closing quote %c for the string %q", quote, s)
}
i += n
bs := 0

View file

@ -68,6 +68,7 @@ var rollupFuncs = map[string]bool{
"ascent_over_time": true,
"descent_over_time": true,
"zscore_over_time": true,
"quantiles_over_time": true,
// `timestamp` func has been moved here because it must work properly with offsets and samples unaligned to the current step.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/415 for details.

View file

@ -39,59 +39,60 @@ var transformFuncs = map[string]bool{
"year": true,
// New funcs from MetricsQL
"label_set": true,
"label_map": true,
"label_uppercase": true,
"label_lowercase": true,
"label_del": true,
"label_keep": true,
"label_copy": true,
"label_move": true,
"label_transform": true,
"label_value": true,
"label_match": true,
"label_mismatch": true,
"union": true,
"": true, // empty func is a synonym to union
"keep_last_value": true,
"keep_next_value": true,
"interpolate": true,
"start": true,
"end": true,
"step": true,
"running_sum": true,
"running_max": true,
"running_min": true,
"running_avg": true,
"range_sum": true,
"range_max": true,
"range_min": true,
"range_avg": true,
"range_first": true,
"range_last": true,
"range_quantile": true,
"smooth_exponential": true,
"remove_resets": true,
"rand": true,
"rand_normal": true,
"rand_exponential": true,
"pi": true,
"sin": true,
"cos": true,
"asin": true,
"acos": true,
"prometheus_buckets": true,
"buckets_limit": true,
"histogram_share": true,
"histogram_avg": true,
"histogram_stdvar": true,
"histogram_stddev": true,
"sort_by_label": true,
"sort_by_label_desc": true,
"timezone_offset": true,
"bitmap_and": true,
"bitmap_or": true,
"bitmap_xor": true,
"label_set": true,
"label_map": true,
"label_uppercase": true,
"label_lowercase": true,
"label_del": true,
"label_keep": true,
"label_copy": true,
"label_move": true,
"label_transform": true,
"label_value": true,
"label_match": true,
"label_mismatch": true,
"union": true,
"": true, // empty func is a synonym to union
"keep_last_value": true,
"keep_next_value": true,
"interpolate": true,
"start": true,
"end": true,
"step": true,
"running_sum": true,
"running_max": true,
"running_min": true,
"running_avg": true,
"range_sum": true,
"range_max": true,
"range_min": true,
"range_avg": true,
"range_first": true,
"range_last": true,
"range_quantile": true,
"smooth_exponential": true,
"remove_resets": true,
"rand": true,
"rand_normal": true,
"rand_exponential": true,
"pi": true,
"sin": true,
"cos": true,
"asin": true,
"acos": true,
"prometheus_buckets": true,
"buckets_limit": true,
"histogram_share": true,
"histogram_avg": true,
"histogram_stdvar": true,
"histogram_stddev": true,
"sort_by_label": true,
"sort_by_label_desc": true,
"timezone_offset": true,
"bitmap_and": true,
"bitmap_or": true,
"bitmap_xor": true,
"histogram_quantiles": true,
}
// IsTransformFunc returns whether funcName is known transform function.

View file

@ -53,7 +53,7 @@ var LogHTTPRequestHandler = request.NamedHandler{
}
func logRequest(r *request.Request) {
if !r.Config.LogLevel.AtLeast(aws.LogDebug) {
if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
return
}
@ -94,6 +94,10 @@ var LogHTTPRequestHeaderHandler = request.NamedHandler{
}
func logRequestHeader(r *request.Request) {
if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
return
}
b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
if err != nil {
r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
@ -124,7 +128,7 @@ var LogHTTPResponseHandler = request.NamedHandler{
}
func logResponse(r *request.Request) {
if !r.Config.LogLevel.AtLeast(aws.LogDebug) {
if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
return
}
@ -186,7 +190,7 @@ var LogHTTPResponseHeaderHandler = request.NamedHandler{
}
func logResponseHeader(r *request.Request) {
if r.Config.Logger == nil {
if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
return
}
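These guards only matter when SDK debug logging is actually wired up. A hedged sketch of the configuration that exercises this code path, using the standard aws-sdk-go v1 API (not part of this commit):

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Request/response dumps are emitted only when the log level includes
	// LogDebug and a non-nil Logger is configured; these are exactly the two
	// conditions the handlers above now check before logging.
	sess := session.Must(session.NewSession(&aws.Config{
		LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
		Logger:   aws.NewDefaultLogger(),
	}))
	_ = sess // pass sess to service clients, e.g. s3.New(sess)
}
```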

View file

@ -1481,6 +1481,7 @@ var awsPartition = partition{
"codecommit": service{
Endpoints: endpoints{
"af-south-1": endpoint{},
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
@ -4125,6 +4126,27 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
"kafkaconnect": service{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-north-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
"kinesis": service{
Endpoints: endpoints{
@ -4252,6 +4274,7 @@ var awsPartition = partition{
"ap-east-1": endpoint{},
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-northeast-3": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
@ -7451,9 +7474,33 @@ var awsPartition = partition{
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
"transcribestreaming-fips-ca-central-1": endpoint{
Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "ca-central-1",
},
},
"transcribestreaming-fips-us-east-1": endpoint{
Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-1",
},
},
"transcribestreaming-fips-us-east-2": endpoint{
Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-east-2",
},
},
"transcribestreaming-fips-us-west-2": endpoint{
Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-west-2",
},
},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
},
},
"transfer": service{
@ -10162,6 +10209,17 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
"oidc": service{
Endpoints: endpoints{
"us-gov-west-1": endpoint{
Hostname: "oidc.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
},
},
},
"organizations": service{
PartitionEndpoint: "aws-us-gov-global",
IsRegionalized: boxedFalse,

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.40.43"
const SDKVersion = "1.40.45"

File diff suppressed because it is too large

View file

@ -160,12 +160,12 @@ type UploadInput struct {
// If x-amz-server-side-encryption is present and has the value of aws:kms,
// this header specifies the ID of the Amazon Web Services Key Management Service
// (Amazon Web Services KMS) symmetrical customer managed customer master key
// (CMK) that was used for the object. If you specify x-amz-server-side-encryption:aws:kms,
// but do not providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3
// uses the Amazon Web Services managed CMK in Amazon Web Services to protect
// the data. If the KMS key does not exist in the same account issuing the command,
// you must use the full ARN and not just the ID.
// (Amazon Web Services KMS) symmetrical customer managed key that was used
// for the object. If you specify x-amz-server-side-encryption:aws:kms, but
// do not providex-amz-server-side-encryption-aws-kms-key-id, Amazon S3 uses
// the Amazon Web Services managed key to protect the data. If the KMS key does
// not exist in the same account issuing the command, you must use the full
// ARN and not just the ID.
SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string" sensitive:"true"`
// The server-side encryption algorithm used when storing this object in Amazon
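For context, a hedged sketch of an upload that sets these headers explicitly; the bucket, key and KMS key ARN are placeholders, not values from this diff:

package main

import (
	"bytes"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession())
	uploader := s3manager.NewUploader(sess)
	_, err := uploader.Upload(&s3manager.UploadInput{
		Bucket:               aws.String("example-bucket"),
		Key:                  aws.String("object.bin"),
		Body:                 bytes.NewReader([]byte("payload")),
		ServerSideEncryption: aws.String("aws:kms"),
		// Use the full key ARN when the KMS key lives in a different account
		// than the one issuing the request, per the doc comment above.
		SSEKMSKeyId: aws.String("arn:aws:kms:us-east-1:123456789012:key/example-key-id"),
	})
	if err != nil {
		log.Fatal(err)
	}
}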

View file

@ -513,12 +513,20 @@ type AccountInfo struct {
EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AccountInfo) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AccountInfo) GoString() string {
return s.String()
}
@ -548,6 +556,10 @@ type GetRoleCredentialsInput struct {
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
// in the AWS SSO OIDC API Reference Guide.
//
// AccessToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by GetRoleCredentialsInput's
// String and GoString methods.
//
// AccessToken is a required field
AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
@ -562,12 +574,20 @@ type GetRoleCredentialsInput struct {
RoleName *string `location:"querystring" locationName:"role_name" type:"string" required:"true"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetRoleCredentialsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetRoleCredentialsInput) GoString() string {
return s.String()
}
@ -616,12 +636,20 @@ type GetRoleCredentialsOutput struct {
RoleCredentials *RoleCredentials `locationName:"roleCredentials" type:"structure"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetRoleCredentialsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetRoleCredentialsOutput) GoString() string {
return s.String()
}
@ -641,12 +669,20 @@ type InvalidRequestException struct {
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InvalidRequestException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s InvalidRequestException) GoString() string {
return s.String()
}
@ -696,6 +732,10 @@ type ListAccountRolesInput struct {
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
// in the AWS SSO OIDC API Reference Guide.
//
// AccessToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by ListAccountRolesInput's
// String and GoString methods.
//
// AccessToken is a required field
AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
@ -712,12 +752,20 @@ type ListAccountRolesInput struct {
NextToken *string `location:"querystring" locationName:"next_token" type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListAccountRolesInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListAccountRolesInput) GoString() string {
return s.String()
}
@ -776,12 +824,20 @@ type ListAccountRolesOutput struct {
RoleList []*RoleInfo `locationName:"roleList" type:"list"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListAccountRolesOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListAccountRolesOutput) GoString() string {
return s.String()
}
@ -805,6 +861,10 @@ type ListAccountsInput struct {
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
// in the AWS SSO OIDC API Reference Guide.
//
// AccessToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by ListAccountsInput's
// String and GoString methods.
//
// AccessToken is a required field
AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
@ -816,12 +876,20 @@ type ListAccountsInput struct {
NextToken *string `location:"querystring" locationName:"next_token" type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListAccountsInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListAccountsInput) GoString() string {
return s.String()
}
@ -871,12 +939,20 @@ type ListAccountsOutput struct {
NextToken *string `locationName:"nextToken" type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListAccountsOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ListAccountsOutput) GoString() string {
return s.String()
}
@ -900,16 +976,28 @@ type LogoutInput struct {
// (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
// in the AWS SSO OIDC API Reference Guide.
//
// AccessToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by LogoutInput's
// String and GoString methods.
//
// AccessToken is a required field
AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s LogoutInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s LogoutInput) GoString() string {
return s.String()
}
@ -937,12 +1025,20 @@ type LogoutOutput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s LogoutOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s LogoutOutput) GoString() string {
return s.String()
}
@ -955,12 +1051,20 @@ type ResourceNotFoundException struct {
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ResourceNotFoundException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s ResourceNotFoundException) GoString() string {
return s.String()
}
@ -1020,20 +1124,36 @@ type RoleCredentials struct {
// The key that is used to sign the request. For more information, see Using
// Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
// in the AWS IAM User Guide.
//
// SecretAccessKey is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by RoleCredentials's
// String and GoString methods.
SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"`
// The token used for temporary credentials. For more information, see Using
// Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
// in the AWS IAM User Guide.
//
// SessionToken is a sensitive parameter and its value will be
// replaced with "sensitive" in string returned by RoleCredentials's
// String and GoString methods.
SessionToken *string `locationName:"sessionToken" type:"string" sensitive:"true"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s RoleCredentials) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s RoleCredentials) GoString() string {
return s.String()
}
@ -1073,12 +1193,20 @@ type RoleInfo struct {
RoleName *string `locationName:"roleName" type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s RoleInfo) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s RoleInfo) GoString() string {
return s.String()
}
@ -1104,12 +1232,20 @@ type TooManyRequestsException struct {
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s TooManyRequestsException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s TooManyRequestsException) GoString() string {
return s.String()
}
@ -1161,12 +1297,20 @@ type UnauthorizedException struct {
Message_ *string `locationName:"message" type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UnauthorizedException) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s UnauthorizedException) GoString() string {
return s.String()
}

View file

@ -1605,12 +1605,20 @@ type AssumeRoleInput struct {
TransitiveTagKeys []*string `type:"list"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AssumeRoleInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AssumeRoleInput) GoString() string {
return s.String()
}
@ -1785,12 +1793,20 @@ type AssumeRoleOutput struct {
SourceIdentity *string `min:"2" type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AssumeRoleOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AssumeRoleOutput) GoString() string {
return s.String()
}
@ -1917,12 +1933,20 @@ type AssumeRoleWithSAMLInput struct {
SAMLAssertion *string `min:"4" type:"string" required:"true"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AssumeRoleWithSAMLInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AssumeRoleWithSAMLInput) GoString() string {
return s.String()
}
@ -2089,12 +2113,20 @@ type AssumeRoleWithSAMLOutput struct {
SubjectType *string `type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AssumeRoleWithSAMLOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AssumeRoleWithSAMLOutput) GoString() string {
return s.String()
}
@ -2264,12 +2296,20 @@ type AssumeRoleWithWebIdentityInput struct {
WebIdentityToken *string `min:"4" type:"string" required:"true"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AssumeRoleWithWebIdentityInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AssumeRoleWithWebIdentityInput) GoString() string {
return s.String()
}
@ -2432,12 +2472,20 @@ type AssumeRoleWithWebIdentityOutput struct {
SubjectFromWebIdentityToken *string `min:"6" type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AssumeRoleWithWebIdentityOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AssumeRoleWithWebIdentityOutput) GoString() string {
return s.String()
}
@ -2505,12 +2553,20 @@ type AssumedRoleUser struct {
AssumedRoleId *string `min:"2" type:"string" required:"true"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AssumedRoleUser) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s AssumedRoleUser) GoString() string {
return s.String()
}
@ -2552,12 +2608,20 @@ type Credentials struct {
SessionToken *string `type:"string" required:"true"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s Credentials) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s Credentials) GoString() string {
return s.String()
}
@ -2595,12 +2659,20 @@ type DecodeAuthorizationMessageInput struct {
EncodedMessage *string `min:"1" type:"string" required:"true"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DecodeAuthorizationMessageInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DecodeAuthorizationMessageInput) GoString() string {
return s.String()
}
@ -2637,12 +2709,20 @@ type DecodeAuthorizationMessageOutput struct {
DecodedMessage *string `type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DecodeAuthorizationMessageOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s DecodeAuthorizationMessageOutput) GoString() string {
return s.String()
}
@ -2672,12 +2752,20 @@ type FederatedUser struct {
FederatedUserId *string `min:"2" type:"string" required:"true"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s FederatedUser) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s FederatedUser) GoString() string {
return s.String()
}
@ -2706,12 +2794,20 @@ type GetAccessKeyInfoInput struct {
AccessKeyId *string `min:"16" type:"string" required:"true"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetAccessKeyInfoInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetAccessKeyInfoInput) GoString() string {
return s.String()
}
@ -2745,12 +2841,20 @@ type GetAccessKeyInfoOutput struct {
Account *string `type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetAccessKeyInfoOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetAccessKeyInfoOutput) GoString() string {
return s.String()
}
@ -2765,12 +2869,20 @@ type GetCallerIdentityInput struct {
_ struct{} `type:"structure"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetCallerIdentityInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetCallerIdentityInput) GoString() string {
return s.String()
}
@ -2794,12 +2906,20 @@ type GetCallerIdentityOutput struct {
UserId *string `type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetCallerIdentityOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetCallerIdentityOutput) GoString() string {
return s.String()
}
@ -2949,12 +3069,20 @@ type GetFederationTokenInput struct {
Tags []*Tag `type:"list"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetFederationTokenInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetFederationTokenInput) GoString() string {
return s.String()
}
@ -3057,12 +3185,20 @@ type GetFederationTokenOutput struct {
PackedPolicySize *int64 `type:"integer"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetFederationTokenOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetFederationTokenOutput) GoString() string {
return s.String()
}
@ -3120,12 +3256,20 @@ type GetSessionTokenInput struct {
TokenCode *string `min:"6" type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetSessionTokenInput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetSessionTokenInput) GoString() string {
return s.String()
}
@ -3181,12 +3325,20 @@ type GetSessionTokenOutput struct {
Credentials *Credentials `type:"structure"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetSessionTokenOutput) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s GetSessionTokenOutput) GoString() string {
return s.String()
}
@ -3209,12 +3361,20 @@ type PolicyDescriptorType struct {
Arn *string `locationName:"arn" min:"20" type:"string"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s PolicyDescriptorType) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s PolicyDescriptorType) GoString() string {
return s.String()
}
@ -3267,12 +3427,20 @@ type Tag struct {
Value *string `type:"string" required:"true"`
}
// String returns the string representation
// String returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s Tag) String() string {
return awsutil.Prettify(s)
}
// GoString returns the string representation
// GoString returns the string representation.
//
// API parameter values that are decorated as "sensitive" in the API will not
// be included in the string output. The member name will be present, but the
// value will be replaced with "sensitive".
func (s Tag) GoString() string {
return s.String()
}

View file

@ -36,4 +36,4 @@
package gax
// Version specifies the gax-go version being used.
const Version = "2.0.5"
const Version = "2.1.1"

View file

@ -4,8 +4,8 @@ go 1.11
require (
github.com/google/go-cmp v0.5.6
google.golang.org/api v0.54.0
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8
google.golang.org/api v0.56.0
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af
google.golang.org/grpc v1.40.0
google.golang.org/protobuf v1.27.1

View file

@ -22,6 +22,7 @@ cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAV
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@ -136,6 +137,7 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@ -259,6 +261,7 @@ golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -312,8 +315,9 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069 h1:siQdpVirKtzPhKl3lZWozZraCFObP8S1v6PRp0bLrtU=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf h1:2ucpDCmfkl8Bd/FsLtiD653Wf96cW37s+iGx93zsu4k=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -408,8 +412,9 @@ google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59t
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
google.golang.org/api v0.54.0 h1:ECJUVngj71QI6XEm7b1sAf8BljU5inEhMbKPR8Lxhhk=
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
google.golang.org/api v0.56.0 h1:08F9XVYTLOGeSQb3xI9C0gXMuQanhdGed0cWFhDozbI=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -466,8 +471,11 @@ google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8 h1:XosVttQUxX8erNhEruTu053/VchgYuksoS9Bj/OITjU=
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af h1:aLMMXFYqw01RA6XJim5uaN+afqNNjc9P8HPAbnpnc5s=
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=

View file

@ -4,6 +4,18 @@ package models
import "strconv"
func _() {
// An "invalid array index" compiler error signifies that the constant values have changed.
// Re-run the stringer command to generate them again.
var x [1]struct{}
_ = x[Integer-0]
_ = x[Float-1]
_ = x[Boolean-2]
_ = x[String-3]
_ = x[Empty-4]
_ = x[Unsigned-5]
}
const _FieldType_name = "IntegerFloatBooleanStringEmptyUnsigned"
var _FieldType_index = [...]uint8{0, 7, 12, 19, 25, 30, 38}
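The guard above is emitted by golang.org/x/tools/cmd/stringer. A hedged sketch of the declaration it corresponds to, plus the go:generate directive that re-runs the tool; the real FieldType declaration lives elsewhere in this package, and the ordering is taken from the index checks above:

package models

//go:generate stringer -type=FieldType

// FieldType values must stay in sync with the _FieldType_name and
// _FieldType_index tables in the generated file above.
type FieldType int

const (
	Integer FieldType = iota
	Float
	Boolean
	String
	Empty
	Unsigned
)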

View file

@ -53,6 +53,13 @@ func (e ErrCode) String() string {
return fmt.Sprintf("unknown error code 0x%x", uint32(e))
}
func (e ErrCode) stringToken() string {
if s, ok := errCodeName[e]; ok {
return s
}
return fmt.Sprintf("ERR_UNKNOWN_%d", uint32(e))
}
// ConnectionError is an error that results in the termination of the
// entire connection.
type ConnectionError ErrCode

View file

@ -130,6 +130,12 @@ type Server struct {
// If nil, a default scheduler is chosen.
NewWriteScheduler func() WriteScheduler
// CountError, if non-nil, is called on HTTP/2 server errors.
// It's intended to increment a metric for monitoring, such
// as an expvar or Prometheus metric.
// The errType consists of only ASCII word characters.
CountError func(errType string)
// Internal state. This is a pointer (rather than embedded directly)
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
@ -1399,7 +1405,7 @@ func (sc *serverConn) processFrame(f Frame) error {
// First frame received must be SETTINGS.
if !sc.sawFirstSettings {
if _, ok := f.(*SettingsFrame); !ok {
return ConnectionError(ErrCodeProtocol)
return sc.countError("first_settings", ConnectionError(ErrCodeProtocol))
}
sc.sawFirstSettings = true
}
@ -1424,7 +1430,7 @@ func (sc *serverConn) processFrame(f Frame) error {
case *PushPromiseFrame:
// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
return ConnectionError(ErrCodeProtocol)
return sc.countError("push_promise", ConnectionError(ErrCodeProtocol))
default:
sc.vlogf("http2: server ignoring frame: %v", f.Header())
return nil
@ -1444,7 +1450,7 @@ func (sc *serverConn) processPing(f *PingFrame) error {
// identifier field value other than 0x0, the recipient MUST
// respond with a connection error (Section 5.4.1) of type
// PROTOCOL_ERROR."
return ConnectionError(ErrCodeProtocol)
return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
}
if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
return nil
@ -1463,7 +1469,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
// or PRIORITY on a stream in this state MUST be
// treated as a connection error (Section 5.4.1) of
// type PROTOCOL_ERROR."
return ConnectionError(ErrCodeProtocol)
return sc.countError("stream_idle", ConnectionError(ErrCodeProtocol))
}
if st == nil {
// "WINDOW_UPDATE can be sent by a peer that has sent a
@ -1474,7 +1480,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
return nil
}
if !st.flow.add(int32(f.Increment)) {
return streamError(f.StreamID, ErrCodeFlowControl)
return sc.countError("bad_flow", streamError(f.StreamID, ErrCodeFlowControl))
}
default: // connection-level flow control
if !sc.flow.add(int32(f.Increment)) {
@ -1495,7 +1501,7 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
// identifying an idle stream is received, the
// recipient MUST treat this as a connection error
// (Section 5.4.1) of type PROTOCOL_ERROR.
return ConnectionError(ErrCodeProtocol)
return sc.countError("reset_idle_stream", ConnectionError(ErrCodeProtocol))
}
if st != nil {
st.cancelCtx()
@ -1547,7 +1553,7 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
// Why is the peer ACKing settings we never sent?
// The spec doesn't mention this case, but
// hang up on them anyway.
return ConnectionError(ErrCodeProtocol)
return sc.countError("ack_mystery", ConnectionError(ErrCodeProtocol))
}
return nil
}
@ -1555,7 +1561,7 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
// This isn't actually in the spec, but hang up on
// suspiciously large settings frames or those with
// duplicate entries.
return ConnectionError(ErrCodeProtocol)
return sc.countError("settings_big_or_dups", ConnectionError(ErrCodeProtocol))
}
if err := f.ForeachSetting(sc.processSetting); err != nil {
return err
@ -1622,7 +1628,7 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
// control window to exceed the maximum size as a
// connection error (Section 5.4.1) of type
// FLOW_CONTROL_ERROR."
return ConnectionError(ErrCodeFlowControl)
return sc.countError("setting_win_size", ConnectionError(ErrCodeFlowControl))
}
}
return nil
@ -1655,7 +1661,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
// or PRIORITY on a stream in this state MUST be
// treated as a connection error (Section 5.4.1) of
// type PROTOCOL_ERROR."
return ConnectionError(ErrCodeProtocol)
return sc.countError("data_on_idle", ConnectionError(ErrCodeProtocol))
}
// "If a DATA frame is received whose stream is not in "open"
@ -1672,7 +1678,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
// and return any flow control bytes since we're not going
// to consume them.
if sc.inflow.available() < int32(f.Length) {
return streamError(id, ErrCodeFlowControl)
return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
}
// Deduct the flow control from inflow, since we're
// going to immediately add it back in
@ -1685,7 +1691,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
// Already have a stream error in flight. Don't send another.
return nil
}
return streamError(id, ErrCodeStreamClosed)
return sc.countError("closed", streamError(id, ErrCodeStreamClosed))
}
if st.body == nil {
panic("internal error: should have a body in this state")
@ -1697,12 +1703,12 @@ func (sc *serverConn) processData(f *DataFrame) error {
// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
// value of a content-length header field does not equal the sum of the
// DATA frame payload lengths that form the body.
return streamError(id, ErrCodeProtocol)
return sc.countError("send_too_much", streamError(id, ErrCodeProtocol))
}
if f.Length > 0 {
// Check whether the client has flow control quota.
if st.inflow.available() < int32(f.Length) {
return streamError(id, ErrCodeFlowControl)
return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
}
st.inflow.take(int32(f.Length))
@ -1710,7 +1716,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
wrote, err := st.body.Write(data)
if err != nil {
sc.sendWindowUpdate(nil, int(f.Length)-wrote)
return streamError(id, ErrCodeStreamClosed)
return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
}
if wrote != len(data) {
panic("internal error: bad Writer")
@ -1796,7 +1802,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// stream identifier MUST respond with a connection error
// (Section 5.4.1) of type PROTOCOL_ERROR.
if id%2 != 1 {
return ConnectionError(ErrCodeProtocol)
return sc.countError("headers_even", ConnectionError(ErrCodeProtocol))
}
// A HEADERS frame can be used to create a new stream or
// send a trailer for an open one. If we already have a stream
@ -1813,7 +1819,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// this state, it MUST respond with a stream error (Section 5.4.2) of
// type STREAM_CLOSED.
if st.state == stateHalfClosedRemote {
return streamError(id, ErrCodeStreamClosed)
return sc.countError("headers_half_closed", streamError(id, ErrCodeStreamClosed))
}
return st.processTrailerHeaders(f)
}
@ -1824,7 +1830,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
// receives an unexpected stream identifier MUST respond with
// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
if id <= sc.maxClientStreamID {
return ConnectionError(ErrCodeProtocol)
return sc.countError("stream_went_down", ConnectionError(ErrCodeProtocol))
}
sc.maxClientStreamID = id
@ -1841,14 +1847,14 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
if sc.curClientStreams+1 > sc.advMaxStreams {
if sc.unackedSettings == 0 {
// They should know better.
return streamError(id, ErrCodeProtocol)
return sc.countError("over_max_streams", streamError(id, ErrCodeProtocol))
}
// Assume it's a network race, where they just haven't
// received our last SETTINGS update. But actually
// this can't happen yet, because we don't yet provide
// a way for users to adjust server parameters at
// runtime.
return streamError(id, ErrCodeRefusedStream)
return sc.countError("over_max_streams_race", streamError(id, ErrCodeRefusedStream))
}
initialState := stateOpen
@ -1858,7 +1864,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
st := sc.newStream(id, 0, initialState)
if f.HasPriority() {
if err := checkPriority(f.StreamID, f.Priority); err != nil {
if err := sc.checkPriority(f.StreamID, f.Priority); err != nil {
return err
}
sc.writeSched.AdjustStream(st.id, f.Priority)
@ -1902,15 +1908,15 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
sc := st.sc
sc.serveG.check()
if st.gotTrailerHeader {
return ConnectionError(ErrCodeProtocol)
return sc.countError("dup_trailers", ConnectionError(ErrCodeProtocol))
}
st.gotTrailerHeader = true
if !f.StreamEnded() {
return streamError(st.id, ErrCodeProtocol)
return sc.countError("trailers_not_ended", streamError(st.id, ErrCodeProtocol))
}
if len(f.PseudoFields()) > 0 {
return streamError(st.id, ErrCodeProtocol)
return sc.countError("trailers_pseudo", streamError(st.id, ErrCodeProtocol))
}
if st.trailer != nil {
for _, hf := range f.RegularFields() {
@ -1919,7 +1925,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
// TODO: send more details to the peer somehow. But http2 has
// no way to send debug data at a stream level. Discuss with
// HTTP folk.
return streamError(st.id, ErrCodeProtocol)
return sc.countError("trailers_bogus", streamError(st.id, ErrCodeProtocol))
}
st.trailer[key] = append(st.trailer[key], hf.Value)
}
@ -1928,13 +1934,13 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
return nil
}
func checkPriority(streamID uint32, p PriorityParam) error {
func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
if streamID == p.StreamDep {
// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
// Section 5.3.3 says that a stream can depend on one of its dependencies,
// so it's only self-dependencies that are forbidden.
return streamError(streamID, ErrCodeProtocol)
return sc.countError("priority", streamError(streamID, ErrCodeProtocol))
}
return nil
}
@ -1943,7 +1949,7 @@ func (sc *serverConn) processPriority(f *PriorityFrame) error {
if sc.inGoAway {
return nil
}
if err := checkPriority(f.StreamID, f.PriorityParam); err != nil {
if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
return err
}
sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
@ -2013,13 +2019,13 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
// "All HTTP/2 requests MUST include exactly one valid
// value for the :method, :scheme, and :path
// pseudo-header fields"
return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
}
bodyOpen := !f.StreamEnded()
if rp.method == "HEAD" && bodyOpen {
// HEAD requests can't have bodies
return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
return nil, nil, sc.countError("head_body", streamError(f.StreamID, ErrCodeProtocol))
}
rp.header = make(http.Header)
@ -2102,7 +2108,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
var err error
url_, err = url.ParseRequestURI(rp.path)
if err != nil {
return nil, nil, streamError(st.id, ErrCodeProtocol)
return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
}
requestURI = rp.path
}
@ -2985,3 +2991,31 @@ func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
}
return false
}
func (sc *serverConn) countError(name string, err error) error {
if sc == nil || sc.srv == nil {
return err
}
f := sc.srv.CountError
if f == nil {
return err
}
var typ string
var code ErrCode
switch e := err.(type) {
case ConnectionError:
typ = "conn"
code = ErrCode(e)
case StreamError:
typ = "stream"
code = ErrCode(e.Code)
default:
return err
}
codeStr := errCodeName[code]
if codeStr == "" {
codeStr = strconv.Itoa(int(code))
}
f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name))
return err
}
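A usage sketch for the new Server.CountError hook (not part of the diff). Given countError above, errType values take the form conn_PROTOCOL_ERROR_push_promise or stream_FLOW_CONTROL_ERROR_bad_flow (kind, code name, call site), so the cardinality stays small enough for a plain counter map. The metric name, address, and cert paths below are illustrative:

package main

import (
	"expvar"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

var http2ServerErrors = expvar.NewMap("http2_server_errors")

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	// Count each HTTP/2 protocol error by its errType string.
	if err := http2.ConfigureServer(srv, &http2.Server{
		CountError: func(errType string) {
			http2ServerErrors.Add(errType, 1)
		},
	}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}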

View file

@ -130,6 +130,12 @@ type Transport struct {
// Defaults to 15s.
PingTimeout time.Duration
// CountError, if non-nil, is called on HTTP/2 transport errors.
// It's intended to increment a metric for monitoring, such
// as an expvar or Prometheus metric.
// The errType consists of only ASCII word characters.
CountError func(errType string)
// t1, if non-nil, is the standard library Transport using
// this transport. Its settings are used (but not its
// RoundTrip method, etc).
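The client-side hook mirrors the server-side one above; a brief hedged sketch (package and metric names are illustrative):

package example

import (
	"expvar"
	"net/http"

	"golang.org/x/net/http2"
)

var http2ClientErrors = expvar.NewMap("http2_client_errors")

// newHTTP2Client returns an HTTPS client whose transport counts HTTP/2 errors.
func newHTTP2Client() *http.Client {
	return &http.Client{
		Transport: &http2.Transport{
			CountError: func(errType string) {
				http2ClientErrors.Add(errType, 1)
			},
		},
	}
}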
@ -264,22 +270,29 @@ type ClientConn struct {
nextStreamID uint32
pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
pings map[[8]byte]chan struct{} // in flight ping data to notification channel
bw *bufio.Writer
br *bufio.Reader
fr *Framer
lastActive time.Time
lastIdle time.Time // time last idle
// Settings from peer: (also guarded by mu)
// Settings from peer: (also guarded by wmu)
maxFrameSize uint32
maxConcurrentStreams uint32
peerMaxHeaderListSize uint64
initialWindowSize uint32
// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
// Write to reqHeaderMu to lock it, read from it to unlock.
// Lock reqmu BEFORE mu or wmu.
reqHeaderMu chan struct{}
// wmu is held while writing.
// Acquire BEFORE mu when holding both, to avoid blocking mu on network writes.
// Only acquire both at the same time when changing peer settings.
wmu sync.Mutex
bw *bufio.Writer
fr *Framer
werr error // first write error that has occurred
hbuf bytes.Buffer // HPACK encoder writes into this
henc *hpack.Encoder
wmu sync.Mutex // held while writing; acquire AFTER mu if holding both
werr error // first write error that has occurred
}
// clientStream is the state for a single HTTP/2 stream. One of these
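reqHeaderMu above is the classic one-element channel semaphore: sending locks, receiving unlocks, and a select lets the waiter give up if the request is cancelled first. A standalone sketch of the pattern (names are illustrative, not the transport's own code):

package main

import (
	"context"
	"fmt"
)

// withSem runs fn while holding the one-slot semaphore, or returns early
// if ctx is cancelled before the slot becomes free.
func withSem(ctx context.Context, sem chan struct{}, fn func() error) error {
	select {
	case sem <- struct{}{}: // lock
	case <-ctx.Done():
		return ctx.Err()
	}
	defer func() { <-sem }() // unlock
	return fn()
}

func main() {
	sem := make(chan struct{}, 1)
	err := withSem(context.Background(), sem, func() error {
		fmt.Println("holding the request-header lock")
		return nil
	})
	fmt.Println(err)
}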
@ -398,10 +411,11 @@ func (cs *clientStream) abortRequestBodyWrite(err error) {
cc.mu.Lock()
if cs.stopReqBody == nil {
cs.stopReqBody = err
if cs.req.Body != nil {
cs.req.Body.Close()
}
cc.cond.Broadcast()
// Close the body after releasing the mutex, in case it blocks.
if body := cs.req.Body; body != nil {
defer body.Close()
}
}
cc.mu.Unlock()
}
@ -666,6 +680,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
singleUse: singleUse,
wantSettingsAck: true,
pings: make(map[[8]byte]chan struct{}),
reqHeaderMu: make(chan struct{}, 1),
}
if d := t.idleConnTimeout(); d != 0 {
cc.idleTimeout = d
@ -894,15 +909,18 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
func (cc *ClientConn) sendGoAway() error {
cc.mu.Lock()
defer cc.mu.Unlock()
cc.wmu.Lock()
defer cc.wmu.Unlock()
if cc.closing {
closing := cc.closing
cc.closing = true
maxStreamID := cc.nextStreamID
cc.mu.Unlock()
if closing {
// GOAWAY sent already
return nil
}
cc.wmu.Lock()
defer cc.wmu.Unlock()
// Send a graceful shutdown frame to server
maxStreamID := cc.nextStreamID
if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil {
return err
}
@ -910,7 +928,6 @@ func (cc *ClientConn) sendGoAway() error {
return err
}
// Prevent new requests
cc.closing = true
return nil
}
@ -918,17 +935,22 @@ func (cc *ClientConn) sendGoAway() error {
// err is sent to streams.
func (cc *ClientConn) closeForError(err error) error {
cc.mu.Lock()
defer cc.cond.Broadcast()
defer cc.mu.Unlock()
for id, cs := range cc.streams {
streams := cc.streams
cc.streams = nil
cc.closed = true
cc.mu.Unlock()
for _, cs := range streams {
select {
case cs.resc <- resAndError{err: err}:
default:
}
cs.bufPipe.CloseWithError(err)
delete(cc.streams, id)
}
cc.closed = true
cc.mu.Lock()
defer cc.cond.Broadcast()
defer cc.mu.Unlock()
return cc.tconn.Close()
}
@ -943,6 +965,9 @@ func (cc *ClientConn) Close() error {
// closes the client connection immediately. In-flight requests are interrupted.
func (cc *ClientConn) closeForLostPing() error {
err := errors.New("http2: client connection lost")
if f := cc.t.CountError; f != nil {
f("conn_close_lost_ping")
}
return cc.closeForError(err)
}
@ -1013,6 +1038,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
}
func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) {
ctx := req.Context()
if err := checkConnHeaders(req); err != nil {
return nil, false, err
}
@ -1026,6 +1052,26 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
}
hasTrailers := trailers != ""
// Acquire the new-request lock by writing to reqHeaderMu.
// This lock guards the critical section covering allocating a new stream ID
// (requires mu) and creating the stream (requires wmu).
if cc.reqHeaderMu == nil {
panic("RoundTrip on initialized ClientConn") // for tests
}
select {
case cc.reqHeaderMu <- struct{}{}:
case <-req.Cancel:
return nil, false, errRequestCanceled
case <-ctx.Done():
return nil, false, ctx.Err()
}
reqHeaderMuNeedsUnlock := true
defer func() {
if reqHeaderMuNeedsUnlock {
<-cc.reqHeaderMu
}
}()
cc.mu.Lock()
if err := cc.awaitOpenSlotForRequest(req); err != nil {
cc.mu.Unlock()
@ -1057,21 +1103,23 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
requestedGzip = true
}
// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
// sent by writeRequestBody below, along with any Trailers,
// again in form HEADERS{1}, CONTINUATION{0,})
hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
if err != nil {
cc.mu.Unlock()
return nil, false, err
}
cs := cc.newStream()
cs.req = req
cs.trace = httptrace.ContextClientTrace(req.Context())
cs.requestedGzip = requestedGzip
bodyWriter := cc.t.getBodyWriterState(cs, body)
cs.on100 = bodyWriter.on100
cc.mu.Unlock()
// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
// sent by writeRequestBody below, along with any Trailers,
// again in form HEADERS{1}, CONTINUATION{0,})
cc.wmu.Lock()
hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
if err != nil {
cc.wmu.Unlock()
return nil, false, err
}
defer func() {
cc.wmu.Lock()
@ -1082,24 +1130,24 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
}
}()
cc.wmu.Lock()
endStream := !hasBody && !hasTrailers
werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
cc.wmu.Unlock()
<-cc.reqHeaderMu // release the new-request lock
reqHeaderMuNeedsUnlock = false
traceWroteHeaders(cs.trace)
cc.mu.Unlock()
if werr != nil {
if err != nil {
if hasBody {
bodyWriter.cancel()
}
cc.forgetStreamID(cs.ID)
// Don't bother sending a RST_STREAM (our write already failed;
// no need to keep writing)
traceWroteRequest(cs.trace, werr)
traceWroteRequest(cs.trace, err)
// TODO(dneil): An error occurred while writing the headers.
// Should we return an error indicating that this request can be retried?
return nil, false, werr
return nil, false, err
}
var respHeaderTimer <-chan time.Time
@ -1116,7 +1164,6 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
readLoopResCh := cs.resc
bodyWritten := false
ctx := req.Context()
handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) {
res := re.res
@ -1418,19 +1465,17 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
return nil
}
cc.wmu.Lock()
var trls []byte
if hasTrailers {
cc.mu.Lock()
trls, err = cc.encodeTrailers(req)
cc.mu.Unlock()
if err != nil {
cc.wmu.Unlock()
cc.writeStreamReset(cs.ID, ErrCodeInternal, err)
cc.forgetStreamID(cs.ID)
return err
}
}
cc.wmu.Lock()
defer cc.wmu.Unlock()
// Two ways to send END_STREAM: either with trailers, or
@ -1480,7 +1525,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
}
}
// requires cc.mu be held.
// requires cc.wmu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
cc.hbuf.Reset()
@ -1668,7 +1713,7 @@ func shouldSendReqContentLength(method string, contentLength int64) bool {
}
}
// requires cc.mu be held.
// requires cc.wmu be held.
func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) {
cc.hbuf.Reset()
@ -1817,7 +1862,11 @@ func (rl *clientConnReadLoop) cleanup() {
} else if err == io.EOF {
err = io.ErrUnexpectedEOF
}
for _, cs := range cc.streams {
cc.closed = true
streams := cc.streams
cc.streams = nil
cc.mu.Unlock()
for _, cs := range streams {
cs.bufPipe.CloseWithError(err) // no-op if already closed
select {
case cs.resc <- resAndError{err: err}:
@ -1825,11 +1874,38 @@ func (rl *clientConnReadLoop) cleanup() {
}
close(cs.done)
}
cc.closed = true
cc.mu.Lock()
cc.cond.Broadcast()
cc.mu.Unlock()
}
// countReadFrameError calls Transport.CountError with a string
// representing err.
func (cc *ClientConn) countReadFrameError(err error) {
f := cc.t.CountError
if f == nil || err == nil {
return
}
if ce, ok := err.(ConnectionError); ok {
errCode := ErrCode(ce)
f(fmt.Sprintf("read_frame_conn_error_%s", errCode.stringToken()))
return
}
if errors.Is(err, io.EOF) {
f("read_frame_eof")
return
}
if errors.Is(err, io.ErrUnexpectedEOF) {
f("read_frame_unexpected_eof")
return
}
if errors.Is(err, ErrFrameTooLarge) {
f("read_frame_too_large")
return
}
f("read_frame_other")
}
func (rl *clientConnReadLoop) run() error {
cc := rl.cc
rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
@ -1860,6 +1936,7 @@ func (rl *clientConnReadLoop) run() error {
}
continue
} else if err != nil {
cc.countReadFrameError(err)
return err
}
if VerboseLogs {
@ -2155,8 +2232,6 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) {
}
cc.mu.Lock()
defer cc.mu.Unlock()
var connAdd, streamAdd int32
// Check the conn-level first, before the stream-level.
if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
@ -2173,6 +2248,8 @@ func (b transportResponseBody) Read(p []byte) (n int, err error) {
cs.inflow.add(streamAdd)
}
}
cc.mu.Unlock()
if connAdd != 0 || streamAdd != 0 {
cc.wmu.Lock()
defer cc.wmu.Unlock()
@ -2198,19 +2275,25 @@ func (b transportResponseBody) Close() error {
if unread > 0 || !serverSentStreamEnd {
cc.mu.Lock()
cc.wmu.Lock()
if !serverSentStreamEnd {
cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
cs.didReset = true
}
// Return connection-level flow control.
if unread > 0 {
cc.inflow.add(int32(unread))
}
cc.mu.Unlock()
cc.wmu.Lock()
if !serverSentStreamEnd {
cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel)
}
// Return connection-level flow control.
if unread > 0 {
cc.fr.WriteWindowUpdate(0, uint32(unread))
}
cc.bw.Flush()
cc.wmu.Unlock()
cc.mu.Unlock()
}
cs.bufPipe.BreakWithError(errClosedResponseBody)
@ -2288,6 +2371,10 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
}
if refund > 0 {
cc.inflow.add(int32(refund))
}
cc.mu.Unlock()
if refund > 0 {
cc.wmu.Lock()
cc.fr.WriteWindowUpdate(0, uint32(refund))
if !didReset {
@ -2297,7 +2384,6 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error {
cc.bw.Flush()
cc.wmu.Unlock()
}
cc.mu.Unlock()
if len(data) > 0 && !didReset {
if _, err := cs.bufPipe.Write(data); err != nil {
@ -2352,12 +2438,33 @@ func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
if f.ErrCode != 0 {
// TODO: deal with GOAWAY more. particularly the error code
cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
if fn := cc.t.CountError; fn != nil {
fn("recv_goaway_" + f.ErrCode.stringToken())
}
}
cc.setGoAway(f)
return nil
}
func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
cc := rl.cc
// Locking both mu and wmu here allows frame encoding to read settings with only wmu held.
// Acquiring wmu when f.IsAck() is unnecessary, but convenient and mostly harmless.
cc.wmu.Lock()
defer cc.wmu.Unlock()
if err := rl.processSettingsNoWrite(f); err != nil {
return err
}
if !f.IsAck() {
cc.fr.WriteSettingsAck()
cc.bw.Flush()
}
return nil
}
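// processSettingsNoWrite applies the peer's settings while holding mu; the
// caller (processSettings) already holds wmu and is responsible for writing
// the SETTINGS ack.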
func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
cc := rl.cc
cc.mu.Lock()
defer cc.mu.Unlock()
@ -2420,12 +2527,7 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
cc.seenSettings = true
}
cc.wmu.Lock()
defer cc.wmu.Unlock()
cc.fr.WriteSettingsAck()
cc.bw.Flush()
return cc.werr
return nil
}
func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
@ -2466,9 +2568,9 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
if f.ErrCode == ErrCodeProtocol {
rl.cc.SetDoNotReuse()
serr.Cause = errFromPeer
// TODO(bradfitz): increment a varz here, once Transport
// takes an optional interface-typed field that expvar.Map.Add
// implements.
}
if fn := cs.cc.t.CountError; fn != nil {
fn("recv_rststream_" + f.ErrCode.stringToken())
}
cs.resetErr = serr
close(cs.peerReset)

View file

@ -120,6 +120,23 @@ func Openat2(dirfd int, path string, how *OpenHow) (fd int, err error) {
return openat2(dirfd, path, how, SizeofOpenHow)
}
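// Pipe is implemented via Pipe2 with flags set to 0; on Linux, pipe2(2)
// called without flags behaves the same as pipe(2).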
func Pipe(p []int) error {
return Pipe2(p, 0)
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) error {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err := pipe2(&pp, flags)
p[0] = int(pp[0])
p[1] = int(pp[1])
return err
}
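A minimal usage sketch (not part of this diff) for the consolidated wrappers above; it assumes a Linux target and uses O_CLOEXEC, which pipe2(2) accepts:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Create a pipe whose ends are close-on-exec; Pipe2 forwards the flag to pipe2(2).
	fds := make([]int, 2)
	if err := unix.Pipe2(fds, unix.O_CLOEXEC); err != nil {
		panic(err)
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	if _, err := unix.Write(fds[1], []byte("ping")); err != nil {
		panic(err)
	}
	buf := make([]byte, 4)
	n, _ := unix.Read(fds[0], buf)
	fmt.Println(string(buf[:n])) // "ping"
}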
//sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error)
func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {

View file

@ -19,32 +19,6 @@ func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: int32(sec), Usec: int32(usec)}
}
//sysnb pipe(p *[2]_C_int) (err error)
func Pipe(p []int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe(&pp)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, flags)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
// 64-bit file system and 32-bit uid calls
// (386 default is 32-bit file system and 16-bit uid).
//sys dup2(oldfd int, newfd int) (err error)

View file

@ -114,32 +114,6 @@ func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: sec, Usec: usec}
}
//sysnb pipe(p *[2]_C_int) (err error)
func Pipe(p []int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe(&pp)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, flags)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
func (r *PtraceRegs) PC() uint64 { return r.Rip }
func (r *PtraceRegs) SetPC(pc uint64) { r.Rip = pc }

View file

@ -19,32 +19,6 @@ func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: int32(sec), Usec: int32(usec)}
}
//sysnb pipe(p *[2]_C_int) (err error)
func Pipe(p []int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, 0)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, flags)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
newoffset, errno := seek(fd, offset, whence)
if errno != 0 {

View file

@ -138,30 +138,6 @@ func utimes(path string, tv *[2]Timeval) (err error) {
return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
}
func Pipe(p []int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, 0)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, flags)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
// Getrlimit prefers the prlimit64 system call. See issue 38604.
func Getrlimit(resource int, rlim *Rlimit) error {
err := Prlimit(0, resource, nil, rlim)

View file

@ -93,30 +93,6 @@ func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: sec, Usec: usec}
}
func Pipe(p []int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, 0)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, flags)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
func Ioperm(from int, num int, on int) (err error) {
return ENOSYS
}

View file

@ -111,29 +111,6 @@ func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: int32(sec), Usec: int32(usec)}
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, flags)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
//sysnb pipe() (p1 int, p2 int, err error)
func Pipe(p []int) (err error) {
if len(p) != 2 {
return EINVAL
}
p[0], p[1], err = pipe()
return
}
//sys mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error)
func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {

View file

@ -216,32 +216,6 @@ func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) {
rsa.Service_name_len = uint32(length)
}
//sysnb pipe(p *[2]_C_int) (err error)
func Pipe(p []int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe(&pp)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, flags)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error)
func Poll(fds []PollFd, timeout int) (n int, err error) {

View file

@ -102,32 +102,6 @@ func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) {
rsa.Service_name_len = uint64(length)
}
//sysnb pipe(p *[2]_C_int) (err error)
func Pipe(p []int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe(&pp)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, flags)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error)
func Poll(fds []PollFd, timeout int) (n int, err error) {

View file

@ -137,30 +137,6 @@ func utimes(path string, tv *[2]Timeval) (err error) {
return utimensat(AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
}
func Pipe(p []int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, 0)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, flags)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
func (r *PtraceRegs) PC() uint64 { return r.Pc }
func (r *PtraceRegs) SetPC(pc uint64) { r.Pc = pc }

View file

@ -75,30 +75,6 @@ func setTimeval(sec, usec int64) Timeval {
return Timeval{Sec: sec, Usec: usec}
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe(p []int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, 0) // pipe2 is the same as pipe when flags are set to 0.
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
func Pipe2(p []int, flags int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, flags)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
func Ioperm(from int, num int, on int) (err error) {
return ENOSYS
}

View file

@ -119,32 +119,6 @@ func (rsa *RawSockaddrNFCLLCP) SetServiceNameLen(length int) {
rsa.Service_name_len = uint64(length)
}
//sysnb pipe(p *[2]_C_int) (err error)
func Pipe(p []int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe(&pp)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
//sysnb pipe2(p *[2]_C_int, flags int) (err error)
func Pipe2(p []int, flags int) (err error) {
if len(p) != 2 {
return EINVAL
}
var pp [2]_C_int
err = pipe2(&pp, flags)
p[0] = int(pp[0])
p[1] = int(pp[1])
return
}
//sys poll(fds *PollFd, nfds int, timeout int) (n int, err error)
func Poll(fds []PollFd, timeout int) (n int, err error) {

View file

@ -110,6 +110,16 @@ func openat2(dirfd int, path string, open_how *OpenHow, size int) (fd int, err e
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe2(p *[2]_C_int, flags int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
r0, _, e1 := Syscall6(SYS_PPOLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0)
n = int(r0)
@ -1964,13 +1974,3 @@ func PidfdGetfd(pidfd int, targetfd int, flags int) (fd int, err error) {
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe2(p *[2]_C_int, flags int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

View file

@ -46,16 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe(p *[2]_C_int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func dup2(oldfd int, newfd int) (err error) {
_, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
if e1 != 0 {

View file

@ -689,16 +689,6 @@ func utimes(path string, times *[2]Timeval) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe(p *[2]_C_int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
n = int(r0)

View file

@ -46,16 +46,6 @@ func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe(p *[2]_C_int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
r0, _, e1 := Syscall(SYS_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
fd = int(r0)

View file

@ -684,18 +684,6 @@ func Pause() (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe() (p1 int, p2 int, err error) {
r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0)
p1 = int(r0)
p2 = int(r1)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) {
r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset))
xaddr = uintptr(r0)

View file

@ -684,18 +684,6 @@ func Pause() (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe() (p1 int, p2 int, err error) {
r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0)
p1 = int(r0)
p2 = int(r1)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func mmap2(addr uintptr, length uintptr, prot int, flags int, fd int, pageOffset uintptr) (xaddr uintptr, err error) {
r0, _, e1 := Syscall6(SYS_MMAP2, uintptr(addr), uintptr(length), uintptr(prot), uintptr(flags), uintptr(fd), uintptr(pageOffset))
xaddr = uintptr(r0)

View file

@ -695,16 +695,6 @@ func setrlimit(resource int, rlim *rlimit32) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe(p *[2]_C_int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
n = int(r0)

View file

@ -741,16 +741,6 @@ func utimes(path string, times *[2]Timeval) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe(p *[2]_C_int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
n = int(r0)

View file

@ -741,16 +741,6 @@ func utimes(path string, times *[2]Timeval) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe(p *[2]_C_int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
n = int(r0)

View file

@ -710,16 +710,6 @@ func utimes(path string, times *[2]Timeval) (err error) {
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe(p *[2]_C_int) (err error) {
_, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
n = int(r0)

View file

@ -2454,7 +2454,7 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header {
func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -2607,7 +2607,7 @@ func (c *BucketAccessControlsGetCall) Header() http.Header {
func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -2776,7 +2776,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header {
func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -2951,7 +2951,7 @@ func (c *BucketAccessControlsListCall) Header() http.Header {
func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -3117,7 +3117,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header {
func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -3296,7 +3296,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header {
func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -3484,7 +3484,7 @@ func (c *BucketsDeleteCall) Header() http.Header {
func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -3665,7 +3665,7 @@ func (c *BucketsGetCall) Header() http.Header {
func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -3873,7 +3873,7 @@ func (c *BucketsGetIamPolicyCall) Header() http.Header {
func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -4092,7 +4092,7 @@ func (c *BucketsInsertCall) Header() http.Header {
func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -4351,7 +4351,7 @@ func (c *BucketsListCall) Header() http.Header {
func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -4565,7 +4565,7 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header {
func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -4802,7 +4802,7 @@ func (c *BucketsPatchCall) Header() http.Header {
func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -5033,7 +5033,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header {
func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -5211,7 +5211,7 @@ func (c *BucketsTestIamPermissionsCall) Header() http.Header {
func (c *BucketsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -5453,7 +5453,7 @@ func (c *BucketsUpdateCall) Header() http.Header {
func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -5665,7 +5665,7 @@ func (c *ChannelsStopCall) Header() http.Header {
func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -5787,7 +5787,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header {
func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -5940,7 +5940,7 @@ func (c *DefaultObjectAccessControlsGetCall) Header() http.Header {
func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -6110,7 +6110,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header {
func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -6302,7 +6302,7 @@ func (c *DefaultObjectAccessControlsListCall) Header() http.Header {
func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -6480,7 +6480,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header {
func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -6659,7 +6659,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header {
func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -6834,7 +6834,7 @@ func (c *NotificationsDeleteCall) Header() http.Header {
func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -6985,7 +6985,7 @@ func (c *NotificationsGetCall) Header() http.Header {
func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -7157,7 +7157,7 @@ func (c *NotificationsInsertCall) Header() http.Header {
func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -7334,7 +7334,7 @@ func (c *NotificationsListCall) Header() http.Header {
func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -7514,7 +7514,7 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header {
func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -7693,7 +7693,7 @@ func (c *ObjectAccessControlsGetCall) Header() http.Header {
func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -7888,7 +7888,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header {
func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -8089,7 +8089,7 @@ func (c *ObjectAccessControlsListCall) Header() http.Header {
func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -8281,7 +8281,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header {
func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -8486,7 +8486,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header {
func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -8729,7 +8729,7 @@ func (c *ObjectsComposeCall) Header() http.Header {
func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -9085,7 +9085,7 @@ func (c *ObjectsCopyCall) Header() http.Header {
func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -9417,7 +9417,7 @@ func (c *ObjectsDeleteCall) Header() http.Header {
func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -9654,7 +9654,7 @@ func (c *ObjectsGetCall) Header() http.Header {
func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -9908,7 +9908,7 @@ func (c *ObjectsGetIamPolicyCall) Header() http.Header {
func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -10228,7 +10228,7 @@ func (c *ObjectsInsertCall) Header() http.Header {
func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -10603,7 +10603,7 @@ func (c *ObjectsListCall) Header() http.Header {
func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -10924,7 +10924,7 @@ func (c *ObjectsPatchCall) Header() http.Header {
func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -11329,7 +11329,7 @@ func (c *ObjectsRewriteCall) Header() http.Header {
func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -11636,7 +11636,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header {
func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -11841,7 +11841,7 @@ func (c *ObjectsTestIamPermissionsCall) Header() http.Header {
func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -12106,7 +12106,7 @@ func (c *ObjectsUpdateCall) Header() http.Header {
func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -12426,7 +12426,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header {
func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -12645,7 +12645,7 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header {
func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -12798,7 +12798,7 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header {
func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -12937,7 +12937,7 @@ func (c *ProjectsHmacKeysGetCall) Header() http.Header {
func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -13139,7 +13139,7 @@ func (c *ProjectsHmacKeysListCall) Header() http.Header {
func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -13338,7 +13338,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header {
func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}
@ -13517,7 +13517,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header {
func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) {
reqHeaders := make(http.Header)
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210830")
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20210915")
for k, v := range c.header_ {
reqHeaders[k] = v
}

vendor/modules.txt vendored
View file

@ -1,5 +1,4 @@
# cloud.google.com/go v0.94.1
## explicit
cloud.google.com/go
cloud.google.com/go/compute/metadata
cloud.google.com/go/iam
@ -22,14 +21,14 @@ github.com/VictoriaMetrics/fasthttp/stackless
# github.com/VictoriaMetrics/metrics v1.18.0
## explicit
github.com/VictoriaMetrics/metrics
# github.com/VictoriaMetrics/metricsql v0.21.0
# github.com/VictoriaMetrics/metricsql v0.24.0
## explicit
github.com/VictoriaMetrics/metricsql
github.com/VictoriaMetrics/metricsql/binaryop
# github.com/VividCortex/ewma v1.2.0
## explicit
github.com/VividCortex/ewma
# github.com/aws/aws-sdk-go v1.40.43
# github.com/aws/aws-sdk-go v1.40.45
## explicit
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/arn
@ -122,11 +121,12 @@ github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
github.com/google/go-cmp/cmp/internal/function
github.com/google/go-cmp/cmp/internal/value
# github.com/googleapis/gax-go/v2 v2.1.0
# github.com/googleapis/gax-go/v2 v2.1.1
## explicit
github.com/googleapis/gax-go/v2
github.com/googleapis/gax-go/v2/apierror
github.com/googleapis/gax-go/v2/apierror/internal/proto
# github.com/influxdata/influxdb v1.9.3
# github.com/influxdata/influxdb v1.9.4
## explicit
github.com/influxdata/influxdb/client/v2
github.com/influxdata/influxdb/models
@ -240,7 +240,7 @@ go.opencensus.io/trace/tracestate
# go.uber.org/atomic v1.9.0
## explicit
go.uber.org/atomic
# golang.org/x/net v0.0.0-20210913180222-943fd674d43e
# golang.org/x/net v0.0.0-20210917221730-978cfadd31cf
## explicit
golang.org/x/net/context
golang.org/x/net/context/ctxhttp
@ -264,7 +264,7 @@ golang.org/x/oauth2/jws
golang.org/x/oauth2/jwt
# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sync/errgroup
# golang.org/x/sys v0.0.0-20210915083310-ed5796bab164
# golang.org/x/sys v0.0.0-20210917161153-d61c044b1678
## explicit
golang.org/x/sys/internal/unsafeheader
golang.org/x/sys/unix
@ -275,7 +275,7 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
# google.golang.org/api v0.56.0
# google.golang.org/api v0.57.0
## explicit
google.golang.org/api/googleapi
google.golang.org/api/googleapi/transport
@ -305,7 +305,7 @@ google.golang.org/appengine/internal/socket
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/socket
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af
# google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4
## explicit
google.golang.org/genproto/googleapis/api/annotations
google.golang.org/genproto/googleapis/iam/v1