Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git

Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Commit a7333a7380
93 changed files with 6392 additions and 1568 deletions
@@ -597,6 +597,8 @@ The UI allows exploring query results via graphs and tables. Graphs support scro

 Query history can be navigated by holding `Ctrl` (or `Cmd` on MacOS) and pressing `up` or `down` arrows on the keyboard while the cursor is located in the query input field.

+Multi-line queries can be entered by pressing `Shift-Enter` in query input field.
+
 When querying the [backfilled data](https://docs.victoriametrics.com/#backfilling), it may be useful disabling response cache by clicking `Enable cache` checkbox.

 VMUI automatically adjusts the interval between datapoints on the graph depending on the horizontal resolution and on the selected time range. The step value can be customized by clicking `Override step value` checkbox.
@@ -1095,7 +1097,7 @@ with the enabled de-duplication. See [this section](#deduplication) for details.

 ## Deduplication

-VictoriaMetrics de-duplicates data points if `-dedup.minScrapeInterval` command-line flag is set to positive duration. For example, `-dedup.minScrapeInterval=60s` would de-duplicate data points on the same time series if they fall within the same discrete 60s bucket. The earliest data point will be kept. In the case of equal timestamps, an arbitrary data point will be kept. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2112#issuecomment-1032587618) for more details on how downsampling works.
+VictoriaMetrics leaves a single raw sample with the biggest timestamp per each `-dedup.minScrapeInterval` discrete interval if `-dedup.minScrapeInterval` is set to positive duration. For example, `-dedup.minScrapeInterval=60s` would leave a single raw sample with the biggest timestamp per each discrete 60s interval. If multiple raw samples have the same biggest timestamp on the given `-dedup.minScrapeInterval` discrete interval, then an arbitrary sample out of these samples is left. This aligns with the [staleness rules in Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness).

 The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. It is safe to use deduplication and downsampling simultaneously.
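The rewritten paragraph above fully specifies the selection rule: within each discrete `-dedup.minScrapeInterval` window, only the raw sample with the biggest timestamp survives. A minimal sketch of that rule in Go, with illustrative types and names rather than VictoriaMetrics internals:

```go
package main

import "fmt"

type sample struct {
	tsMillis int64 // timestamp in milliseconds
	value    float64
}

// dedup keeps one sample per discrete interval d (in milliseconds): the one
// with the biggest timestamp, as the documented behaviour describes.
// Input must be sorted by timestamp in ascending order.
func dedup(samples []sample, d int64) []sample {
	var out []sample
	for _, s := range samples {
		if len(out) > 0 && out[len(out)-1].tsMillis/d == s.tsMillis/d {
			out[len(out)-1] = s // same interval: the later sample wins
			continue
		}
		out = append(out, s)
	}
	return out
}

func main() {
	samples := []sample{{1000, 1}, {30000, 2}, {61000, 3}}
	// With d=60s the first two samples share an interval,
	// so only {30000 2} and {61000 3} remain.
	fmt.Println(dedup(samples, 60000))
}
```

In VictoriaMetrics itself deduplication is applied during background merges and at query time; the sketch only illustrates the per-interval selection.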
@@ -1620,7 +1622,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
 	The maximum size in bytes of a single DataDog POST request to /api/v1/series
 	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 67108864)
   -dedup.minScrapeInterval duration
-	Leave only the first sample in every time series per each discrete interval equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication and https://docs.victoriametrics.com/#downsampling
+	Leave only the last sample in every time series per each discrete interval equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication and https://docs.victoriametrics.com/#downsampling
   -deleteAuthKey string
 	authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series and /tags/delSeries
   -denyQueriesOutsideRetention
@@ -24,7 +24,7 @@ import (

 var (
 	httpListenAddr = flag.String("httpListenAddr", ":8428", "TCP address to listen for http connections")
-	minScrapeInterval = flag.Duration("dedup.minScrapeInterval", 0, "Leave only the first sample in every time series per each discrete interval "+
+	minScrapeInterval = flag.Duration("dedup.minScrapeInterval", 0, "Leave only the last sample in every time series per each discrete interval "+
 		"equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication and https://docs.victoriametrics.com/#downsampling")
 	dryRun = flag.Bool("dryRun", false, "Whether to check only -promscrape.config and then exit. "+
 		"Unknown config entries aren't allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse=false command-line flag")
@@ -481,6 +481,9 @@ per rule before giving up.
 (rules which depend on each other) rules. It is expected, that remote storage will be able to persist
 previously accepted data during the delay, so data will be available for the subsequent queries.
 Keep it equal or bigger than `-remoteWrite.flushInterval`.
+* `replay.disableProgressBar` - whether to disable progress bar which shows progress work.
+  Progress bar may generate a lot of log records, which is not formatted as standard VictoriaMetrics logger.
+  It could break logs parsing by external system and generate additional load on it.

 See full description for these flags in `./vmalert --help`.
@@ -92,12 +92,12 @@ var tplHeaders = []string{
 // requires a queryFunction as an argument.
 func (a *Alert) ExecTemplate(q QueryFn, labels, annotations map[string]string) (map[string]string, error) {
 	tplData := AlertTplData{Value: a.Value, Labels: labels, Expr: a.Expr}
-	return templateAnnotations(annotations, tplData, funcsWithQuery(q))
+	return templateAnnotations(annotations, tplData, funcsWithQuery(q), true)
 }

 // ExecTemplate executes the given template for given annotations map.
 func ExecTemplate(q QueryFn, annotations map[string]string, tpl AlertTplData) (map[string]string, error) {
-	return templateAnnotations(annotations, tpl, funcsWithQuery(q))
+	return templateAnnotations(annotations, tpl, funcsWithQuery(q), true)
 }

 // ValidateTemplates validate annotations for possible template error, uses empty data for template population
@@ -105,11 +105,11 @@ func ValidateTemplates(annotations map[string]string) error {
 	_, err := templateAnnotations(annotations, AlertTplData{
 		Labels: map[string]string{},
 		Value:  0,
-	}, tmplFunc)
+	}, tmplFunc, false)
 	return err
 }

-func templateAnnotations(annotations map[string]string, data AlertTplData, funcs template.FuncMap) (map[string]string, error) {
+func templateAnnotations(annotations map[string]string, data AlertTplData, funcs template.FuncMap, execute bool) (map[string]string, error) {
 	var builder strings.Builder
 	var buf bytes.Buffer
 	eg := new(utils.ErrGroup)
@@ -122,7 +122,7 @@ func templateAnnotations(annotations map[string]string, data AlertTplData, funcs
 		builder.Grow(len(header) + len(text))
 		builder.WriteString(header)
 		builder.WriteString(text)
-		if err := templateAnnotation(&buf, builder.String(), tData, funcs); err != nil {
+		if err := templateAnnotation(&buf, builder.String(), tData, funcs, execute); err != nil {
 			r[key] = text
 			eg.Add(fmt.Errorf("key %q, template %q: %w", key, text, err))
 			continue
@@ -138,12 +138,15 @@ type tplData struct {
 	ExternalURL string
 }

-func templateAnnotation(dst io.Writer, text string, data tplData, funcs template.FuncMap) error {
+func templateAnnotation(dst io.Writer, text string, data tplData, funcs template.FuncMap, execute bool) error {
 	t := template.New("").Funcs(funcs).Option("missingkey=zero")
 	tpl, err := t.Parse(text)
 	if err != nil {
 		return fmt.Errorf("error parsing annotation: %w", err)
 	}
+	if !execute {
+		return nil
+	}
 	if err = tpl.Execute(dst, data); err != nil {
 		return fmt.Errorf("error evaluating annotation template: %w", err)
 	}
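The `execute` flag threaded through these functions is what lets `ValidateTemplates` parse annotation templates without executing them, so templates whose functions expect non-empty arguments no longer fail validation. A standalone sketch of the same parse-only idea with Go's `text/template` (the function name is illustrative):

```go
package main

import (
	"fmt"
	"io"
	"text/template"
)

// validateTemplate parses tpl and optionally executes it against data.
// execute=false checks syntax only, mirroring the vmalert change that
// skips execution during rules' validation.
func validateTemplate(dst io.Writer, tpl string, data any, execute bool) error {
	t, err := template.New("").Option("missingkey=zero").Parse(tpl)
	if err != nil {
		return fmt.Errorf("error parsing annotation: %w", err)
	}
	if !execute {
		return nil // syntax is fine; don't evaluate against (empty) data
	}
	if err := t.Execute(dst, data); err != nil {
		return fmt.Errorf("error evaluating annotation template: %w", err)
	}
	return nil
}

func main() {
	// Executing this against nil data could fail at runtime,
	// but parse-only validation succeeds.
	err := validateTemplate(io.Discard, `{{ .Labels.instance }}`, nil, false)
	fmt.Println("validate:", err) // validate: <nil>
}
```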
@@ -29,6 +29,8 @@ var (
 		"Max number of data points expected in one request. The higher the value, the less requests will be made during replay.")
 	replayRuleRetryAttempts = flag.Int("replay.ruleRetryAttempts", 5,
 		"Defines how many retries to make before giving up on rule if request for it returns an error.")
+	disableProgressBar = flag.Bool("replay.disableProgressBar", false, "Whether to disable rendering progress bars during the replay. "+
+		"Progress bar rendering might be verbose or break the logs parsing, so it is recommended to be disabled when not used in interactive mode.")
 )

 func replay(groupsCfg []config.Group, qb datasource.QuerierBuilder, rw *remotewrite.Client) error {
@@ -88,7 +90,10 @@ func (g *Group) replay(start, end time.Time, rw *remotewrite.Client) int {
 		g.Name, g.Interval, iterations, step)
 	for _, rule := range g.Rules {
 		fmt.Printf("> Rule %q (ID: %d)\n", rule, rule.ID())
-		bar := pb.StartNew(iterations)
+		var bar *pb.ProgressBar
+		if !*disableProgressBar {
+			bar = pb.StartNew(iterations)
+		}
 		ri.reset()
 		for ri.next() {
 			n, err := replayRule(rule, ri.s, ri.e, rw)
@@ -96,9 +101,13 @@ func (g *Group) replay(start, end time.Time, rw *remotewrite.Client) int {
 				logger.Fatalf("rule %q: %s", rule, err)
 			}
 			total += n
-			bar.Increment()
+			if bar != nil {
+				bar.Increment()
+			}
 		}
-		bar.Finish()
+		if bar != nil {
+			bar.Finish()
+		}
 		// sleep to let remote storage to flush data on-disk
 		// so chained rules could be calculated correctly
 		time.Sleep(*replayRulesDelay)
@@ -9,11 +9,6 @@
 			min-height: 75rem;
 			padding-top: 4.5rem;
 		}
-		pre {
-			overflow: scroll;
-			max-width: 600px;
-			min-height: 30px;
-		}
 		.group-heading {
 			cursor: pointer;
 			padding: 5px;
@@ -41,11 +41,6 @@ func StreamHeader(qw422016 *qt422016.Writer, title string, pages []NavItem) {
 			min-height: 75rem;
 			padding-top: 4.5rem;
 		}
-		pre {
-			overflow: scroll;
-			max-width: 600px;
-			min-height: 30px;
-		}
 		.group-heading {
 			cursor: pointer;
 			padding: 5px;
@@ -71,37 +66,37 @@ func StreamHeader(qw422016 *qt422016.Writer, title string, pages []NavItem) {
 	</head>
 	<body>
 `)
-//line app/vmalert/tpl/header.qtpl:41
+//line app/vmalert/tpl/header.qtpl:36
 	StreamPrintNavItems(qw422016, title, pages)
-//line app/vmalert/tpl/header.qtpl:41
+//line app/vmalert/tpl/header.qtpl:36
 	qw422016.N().S(`
 	<main class="px-2">
 `)
-//line app/vmalert/tpl/header.qtpl:43
+//line app/vmalert/tpl/header.qtpl:38
 }

-//line app/vmalert/tpl/header.qtpl:43
+//line app/vmalert/tpl/header.qtpl:38
 func WriteHeader(qq422016 qtio422016.Writer, title string, pages []NavItem) {
-//line app/vmalert/tpl/header.qtpl:43
+//line app/vmalert/tpl/header.qtpl:38
 	qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmalert/tpl/header.qtpl:43
+//line app/vmalert/tpl/header.qtpl:38
 	StreamHeader(qw422016, title, pages)
-//line app/vmalert/tpl/header.qtpl:43
+//line app/vmalert/tpl/header.qtpl:38
 	qt422016.ReleaseWriter(qw422016)
-//line app/vmalert/tpl/header.qtpl:43
+//line app/vmalert/tpl/header.qtpl:38
 }

-//line app/vmalert/tpl/header.qtpl:43
+//line app/vmalert/tpl/header.qtpl:38
 func Header(title string, pages []NavItem) string {
-//line app/vmalert/tpl/header.qtpl:43
+//line app/vmalert/tpl/header.qtpl:38
 	qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmalert/tpl/header.qtpl:43
+//line app/vmalert/tpl/header.qtpl:38
 	WriteHeader(qb422016, title, pages)
-//line app/vmalert/tpl/header.qtpl:43
+//line app/vmalert/tpl/header.qtpl:38
 	qs422016 := string(qb422016.B)
-//line app/vmalert/tpl/header.qtpl:43
+//line app/vmalert/tpl/header.qtpl:38
 	qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmalert/tpl/header.qtpl:43
+//line app/vmalert/tpl/header.qtpl:38
 	return qs422016
-//line app/vmalert/tpl/header.qtpl:43
+//line app/vmalert/tpl/header.qtpl:38
 }
@@ -77,7 +77,7 @@
 		<b>record:</b> {%s r.Name %}
 	{% endif %}
 	<br>
-	<code><pre>{%s r.Query %}</pre></code><br>
+	<code><pre class="text-wrap">{%s r.Query %}</pre></code><br>
 	{% if len(r.Labels) > 0 %} <b>Labels:</b>{% endif %}
 	{% for k, v := range r.Labels %}
 		<span class="ms-1 badge bg-primary">{%s k %}={%s v %}</span>
@@ -343,4 +343,4 @@
 {% func badgeRestored() %}
 <span class="badge bg-warning text-dark" title="Alert state was restored after the service restart from remote storage">restored</span>
-{% endfunc %}
+{% endfunc %}
@@ -288,7 +288,7 @@ func StreamListGroups(qw422016 *qt422016.Writer, groups []APIGroup) {
//line app/vmalert/web.qtpl:78
 	qw422016.N().S(`
 	<br>
-	<code><pre>`)
+	<code><pre class="text-wrap">`)
//line app/vmalert/web.qtpl:80
 	qw422016.E().S(r.Query)
//line app/vmalert/web.qtpl:80
app/vmctl/barpool/pool.go (new file, +26)

@@ -0,0 +1,26 @@
+// Package barpool provides access to the global
+// pool of progress bars, so they could be rendered
+// altogether.
+package barpool
+
+import "github.com/cheggaaa/pb/v3"
+
+var pool = pb.NewPool()
+
+// Add adds bar to the global pool
+func Add(bar *pb.ProgressBar) { pool.Add(bar) }
+
+// Start starts the global pool
+// Must be called after all progress bars were added
+func Start() error { return pool.Start() }
+
+// Stop stops the global pool
+func Stop() { _ = pool.Stop() }
+
+// AddWithTemplate adds bar with the given template
+// to the global pool
+func AddWithTemplate(format string, total int) *pb.ProgressBar {
+	bar := pb.ProgressBarTemplate(format).New(total)
+	Add(bar)
+	return bar
+}
@@ -36,6 +36,7 @@ const (
 	vmBatchSize = "vm-batch-size"
 	vmSignificantFigures = "vm-significant-figures"
 	vmRoundDigits = "vm-round-digits"
+	vmDisableProgressBar = "vm-disable-progress-bar"

 	// also used in vm-native
 	vmExtraLabel = "vm-extra-label"
@@ -109,6 +110,10 @@ var (
 			Usage: "Optional data transfer rate limit in bytes per second.\n" +
 				"By default the rate limit is disabled. It can be useful for limiting load on configured via '--vmAddr' destination.",
 		},
+		&cli.BoolFlag{
+			Name:  vmDisableProgressBar,
+			Usage: "Whether to disable progress bar per each worker during the import.",
+		},
 	}
 )
@@ -6,9 +6,9 @@ import (
 	"log"
 	"sync"

+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
-	"github.com/cheggaaa/pb/v3"
 )

 type influxProcessor struct {
@@ -44,7 +44,11 @@ func (ip *influxProcessor) run(silent, verbose bool) error {
 		return nil
 	}

-	bar := pb.StartNew(len(series))
+	bar := barpool.AddWithTemplate(fmt.Sprintf(barTpl, "Processing series"), len(series))
+	if err := barpool.Start(); err != nil {
+		return err
+	}
+
 	seriesCh := make(chan *influx.Series)
 	errCh := make(chan error)
 	ip.im.ResetStats()
@@ -84,7 +88,7 @@ func (ip *influxProcessor) run(silent, verbose bool) error {
 			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
 		}
 	}
-	bar.Finish()
+	barpool.Stop()
 	log.Println("Import finished!")
 	log.Print(ip.im.Stats())
 	return nil
@@ -55,6 +55,9 @@ func main() {
 		}

 		vmCfg := initConfigVM(c)
+		// disable progress bars since openTSDB implementation
+		// does not use progress bar pool
+		vmCfg.DisableProgressBar = true
 		importer, err := vm.NewImporter(vmCfg)
 		if err != nil {
 			return fmt.Errorf("failed to create VM importer: %s", err)
@@ -233,5 +236,6 @@ func initConfigVM(c *cli.Context) vm.Config {
 		RoundDigits: c.Int(vmRoundDigits),
 		ExtraLabels: c.StringSlice(vmExtraLabel),
 		RateLimit: c.Int64(vmRateLimit),
+		DisableProgressBar: c.Bool(vmDisableProgressBar),
 	}
 }
@@ -5,9 +5,9 @@ import (
 	"log"
 	"sync"

+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
-	"github.com/cheggaaa/pb/v3"
 	"github.com/prometheus/prometheus/tsdb"
 )
@@ -38,7 +38,12 @@ func (pp *prometheusProcessor) run(silent, verbose bool) error {
 		return nil
 	}

-	bar := pb.StartNew(len(blocks))
+	bar := barpool.AddWithTemplate(fmt.Sprintf(barTpl, "Processing blocks"), len(blocks))
+
+	if err := barpool.Start(); err != nil {
+		return err
+	}
+
 	blockReadersCh := make(chan tsdb.BlockReader)
 	errCh := make(chan error, pp.cc)
 	pp.im.ResetStats()
@@ -81,7 +86,7 @@ func (pp *prometheusProcessor) run(silent, verbose bool) error {
 			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
 		}
 	}
-	bar.Finish()
+	barpool.Stop()
 	log.Println("Import finished!")
 	log.Print(pp.im.Stats())
 	return nil
@@ -9,6 +9,8 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
 )

+const barTpl = `{{ blue "%s:" }} {{ counters . }} {{ bar . "[" "█" (cycle . "█") "▒" "]" }} {{ percent . }}`
+
 func prompt(question string) bool {
 	reader := bufio.NewReader(os.Stdin)
 	fmt.Print(question, " [Y/n] ")
@@ -13,6 +13,9 @@ import (
 	"sync"
 	"time"

+	"github.com/cheggaaa/pb/v3"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
 )
@@ -51,6 +54,8 @@ type Config struct {
 	// RateLimit defines a data transfer speed in bytes per second.
 	// Is applied to each worker (see Concurrency) independently.
 	RateLimit int64
+	// Whether to disable progress bar per VM worker
+	DisableProgressBar bool
 }

 // Importer performs insertion of timeseries
@@ -144,15 +149,22 @@ func NewImporter(cfg Config) (*Importer, error) {

 	im.wg.Add(int(cfg.Concurrency))
 	for i := 0; i < int(cfg.Concurrency); i++ {
-		go func() {
+		var bar *pb.ProgressBar
+		if !cfg.DisableProgressBar {
+			pbPrefix := fmt.Sprintf(`{{ green "VM worker %d:" }}`, i)
+			bar = barpool.AddWithTemplate(pbPrefix+pbTpl, 0)
+		}
+		go func(bar *pb.ProgressBar) {
 			defer im.wg.Done()
-			im.startWorker(cfg.BatchSize, cfg.SignificantFigures, cfg.RoundDigits)
-		}()
+			im.startWorker(bar, cfg.BatchSize, cfg.SignificantFigures, cfg.RoundDigits)
+		}(bar)
 	}
 	im.ResetStats()
 	return im, nil
 }

+const pbTpl = `{{ (cycle . "←" "↖" "↑" "↗" "→" "↘" "↓" "↙" ) }} {{speed . "%s samples/s"}}`
+
 // ImportError is type of error generated
 // in case of unsuccessful import request
 type ImportError struct {
@@ -182,7 +194,7 @@ func (im *Importer) Close() {
 	})
 }

-func (im *Importer) startWorker(batchSize, significantFigures, roundDigits int) {
+func (im *Importer) startWorker(bar *pb.ProgressBar, batchSize, significantFigures, roundDigits int) {
 	var batch []*TimeSeries
 	var dataPoints int
 	var waitForBatch time.Time
@@ -217,6 +229,11 @@ func (im *Importer) startWorker(batchSize, significantFigures, roundDigits int)

 			batch = append(batch, ts)
 			dataPoints += len(ts.Values)
+
+			if bar != nil {
+				bar.Add(len(ts.Values))
+			}
+
 			if dataPoints < batchSize {
 				continue
 			}
@@ -232,8 +249,8 @@ func (im *Importer) startWorker(batchSize, significantFigures, roundDigits int)
 				// make a new batch, since old one was referenced as err
 				batch = make([]*TimeSeries, len(batch))
 			}
-			batch = batch[:0]
 			dataPoints = 0
+			batch = batch[:0]
 			waitForBatch = time.Now()
 		}
 	}
@@ -7,8 +7,7 @@ import (
 	"log"
 	"net/http"

-	"github.com/cheggaaa/pb/v3"
-
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
 )
@@ -49,7 +48,7 @@ const (
 	nativeExportAddr = "api/v1/export/native"
 	nativeImportAddr = "api/v1/import/native"

-	barTpl = `Total: {{counters . }} {{ cycle . "↖" "↗" "↘" "↙" }} Speed: {{speed . }} {{string . "suffix"}}`
+	nativeBarTpl = `Total: {{counters . }} {{ cycle . "↖" "↗" "↘" "↙" }} Speed: {{speed . }} {{string . "suffix"}}`
 )

 func (p *vmNativeProcessor) run() error {
@@ -84,8 +83,12 @@ func (p *vmNativeProcessor) run() error {
 	}()

 	fmt.Printf("Initing import process to %q:\n", p.dst.addr)
-	bar := pb.ProgressBarTemplate(barTpl).Start64(0)
+	bar := barpool.AddWithTemplate(nativeBarTpl, 0)
 	barReader := bar.NewProxyReader(exportReader)
+	if err := barpool.Start(); err != nil {
+		log.Printf("error start process bars pool: %s", err)
+		return err
+	}

 	w := io.Writer(pw)
 	if p.rateLimit > 0 {
@@ -101,7 +104,7 @@ func (p *vmNativeProcessor) run() error {
 	}
 	<-sync

-	bar.Finish()
+	barpool.Stop()
 	return nil
 }
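The `bar.NewProxyReader(exportReader)` call above is how byte-level progress gets tracked during native migration: the bar advances as bytes are read through the wrapper. A minimal standalone sketch of the same `pb/v3` pattern, with a stand-in data source:

```go
package main

import (
	"io"
	"strings"

	"github.com/cheggaaa/pb/v3"
)

func main() {
	// Stand-in for the export stream read from the source VictoriaMetrics.
	src := strings.NewReader(strings.Repeat("x", 1<<20))

	bar := pb.Full.Start64(int64(src.Len()))
	barReader := bar.NewProxyReader(src) // the bar advances on every Read

	// Copy through the proxy; in vmctl the destination is the import request body.
	if _, err := io.Copy(io.Discard, barReader); err != nil {
		panic(err)
	}
	bar.Finish()
}
```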
@@ -1,7 +1,7 @@
 {
   "files": {
     "main.css": "./static/css/main.d8362c27.css",
-    "main.js": "./static/js/main.7e81baa6.js",
+    "main.js": "./static/js/main.9635653f.js",
     "static/js/362.1f16598a.chunk.js": "./static/js/362.1f16598a.chunk.js",
     "static/js/27.939f971b.chunk.js": "./static/js/27.939f971b.chunk.js",
     "static/media/README.md": "./static/media/README.40ebc3a1f4adae949154.md",
@@ -9,6 +9,6 @@
   },
   "entrypoints": [
     "static/css/main.d8362c27.css",
-    "static/js/main.7e81baa6.js"
+    "static/js/main.9635653f.js"
   ]
 }
@@ -1 +1 @@
-<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script defer="defer" src="./static/js/main.7e81baa6.js"></script><link href="./static/css/main.d8362c27.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
+<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script defer="defer" src="./static/js/main.9635653f.js"></script><link href="./static/css/main.d8362c27.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

app/vmselect/vmui/static/js/main.9635653f.js (new file, +2)
File diff suppressed because one or more lines are too long
@@ -1,11 +1,3 @@
-/**
- * A better abstraction over CSS.
- *
- * @copyright Oleg Isonen (Slobodskoi) / Isonen 2014-present
- * @website https://github.com/cssinjs/jss
- * @license MIT
- */
-
 /**
  * React Router DOM v6.3.0
  *
@@ -23,10 +23,11 @@ import (
 )

 var (
-	retentionPeriod = flagutil.NewDuration("retentionPeriod", 1, "Data with timestamps outside the retentionPeriod is automatically deleted")
+	retentionPeriod = flagutil.NewDuration("retentionPeriod", "1", "Data with timestamps outside the retentionPeriod is automatically deleted")
 	snapshotAuthKey = flag.String("snapshotAuthKey", "", "authKey, which must be passed in query string to /snapshot* pages")
 	forceMergeAuthKey = flag.String("forceMergeAuthKey", "", "authKey, which must be passed in query string to /internal/force_merge pages")
 	forceFlushAuthKey = flag.String("forceFlushAuthKey", "", "authKey, which must be passed in query string to /internal/force_flush pages")
+	snapshotsMaxAge = flag.Duration("snapshotsMaxAge", 0, "Automatically delete snapshots older than -snapshotsMaxAge if it is set to non-zero duration. Make sure that backup process has enough time to finish the backup before the corresponding snapshot is automatically deleted")

 	precisionBits = flag.Int("precisionBits", 64, "The number of precision bits to store per each value. Lower precision bits improves data compression at the cost of precision loss")
@@ -103,6 +104,7 @@ func InitWithoutMetrics(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
 		logger.Fatalf("cannot open a storage at %s with -retentionPeriod=%s: %s", *DataPath, retentionPeriod, err)
 	}
 	Storage = strg
+	initStaleSnapshotsRemover()

 	var m storage.Metrics
 	Storage.UpdateMetrics(&m)
@@ -259,6 +261,7 @@ func Stop() {
 	startTime := time.Now()
 	WG.WaitAndBlock()
 	promdb.MustClose()
+	stopStaleSnapshotsRemover()
 	Storage.MustClose()
 	logger.Infof("successfully closed the storage in %.3f seconds", time.Since(startTime).Seconds())
@@ -378,6 +381,40 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 	}
 }

+func initStaleSnapshotsRemover() {
+	staleSnapshotsRemoverCh = make(chan struct{})
+	if *snapshotsMaxAge <= 0 {
+		return
+	}
+	staleSnapshotsRemoverWG.Add(1)
+	go func() {
+		defer staleSnapshotsRemoverWG.Done()
+		t := time.NewTicker(11 * time.Second)
+		defer t.Stop()
+		for {
+			select {
+			case <-staleSnapshotsRemoverCh:
+				return
+			case <-t.C:
+			}
+			if err := Storage.DeleteStaleSnapshots(*snapshotsMaxAge); err != nil {
+				// Use logger.Errorf instead of logger.Fatalf in the hope the error is temporary.
+				logger.Errorf("cannot delete stale snapshots: %s", err)
+			}
+		}
+	}()
+}
+
+func stopStaleSnapshotsRemover() {
+	close(staleSnapshotsRemoverCh)
+	staleSnapshotsRemoverWG.Wait()
+}
+
+var (
+	staleSnapshotsRemoverCh chan struct{}
+	staleSnapshotsRemoverWG sync.WaitGroup
+)
+
 var activeForceMerges = metrics.NewCounter("vm_active_force_merges")

 func registerStorageMetrics() {
@@ -1,6 +1,7 @@
-FROM node:17-alpine3.15
+FROM node:18-alpine3.15

-RUN apk update && apk add --no-cache bash bash-doc bash-completion libtool autoconf automake nasm pkgconfig libpng gcc make g++ zlib-dev gawk
+RUN apk update && apk upgrade
+RUN apk add --no-cache bash bash-doc bash-completion libtool autoconf automake nasm pkgconfig libpng gcc make g++ zlib-dev gawk

 RUN mkdir -p /app
 WORKDIR /app
@@ -1,4 +1,4 @@
-FROM golang:1.17.3 as build-web-stage
+FROM golang:1.18.1 as build-web-stage
 COPY build /build

 WORKDIR /build
@@ -6,7 +6,7 @@ COPY web/ /build/
 RUN GOOS=linux GOARCH=amd64 GO111MODULE=on CGO_ENABLED=0 go build -o web-amd64 github.com/VictoriMetrics/vmui/ && \
 	GOOS=windows GOARCH=amd64 GO111MODULE=on CGO_ENABLED=0 go build -o web-windows github.com/VictoriMetrics/vmui/

-FROM alpine:3.15.0
+FROM alpine:3.15.4
 USER root

 COPY --from=build-web-stage /build/web-amd64 /app/web
app/vmui/packages/vmui/package-lock.json (generated, 2225 lines changed)
File diff suppressed because it is too large
@@ -9,7 +9,6 @@
     "@mui/icons-material": "^5.6.0",
     "@mui/lab": "^5.0.0-alpha.73",
     "@mui/material": "^5.5.1",
-    "@mui/styles": "^5.5.1",
     "@testing-library/jest-dom": "^5.16.2",
     "@testing-library/react": "^13.0.0",
     "@testing-library/user-event": "^14.0.4",
@@ -19,11 +19,9 @@ import TextField from "@mui/material/TextField";
 import Typography from "@mui/material/Typography";
 import DialogTitle from "@mui/material/DialogTitle";
 import Dialog from "@mui/material/Dialog";
-import createStyles from "@mui/styles/createStyles";
 import TabPanel from "./AuthTabPanel";
 import PersonIcon from "@mui/icons-material/Person";
 import LockIcon from "@mui/icons-material/Lock";
-import makeStyles from "@mui/styles/makeStyles";
 import {useAuthDispatch, useAuthState} from "../../../../state/auth/AuthStateContext";
 import {AUTH_METHOD, WithCheckbox} from "../../../../state/auth/reducer";
 import {ChangeEvent, ClipboardEvent} from "react";
@@ -39,14 +37,6 @@ export interface AuthTab {
   id: AUTH_METHOD;
 }

-const useStyles = makeStyles(() =>
-  createStyles({
-    tabsContent: {
-      height: "200px"
-    },
-  }),
-);
-
 const BEARER_PREFIX = "Bearer ";

 const tabs: AuthTab[] = [
@@ -57,7 +47,6 @@ const tabs: AuthTab[] = [

 export const AuthDialog: React.FC<DialogProps> = (props) => {

-  const classes = useStyles();
   const {onClose, open} = props;

   const {saveAuthLocally, basicData, bearerData, authMethod} = useAuthState();
@@ -133,7 +122,7 @@ export const AuthDialog: React.FC<DialogProps> = (props) => {
           tabs.map(t => <Tab key={t.id} label={t.title} />)
         }
       </Tabs>
-      <Box p={0} display="flex" flexDirection="column" className={classes.tabsContent}>
+      <Box p={0} display="flex" flexDirection="column" sx={{height: "200px"}}>
         <Box flexGrow={1}>
           <TabPanel value={tabIndex} index={0}>
             <Typography style={{fontStyle: "italic"}}>
@@ -213,4 +202,4 @@ export const AuthDialog: React.FC<DialogProps> = (props) => {
     </DialogActions>
   </Dialog>
 );
-};
+};
@@ -7,12 +7,11 @@ import Paper from "@mui/material/Paper";
 import Popper from "@mui/material/Popper";
 import Tooltip from "@mui/material/Tooltip";
 import Typography from "@mui/material/Typography";
-import makeStyles from "@mui/styles/makeStyles";
 import CloseIcon from "@mui/icons-material/Close";
 import ClickAwayListener from "@mui/material/ClickAwayListener";
 import {AxisRange, YaxisState} from "../../../../state/graph/reducer";

-const useStyles = makeStyles({
+const classes = {
   popover: {
     display: "grid",
     gridGap: "16px",
@@ -32,7 +31,7 @@ const useStyles = makeStyles({
     gridGap: "6px",
     padding: "0 14px",
   }
-});
+};

 const title = "Axes Settings";

@@ -46,8 +45,6 @@ const GraphSettings: FC<GraphSettingsProps> = ({yaxis, setYaxisLimits, toggleEna
   const [anchorEl, setAnchorEl] = useState<HTMLButtonElement | null>(null);
   const open = Boolean(anchorEl);

-  const classes = useStyles();
-
   return <Box>
     <Tooltip title={title}>
       <IconButton onClick={(e) => setAnchorEl(e.currentTarget)}>
@@ -60,14 +57,14 @@ const GraphSettings: FC<GraphSettingsProps> = ({yaxis, setYaxisLimits, toggleEna
       placement="left-start"
       modifiers={[{name: "offset", options: {offset: [0, 6]}}]}>
       <ClickAwayListener onClickAway={() => setAnchorEl(null)}>
-        <Paper elevation={3} className={classes.popover}>
-          <div id="handle" className={classes.popoverHeader}>
+        <Paper elevation={3} sx={classes.popover}>
+          <Box id="handle" sx={classes.popoverHeader}>
             <Typography variant="body1"><b>{title}</b></Typography>
             <IconButton size="small" onClick={() => setAnchorEl(null)}>
               <CloseIcon style={{color: "white"}}/>
             </IconButton>
-          </div>
-          <Box className={classes.popoverBody}>
+          </Box>
+          <Box sx={classes.popoverBody}>
             <AxesLimitsConfigurator
               yaxis={yaxis}
               setYaxisLimits={setYaxisLimits}
@@ -80,4 +77,4 @@ const GraphSettings: FC<GraphSettingsProps> = ({yaxis, setYaxisLimits, toggleEna
   </Box>;
 };

-export default GraphSettings;
+export default GraphSettings;
@@ -63,7 +63,7 @@ const QueryEditor: FC<QueryEditorProps> = ({

   const hasAutocomplete = openAutocomplete && actualOptions.length;

-  if ((arrowUp || arrowDown || enter) && (hasAutocomplete || ctrlMetaKey)) {
+  if ((arrowUp || arrowDown || enter) && (hasAutocomplete || ctrlMetaKey || !shiftKey)) {
     e.preventDefault();
   }

@@ -84,7 +84,7 @@ const QueryEditor: FC<QueryEditorProps> = ({
   // Enter
   if (enter && hasAutocomplete && !shiftKey && !ctrlMetaKey) {
     setQuery(actualOptions[focusOption], index);
-  } else if (enter && ctrlKey) {
+  } else if (enter && !shiftKey) {
     runQuery();
   }
 };
@@ -129,4 +129,4 @@ const QueryEditor: FC<QueryEditorProps> = ({
   </Box>;
 };

-export default QueryEditor;
+export default QueryEditor;
@@ -1,7 +1,6 @@
 import React, {FC, useEffect, useState, useMemo} from "preact/compat";
 import {useAppDispatch, useAppState} from "../../../../state/common/StateContext";
 import {dateFromSeconds, formatDateForNativeInput} from "../../../../utils/time";
-import makeStyles from "@mui/styles/makeStyles";
 import TimeDurationSelector from "./TimeDurationSelector";
 import dayjs from "dayjs";
 import QueryBuilderIcon from "@mui/icons-material/QueryBuilder";
@@ -17,7 +16,7 @@ import Tooltip from "@mui/material/Tooltip";

 const formatDate = "YYYY-MM-DD HH:mm:ss";

-const useStyles = makeStyles({
+const classes = {
   container: {
     display: "grid",
     gridTemplateColumns: "200px auto 200px",
@@ -32,12 +31,10 @@ const useStyles = makeStyles({
   datePickerItem: {
     minWidth: "200px",
   },
-});
+};

 export const TimeSelector: FC = () => {

-  const classes = useStyles();
-
   const [until, setUntil] = useState<string>();
   const [from, setFrom] = useState<string>();

@@ -91,9 +88,9 @@ export const TimeSelector: FC = () => {
       modifiers={[{name: "offset", options: {offset: [0, 6]}}]}>
       <ClickAwayListener onClickAway={() => setAnchorEl(null)}>
         <Paper elevation={3}>
-          <Box className={classes.container}>
-            <Box className={classes.timeControls}>
-              <Box className={classes.datePickerItem}>
+          <Box sx={classes.container}>
+            <Box sx={classes.timeControls}>
+              <Box sx={classes.datePickerItem}>
                 <DateTimePicker
                   label="From"
                   ampm={false}
@@ -106,7 +103,7 @@ export const TimeSelector: FC = () => {
                   maxDate={dayjs(until)}
                   PopperProps={{disablePortal: true}}/>
               </Box>
-              <Box className={classes.datePickerItem}>
+              <Box sx={classes.datePickerItem}>
                 <DateTimePicker
                   label="To"
                   ampm={false}
@@ -8,7 +8,6 @@ import TableContainer from "@mui/material/TableContainer";
 import TableHead from "@mui/material/TableHead";
 import TableRow from "@mui/material/TableRow";
 import TableSortLabel from "@mui/material/TableSortLabel";
-import makeStyles from "@mui/styles/makeStyles";
 import {useSortedCategories} from "../../../hooks/useSortedCategories";
 import Alert from "@mui/material/Alert";

@@ -16,16 +15,8 @@ export interface GraphViewProps {
   data: InstantMetricResult[];
 }

-const useStyles = makeStyles({
-  deemphasized: {
-    opacity: 0.4
-  }
-});
-
 const TableView: FC<GraphViewProps> = ({data}) => {

-  const classes = useStyles();
-
   const sortedColumns = useSortedCategories(data);

   const [orderBy, setOrderBy] = useState("");
@@ -88,7 +79,7 @@ const TableView: FC<GraphViewProps> = ({data}) => {
           {row.metadata.map((rowMeta, index2) => {
             const prevRowValue = rows[index - 1] && rows[index - 1].metadata[index2];
             return (
-              <TableCell className={prevRowValue === rowMeta ? classes.deemphasized : undefined}
+              <TableCell sx={prevRowValue === rowMeta ? {opacity: 0.4} : {}}
                          key={index2}>{rowMeta}</TableCell>
             );
           }
@@ -104,4 +95,4 @@ const TableView: FC<GraphViewProps> = ({data}) => {
   );
 };

-export default TableView;
+export default TableView;
@@ -6,7 +6,6 @@ import Toolbar from "@mui/material/Toolbar";
 import Typography from "@mui/material/Typography";
 import {ExecutionControls} from "../CustomPanel/Configurator/Time/ExecutionControls";
 import Logo from "../common/Logo";
-import makeStyles from "@mui/styles/makeStyles";
 import {setQueryStringWithoutPageReload} from "../../utils/query-string";
 import {TimeSelector} from "../CustomPanel/Configurator/Time/TimeSelector";
 import GlobalSettings from "../CustomPanel/Configurator/Settings/GlobalSettings";
@@ -15,7 +14,7 @@ import Tabs from "@mui/material/Tabs";
 import Tab from "@mui/material/Tab";
 import router from "../../router/index";

-const useStyles = makeStyles({
+const classes = {
   logo: {
     position: "relative",
     display: "flex",
@@ -51,11 +50,10 @@ const useStyles = makeStyles({
     boxShadow: "rgba(0, 0, 0, 0.15) 0px 2px 8px"
   }
 }
-});
+};

 const Header: FC = () => {

-  const classes = useStyles();
   const {search, pathname} = useLocation();
   const navigate = useNavigate();

@@ -74,14 +72,14 @@ const Header: FC = () => {
   return <AppBar position="static" sx={{px: 1, boxShadow: "none"}}>
     <Toolbar>
       <Box display="grid" alignItems="center" justifyContent="center">
-        <Box onClick={onClickLogo} className={classes.logo}>
+        <Box onClick={onClickLogo} sx={classes.logo}>
           <Logo style={{color: "inherit", marginRight: "6px"}}/>
           <Typography variant="h5">
             <span style={{fontWeight: "bolder"}}>VM</span>
             <span style={{fontWeight: "lighter"}}>UI</span>
           </Typography>
         </Box>
-        <Link className={classes.issueLink} target="_blank"
+        <Link sx={classes.issueLink} target="_blank"
               href="https://github.com/VictoriaMetrics/VictoriaMetrics/issues/new">
           create an issue
         </Link>
@@ -1,4 +1,5 @@
 import React, {FC, useEffect, useMemo, useState} from "preact/compat";
+import {CSSProperties} from "react";
 import {MouseEvent as ReactMouseEvent} from "react";
 import {DashboardRow} from "../../types";
 import Box from "@mui/material/Box";
@@ -10,7 +11,6 @@ import ExpandMoreIcon from "@mui/icons-material/ExpandMore";
 import Typography from "@mui/material/Typography";
 import PredefinedPanels from "./PredefinedPanels";
 import Alert from "@mui/material/Alert";
-import {CSSProperties} from "@mui/styles";
 import useResize from "../../hooks/useResize";

 export interface PredefinedDashboardProps extends DashboardRow {
@@ -1,5 +1,5 @@
 import React, {FC} from "preact/compat";
-import {CSSProperties} from "@mui/styles";
+import {CSSProperties} from "react";
 import SvgIcon from "@mui/material/SvgIcon";

 interface LogoProps {
@@ -17,4 +17,4 @@ const Logo: FC<LogoProps> = ({style}) => (
   </SvgIcon>
 );

-export default Logo;
+export default Logo;
@@ -1,5 +1,5 @@
 import Switch from "@mui/material/Switch";
-import {styled} from "@mui/styles";
+import {styled} from "@mui/material/styles";

 const BasicSwitch = styled(Switch)(() => ({
   padding: 10,
@@ -22,4 +22,4 @@ const BasicSwitch = styled(Switch)(() => ({
   },
 }));

-export default BasicSwitch;
+export default BasicSwitch;
@@ -22,18 +22,25 @@ The following tip changes can be tested by building VictoriaMetrics components f
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for `tls_config` and `proxy_url` options at `oauth2` section in the same way as Prometheus does. See [oauth2 docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#oauth2).
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for `min_version` option at `tls_config` section in the same way as Prometheus does. See [tls_config docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tls_config).
 * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add support for DNS-based discovery for notifiers in the same way as Prometheus does. See [these docs](https://docs.victoriametrics.com/vmalert.html#notifier-configuration-file) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2460).
 * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `-replay.disableProgressBar` command-line flag, which allows disabling progressbar in [rules' backfilling mode](https://docs.victoriametrics.com/vmalert.html#rules-backfilling). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1761).
 * FEATURE: allow specifying TLS cipher suites for incoming https requests via `-tlsCipherSuites` command-line flag. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2404).
 * FEATURE: allow specifying TLS cipher suites for mTLS connections between cluster components via `-cluster.tlsCipherSuites` command-line flag. See [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection).
 * FEATURE: vmstorage: add `-snapshotsMaxAge` command-line flag for automatic removal of snapshots older than the given age.
 * FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): show an empty graph on the selected time range when there is no data on it. Previously `No data to show` placeholder was shown instead of the graph in this case. This prevented from zooming and scrolling of such a graph.
 * FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): show the selected `last N minutes/hours/days` in the top right corner. Previously the `start - end` duration was shown instead, which could be hard to interpret. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2402).
 * FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): execute the query when `enter` button is pressed in the same way as Prometheus does. Multi-line query can be entered by pressing `shift-enter` in the query input field.
 * FEATURE: expose `vm_indexdb_items_added_total` and `vm_indexdb_items_added_size_bytes_total` counters at `/metrics` page, which can be used for monitoring the rate for addition of new entries in `indexdb` (aka `inverted index`) alongside the total size in bytes for the added entries. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2471).
 * FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): show data processing speed during data migration.
 * FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): add `drop_common_labels()` function, which drops common `label="name"` pairs from the passed time series. See [these docs](https://docs.victoriametrics.com/MetricsQL.html#drop_common_labels).
 * FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): add `tlast_change_over_time(m[d])` function, which returns the timestamp of the last change of `m` on the given lookbehind window `d`. See [these docs](https://docs.victoriametrics.com/MetricsQL.html#tlast_change_over_time).
 * FEATURE: leave the last raw sample per each `-dedup.minScrapeInterval` discrete interval when the [deduplication](https://docs.victoriametrics.com/#deduplication) is enabled. This aligns better with the [staleness rules in Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness) comparing to the previous behaviour when the first sample per each `-dedup.minScrapeInterval` was left.
 * FEATURE: add a handler for `/api/v1/status/buildinfo` endpoint, which is used by Grafana starting from v8.5.0 . See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2515).

 * BUGFIX: export staleness markers as `null` values from [JSON export API](https://docs.victoriametrics.com/#how-to-export-data-in-json-line-format). Previously they were exported as `NaN` values. This could break the exported JSON parsing, since `NaN` values aren't supported by [JSON specification](https://www.json.org/).
 * BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): close `vmselect->vmstorage` connections if they were idle for more than 30 seconds. Expose `vm_tcpdialer_conns_idle` metric at `http://vmselect:8481/metrics` with the number of idle connections to `vmstorage`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2508).
 * BUGFIX: [vmctl](https://docs.victoriametrics.com/vmctl.html): return non-zero exit code on error. This allows handling `vmctl` errors in shell scripts. Previously `vmctl` was returning 0 exit code on error. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2322).
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly show `scrape_timeout` and `scrape_interval` options at `http://vmagent:8429/config` page. Previously these options weren't displayed even if they were set in `-promscrape.config`.
 * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): skip template execution during rules' validation. This should prevent from `error evaluating annotation template` errors when some template functions expect non-empty args. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2514).
 * BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly handle joins on time series filtered by values. For example, `kube_pod_container_resource_requests{resource="cpu"} * on (namespace,pod) group_left() (kube_pod_status_phase{phase=~"Pending|Running"}==1)`. This query could result in `duplicate time series on the right side` error even if `==1` filter leaves only a single time series per `(namespace,pod)` labels. Now such query is properly executed.
 * BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly handle `scalar default vector`, `scalar if vector` and `scalar ifnot vector` queries. Previously such queries could return unexpected results from the `vector` part.
 * BUGFIX: [Official Grafana dashboards for VictoriaMetrics](https://grafana.com/orgs/victoriametrics): take into account `indexdb` when calculating disk space usage. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2368).
@@ -377,7 +377,7 @@ the remaining 3 `vmstorage` nodes could provide the `-replicationFactor=3` for n

 When the replication is enabled, `-dedup.minScrapeInterval=1ms` command-line flag must be passed to `vmselect` nodes.
 Optional `-replicationFactor=N` command-line flag can be passed to `vmselect` for improving query performance when up to `N-1` vmstorage nodes respond slowly and/or temporarily unavailable, since `vmselect` doesn't wait for responses from up to `N-1` `vmstorage` nodes. Sometimes `-replicationFactor` at `vmselect` nodes can result in partial responses. See [this issues](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1207) for details.
-The `-dedup.minScrapeInterval=1ms` de-duplicates replicated data during queries. If duplicate data is pushed to VictoriaMetrics from identically configured [vmagent](https://docs.victoriametrics.com/vmagent.html) instances or Prometheus instances, then the `-dedup.minScrapeInterval` must be set to bigger values according to [deduplication docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#deduplication).
+The `-dedup.minScrapeInterval=1ms` de-duplicates replicated data during queries. If duplicate data is pushed to VictoriaMetrics from identically configured [vmagent](https://docs.victoriametrics.com/vmagent.html) instances or Prometheus instances, then the `-dedup.minScrapeInterval` must be set to bigger values according to [deduplication docs](#deduplication).

 Note that [replication doesn't save from disaster](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883),
 so it is recommended performing regular backups. See [these docs](#backups) for details.
@@ -390,6 +390,16 @@ HDD-based persistent disks should be enough for the majority of use cases.

 It is recommended using durable replicated persistent volumes in Kubernetes.

+## Deduplication
+
+Cluster version of VictoriaMetrics supports data deduplication in the same way as single-node version do. See [these docs](https://docs.victoriametrics.com/#deduplication) for details. The only difference is that the same `-dedup.minScrapeInterval` command-line flag value must be passed to both `vmselect` and `vmstorage` nodes because of the following aspects:
+
+By default `vminsert` tries to route all the samples for a single time series to a single `vmstorage` node. But samples for a single time series can be spread among multiple `vmstorage` nodes under certain conditions:
+* when adding/removing `vmstorage` nodes. Then new samples for a part of time series will be routed to another `vmstorage` nodes;
+* when `vmstorage` nodes are temporarily unavailable (for instance, during their restart). Then new samples are re-routed to the remaining available `vmstorage` nodes;
+* when `vmstorage` node has no enough capacity for processing incoming data stream. Then `vminsert` re-routes new samples to other `vmstorage` nodes.
+
+
 ## Backups

 It is recommended performing periodical backups from [instant snapshots](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282)
@@ -774,7 +774,7 @@ sum by (__name__) (

 #### label_replace

-`label_replace(q, "dst_label", "replacement", "src_label", "regex")` applies the given `regex` to `src_label` and stores the `replacement` in `dst_label` if the given `regex` matches `src_label`. The `replacement` may contain references to regex captures such as `$1`, `$2`, etc. These references are substituted by the corresponding regex captures. For example, `label_replace(up{job="node-exporter"}, "foo", "bar-$1", "job", "node-(.+)")` would store `bar-node-exporter` label value into `foo` label. This function is supported by PromQL.
+`label_replace(q, "dst_label", "replacement", "src_label", "regex")` applies the given `regex` to `src_label` and stores the `replacement` in `dst_label` if the given `regex` matches `src_label`. The `replacement` may contain references to regex captures such as `$1`, `$2`, etc. These references are substituted by the corresponding regex captures. For example, `label_replace(up{job="node-exporter"}, "foo", "bar-$1", "job", "node-(.+)")` would store `bar-exporter` label value into `foo` label. This function is supported by PromQL.

 #### label_set
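The corrected example is easy to verify: applying `node-(.+)` to `node-exporter` captures `exporter` into `$1`, so `bar-$1` expands to `bar-exporter`, not `bar-node-exporter`. A quick check with Go's `regexp` package (note that `label_replace` matches the regex against the whole label value, while `ReplaceAllString` here does not anchor it):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`node-(.+)`)
	// $1 is replaced with the captured group "exporter".
	fmt.Println(re.ReplaceAllString("node-exporter", "bar-$1")) // bar-exporter
}
```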
@ -597,6 +597,8 @@ The UI allows exploring query results via graphs and tables. Graphs support scro
|
|||
|
||||
Query history can be navigated by holding `Ctrl` (or `Cmd` on MacOS) and pressing `up` or `down` arrows on the keyboard while the cursor is located in the query input field.
|
||||
|
||||
Multi-line queries can be entered by pressing `Shift-Enter` in query input field.
|
||||
|
||||
When querying the [backfilled data](https://docs.victoriametrics.com/#backfilling), it may be useful disabling response cache by clicking `Enable cache` checkbox.
|
||||
|
||||
VMUI automatically adjusts the interval between datapoints on the graph depending on the horizontal resolution and on the selected time range. The step value can be customized by clickhing `Override step value` checkbox.
|
||||
|
@ -1095,7 +1097,7 @@ with the enabled de-duplication. See [this section](#deduplication) for details.
|
|||
|
||||
## Deduplication
|
||||
|
||||
VictoriaMetrics de-duplicates data points if `-dedup.minScrapeInterval` command-line flag is set to positive duration. For example, `-dedup.minScrapeInterval=60s` would de-duplicate data points on the same time series if they fall within the same discrete 60s bucket. The earliest data point will be kept. In the case of equal timestamps, an arbitrary data point will be kept. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2112#issuecomment-1032587618) for more details on how downsampling works.
|
||||
VictoriaMetrics leaves a single raw sample with the biggest timestamp per each `-dedup.minScrapeInterval` discrete interval if `-dedup.minScrapeInterval` is set to positive duration. For example, `-dedup.minScrapeInterval=60s` would leave a single raw sample with the biggest timestamp per each discrete 60s interval. If multiple raw samples have the same biggest timestamp on the given `-dedup.minScrapeInterval` discrete interval, then an arbitrary sample out of these samples is left. This aligns with the [staleness rules in Prometheus](https://prometheus.io/docs/prometheus/latest/querying/basics/#staleness).
|
||||
|
||||
The `-dedup.minScrapeInterval=D` is equivalent to `-downsampling.period=0s:D` if [downsampling](#downsampling) is enabled. It is safe to use deduplication and downsampling simultaneously.
|
||||
|
||||
@ -1620,7 +1622,7 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li
     The maximum size in bytes of a single DataDog POST request to /api/v1/series
     Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 67108864)
  -dedup.minScrapeInterval duration
     Leave only the first sample in every time series per each discrete interval equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication and https://docs.victoriametrics.com/#downsampling
     Leave only the last sample in every time series per each discrete interval equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication and https://docs.victoriametrics.com/#downsampling
  -deleteAuthKey string
     authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series and /tags/delSeries
  -denyQueriesOutsideRetention
@ -485,6 +485,9 @@ per rule before giving up.
(rules which depend on each other) rules. It is expected that the remote storage will be able to persist
previously accepted data during the delay, so the data will be available for the subsequent queries.
Keep it equal to or bigger than `-remoteWrite.flushInterval`.
* `replay.disableProgressBar` - whether to disable the progress bar which shows replay progress.
The progress bar may generate a lot of log records, which are not formatted by the standard VictoriaMetrics logger.
This could break log parsing by external systems and generate additional load on them.

See the full description for these flags in `./vmalert --help`.
16 go.mod
@ -11,21 +11,21 @@ require (
	github.com/VictoriaMetrics/fasthttp v1.1.0
	github.com/VictoriaMetrics/metrics v1.18.1
	github.com/VictoriaMetrics/metricsql v0.43.0
	github.com/aws/aws-sdk-go v1.44.0
	github.com/aws/aws-sdk-go v1.44.4
	github.com/cespare/xxhash/v2 v2.1.2
	github.com/cheggaaa/pb/v3 v3.0.8
	github.com/cheggaaa/pb/v3 v3.0.9-0.20211222075416-90c02fa07ea4
	github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
	github.com/fatih/color v1.13.0 // indirect
	github.com/go-kit/kit v0.12.0
	github.com/golang/snappy v0.0.4
	github.com/influxdata/influxdb v1.9.6
	github.com/klauspost/compress v1.15.1
	github.com/klauspost/compress v1.15.2
	github.com/mattn/go-colorable v0.1.12 // indirect
	github.com/mattn/go-runewidth v0.0.13 // indirect
	github.com/oklog/ulid v1.3.1
	github.com/prometheus/common v0.34.0 // indirect
	github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
	github.com/urfave/cli/v2 v2.5.0
	github.com/urfave/cli/v2 v2.5.1
	github.com/valyala/fastjson v1.6.3
	github.com/valyala/fastrand v1.1.0
	github.com/valyala/fasttemplate v1.2.1

@ -33,8 +33,8 @@ require (
	github.com/valyala/quicktemplate v1.7.0
	golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4
	golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
	golang.org/x/sys v0.0.0-20220422013727-9388b58f7150
	google.golang.org/api v0.75.0
	golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba
	google.golang.org/api v0.77.0
	gopkg.in/yaml.v2 v2.4.0
)

@ -48,7 +48,7 @@ require (
	github.com/go-logfmt/logfmt v0.5.1 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/google/go-cmp v0.5.7 // indirect
	github.com/google/go-cmp v0.5.8 // indirect
	github.com/googleapis/gax-go/v2 v2.3.0 // indirect
	github.com/googleapis/go-type-adapters v1.0.0 // indirect
	github.com/jmespath/go-jmespath v0.4.0 // indirect

@ -69,7 +69,7 @@ require (
	golang.org/x/text v0.3.7 // indirect
	golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
	google.golang.org/appengine v1.6.7 // indirect
	google.golang.org/genproto v0.0.0-20220422154200-b37d22cd5731 // indirect
	google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e // indirect
	google.golang.org/grpc v1.46.0 // indirect
	google.golang.org/protobuf v1.28.0 // indirect
	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
30 go.sum
@ -166,8 +166,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go v1.44.0 h1:jwtHuNqfnJxL4DKHBUVUmQlfueQqBW7oXP6yebZR/R0=
github.com/aws/aws-sdk-go v1.44.0/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.4 h1:ePN0CVJMdiz2vYUcJH96eyxRrtKGSDMgyhP6rah2OgE=
github.com/aws/aws-sdk-go v1.44.4/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=

@ -198,8 +198,8 @@ github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tj
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb/v3 v3.0.8 h1:bC8oemdChbke2FHIIGy9mn4DPJ2caZYQnfbRqwmdCoA=
github.com/cheggaaa/pb/v3 v3.0.8/go.mod h1:UICbiLec/XO6Hw6k+BHEtHeQFzzBH4i2/qk/ow1EJTA=
github.com/cheggaaa/pb/v3 v3.0.9-0.20211222075416-90c02fa07ea4 h1:sbHAiGddrdLsd3i9/RYsm0OKOEh+UDFOONxai8YMMcw=
github.com/cheggaaa/pb/v3 v3.0.9-0.20211222075416-90c02fa07ea4/go.mod h1:UICbiLec/XO6Hw6k+BHEtHeQFzzBH4i2/qk/ow1EJTA=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=

@ -493,8 +493,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=

@ -669,8 +670,8 @@ github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw=
github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=

@ -976,8 +977,8 @@ github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.5.0 h1:2sqblaW62ebcTIEvwb8eRvDfNHeBAeKxfhdynaanhug=
github.com/urfave/cli/v2 v2.5.0/go.mod h1:oDzoM7pVwz6wHn5ogWgFUU1s4VJayeQS+aEZDqXIEJs=
github.com/urfave/cli/v2 v2.5.1 h1:YKwdkyA0xTBzOaP2G0DVxBnCheHGP+Y9VbKAs4K1Ess=
github.com/urfave/cli/v2 v2.5.1/go.mod h1:oDzoM7pVwz6wHn5ogWgFUU1s4VJayeQS+aEZDqXIEJs=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=

@ -1329,8 +1330,8 @@ golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba h1:AyHWHCBVlIYI5rgEM3o+1PLd0sLPcIAoaUckGQMaWtw=
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=

@ -1485,8 +1486,9 @@ google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQ
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
google.golang.org/api v0.75.0 h1:0AYh/ae6l9TDUvIQrDw5QRpM100P6oHgD+o3dYHMzJg=
google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
google.golang.org/api v0.77.0 h1:msijLTxwkJ7Jub5tv9KBVCKtHOQwnvnvkX7ErFFCVxY=
google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

@ -1579,8 +1581,8 @@ google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX
google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220422154200-b37d22cd5731 h1:nquqdM9+ps0JZcIiI70+tqoaIFS5Ql4ZuK8UXnz3HfE=
google.golang.org/genproto v0.0.0-20220422154200-b37d22cd5731/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e h1:gMjH4zLGs9m+dGzR7qHCHaXMOwsJHJKKkHtyXhtOrJk=
google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -12,14 +12,14 @@ import (
// NewDuration returns new `duration` flag with the given name, defaultValue and description.
//
// DefaultValue is in months.
func NewDuration(name string, defaultValue float64, description string) *Duration {
func NewDuration(name string, defaultValue string, description string) *Duration {
	description += "\nThe following optional suffixes are supported: h (hour), d (day), w (week), y (year). If suffix isn't set, then the duration is counted in months"
	d := Duration{
		Msecs:       int64(defaultValue * msecsPerMonth),
		valueString: fmt.Sprintf("%g", defaultValue),
	d := &Duration{}
	if err := d.Set(defaultValue); err != nil {
		panic(fmt.Sprintf("BUG: can not parse default value %s for flag %s", defaultValue, name))
	}
	flag.Var(&d, name, description)
	return &d
	flag.Var(d, name, description)
	return d
}

// Duration is a flag for holding duration.
@ -56,5 +56,6 @@ func TestDurationSetSuccess(t *testing.T) {
	f("1h", 3600*1000)
	f("1.5d", 1.5*24*3600*1000)
	f("2.3W", 2.3*7*24*3600*1000)
	f("1w", 7*24*3600*1000)
	f("0.25y", 0.25*365*24*3600*1000)
}
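The test cases above encode the suffix rules for duration flags (`h`, `d`, `w`, `y`, case-insensitive, fractional values allowed, months when no suffix is given). Below is a standalone, hedged sketch of that suffix logic; `parseDurationMsecs` is a made-up helper and the month length is assumed to be 30 days here, so treat it as an illustration rather than the lib/flagutil code:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseDurationMsecs converts values like "1h", "1.5d", "2.3W" or "0.25y"
// into milliseconds, matching the expectations in TestDurationSetSuccess.
func parseDurationMsecs(s string) (int64, error) {
	s = strings.ToLower(s)
	unit := float64(30 * 24 * 3600 * 1000) // no suffix: months (assumed 30 days here)
	switch {
	case strings.HasSuffix(s, "h"):
		unit, s = 3600*1000, s[:len(s)-1]
	case strings.HasSuffix(s, "d"):
		unit, s = 24*3600*1000, s[:len(s)-1]
	case strings.HasSuffix(s, "w"):
		unit, s = 7*24*3600*1000, s[:len(s)-1]
	case strings.HasSuffix(s, "y"):
		unit, s = 365*24*3600*1000, s[:len(s)-1]
	}
	v, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return 0, err
	}
	return int64(v * unit), nil
}

func main() {
	for _, s := range []string{"1h", "1.5d", "2.3W", "1w", "0.25y"} {
		ms, _ := parseDurationMsecs(s)
		fmt.Println(s, "=>", ms, "ms")
	}
}
```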
@ -10,28 +10,24 @@ func DeduplicateSamples(srcTimestamps []int64, srcValues []float64, dedupInterva
		// Fast path - nothing to deduplicate
		return srcTimestamps, srcValues
	}
	return deduplicateInternal(srcTimestamps, srcValues, dedupInterval)
}

func deduplicateInternal(srcTimestamps []int64, srcValues []float64, dedupInterval int64) ([]int64, []float64) {
	tsNext := (srcTimestamps[0] - srcTimestamps[0]%dedupInterval) + dedupInterval
	dstTimestamps := srcTimestamps[:1]
	dstValues := srcValues[:1]
	for i := 1; i < len(srcTimestamps); i++ {
		ts := srcTimestamps[i]
		if ts < tsNext {
	tsNext := srcTimestamps[0] + dedupInterval - 1
	tsNext -= tsNext % dedupInterval
	dstTimestamps := srcTimestamps[:0]
	dstValues := srcValues[:0]
	for i, ts := range srcTimestamps[1:] {
		if ts <= tsNext {
			continue
		}
		dstTimestamps = append(dstTimestamps, ts)
		dstTimestamps = append(dstTimestamps, srcTimestamps[i])
		dstValues = append(dstValues, srcValues[i])

		// Update tsNext
		tsNext += dedupInterval
		if ts >= tsNext {
			// Slow path for updating ts.
			tsNext = (ts - ts%dedupInterval) + dedupInterval
		if tsNext < ts {
			tsNext = ts + dedupInterval - 1
			tsNext -= tsNext % dedupInterval
		}
	}
	dstTimestamps = append(dstTimestamps, srcTimestamps[len(srcTimestamps)-1])
	dstValues = append(dstValues, srcValues[len(srcValues)-1])
	return dstTimestamps, dstValues
}

@ -40,43 +36,41 @@ func deduplicateSamplesDuringMerge(srcTimestamps, srcValues []int64, dedupInterv
		// Fast path - nothing to deduplicate
		return srcTimestamps, srcValues
	}
	return deduplicateDuringMergeInternal(srcTimestamps, srcValues, dedupInterval)
}

func deduplicateDuringMergeInternal(srcTimestamps, srcValues []int64, dedupInterval int64) ([]int64, []int64) {
	tsNext := (srcTimestamps[0] - srcTimestamps[0]%dedupInterval) + dedupInterval
	dstTimestamps := srcTimestamps[:1]
	dstValues := srcValues[:1]
	for i := 1; i < len(srcTimestamps); i++ {
		ts := srcTimestamps[i]
		if ts < tsNext {
	tsNext := srcTimestamps[0] + dedupInterval - 1
	tsNext -= tsNext % dedupInterval
	dstTimestamps := srcTimestamps[:0]
	dstValues := srcValues[:0]
	for i, ts := range srcTimestamps[1:] {
		if ts <= tsNext {
			continue
		}
		dstTimestamps = append(dstTimestamps, ts)
		dstTimestamps = append(dstTimestamps, srcTimestamps[i])
		dstValues = append(dstValues, srcValues[i])

		// Update tsNext
		tsNext += dedupInterval
		if ts >= tsNext {
			// Slow path for updating ts.
			tsNext = (ts - ts%dedupInterval) + dedupInterval
		if tsNext < ts {
			tsNext = ts + dedupInterval - 1
			tsNext -= tsNext % dedupInterval
		}
	}
	dstTimestamps = append(dstTimestamps, srcTimestamps[len(srcTimestamps)-1])
	dstValues = append(dstValues, srcValues[len(srcValues)-1])
	return dstTimestamps, dstValues
}

func needsDedup(timestamps []int64, dedupInterval int64) bool {
	if len(timestamps) == 0 || dedupInterval <= 0 {
	if len(timestamps) < 2 || dedupInterval <= 0 {
		return false
	}
	tsNext := (timestamps[0] - timestamps[0]%dedupInterval) + dedupInterval
	tsNext := timestamps[0] + dedupInterval - 1
	tsNext -= tsNext % dedupInterval
	for _, ts := range timestamps[1:] {
		if ts < tsNext {
		if ts <= tsNext {
			return true
		}
		tsNext += dedupInterval
		if ts >= tsNext {
			tsNext = (ts - ts%dedupInterval) + dedupInterval
		if tsNext < ts {
			tsNext = ts + dedupInterval - 1
			tsNext -= tsNext % dedupInterval
		}
	}
	return false
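To make the new keep-the-last-sample semantics concrete, here is a made-up test-style sketch that exercises `DeduplicateSamples` exactly as the rewritten code above defines it. It assumes it compiles inside the same `storage` package; the expected output matches the updated test table that follows:

```go
package storage

import (
	"reflect"
	"testing"
)

// TestDeduplicateSamplesSketch is an illustrative sketch, not part of the
// real test suite: with dedupInterval = 10s the sample with the biggest
// timestamp per discrete 10s interval survives, and the very last sample
// is always kept.
func TestDeduplicateSamplesSketch(t *testing.T) {
	timestamps := []int64{10e3, 13e3, 21e3, 22e3, 30e3, 33e3, 39e3, 45e3}
	values := []float64{0, 1, 2, 3, 4, 5, 6, 7}
	gotTs, gotVs := DeduplicateSamples(timestamps, values, 10e3)
	wantTs := []int64{10e3, 13e3, 30e3, 39e3, 45e3}
	wantVs := []float64{0, 1, 4, 6, 7}
	if !reflect.DeepEqual(gotTs, wantTs) || !reflect.DeepEqual(gotVs, wantVs) {
		t.Fatalf("got %v %v; want %v %v", gotTs, gotVs, wantTs, wantVs)
	}
}
```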
@ -19,19 +19,26 @@ func TestNeedsDedup(t *testing.T) {
	f(0, []int64{1, 2}, false)
	f(10, []int64{1}, false)
	f(10, []int64{1, 2}, true)
	f(10, []int64{9, 10}, false)
	f(10, []int64{9, 10, 19}, true)
	f(10, []int64{9, 11}, false)
	f(10, []int64{10, 11}, false)
	f(10, []int64{0, 10, 11}, false)
	f(10, []int64{9, 10}, true)
	f(10, []int64{0, 10, 19}, false)
	f(10, []int64{9, 19}, false)
	f(10, []int64{0, 9, 19}, true)
	f(10, []int64{0, 11, 19}, true)
	f(10, []int64{0, 11, 20}, true)
	f(10, []int64{0, 11, 21}, false)
	f(10, []int64{0, 19}, false)
	f(10, []int64{0, 35, 40}, false)
	f(10, []int64{0, 35, 40, 41}, true)
	f(10, []int64{0, 30, 40}, false)
	f(10, []int64{0, 31, 40}, true)
	f(10, []int64{0, 31, 41}, false)
	f(10, []int64{0, 31, 49}, false)
}
func TestDeduplicateSamples(t *testing.T) {
	// Disable deduplication before exit, since the rest of tests expect disabled dedup.

	f := func(scrapeInterval time.Duration, timestamps, timestampsExpected []int64) {
	f := func(scrapeInterval time.Duration, timestamps, timestampsExpected []int64, valuesExpected []float64) {
		t.Helper()
		timestampsCopy := make([]int64, len(timestamps))
		values := make([]float64, len(timestamps))

@ -42,30 +49,10 @@ func TestDeduplicateSamples(t *testing.T) {
		dedupInterval := scrapeInterval.Milliseconds()
		timestampsCopy, values = DeduplicateSamples(timestampsCopy, values, dedupInterval)
		if !reflect.DeepEqual(timestampsCopy, timestampsExpected) {
			t.Fatalf("invalid DeduplicateSamples(%v) result;\ngot\n%v\nwant\n%v", timestamps, timestampsCopy, timestampsExpected)
			t.Fatalf("invalid DeduplicateSamples(%v) timestamps;\ngot\n%v\nwant\n%v", timestamps, timestampsCopy, timestampsExpected)
		}
		// Verify values
		if len(timestampsCopy) == 0 {
			if len(values) != 0 {
				t.Fatalf("values must be empty; got %v", values)
			}
			return
		}
		j := 0
		for i, ts := range timestamps {
			if ts != timestampsCopy[j] {
				continue
			}
			if values[j] != float64(i) {
				t.Fatalf("unexpected value at index %d; got %v; want %v; values: %v", j, values[j], i, values)
			}
			j++
			if j == len(timestampsCopy) {
				break
			}
		}
		if j != len(timestampsCopy) {
			t.Fatalf("superfluous timestamps found starting from index %d: %v", j, timestampsCopy[j:])
		if !reflect.DeepEqual(values, valuesExpected) {
			t.Fatalf("invalid DeduplicateSamples(%v) values;\ngot\n%v\nwant\n%v", timestamps, values, valuesExpected)
		}

		// Verify that the second call to DeduplicateSamples doesn't modify samples.

@ -78,19 +65,19 @@ func TestDeduplicateSamples(t *testing.T) {
			t.Fatalf("invalid DeduplicateSamples(%v) values for the second call;\ngot\n%v\nwant\n%v", timestamps, values, valuesCopy)
		}
	}
	f(time.Millisecond, nil, []int64{})
	f(time.Millisecond, []int64{123}, []int64{123})
	f(time.Millisecond, []int64{123, 456}, []int64{123, 456})
	f(time.Millisecond, []int64{0, 0, 0, 1, 1, 2, 3, 3, 3, 4}, []int64{0, 1, 2, 3, 4})
	f(0, []int64{0, 0, 0, 1, 1, 2, 3, 3, 3, 4}, []int64{0, 0, 0, 1, 1, 2, 3, 3, 3, 4})
	f(100*time.Millisecond, []int64{0, 100, 100, 101, 150, 180, 205, 300, 1000}, []int64{0, 100, 205, 300, 1000})
	f(10*time.Second, []int64{10e3, 13e3, 21e3, 22e3, 30e3, 33e3, 39e3, 45e3}, []int64{10e3, 21e3, 30e3, 45e3})
	f(time.Millisecond, nil, []int64{}, []float64{})
	f(time.Millisecond, []int64{123}, []int64{123}, []float64{0})
	f(time.Millisecond, []int64{123, 456}, []int64{123, 456}, []float64{0, 1})
	f(time.Millisecond, []int64{0, 0, 0, 1, 1, 2, 3, 3, 3, 4}, []int64{0, 1, 2, 3, 4}, []float64{2, 4, 5, 8, 9})
	f(0, []int64{0, 0, 0, 1, 1, 2, 3, 3, 3, 4}, []int64{0, 0, 0, 1, 1, 2, 3, 3, 3, 4}, []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
	f(100*time.Millisecond, []int64{0, 100, 100, 101, 150, 180, 205, 300, 1000}, []int64{0, 100, 180, 300, 1000}, []float64{0, 2, 5, 7, 8})
	f(10*time.Second, []int64{10e3, 13e3, 21e3, 22e3, 30e3, 33e3, 39e3, 45e3}, []int64{10e3, 13e3, 30e3, 39e3, 45e3}, []float64{0, 1, 4, 6, 7})
}
func TestDeduplicateSamplesDuringMerge(t *testing.T) {
	// Disable deduplication before exit, since the rest of tests expect disabled dedup.

	f := func(scrapeInterval time.Duration, timestamps, timestampsExpected []int64) {
	f := func(scrapeInterval time.Duration, timestamps, timestampsExpected, valuesExpected []int64) {
		t.Helper()
		timestampsCopy := make([]int64, len(timestamps))
		values := make([]int64, len(timestamps))

@ -101,30 +88,10 @@ func TestDeduplicateSamplesDuringMerge(t *testing.T) {
		dedupInterval := scrapeInterval.Milliseconds()
		timestampsCopy, values = deduplicateSamplesDuringMerge(timestampsCopy, values, dedupInterval)
		if !reflect.DeepEqual(timestampsCopy, timestampsExpected) {
			t.Fatalf("invalid deduplicateSamplesDuringMerge(%v) result;\ngot\n%v\nwant\n%v", timestamps, timestampsCopy, timestampsExpected)
			t.Fatalf("invalid deduplicateSamplesDuringMerge(%v) timestamps;\ngot\n%v\nwant\n%v", timestamps, timestampsCopy, timestampsExpected)
		}
		// Verify values
		if len(timestampsCopy) == 0 {
			if len(values) != 0 {
				t.Fatalf("values must be empty; got %v", values)
			}
			return
		}
		j := 0
		for i, ts := range timestamps {
			if ts != timestampsCopy[j] {
				continue
			}
			if values[j] != int64(i) {
				t.Fatalf("unexpected value at index %d; got %v; want %v; values: %v", j, values[j], i, values)
			}
			j++
			if j == len(timestampsCopy) {
				break
			}
		}
		if j != len(timestampsCopy) {
			t.Fatalf("superfluous timestamps found starting from index %d: %v", j, timestampsCopy[j:])
		if !reflect.DeepEqual(values, valuesExpected) {
			t.Fatalf("invalid DeduplicateSamples(%v) values;\ngot\n%v\nwant\n%v", timestamps, values, valuesExpected)
		}

		// Verify that the second call to DeduplicateSamples doesn't modify samples.

@ -137,21 +104,10 @@ func TestDeduplicateSamplesDuringMerge(t *testing.T) {
			t.Fatalf("invalid deduplicateSamplesDuringMerge(%v) values for the second call;\ngot\n%v\nwant\n%v", timestamps, values, valuesCopy)
		}
	}
	f(time.Millisecond, nil, []int64{})
	f(time.Millisecond, []int64{123}, []int64{123})
	f(time.Millisecond, []int64{123, 456}, []int64{123, 456})
	f(time.Millisecond, []int64{0, 0, 0, 1, 1, 2, 3, 3, 3, 4}, []int64{0, 1, 2, 3, 4})
	f(100*time.Millisecond, []int64{0, 100, 100, 101, 150, 180, 200, 300, 1000}, []int64{0, 100, 200, 300, 1000})
	f(10*time.Second, []int64{10e3, 13e3, 21e3, 22e3, 30e3, 33e3, 39e3, 45e3}, []int64{10e3, 21e3, 30e3, 45e3})

	var timestamps, timestampsExpected []int64
	for i := 0; i < 40; i++ {
		timestamps = append(timestamps, int64(i*1000))
		if i%2 == 0 {
			timestampsExpected = append(timestampsExpected, int64(i*1000))
		}
	}
	f(0, timestamps, timestamps)
	f(time.Second, timestamps, timestamps)
	f(2*time.Second, timestamps, timestampsExpected)
	f(time.Millisecond, nil, []int64{}, []int64{})
	f(time.Millisecond, []int64{123}, []int64{123}, []int64{0})
	f(time.Millisecond, []int64{123, 456}, []int64{123, 456}, []int64{0, 1})
	f(time.Millisecond, []int64{0, 0, 0, 1, 1, 2, 3, 3, 3, 4}, []int64{0, 1, 2, 3, 4}, []int64{2, 4, 5, 8, 9})
	f(100*time.Millisecond, []int64{0, 100, 100, 101, 150, 180, 200, 300, 1000}, []int64{0, 100, 200, 300, 1000}, []int64{0, 2, 6, 7, 8})
	f(10*time.Second, []int64{10e3, 13e3, 21e3, 22e3, 30e3, 33e3, 39e3, 45e3}, []int64{10e3, 13e3, 30e3, 39e3, 45e3}, []int64{0, 1, 4, 6, 7})
}
@ -418,6 +418,39 @@ func (s *Storage) DeleteSnapshot(snapshotName string) error {
	return nil
}

// DeleteStaleSnapshots deletes snapshots older than the given maxAge
func (s *Storage) DeleteStaleSnapshots(maxAge time.Duration) error {
	list, err := s.ListSnapshots()
	if err != nil {
		return err
	}
	expireDeadline := time.Now().UTC().Add(-maxAge)
	for _, snapshotName := range list {
		t, err := snapshotTime(snapshotName)
		if err != nil {
			return fmt.Errorf("cannot parse snapshot date from %q: %w", snapshotName, err)
		}
		if t.Before(expireDeadline) {
			if err := s.DeleteSnapshot(snapshotName); err != nil {
				return fmt.Errorf("cannot delete snapshot %q: %w", snapshotName, err)
			}
		}
	}
	return nil
}

func snapshotTime(snapshotName string) (time.Time, error) {
	if !snapshotNameRegexp.MatchString(snapshotName) {
		return time.Time{}, fmt.Errorf("unexpected snapshotName; must be in the format `YYYYMMDDhhmmss-idx`; got %q", snapshotName)
	}
	n := strings.IndexByte(snapshotName, '-')
	if n < 0 {
		return time.Time{}, fmt.Errorf("cannot find `-` in snapshotName=%q", snapshotName)
	}
	s := snapshotName[:n]
	return time.Parse("20060102150405", s)
}

var snapshotIdx = uint64(time.Now().UnixNano())

func nextSnapshotIdx() uint64 {
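A hedged usage sketch for the new `DeleteStaleSnapshots` API follows. The hourly ticker, the 30-day retention and the `pruneSnapshotsLoop` helper are illustrative choices, not part of this commit:

```go
package storage

import (
	"fmt"
	"time"
)

// pruneSnapshotsLoop is a made-up helper showing one way to call
// DeleteStaleSnapshots periodically: prune snapshots older than 30 days
// once per hour until stopCh is closed.
func pruneSnapshotsLoop(s *Storage, stopCh <-chan struct{}) {
	t := time.NewTicker(time.Hour)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			if err := s.DeleteStaleSnapshots(30 * 24 * time.Hour); err != nil {
				// A single failed prune isn't fatal; report and retry next tick.
				fmt.Printf("cannot delete stale snapshots: %s\n", err)
			}
		case <-stopCh:
			return
		}
	}
}
```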
@ -1106,6 +1106,58 @@ func testStorageAddMetrics(s *Storage, workerNum int) error {
	return nil
}

func TestStorageDeleteStaleSnapshots(t *testing.T) {
	path := "TestStorageDeleteStaleSnapshots"
	s, err := OpenStorage(path, 0, 1e5, 1e5)
	if err != nil {
		t.Fatalf("cannot open storage: %s", err)
	}
	const rowsPerAdd = 1e3
	const addsCount = 10
	for i := 0; i < addsCount; i++ {
		mrs := testGenerateMetricRows(rowsPerAdd, 0, 1e10)
		if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
			t.Fatalf("unexpected error when adding mrs: %s", err)
		}
	}
	// Try creating a snapshot from the storage.
	snapshotName, err := s.CreateSnapshot()
	if err != nil {
		t.Fatalf("cannot create snapshot from the storage: %s", err)
	}
	// Delete snapshots older than 1 month
	if err := s.DeleteStaleSnapshots(30 * 24 * time.Hour); err != nil {
		t.Fatalf("error in DeleteStaleSnapshots(1 month): %s", err)
	}
	snapshots, err := s.ListSnapshots()
	if err != nil {
		t.Fatalf("cannot list snapshots: %s", err)
	}
	if len(snapshots) != 1 {
		t.Fatalf("expecting one snapshot; got %q", snapshots)
	}
	if snapshots[0] != snapshotName {
		t.Fatalf("snapshot %q is missing in %q", snapshotName, snapshots)
	}

	// Delete the snapshot which is older than 1 nanosecond
	time.Sleep(2 * time.Nanosecond)
	if err := s.DeleteStaleSnapshots(time.Nanosecond); err != nil {
		t.Fatalf("cannot delete snapshot %q: %s", snapshotName, err)
	}
	snapshots, err = s.ListSnapshots()
	if err != nil {
		t.Fatalf("cannot list snapshots: %s", err)
	}
	if len(snapshots) != 0 {
		t.Fatalf("expecting zero snapshots; got %q", snapshots)
	}
	s.MustClose()
	if err := os.RemoveAll(path); err != nil {
		t.Fatalf("cannot remove %q: %s", path, err)
	}
}

func containsString(a []string, s string) bool {
	for i := range a {
		if a[i] == s {
2 vendor/github.com/aws/aws-sdk-go/aws/version.go (generated, vendored)

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.44.0"
const SDKVersion = "1.44.4"
7 vendor/github.com/cheggaaa/pb/v3/pb.go (generated, vendored)

@ -408,6 +408,13 @@ func (pb *ProgressBar) IsStarted() bool {
	return pb.finish != nil
}

// IsFinished indicates progress bar is finished
func (pb *ProgressBar) IsFinished() bool {
	pb.mu.RLock()
	defer pb.mu.RUnlock()
	return pb.finished
}

// SetTemplateString sets ProgressBar tempate string and parse it
func (pb *ProgressBar) SetTemplateString(tmpl string) *ProgressBar {
	pb.mu.Lock()
105 vendor/github.com/cheggaaa/pb/v3/pool.go (generated, vendored, new file)

@ -0,0 +1,105 @@
// +build linux darwin freebsd netbsd openbsd solaris dragonfly windows plan9 aix

package pb

import (
	"io"
	"sync"
	"time"

	"github.com/cheggaaa/pb/v3/termutil"
)

// Create and start new pool with given bars
// You need call pool.Stop() after work
func StartPool(pbs ...*ProgressBar) (pool *Pool, err error) {
	pool = new(Pool)
	if err = pool.Start(); err != nil {
		return
	}
	pool.Add(pbs...)
	return
}

// NewPool initialises a pool with progress bars, but
// doesn't start it. You need to call Start manually
func NewPool(pbs ...*ProgressBar) (pool *Pool) {
	pool = new(Pool)
	pool.Add(pbs...)
	return
}

type Pool struct {
	Output        io.Writer
	RefreshRate   time.Duration
	bars          []*ProgressBar
	lastBarsCount int
	shutdownCh    chan struct{}
	workerCh      chan struct{}
	m             sync.Mutex
	finishOnce    sync.Once
}

// Add progress bars.
func (p *Pool) Add(pbs ...*ProgressBar) {
	p.m.Lock()
	defer p.m.Unlock()
	for _, bar := range pbs {
		bar.Set(Static, true)
		bar.Start()
		p.bars = append(p.bars, bar)
	}
}

func (p *Pool) Start() (err error) {
	p.RefreshRate = defaultRefreshRate
	p.shutdownCh, err = termutil.RawModeOn()
	if err != nil {
		return
	}
	p.workerCh = make(chan struct{})
	go p.writer()
	return
}

func (p *Pool) writer() {
	var first = true
	defer func() {
		if first == false {
			p.print(false)
		} else {
			p.print(true)
			p.print(false)
		}
		close(p.workerCh)
	}()

	for {
		select {
		case <-time.After(p.RefreshRate):
			if p.print(first) {
				p.print(false)
				return
			}
			first = false
		case <-p.shutdownCh:
			return
		}
	}
}

// Restore terminal state and close pool
func (p *Pool) Stop() error {
	p.finishOnce.Do(func() {
		if p.shutdownCh != nil {
			close(p.shutdownCh)
		}
	})

	// Wait for the worker to complete
	select {
	case <-p.workerCh:
	}

	return termutil.RawModeOff()
}
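For context, a minimal usage sketch of this vendored pool API. `StartPool` and `Stop` are shown in the file above; `pb.New`, `Increment` and `Finish` are assumed to exist in the same pb/v3 package, so treat the exact calls as assumptions:

```go
package main

import (
	"time"

	"github.com/cheggaaa/pb/v3"
)

func main() {
	first := pb.New(100)
	second := pb.New(100)
	pool, err := pb.StartPool(first, second) // starts the refresh loop in raw mode
	if err != nil {
		panic(err)
	}
	for i := 0; i < 100; i++ {
		first.Increment()
		second.Increment()
		time.Sleep(10 * time.Millisecond)
	}
	first.Finish()
	second.Finish()
	if err := pool.Stop(); err != nil { // restores the terminal state
		panic(err)
	}
}
```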
46 vendor/github.com/cheggaaa/pb/v3/pool_win.go (generated, vendored, new file)

@ -0,0 +1,46 @@
// +build windows

package pb

import (
	"fmt"
	"log"

	"github.com/cheggaaa/pb/v3/termutil"
)

func (p *Pool) print(first bool) bool {
	p.m.Lock()
	defer p.m.Unlock()
	var out string
	if !first {
		coords, err := termutil.GetCursorPos()
		if err != nil {
			log.Panic(err)
		}
		coords.Y -= int16(p.lastBarsCount)
		if coords.Y < 0 {
			coords.Y = 0
		}
		coords.X = 0

		err = termutil.SetCursorPos(coords)
		if err != nil {
			log.Panic(err)
		}
	}
	isFinished := true
	for _, bar := range p.bars {
		if !bar.IsFinished() {
			isFinished = false
		}
		out += fmt.Sprintf("\r%s\n", bar.String())
	}
	if p.Output != nil {
		fmt.Fprint(p.Output, out)
	} else {
		fmt.Print(out)
	}
	p.lastBarsCount = len(p.bars)
	return isFinished
}
43 vendor/github.com/cheggaaa/pb/v3/pool_x.go (generated, vendored, new file)

@ -0,0 +1,43 @@
// +build linux darwin freebsd netbsd openbsd solaris dragonfly plan9 aix

package pb

import (
	"fmt"
	"os"

	"github.com/cheggaaa/pb/v3/termutil"
)

func (p *Pool) print(first bool) bool {
	p.m.Lock()
	defer p.m.Unlock()
	var out string
	if !first {
		out = fmt.Sprintf("\033[%dA", p.lastBarsCount)
	}
	isFinished := true
	bars := p.bars
	rows, cols, err := termutil.TerminalSize()
	if err != nil {
		cols = defaultBarWidth
	}
	if rows > 0 && len(bars) > rows {
		// we need to hide bars that overflow terminal height
		bars = bars[len(bars)-rows:]
	}
	for _, bar := range bars {
		if !bar.IsFinished() {
			isFinished = false
		}
		bar.SetWidth(cols)
		out += fmt.Sprintf("\r%s\n", bar.String())
	}
	if p.Output != nil {
		fmt.Fprint(p.Output, out)
	} else {
		fmt.Fprint(os.Stderr, out)
	}
	p.lastBarsCount = len(bars)
	return isFinished
}
16 vendor/github.com/cheggaaa/pb/v3/termutil/term.go (generated, vendored)

@ -10,6 +10,16 @@ import (
var echoLocked bool
var echoLockMutex sync.Mutex
var errLocked = errors.New("terminal locked")
var autoTerminate = true

// AutoTerminate enables or disables automatic terminate signal catching.
// It's needed to restore the terminal state after the pool was used.
// By default, it's enabled.
func AutoTerminate(enable bool) {
	echoLockMutex.Lock()
	defer echoLockMutex.Unlock()
	autoTerminate = enable
}

// RawModeOn switches terminal to raw mode
func RawModeOn() (quit chan struct{}, err error) {

@ -45,8 +55,10 @@ func RawModeOff() (err error) {
// listen exit signals and restore terminal state
func catchTerminate(quit chan struct{}) {
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, unlockSignals...)
	defer signal.Stop(sig)
	if autoTerminate {
		signal.Notify(sig, unlockSignals...)
		defer signal.Stop(sig)
	}
	select {
	case <-quit:
		RawModeOff()
4 vendor/github.com/cheggaaa/pb/v3/termutil/term_win.go (generated, vendored)

@ -111,7 +111,7 @@ func termWidthTPut() (width int, err error) {
	return strconv.Atoi(string(res))
}

func getCursorPos() (pos coordinates, err error) {
func GetCursorPos() (pos coordinates, err error) {
	var info consoleScreenBufferInfo
	_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(syscall.Stdout), uintptr(unsafe.Pointer(&info)), 0)
	if e != 0 {

@ -120,7 +120,7 @@ func getCursorPos() (pos coordinates, err error) {
	return info.dwCursorPosition, nil
}

func setCursorPos(pos coordinates) error {
func SetCursorPos(pos coordinates) error {
	_, _, e := syscall.Syscall(setConsoleCursorPosition.Addr(), 2, uintptr(syscall.Stdout), uintptr(uint32(uint16(pos.Y))<<16|uint32(uint16(pos.X))), 0)
	if e != 0 {
		return error(e)
10 vendor/github.com/cheggaaa/pb/v3/termutil/term_x.go (generated, vendored)

@ -35,6 +35,12 @@ func init() {

// TerminalWidth returns width of the terminal.
func TerminalWidth() (int, error) {
	_, c, err := TerminalSize()
	return c, err
}

// TerminalSize returns size of the terminal.
func TerminalSize() (rows, cols int, err error) {
	w := new(window)
	res, _, err := syscall.Syscall(sysIoctl,
		tty.Fd(),

@ -42,9 +48,9 @@ func TerminalWidth() (int, error) {
		uintptr(unsafe.Pointer(w)),
	)
	if int(res) == -1 {
		return 0, err
		return 0, 0, err
	}
	return int(w.Col), nil
	return int(w.Row), int(w.Col), nil
}

var oldState syscall.Termios
2 vendor/github.com/google/go-cmp/cmp/compare.go (generated, vendored)

@ -40,6 +40,8 @@ import (
	"github.com/google/go-cmp/cmp/internal/value"
)

// TODO(≥go1.18): Use any instead of interface{}.

// Equal reports whether x and y are equal by recursively applying the
// following rules in the given order to x and y and all of their sub-values:
//
5 vendor/github.com/google/go-cmp/cmp/report_compare.go (generated, vendored)

@ -116,7 +116,10 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out
	}

	// For leaf nodes, format the value based on the reflect.Values alone.
	if v.MaxDepth == 0 {
	// As a special case, treat equal []byte as a leaf nodes.
	isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == reflect.TypeOf(byte(0))
	isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
	if v.MaxDepth == 0 || isEqualBytes {
		switch opts.DiffMode {
		case diffUnknown, diffIdentical:
			// Format Equal.
10 vendor/github.com/google/go-cmp/cmp/report_reflect.go (generated, vendored)

@ -211,7 +211,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
	if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
		out = opts.formatString("", string(b))
		skipType = true
		return opts.WithTypeMode(emitType).FormatType(t, out)
		return opts.FormatType(t, out)
	}
}

@ -282,7 +282,12 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
	}
	defer ptrs.Pop()

	skipType = true // Let the underlying value print the type instead
	// Skip the name only if this is an unnamed pointer type.
	// Otherwise taking the address of a value does not reproduce
	// the named pointer type.
	if v.Type().Name() == "" {
		skipType = true // Let the underlying value print the type instead
	}
	out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
	out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
	out = &textWrap{Prefix: "&", Value: out}

@ -293,7 +298,6 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
	}
	// Interfaces accept different concrete types,
	// so configure the underlying value to explicitly print the type.
	skipType = true // Print the concrete type instead
	return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
default:
	panic(fmt.Sprintf("%v kind not handled", v.Kind()))
7 vendor/github.com/klauspost/compress/.gitignore (generated, vendored)

@ -23,3 +23,10 @@ _testmain.go
*.test
*.prof
/s2/cmd/_s2sx/sfx-exe

# Linux perf files
perf.data
perf.data.old

# gdb history
.gdb_history
7 vendor/github.com/klauspost/compress/README.md (generated, vendored)

@ -17,6 +17,13 @@ This package provides various compression algorithms.

# changelog

* Mar 11, 2022 (v1.15.1)
	* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
	* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)
	* zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520)
	* zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521)
	* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)

* Mar 3, 2022 (v1.15.0)
	* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
	* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
4 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go (generated, vendored)

@ -12,14 +12,14 @@ import (

// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
// go:noescape
//go:noescape
func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8

// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
// go:noescape
//go:noescape
func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
34 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go (generated, vendored, new file)

@ -0,0 +1,34 @@
// Package cpuinfo gives runtime info about the current CPU.
//
// This is a very limited module meant for use internally
// in this project. For more versatile solution check
// https://github.com/klauspost/cpuid.
package cpuinfo

// HasBMI1 checks whether an x86 CPU supports the BMI1 extension.
func HasBMI1() bool {
	return hasBMI1
}

// HasBMI2 checks whether an x86 CPU supports the BMI2 extension.
func HasBMI2() bool {
	return hasBMI2
}

// DisableBMI2 will disable BMI2, for testing purposes.
// Call returned function to restore previous state.
func DisableBMI2() func() {
	old := hasBMI2
	hasBMI2 = false
	return func() {
		hasBMI2 = old
	}
}

// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions.
func HasBMI() bool {
	return HasBMI1() && HasBMI2()
}

var hasBMI1 bool
var hasBMI2 bool
11 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go (generated, vendored, new file)

@ -0,0 +1,11 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc

package cpuinfo

// go:noescape
func x86extensions() (bmi1, bmi2 bool)

func init() {
	hasBMI1, hasBMI2 = x86extensions()
}
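A sketch of how such a CPU-capability check is typically consumed by callers. The decode functions below are placeholders, not the real huff0 entry points, and the `hasBMI2` variable stands in for `cpuinfo.HasBMI2()`:

```go
package main

import "fmt"

// Placeholder stand-ins for an assembly fast path and a pure-Go fallback.
func decodeAsmBMI2(src []byte) []byte { return src }
func decodeGeneric(src []byte) []byte { return src }

// hasBMI2 would come from cpuinfo.HasBMI2() in the real package.
var hasBMI2 = false

func decode(src []byte) []byte {
	if hasBMI2 {
		// Use the BMI2-accelerated assembly implementation.
		return decodeAsmBMI2(src)
	}
	return decodeGeneric(src)
}

func main() {
	fmt.Println(string(decode([]byte("data"))))
}
```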
36 vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s (generated, vendored, new file)

@ -0,0 +1,36 @@
// +build !appengine
// +build gc
// +build !noasm

#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"

TEXT ·x86extensions(SB), NOSPLIT, $0
	// 1. determine max EAX value
	XORQ AX, AX
	CPUID

	CMPQ AX, $7
	JB   unsupported

	// 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction"
	MOVQ $7, AX
	MOVQ $0, CX
	CPUID

	BTQ   $3, BX // bit 3 = BMI1
	SETCS AL

	BTQ   $8, BX // bit 8 = BMI2
	SETCS AH

	MOVB AL, bmi1+0(FP)
	MOVB AH, bmi2+1(FP)
	RET

unsupported:
	XORQ AX, AX
	MOVB AL, bmi1+0(FP)
	MOVB AL, bmi2+1(FP)
	RET
54 vendor/github.com/klauspost/compress/zstd/README.md (generated, vendored)

@ -386,47 +386,31 @@ In practice this means that concurrency is often limited to utilizing about 3 co

### Benchmarks

These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).

The first two are streaming decodes and the last are smaller inputs.

Running on AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.

```
BenchmarkDecoderSilesia-8 3 385000067 ns/op 550.51 MB/s 5498 B/op 8 allocs/op
BenchmarkDecoderSilesiaCgo-8 6 197666567 ns/op 1072.25 MB/s 270672 B/op 8 allocs/op
BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op

BenchmarkDecoderEnwik9-8 1 2027001600 ns/op 493.34 MB/s 10496 B/op 18 allocs/op
BenchmarkDecoderEnwik9Cgo-8 2 979499200 ns/op 1020.93 MB/s 270672 B/op 8 allocs/op
Concurrent blocks, performance:

Concurrent performance:

BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-16 28915 42469 ns/op 4340.07 MB/s 114 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-16 116505 9965 ns/op 11900.16 MB/s 16 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-16 8952 134272 ns/op 3588.70 MB/s 915 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-16 11820 102538 ns/op 4161.90 MB/s 594 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-16 34782 34184 ns/op 3661.88 MB/s 60 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-16 27712 43447 ns/op 3500.58 MB/s 99 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-16 62826 18750 ns/op 21845.10 MB/s 104 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-16 631545 1794 ns/op 57078.74 MB/s 2 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-16 1690140 712 ns/op 172938.13 MB/s 1 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-16 10432 113593 ns/op 6180.73 MB/s 1143 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html.zst-16 113206 10671 ns/op 9596.27 MB/s 15 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-16 1530615 779 ns/op 5229.49 MB/s 0 B/op 0 allocs/op

BenchmarkDecoder_DecodeAllParallelCgo/kppkn.gtb.zst-16 65217 16192 ns/op 11383.34 MB/s 46 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/geo.protodata.zst-16 292671 4039 ns/op 29363.19 MB/s 6 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/plrabn12.txt.zst-16 26314 46021 ns/op 10470.43 MB/s 293 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/lcet10.txt.zst-16 33897 34900 ns/op 12227.96 MB/s 205 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/asyoulik.txt.zst-16 104348 11433 ns/op 10949.01 MB/s 20 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/alice29.txt.zst-16 75949 15510 ns/op 9805.60 MB/s 32 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/html_x_4.zst-16 173910 6756 ns/op 60624.29 MB/s 37 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/paper-100k.pdf.zst-16 923076 1339 ns/op 76474.87 MB/s 1 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/fireworks.jpeg.zst-16 922920 1351 ns/op 91102.57 MB/s 2 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/urls.10K.zst-16 27649 43618 ns/op 16096.19 MB/s 407 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/html.zst-16 279073 4160 ns/op 24614.18 MB/s 6 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallelCgo/comp-data.bin.zst-16 749938 1579 ns/op 2581.71 MB/s 0 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
```

This reflects the performance around May 2020, but this may be out of date.
This reflects the performance around May 2022, but this may be out of date.

## Zstd inside ZIP files
42
vendor/github.com/klauspost/compress/zstd/blockdec.go
generated
vendored
42
vendor/github.com/klauspost/compress/zstd/blockdec.go
generated
vendored
|
@ -5,9 +5,14 @@
package zstd

import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"

"github.com/klauspost/compress/huff0"

@ -38,6 +43,9 @@ const (
// maxCompressedBlockSize is the biggest allowed compressed block size (128KB)
maxCompressedBlockSize = 128 << 10

compressedBlockOverAlloc = 16
maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc

// Maximum possible block size (all Raw+Uncompressed).
maxBlockSize = (1 << 21) - 1

@ -136,7 +144,7 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
b.Type = blockType((bh >> 1) & 3)
// find size.
cSize := int(bh >> 3)
maxSize := maxBlockSize
maxSize := maxCompressedBlockSizeAlloc
switch b.Type {
case blockTypeReserved:
return ErrReservedBlockType

@ -157,9 +165,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
println("Data size on stream:", cSize)
}
b.RLESize = 0
maxSize = maxCompressedBlockSize
maxSize = maxCompressedBlockSizeAlloc
if windowSize < maxCompressedBlockSize && b.lowMem {
maxSize = int(windowSize)
maxSize = int(windowSize) + compressedBlockOverAlloc
}
if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize {
if debugDecoder {

@ -190,9 +198,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
// Read block data.
if cap(b.dataStorage) < cSize {
if b.lowMem || cSize > maxCompressedBlockSize {
b.dataStorage = make([]byte, 0, cSize)
b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
} else {
b.dataStorage = make([]byte, 0, maxCompressedBlockSize)
b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
}
}
if cap(b.dst) <= maxSize {

@ -486,10 +494,15 @@ func (b *blockDec) decodeCompressed(hist *history) error {
b.dst = append(b.dst, hist.decoders.literals...)
return nil
}
err = hist.decoders.decodeSync(hist)
before := len(hist.decoders.out)
err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:])
if err != nil {
return err
}
if hist.decoders.maxSyncLen > 0 {
hist.decoders.maxSyncLen += uint64(before)
hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out))
}
b.dst = hist.decoders.out
hist.recentOffsets = hist.decoders.prevOffset
return nil

@ -632,6 +645,22 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
println("initializing sequences:", err)
return err
}
// Extract blocks...
if false && hist.dict == nil {
fatalErr := func(err error) {
if err != nil {
panic(err)
}
}
fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
var buf bytes.Buffer
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
buf.Write(in)
ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
}

return nil
}

@ -650,6 +679,7 @@ func (b *blockDec) decodeSequences(hist *history) error {
}
hist.decoders.windowSize = hist.windowSize
hist.decoders.prevOffset = hist.recentOffsets

err := hist.decoders.decode(b.sequence)
hist.recentOffsets = hist.decoders.prevOffset
return err
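The new `compressedBlockOverAlloc` constant above sizes buffers with a few bytes of slack, apparently so an optimized decoder can safely touch memory slightly past a block's logical end. A hedged standalone sketch of the pattern (only the constant's name and value come from the diff; the helper is illustrative):

```go
package main

import "fmt"

// compressedBlockOverAlloc mirrors the constant added in the diff: a little
// spare capacity beyond the logical buffer size.
const compressedBlockOverAlloc = 16

// allocWithHeadroom is a hypothetical helper showing the allocation pattern.
func allocWithHeadroom(size int) []byte {
	return make([]byte, 0, size+compressedBlockOverAlloc)
}

func main() {
	buf := allocWithHeadroom(128 << 10)
	fmt.Println(cap(buf)) // 131088: 128KiB plus 16 bytes of headroom
}
```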
14 vendor/github.com/klauspost/compress/zstd/decoder.go generated vendored
@ -347,18 +347,20 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
}
frame.history.setDict(&dict)
}

if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
return dst, ErrDecoderSizeExceeded
if frame.WindowSize > d.o.maxWindowSize {
return dst, ErrWindowSizeExceeded
}
if frame.FrameContentSize < 1<<30 {
// Never preallocate more than 1 GB up front.
if frame.FrameContentSize != fcsUnknown {
if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
return dst, ErrDecoderSizeExceeded
}
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc)
copy(dst2, dst)
dst = dst2
}
}

if cap(dst) == 0 {
// Allocate len(input) * 2 by default if nothing is provided
// and we didn't get frame content size.
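The `DecodeAll` change above trusts the frame header's declared content size for preallocation only below a hard cap. A hedged sketch of that pattern, with illustrative names (the 1 GB cap comes from the comment in the diff):

```go
package main

import "fmt"

// growCapped grows dst for an expected payload size, but never preallocates
// a very large buffer up front, so a hostile size hint in a frame header
// cannot force a huge allocation before any data is decoded.
func growCapped(dst []byte, expected uint64) []byte {
	const maxPrealloc = 1 << 30 // never preallocate more than 1 GB up front
	if expected >= maxPrealloc {
		return dst // grow lazily while decoding instead of trusting the header
	}
	if uint64(cap(dst)-len(dst)) < expected {
		dst2 := make([]byte, len(dst), len(dst)+int(expected))
		copy(dst2, dst)
		return dst2
	}
	return dst
}

func main() {
	buf := growCapped(nil, 64<<10)
	fmt.Println(len(buf), cap(buf)) // 0 65536
}
```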
4 vendor/github.com/klauspost/compress/zstd/decoder_options.go generated vendored
@ -31,7 +31,7 @@ func (o *decoderOptions) setDefault() {
if o.concurrent > 4 {
o.concurrent = 4
}
o.maxDecodedSize = 1 << 63
o.maxDecodedSize = 64 << 30
}

// WithDecoderLowmem will set whether to use a lower amount of memory,

@ -66,7 +66,7 @@ func WithDecoderConcurrency(n int) DOption {
// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory
// non-streaming operations or maximum window size for streaming operations.
// This can be used to control memory usage of potentially hostile content.
// Maximum and default is 1 << 63 bytes.
// Maximum is 1 << 63 bytes. Default is 64GiB.
func WithDecoderMaxMemory(n uint64) DOption {
return func(o *decoderOptions) error {
if n == 0 {
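A short usage sketch of the option documented above; the input file name and the 1 GiB limit are illustrative choices, not values from the diff:

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	f, err := os.Open("data.zst") // hypothetical input
	if err != nil {
		panic(err)
	}
	defer f.Close()
	// Cap decoded size (and streaming window) at 1 GiB instead of the 64 GiB default.
	dec, err := zstd.NewReader(f, zstd.WithDecoderMaxMemory(1<<30))
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	if _, err := io.Copy(io.Discard, dec); err != nil {
		panic(err)
	}
}
```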
13 vendor/github.com/klauspost/compress/zstd/framedec.go generated vendored
@ -326,6 +326,19 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
d.history.ignoreBuffer = len(dst)
// Store input length, so we only check new data.
crcStart := len(dst)
d.history.decoders.maxSyncLen = 0
if d.FrameContentSize != fcsUnknown {
d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
return dst, ErrDecoderSizeExceeded
}
if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
// Alloc for output
dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
copy(dst2, dst)
dst = dst2
}
}
var err error
for {
err = dec.reset(d.rawInput, d.WindowSize)
25 vendor/github.com/klauspost/compress/zstd/fse_decoder.go generated vendored
@ -5,8 +5,10 @@
package zstd

import (
"encoding/binary"
"errors"
"fmt"
"io"
)

const (

@ -182,6 +184,29 @@ func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error {
return s.buildDtable()
}

func (s *fseDecoder) mustReadFrom(r io.Reader) {
fatalErr := func(err error) {
if err != nil {
panic(err)
}
}
// dt [maxTablesize]decSymbol // Decompression table.
// symbolLen uint16 // Length of active part of the symbol table.
// actualTableLog uint8 // Selected tablelog.
// maxBits uint8 // Maximum number of additional bits
// // used for table creation to avoid allocations.
// stateTable [256]uint16
// norm [maxSymbolValue + 1]int16
// preDefined bool
fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
}

// decSymbol contains information about a state entry,
// Including the state offset base, the output symbol and
// the number of bits to read for the low part of the destination state.
168 vendor/github.com/klauspost/compress/zstd/seqdec.go generated vendored
@ -73,6 +73,7 @@ type sequenceDecs struct {
seqSize int
windowSize int
maxBits uint8
maxSyncLen uint64
}

// initialize all 3 decoders from the stream input.

@ -98,153 +99,13 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro
return nil
}

// decode sequences from the stream with the provided history.
func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br

// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
s.seqSize = 0
litRemain := len(s.literals)
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
for i := range seqs {
var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)

// Final will not read from stream.
var llB, mlB, moB uint8
ll, llB = llState.final()
ml, mlB = mlState.final()
mo, moB = ofState.final()

// extra bits are stored in reverse order.
br.fillFast()
mo += br.getBits(moB)
if s.maxBits > 32 {
br.fillFast()
}
ml += br.getBits(mlB)
ll += br.getBits(llB)

if moB > 1 {
s.prevOffset[2] = s.prevOffset[1]
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = mo
} else {
// mo = s.adjustOffset(mo, ll, moB)
// Inlined for rather big speedup
if ll == 0 {
// There is an exception though, when current sequence's literals_length = 0.
// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
mo++
}

if mo == 0 {
mo = s.prevOffset[0]
} else {
var temp int
if mo == 3 {
temp = s.prevOffset[0] - 1
} else {
temp = s.prevOffset[mo]
}

if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
println("WARNING: temp was 0")
temp = 1
}

if mo != 1 {
s.prevOffset[2] = s.prevOffset[1]
}
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = temp
mo = temp
}
}
br.fillFast()
} else {
if br.overread() {
if debugDecoder {
printf("reading sequence %d, exceeded available data\n", i)
}
return io.ErrUnexpectedEOF
}
ll, mo, ml = s.next(br, llState, mlState, ofState)
br.fill()
}

if debugSequences {
println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
}
// Evaluate.
// We might be doing this async, so do it early.
if mo == 0 && ml > 0 {
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
}
if ml > maxMatchLen {
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
}
s.seqSize += ll + ml
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
litRemain -= ll
if litRemain < 0 {
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
}
seqs[i] = seqVals{
ll: ll,
ml: ml,
mo: mo,
}
if i == len(seqs)-1 {
// This is the last sequence, so we shouldn't update state.
break
}

// Manually inlined, ~ 5-20% faster
// Update all 3 states at once. Approx 20% faster.
nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
if nBits == 0 {
llState = llTable[llState.newState()&maxTableMask]
mlState = mlTable[mlState.newState()&maxTableMask]
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]

lowBits = uint16(bits >> (ofState.nbBits() & 31))
lowBits &= bitMask[mlState.nbBits()&15]
mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]

lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
}
}
s.seqSize += litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
}
return err
}

// execute will execute the decoded sequence with the provided history.
// The sequence must be evaluated before being sent.
func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
if len(s.dict) == 0 {
return s.executeSimple(seqs, hist)
}

// Ensure we have enough output size...
if len(s.out)+s.seqSize > cap(s.out) {
addBytes := s.seqSize + len(s.out)

@ -341,14 +202,19 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
}

// decode sequences from the stream with the provided history.
func (s *sequenceDecs) decodeSync(history *history) error {
func (s *sequenceDecs) decodeSync(hist []byte) error {
if true {
supported, err := s.decodeSyncSimple(hist)
if supported {
return err
}
}
br := s.br
seqs := s.nSeqs
startSize := len(s.out)
// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
hist := history.b[history.ignoreBuffer:]
out := s.out
maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {

@ -433,7 +299,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}
size := ll + ml + len(out)
if size-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
}
if size > cap(out) {
// Not enough size, which can happen under high volume block streaming conditions

@ -463,13 +329,13 @@ func (s *sequenceDecs) decodeSync(history *history) error {

if mo > len(out)+len(hist) || mo > s.windowSize {
if len(s.dict) == 0 {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
}

// we may be in dictionary.
dictO := len(s.dict) - (mo - (len(out) + len(hist)))
if dictO < 0 || dictO >= len(s.dict) {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
}
end := dictO + ml
if end > len(s.dict) {

@ -543,8 +409,8 @@ func (s *sequenceDecs) decodeSync(history *history) error {
}

// Check if space for literals
if len(s.literals)+len(s.out)-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
}

// Add final literals
350 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go generated vendored Normal file
@ -0,0 +1,350 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc

package zstd

import (
"fmt"

"github.com/klauspost/compress/internal/cpuinfo"
)

type decodeSyncAsmContext struct {
llTable []decSymbol
mlTable []decSymbol
ofTable []decSymbol
llState uint64
mlState uint64
ofState uint64
iteration int
litRemain int
out []byte
outPosition int
literals []byte
litPosition int
history []byte
windowSize int
ll int // set on error (not for all errors, please refer to _generate/gen.go)
ml int // set on error (not for all errors, please refer to _generate/gen.go)
mo int // set on error (not for all errors, please refer to _generate/gen.go)
}

// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
//go:noescape
func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
//go:noescape
func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
//go:noescape
func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

// decode sequences from the stream with the provided history but without a dictionary.
func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
if len(s.dict) > 0 {
return false, nil
}
if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
return false, nil
}
useSafe := false
if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
useSafe = true
}
if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
useSafe = true
}
br := s.br

maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}

ctx := decodeSyncAsmContext{
llTable: s.litLengths.fse.dt[:maxTablesize],
mlTable: s.matchLengths.fse.dt[:maxTablesize],
ofTable: s.offsets.fse.dt[:maxTablesize],
llState: uint64(s.litLengths.state.state),
mlState: uint64(s.matchLengths.state.state),
ofState: uint64(s.offsets.state.state),
iteration: s.nSeqs - 1,
litRemain: len(s.literals),
out: s.out,
outPosition: len(s.out),
literals: s.literals,
windowSize: s.windowSize,
history: hist,
}

s.seqSize = 0
startSize := len(s.out)

var errCode int
if cpuinfo.HasBMI2() {
if useSafe {
errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx)
} else {
errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx)
}
} else {
if useSafe {
errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx)
} else {
errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx)
}
}
switch errCode {
case noError:
break

case errorMatchLenOfsMismatch:
return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml)

case errorMatchLenTooBig:
return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml)

case errorMatchOffTooBig:
return true, fmt.Errorf("match offset (%d) bigger than current history (%d)",
ctx.mo, ctx.outPosition+len(hist)-startSize)

case errorNotEnoughLiterals:
return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
ctx.ll, ctx.litRemain+ctx.ll)

case errorNotEnoughSpace:
size := ctx.outPosition + ctx.ll + ctx.ml
if debugDecoder {
println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
}
return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)

default:
return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
}

s.seqSize += ctx.litRemain
if s.seqSize > maxBlockSize {
return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
return true, err
}

s.literals = s.literals[ctx.litPosition:]
t := ctx.outPosition
s.out = s.out[:t]

// Add final literals
s.out = append(s.out, s.literals...)
if debugDecoder {
t += len(s.literals)
if t != len(s.out) {
panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
}
}

return true, nil
}

// --------------------------------------------------------------------------------

type decodeAsmContext struct {
llTable []decSymbol
mlTable []decSymbol
ofTable []decSymbol
llState uint64
mlState uint64
ofState uint64
iteration int
seqs []seqVals
litRemain int
}

const noError = 0

// error reported when mo == 0 && ml > 0
const errorMatchLenOfsMismatch = 1

// error reported when ml > maxMatchLen
const errorMatchLenTooBig = 2

// error reported when mo > available history or mo > s.windowSize
const errorMatchOffTooBig = 3

// error reported when the sum of literal lengths exeeceds the literal buffer size
const errorNotEnoughLiterals = 4

// error reported when capacity of `out` is too small
const errorNotEnoughSpace = 5

// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
//go:noescape
func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
//go:noescape
func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

// decode sequences from the stream without the provided history.
func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br

maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}

ctx := decodeAsmContext{
llTable: s.litLengths.fse.dt[:maxTablesize],
mlTable: s.matchLengths.fse.dt[:maxTablesize],
ofTable: s.offsets.fse.dt[:maxTablesize],
llState: uint64(s.litLengths.state.state),
mlState: uint64(s.matchLengths.state.state),
ofState: uint64(s.offsets.state.state),
seqs: seqs,
iteration: len(seqs) - 1,
litRemain: len(s.literals),
}

s.seqSize = 0
lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
var errCode int
if cpuinfo.HasBMI2() {
if lte56bits {
errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx)
} else {
errCode = sequenceDecs_decode_bmi2(s, br, &ctx)
}
} else {
if lte56bits {
errCode = sequenceDecs_decode_56_amd64(s, br, &ctx)
} else {
errCode = sequenceDecs_decode_amd64(s, br, &ctx)
}
}
if errCode != 0 {
i := len(seqs) - ctx.iteration - 1
switch errCode {
case errorMatchLenOfsMismatch:
ml := ctx.seqs[i].ml
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)

case errorMatchLenTooBig:
ml := ctx.seqs[i].ml
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)

case errorNotEnoughLiterals:
ll := ctx.seqs[i].ll
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
}

return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
}

if ctx.litRemain < 0 {
return fmt.Errorf("literal count is too big: total available %d, total requested %d",
len(s.literals), len(s.literals)-ctx.litRemain)
}

s.seqSize += ctx.litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
}
return err
}

// --------------------------------------------------------------------------------

type executeAsmContext struct {
seqs []seqVals
seqIndex int
out []byte
history []byte
literals []byte
outPosition int
litPosition int
windowSize int
}

// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm.
//
// Returns false if a match offset is too big.
//
// Please refer to seqdec_generic.go for the reference implementation.
//go:noescape
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool

// executeSimple handles cases when dictionary is not used.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
// Ensure we have enough output size...
if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) {
addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc
s.out = append(s.out, make([]byte, addBytes)...)
s.out = s.out[:len(s.out)-addBytes]
}

if debugDecoder {
printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
}

var t = len(s.out)
out := s.out[:t+s.seqSize]

ctx := executeAsmContext{
seqs: seqs,
seqIndex: 0,
out: out,
history: hist,
outPosition: t,
litPosition: 0,
literals: s.literals,
windowSize: s.windowSize,
}

ok := sequenceDecs_executeSimple_amd64(&ctx)
if !ok {
return fmt.Errorf("match offset (%d) bigger than current history (%d)",
seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist))
}
s.literals = s.literals[ctx.litPosition:]
t = ctx.outPosition

// Add final literals
copy(out[t:], s.literals)
if debugDecoder {
t += len(s.literals)
if t != len(out) {
panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
}
}
s.out = out

return nil
}
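`decodeSyncSimple` above picks between four assembly entry points based on BMI2 support and output-capacity safety. The same dispatch idea, reduced to a runnable sketch using the public golang.org/x/sys/cpu package instead of the module-internal cpuinfo (the two functions are hypothetical stand-ins for the asm routines):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func decodeBMI2() string    { return "BMI2 fast path" }     // hypothetical
func decodeGeneric() string { return "generic amd64 path" } // hypothetical

func main() {
	// Branch to the widest instruction set the CPU supports, mirroring
	// the cpuinfo.HasBMI2() check in the diff.
	if cpu.X86.HasBMI2 {
		fmt.Println(decodeBMI2())
	} else {
		fmt.Println(decodeGeneric())
	}
}
```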
3519 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s generated vendored Normal file
File diff suppressed because it is too large
237 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go generated vendored Normal file
@ -0,0 +1,237 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm

package zstd

import (
"fmt"
"io"
)

// decode sequences from the stream with the provided history but without dictionary.
func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
return false, nil
}

// decode sequences from the stream without the provided history.
func (s *sequenceDecs) decode(seqs []seqVals) error {
br := s.br

// Grab full sizes tables, to avoid bounds checks.
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
s.seqSize = 0
litRemain := len(s.literals)

maxBlockSize := maxCompressedBlockSize
if s.windowSize < maxBlockSize {
maxBlockSize = s.windowSize
}
for i := range seqs {
var ll, mo, ml int
if br.off > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)

// Final will not read from stream.
var llB, mlB, moB uint8
ll, llB = llState.final()
ml, mlB = mlState.final()
mo, moB = ofState.final()

// extra bits are stored in reverse order.
br.fillFast()
mo += br.getBits(moB)
if s.maxBits > 32 {
br.fillFast()
}
ml += br.getBits(mlB)
ll += br.getBits(llB)

if moB > 1 {
s.prevOffset[2] = s.prevOffset[1]
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = mo
} else {
// mo = s.adjustOffset(mo, ll, moB)
// Inlined for rather big speedup
if ll == 0 {
// There is an exception though, when current sequence's literals_length = 0.
// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
mo++
}

if mo == 0 {
mo = s.prevOffset[0]
} else {
var temp int
if mo == 3 {
temp = s.prevOffset[0] - 1
} else {
temp = s.prevOffset[mo]
}

if temp == 0 {
// 0 is not valid; input is corrupted; force offset to 1
println("WARNING: temp was 0")
temp = 1
}

if mo != 1 {
s.prevOffset[2] = s.prevOffset[1]
}
s.prevOffset[1] = s.prevOffset[0]
s.prevOffset[0] = temp
mo = temp
}
}
br.fillFast()
} else {
if br.overread() {
if debugDecoder {
printf("reading sequence %d, exceeded available data\n", i)
}
return io.ErrUnexpectedEOF
}
ll, mo, ml = s.next(br, llState, mlState, ofState)
br.fill()
}

if debugSequences {
println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
}
// Evaluate.
// We might be doing this async, so do it early.
if mo == 0 && ml > 0 {
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
}
if ml > maxMatchLen {
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
}
s.seqSize += ll + ml
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
litRemain -= ll
if litRemain < 0 {
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
}
seqs[i] = seqVals{
ll: ll,
ml: ml,
mo: mo,
}
if i == len(seqs)-1 {
// This is the last sequence, so we shouldn't update state.
break
}

// Manually inlined, ~ 5-20% faster
// Update all 3 states at once. Approx 20% faster.
nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
if nBits == 0 {
llState = llTable[llState.newState()&maxTableMask]
mlState = mlTable[mlState.newState()&maxTableMask]
ofState = ofTable[ofState.newState()&maxTableMask]
} else {
bits := br.get32BitsFast(nBits)
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
llState = llTable[(llState.newState()+lowBits)&maxTableMask]

lowBits = uint16(bits >> (ofState.nbBits() & 31))
lowBits &= bitMask[mlState.nbBits()&15]
mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]

lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
}
}
s.seqSize += litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
}
err := br.close()
if err != nil {
printf("Closing sequences: %v, %+v\n", err, *br)
}
return err
}

// executeSimple handles cases when a dictionary is not used.
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error {
// Ensure we have enough output size...
if len(s.out)+s.seqSize > cap(s.out) {
addBytes := s.seqSize + len(s.out)
s.out = append(s.out, make([]byte, addBytes)...)
s.out = s.out[:len(s.out)-addBytes]
}

if debugDecoder {
printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize)
}

var t = len(s.out)
out := s.out[:t+s.seqSize]

for _, seq := range seqs {
// Add literals
copy(out[t:], s.literals[:seq.ll])
t += seq.ll
s.literals = s.literals[seq.ll:]

// Malformed input
if seq.mo > t+len(hist) || seq.mo > s.windowSize {
return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
}

// Copy from history.
if v := seq.mo - t; v > 0 {
// v is the start position in history from end.
start := len(hist) - v
if seq.ml > v {
// Some goes into the current block.
// Copy remainder of history
copy(out[t:], hist[start:])
t += v
seq.ml -= v
} else {
copy(out[t:], hist[start:start+seq.ml])
t += seq.ml
continue
}
}

// We must be in the current buffer now
if seq.ml > 0 {
start := t - seq.mo
if seq.ml <= t-start {
// No overlap
copy(out[t:], out[start:start+seq.ml])
t += seq.ml
} else {
// Overlapping copy
// Extend destination slice and copy one byte at the time.
src := out[start : start+seq.ml]
dst := out[t:]
dst = dst[:len(src)]
t += len(src)
// Destination is the space we just added.
for i := range src {
dst[i] = src[i]
}
}
}
}
// Add final literals
copy(out[t:], s.literals)
if debugDecoder {
t += len(s.literals)
if t != len(out) {
panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
}
}
s.out = out

return nil
}
50 vendor/github.com/klauspost/compress/zstd/zip.go generated vendored
@ -21,23 +21,34 @@ const ZipMethodPKWare = 20
var zipReaderPool sync.Pool

// newZipReader creates a pooled zip decompressor.
func newZipReader(r io.Reader) io.ReadCloser {
dec, ok := zipReaderPool.Get().(*Decoder)
if ok {
dec.Reset(r)
} else {
d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
if err != nil {
panic(err)
}
dec = d
func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser {
pool := &zipReaderPool
if len(opts) > 0 {
opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...)
// Force concurrency 1
opts = append(opts, WithDecoderConcurrency(1))
// Create our own pool
pool = &sync.Pool{}
}
return func(r io.Reader) io.ReadCloser {
dec, ok := pool.Get().(*Decoder)
if ok {
dec.Reset(r)
} else {
d, err := NewReader(r, opts...)
if err != nil {
panic(err)
}
dec = d
}
return &pooledZipReader{dec: dec, pool: pool}
}
return &pooledZipReader{dec: dec}
}

type pooledZipReader struct {
mu sync.Mutex // guards Close and Read
dec *Decoder
mu sync.Mutex // guards Close and Read
pool *sync.Pool
dec *Decoder
}

func (r *pooledZipReader) Read(p []byte) (n int, err error) {

@ -48,8 +59,8 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
}
dec, err := r.dec.Read(p)
if err == io.EOF {
err = r.dec.Reset(nil)
zipReaderPool.Put(r.dec)
r.dec.Reset(nil)
r.pool.Put(r.dec)
r.dec = nil
}
return dec, err

@ -61,7 +72,7 @@ func (r *pooledZipReader) Close() error {
var err error
if r.dec != nil {
err = r.dec.Reset(nil)
zipReaderPool.Put(r.dec)
r.pool.Put(r.dec)
r.dec = nil
}
return err

@ -115,6 +126,9 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {

// ZipDecompressor returns a decompressor that can be registered with zip libraries.
// See ZipCompressor for example.
func ZipDecompressor() func(r io.Reader) io.ReadCloser {
return newZipReader
// Options can be specified. WithDecoderConcurrency(1) is forced,
// and by default a 128MB maximum decompression window is specified.
// The window size can be overridden if required.
func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser {
return newZipReader(opts...)
}
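A hedged sketch of wiring the updated `ZipDecompressor` into the standard library's archive/zip; the archive name is an illustrative assumption, and `ZipMethodWinZip` is the WinZip method ID defined alongside `ZipMethodPKWare` in the same file:

```go
package main

import (
	"archive/zip"
	"fmt"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	f, err := os.Open("archive.zip") // hypothetical zstd-compressed zip file
	if err != nil {
		panic(err)
	}
	defer f.Close()
	st, err := f.Stat()
	if err != nil {
		panic(err)
	}
	zr, err := zip.NewReader(f, st.Size())
	if err != nil {
		panic(err)
	}
	// Register zstd for the WinZip method ID. Options such as
	// WithDecoderMaxWindow may be passed to override the 128MB default.
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())
	for _, file := range zr.File {
		fmt.Println(file.Name)
	}
}
```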
2 vendor/github.com/urfave/cli/v2/LICENSE generated vendored
@ -1,6 +1,6 @@
MIT License

Copyright (c) 2016 Jeremy Saenz & Contributors
Copyright (c) 2022 urfave/cli maintainers

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
4 vendor/github.com/urfave/cli/v2/README.md generated vendored
@ -78,3 +78,7 @@ export PATH=$PATH:$GOPATH/bin
cli is tested against multiple versions of Go on Linux, and against the latest
released version of Go on OS X and Windows. This project uses Github Actions for
builds. To see our currently supported go versions and platforms, look at the [./.github/workflows/cli.yml](https://github.com/urfave/cli/blob/main/.github/workflows/cli.yml).

## License

See [`LICENSE`](./LICENSE)
4 vendor/github.com/urfave/cli/v2/flag.go generated vendored
@ -402,3 +402,7 @@ func flagFromEnvOrFile(envVars []string, filePath string) (val string, ok bool)
}
return "", false
}

func flagSplitMultiValues(val string) []string {
return strings.Split(val, ",")
}
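A runnable sketch of how this new helper combines with the per-element trimming and parsing that the slice flags below now perform; the sample input is illustrative:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// flagSplitMultiValues mirrors the helper added in the diff.
func flagSplitMultiValues(val string) []string {
	return strings.Split(val, ",")
}

func main() {
	// Each comma-separated element is trimmed and parsed independently,
	// as the updated Set methods now do.
	var out []float64
	for _, s := range flagSplitMultiValues("1.5, 2.5,3") {
		f, err := strconv.ParseFloat(strings.TrimSpace(s), 64)
		if err != nil {
			panic(err)
		}
		out = append(out, f)
	}
	fmt.Println(out) // [1.5 2.5 3]
}
```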
14 vendor/github.com/urfave/cli/v2/flag_float64_slice.go generated vendored
@ -43,12 +43,14 @@ func (f *Float64Slice) Set(value string) error {
return nil
}

tmp, err := strconv.ParseFloat(value, 64)
if err != nil {
return err
}
for _, s := range flagSplitMultiValues(value) {
tmp, err := strconv.ParseFloat(strings.TrimSpace(s), 64)
if err != nil {
return err
}

f.slice = append(f.slice, tmp)
f.slice = append(f.slice, tmp)
}
return nil
}

@ -151,7 +153,7 @@ func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error {
if val != "" {
f.Value = &Float64Slice{}

for _, s := range strings.Split(val, ",") {
for _, s := range flagSplitMultiValues(val) {
if err := f.Value.Set(strings.TrimSpace(s)); err != nil {
return fmt.Errorf("could not parse %q as float64 slice value for flag %s: %s", f.Value, f.Name, err)
}
14 vendor/github.com/urfave/cli/v2/flag_int64_slice.go generated vendored
@ -43,12 +43,14 @@ func (i *Int64Slice) Set(value string) error {
return nil
}

tmp, err := strconv.ParseInt(value, 0, 64)
if err != nil {
return err
}
for _, s := range flagSplitMultiValues(value) {
tmp, err := strconv.ParseInt(strings.TrimSpace(s), 0, 64)
if err != nil {
return err
}

i.slice = append(i.slice, tmp)
i.slice = append(i.slice, tmp)
}

return nil
}

@ -151,7 +153,7 @@ func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error {
if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
f.Value = &Int64Slice{}

for _, s := range strings.Split(val, ",") {
for _, s := range flagSplitMultiValues(val) {
if err := f.Value.Set(strings.TrimSpace(s)); err != nil {
return fmt.Errorf("could not parse %q as int64 slice value for flag %s: %s", val, f.Name, err)
}
14 vendor/github.com/urfave/cli/v2/flag_int_slice.go generated vendored
@ -54,12 +54,14 @@ func (i *IntSlice) Set(value string) error {
return nil
}

tmp, err := strconv.ParseInt(value, 0, 64)
if err != nil {
return err
}
for _, s := range flagSplitMultiValues(value) {
tmp, err := strconv.ParseInt(strings.TrimSpace(s), 0, 64)
if err != nil {
return err
}

i.slice = append(i.slice, int(tmp))
i.slice = append(i.slice, int(tmp))
}

return nil
}

@ -162,7 +164,7 @@ func (f *IntSliceFlag) Apply(set *flag.FlagSet) error {
if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
f.Value = &IntSlice{}

for _, s := range strings.Split(val, ",") {
for _, s := range flagSplitMultiValues(val) {
if err := f.Value.Set(strings.TrimSpace(s)); err != nil {
return fmt.Errorf("could not parse %q as int slice value for flag %s: %s", val, f.Name, err)
}
6 vendor/github.com/urfave/cli/v2/flag_string_slice.go generated vendored
@ -42,7 +42,9 @@ func (s *StringSlice) Set(value string) error {
return nil
}

s.slice = append(s.slice, value)
for _, t := range flagSplitMultiValues(value) {
s.slice = append(s.slice, strings.TrimSpace(t))
}

return nil
}

@ -160,7 +162,7 @@ func (f *StringSliceFlag) Apply(set *flag.FlagSet) error {
destination = f.Destination
}

for _, s := range strings.Split(val, ",") {
for _, s := range flagSplitMultiValues(val) {
if err := destination.Set(strings.TrimSpace(s)); err != nil {
return fmt.Errorf("could not parse %q as string value for flag %s: %s", val, f.Name, err)
}
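End to end, these changes let one flag value or environment variable carry multiple entries. A hedged usage sketch (the flag and variable names are illustrative):

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.StringSliceFlag{
				Name:    "tag",
				EnvVars: []string{"TAGS"}, // TAGS="a,b,c" now yields three values
			},
		},
		Action: func(c *cli.Context) error {
			fmt.Println(c.StringSlice("tag")) // e.g. [a b c]
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		panic(err)
	}
}
```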
6 vendor/golang.org/x/sys/unix/syscall_aix.go generated vendored
@ -37,6 +37,7 @@ func Creat(path string, mode uint32) (fd int, err error) {
}

//sys utimes(path string, times *[2]Timeval) (err error)

func Utimes(path string, tv []Timeval) error {
if len(tv) != 2 {
return EINVAL

@ -45,6 +46,7 @@ func Utimes(path string, tv []Timeval) error {
}

//sys utimensat(dirfd int, path string, times *[2]Timespec, flag int) (err error)

func UtimesNano(path string, ts []Timespec) error {
if len(ts) != 2 {
return EINVAL

@ -300,11 +302,13 @@ func direntNamlen(buf []byte) (uint64, bool) {
}

//sys getdirent(fd int, buf []byte) (n int, err error)

func Getdents(fd int, buf []byte) (n int, err error) {
return getdirent(fd, buf)
}

//sys wait4(pid Pid_t, status *_C_int, options int, rusage *Rusage) (wpid Pid_t, err error)

func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) {
var status _C_int
var r Pid_t

@ -372,6 +376,7 @@ func (w WaitStatus) TrapCause() int { return -1 }
//sys fcntl(fd int, cmd int, arg int) (val int, err error)

//sys fsyncRange(fd int, how int, start int64, length int64) (err error) = fsync_range

func Fsync(fd int) error {
return fsyncRange(fd, O_SYNC, 0, 0)
}

@ -536,6 +541,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) {
//sys Getsystemcfg(label int) (n uint64)

//sys umount(target string) (err error)

func Unmount(target string, flags int) (err error) {
if flags != 0 {
// AIX doesn't have any flags for umount.
2 vendor/golang.org/x/sys/unix/syscall_dragonfly.go generated vendored
@ -125,11 +125,13 @@ func Pipe2(p []int, flags int) (err error) {
}

//sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error)

func pread(fd int, p []byte, offset int64) (n int, err error) {
return extpread(fd, p, 0, offset)
}

//sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error)

func pwrite(fd int, p []byte, offset int64) (n int, err error) {
return extpwrite(fd, p, 0, offset)
}
112 vendor/golang.org/x/sys/unix/syscall_linux.go generated vendored
@ -512,24 +512,24 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) {
//
// Server example:
//
// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{
// Channel: 1,
// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00
// })
// _ = Listen(fd, 1)
// nfd, sa, _ := Accept(fd)
// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd)
// Read(nfd, buf)
// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
// _ = unix.Bind(fd, &unix.SockaddrRFCOMM{
// Channel: 1,
// Addr: [6]uint8{0, 0, 0, 0, 0, 0}, // BDADDR_ANY or 00:00:00:00:00:00
// })
// _ = Listen(fd, 1)
// nfd, sa, _ := Accept(fd)
// fmt.Printf("conn addr=%v fd=%d", sa.(*unix.SockaddrRFCOMM).Addr, nfd)
// Read(nfd, buf)
//
// Client example:
//
// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
// _ = Connect(fd, &SockaddrRFCOMM{
// Channel: 1,
// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11
// })
// Write(fd, []byte(`hello`))
// fd, _ := Socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM)
// _ = Connect(fd, &SockaddrRFCOMM{
// Channel: 1,
// Addr: [6]byte{0x11, 0x22, 0x33, 0xaa, 0xbb, 0xcc}, // CC:BB:AA:33:22:11
// })
// Write(fd, []byte(`hello`))
type SockaddrRFCOMM struct {
// Addr represents a bluetooth address, byte ordering is little-endian.
Addr [6]uint8

@ -556,12 +556,12 @@ func (sa *SockaddrRFCOMM) sockaddr() (unsafe.Pointer, _Socklen, error) {
// The SockaddrCAN struct must be bound to the socket file descriptor
// using Bind before the CAN socket can be used.
//
// // Read one raw CAN frame
// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW)
// addr := &SockaddrCAN{Ifindex: index}
// Bind(fd, addr)
// frame := make([]byte, 16)
// Read(fd, frame)
// // Read one raw CAN frame
// fd, _ := Socket(AF_CAN, SOCK_RAW, CAN_RAW)
// addr := &SockaddrCAN{Ifindex: index}
// Bind(fd, addr)
// frame := make([]byte, 16)
// Read(fd, frame)
//
// The full SocketCAN documentation can be found in the linux kernel
// archives at: https://www.kernel.org/doc/Documentation/networking/can.txt

@ -632,13 +632,13 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) {
// Here is an example of using an AF_ALG socket with SHA1 hashing.
// The initial socket setup process is as follows:
//
// // Open a socket to perform SHA1 hashing.
// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0)
// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"}
// unix.Bind(fd, addr)
// // Note: unix.Accept does not work at this time; must invoke accept()
// // manually using unix.Syscall.
// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0)
// // Open a socket to perform SHA1 hashing.
// fd, _ := unix.Socket(unix.AF_ALG, unix.SOCK_SEQPACKET, 0)
// addr := &unix.SockaddrALG{Type: "hash", Name: "sha1"}
// unix.Bind(fd, addr)
// // Note: unix.Accept does not work at this time; must invoke accept()
// // manually using unix.Syscall.
// hashfd, _, _ := unix.Syscall(unix.SYS_ACCEPT, uintptr(fd), 0, 0)
//
// Once a file descriptor has been returned from Accept, it may be used to
// perform SHA1 hashing. The descriptor is not safe for concurrent use, but

@ -647,39 +647,39 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) {
// When hashing a small byte slice or string, a single Write and Read may
// be used:
//
// // Assume hashfd is already configured using the setup process.
// hash := os.NewFile(hashfd, "sha1")
// // Hash an input string and read the results. Each Write discards
// // previous hash state. Read always reads the current state.
// b := make([]byte, 20)
// for i := 0; i < 2; i++ {
// io.WriteString(hash, "Hello, world.")
// hash.Read(b)
// fmt.Println(hex.EncodeToString(b))
// }
// // Output:
// // 2ae01472317d1935a84797ec1983ae243fc6aa28
// // 2ae01472317d1935a84797ec1983ae243fc6aa28
// // Assume hashfd is already configured using the setup process.
// hash := os.NewFile(hashfd, "sha1")
// // Hash an input string and read the results. Each Write discards
// // previous hash state. Read always reads the current state.
// b := make([]byte, 20)
// for i := 0; i < 2; i++ {
// io.WriteString(hash, "Hello, world.")
// hash.Read(b)
// fmt.Println(hex.EncodeToString(b))
// }
// // Output:
// // 2ae01472317d1935a84797ec1983ae243fc6aa28
// // 2ae01472317d1935a84797ec1983ae243fc6aa28
//
// For hashing larger byte slices, or byte streams such as those read from
// a file or socket, use Sendto with MSG_MORE to instruct the kernel to update
// the hash digest instead of creating a new one for a given chunk and finalizing it.
//
// // Assume hashfd and addr are already configured using the setup process.
// hash := os.NewFile(hashfd, "sha1")
// // Hash the contents of a file.
// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz")
// b := make([]byte, 4096)
// for {
// n, err := f.Read(b)
// if err == io.EOF {
// break
// }
// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr)
// }
// hash.Read(b)
// fmt.Println(hex.EncodeToString(b))
// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5
// // Assume hashfd and addr are already configured using the setup process.
// hash := os.NewFile(hashfd, "sha1")
// // Hash the contents of a file.
// f, _ := os.Open("/tmp/linux-4.10-rc7.tar.xz")
// b := make([]byte, 4096)
// for {
// n, err := f.Read(b)
// if err == io.EOF {
// break
// }
// unix.Sendto(hashfd, b[:n], unix.MSG_MORE, addr)
// }
// hash.Read(b)
// fmt.Println(hex.EncodeToString(b))
// // Output: 85cdcad0c06eef66f805ecce353bec9accbeecc5
//
// For more information, see: http://www.chronox.de/crypto-API/crypto/userspace-if.html.
type SockaddrALG struct {
2 vendor/golang.org/x/sys/unix/syscall_openbsd.go generated vendored
@ -81,6 +81,7 @@ func Pipe(p []int) (err error) {
}

//sysnb pipe2(p *[2]_C_int, flags int) (err error)

func Pipe2(p []int, flags int) error {
if len(p) != 2 {
return EINVAL

@ -95,6 +96,7 @@ func Pipe2(p []int, flags int) error {
}

//sys Getdents(fd int, buf []byte) (n int, err error)

func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) {
n, err = Getdents(fd, buf)
if err != nil || basep == nil {
10 vendor/golang.org/x/sys/windows/exec_windows.go generated vendored
@ -15,11 +15,11 @@ import (
// in http://msdn.microsoft.com/en-us/library/ms880421.
// This function returns "" (2 double quotes) if s is empty.
// Alternatively, these transformations are done:
// - every back slash (\) is doubled, but only if immediately
// followed by double quote (");
// - every double quote (") is escaped by back slash (\);
// - finally, s is wrapped with double quotes (arg -> "arg"),
// but only if there is space or tab inside s.
// - every back slash (\) is doubled, but only if immediately
// followed by double quote (");
// - every double quote (") is escaped by back slash (\);
// - finally, s is wrapped with double quotes (arg -> "arg"),
// but only if there is space or tab inside s.
func EscapeArg(s string) string {
if len(s) == 0 {
return "\"\""
1 vendor/golang.org/x/sys/windows/syscall_windows.go generated vendored
@ -623,7 +623,6 @@ var (

func getStdHandle(stdhandle uint32) (fd Handle) {
	r, _ := GetStdHandle(stdhandle)
	CloseOnExec(r)
	return r
}
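getStdHandle is an internal helper around the exported GetStdHandle. A hedged sketch of the equivalent public-API call (Windows-only; not taken from the diff):

    package main

    import "golang.org/x/sys/windows"

    func main() {
        // GetStdHandle returns the handle for a standard device;
        // the helper above wraps this call and keeps only the handle.
        h, err := windows.GetStdHandle(windows.STD_OUTPUT_HANDLE)
        if err != nil {
            panic(err)
        }
        var written uint32
        // Write directly to the console handle.
        windows.WriteFile(h, []byte("hello\r\n"), &written, nil)
    }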

2 vendor/google.golang.org/api/internal/version.go generated vendored
@ -5,4 +5,4 @@
package internal

// Version is the current tagged release of the library.
const Version = "0.75.0"
const Version = "0.77.0"

21 vendor/modules.txt vendored
@ -34,7 +34,7 @@ github.com/VictoriaMetrics/metricsql/binaryop
# github.com/VividCortex/ewma v1.2.0
## explicit; go 1.12
github.com/VividCortex/ewma
# github.com/aws/aws-sdk-go v1.44.0
# github.com/aws/aws-sdk-go v1.44.4
## explicit; go 1.11
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/arn
@ -93,7 +93,7 @@ github.com/beorn7/perks/quantile
# github.com/cespare/xxhash/v2 v2.1.2
## explicit; go 1.11
github.com/cespare/xxhash/v2
# github.com/cheggaaa/pb/v3 v3.0.8
# github.com/cheggaaa/pb/v3 v3.0.9-0.20211222075416-90c02fa07ea4
## explicit; go 1.12
github.com/cheggaaa/pb/v3
github.com/cheggaaa/pb/v3/termutil
@ -128,8 +128,8 @@ github.com/golang/protobuf/ptypes/timestamp
# github.com/golang/snappy v0.0.4
## explicit
github.com/golang/snappy
# github.com/google/go-cmp v0.5.7
## explicit; go 1.11
# github.com/google/go-cmp v0.5.8
## explicit; go 1.13
github.com/google/go-cmp/cmp
github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
@ -151,13 +151,14 @@ github.com/influxdata/influxdb/pkg/escape
# github.com/jmespath/go-jmespath v0.4.0
## explicit; go 1.14
github.com/jmespath/go-jmespath
# github.com/klauspost/compress v1.15.1
## explicit; go 1.15
# github.com/klauspost/compress v1.15.2
## explicit; go 1.16
github.com/klauspost/compress
github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
github.com/klauspost/compress/gzip
github.com/klauspost/compress/huff0
github.com/klauspost/compress/internal/cpuinfo
github.com/klauspost/compress/internal/snapref
github.com/klauspost/compress/zlib
github.com/klauspost/compress/zstd
@ -220,7 +221,7 @@ github.com/rivo/uniseg
# github.com/russross/blackfriday/v2 v2.1.0
## explicit
github.com/russross/blackfriday/v2
# github.com/urfave/cli/v2 v2.5.0
# github.com/urfave/cli/v2 v2.5.1
## explicit; go 1.18
github.com/urfave/cli/v2
# github.com/valyala/bytebufferpool v1.0.0
@ -294,7 +295,7 @@ golang.org/x/oauth2/jwt
# golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
## explicit
golang.org/x/sync/errgroup
# golang.org/x/sys v0.0.0-20220422013727-9388b58f7150
# golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba
## explicit; go 1.17
golang.org/x/sys/internal/unsafeheader
golang.org/x/sys/unix
@ -309,7 +310,7 @@ golang.org/x/text/unicode/norm
## explicit; go 1.11
golang.org/x/xerrors
golang.org/x/xerrors/internal
# google.golang.org/api v0.75.0
# google.golang.org/api v0.77.0
## explicit; go 1.15
google.golang.org/api/googleapi
google.golang.org/api/googleapi/transport
@ -342,7 +343,7 @@ google.golang.org/appengine/internal/socket
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/socket
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20220422154200-b37d22cd5731
# google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e
## explicit; go 1.15
google.golang.org/genproto/googleapis/api/annotations
google.golang.org/genproto/googleapis/iam/v1