Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git (synced 2024-12-01 14:47:38 +00:00)

Commit 780b2a139a: Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files
167 changed files with 5758 additions and 1612 deletions
@@ -774,6 +774,8 @@ The shortlist of configuration flags is the following:
 	Interval for checking for changes in '-rule' files. By default the checking is disabled. Send SIGHUP signal in order to force config check for changes. DEPRECATED - see '-configCheckInterval' instead
   -rule.maxResolveDuration duration
 	Limits the maximum duration for automatic alert expiration, which is by default equal to 3 evaluation intervals of the parent group.
+  -rule.resendDelay duration
+	Minimum amount of time to wait before resending an alert to notifier
   -rule.validateExpressions
 	Whether to validate rules expressions via MetricsQL engine (default true)
   -rule.validateTemplates
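A minimal vmalert invocation combining the new flag with the existing resolve-duration cap could look like the following; the rule file path and the datasource/notifier URLs are illustrative placeholders, not values taken from this commit:

```
/path/to/vmalert -rule=/path/to/alert.rules \
  -datasource.url=http://localhost:8428 \
  -notifier.url=http://localhost:9093 \
  -rule.resendDelay=1m \
  -rule.maxResolveDuration=15m
```

With these settings a firing alert is re-sent to the notifier at most once per minute, and its automatic expiration is capped at 15 minutes.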
@@ -550,3 +550,26 @@ func (ar *AlertingRule) Restore(ctx context.Context, q datasource.Querier, lookb
 	}
 	return nil
 }
+
+// alertsToSend walks through the current alerts of AlertingRule
+// and returns only those which should be sent to notifier.
+// Isn't concurrent safe.
+func (ar *AlertingRule) alertsToSend(ts time.Time, resolveDuration, resendDelay time.Duration) []notifier.Alert {
+	var alerts []notifier.Alert
+	for _, a := range ar.alerts {
+		switch a.State {
+		case notifier.StateFiring:
+			if time.Since(a.LastSent) < resendDelay {
+				continue
+			}
+			a.End = ts.Add(resolveDuration)
+			a.LastSent = ts
+			alerts = append(alerts, *a)
+		case notifier.StateInactive:
+			a.End = ts
+			a.LastSent = ts
+			alerts = append(alerts, *a)
+		}
+	}
+	return alerts
+}
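The new `alertsToSend` method is the behavioral core of this change: a firing alert is skipped while it is still inside the resend-delay window, while an inactive alert is always sent once so the notifier can mark it resolved. Below is a self-contained sketch of the same gating logic using simplified stand-in types rather than the vmalert `notifier` API; `time.Since` from the original is replaced by `ts.Sub` so the sketch stays deterministic:

```go
package main

import (
	"fmt"
	"time"
)

type state int

const (
	stateFiring state = iota
	stateInactive
)

type alert struct {
	State    state
	LastSent time.Time
	End      time.Time
}

// alertsToSend mirrors the logic in the diff above: firing alerts are
// skipped while inside the resend-delay window; inactive alerts are
// resolved at ts by setting End = ts.
func alertsToSend(all []*alert, ts time.Time, resolveDuration, resendDelay time.Duration) []alert {
	var out []alert
	for _, a := range all {
		switch a.State {
		case stateFiring:
			if ts.Sub(a.LastSent) < resendDelay {
				continue // sent recently: wait before resending
			}
			a.End = ts.Add(resolveDuration) // auto-expire if updates stop
			a.LastSent = ts
			out = append(out, *a)
		case stateInactive:
			a.End = ts // resolve immediately
			a.LastSent = ts
			out = append(out, *a)
		}
	}
	return out
}

func main() {
	ts := time.Now()
	alerts := []*alert{
		{State: stateFiring, LastSent: ts.Add(-30 * time.Second)}, // within delay: skipped
		{State: stateFiring, LastSent: ts.Add(-2 * time.Minute)},  // outside delay: resent
		{State: stateInactive},                                    // resolved now
	}
	sent := alertsToSend(alerts, ts, 5*time.Minute, time.Minute)
	fmt.Printf("sending %d of %d alerts\n", len(sent), len(alerts)) // sending 2 of 3 alerts
}
```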
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"reflect"
+	"sort"
 	"strings"
 	"testing"
 	"time"
@@ -781,6 +782,76 @@ func TestAlertingRule_Template(t *testing.T) {
 	}
 }
 
+func TestAlertsToSend(t *testing.T) {
+	ts := time.Now()
+	f := func(alerts, expAlerts []*notifier.Alert, resolveDuration, resendDelay time.Duration) {
+		t.Helper()
+		ar := &AlertingRule{alerts: make(map[uint64]*notifier.Alert)}
+		for i, a := range alerts {
+			ar.alerts[uint64(i)] = a
+		}
+		gotAlerts := ar.alertsToSend(ts, resolveDuration, resendDelay)
+		if gotAlerts == nil && expAlerts == nil {
+			return
+		}
+		if len(gotAlerts) != len(expAlerts) {
+			t.Fatalf("expected to get %d alerts; got %d instead",
+				len(expAlerts), len(gotAlerts))
+		}
+		sort.Slice(expAlerts, func(i, j int) bool {
+			return expAlerts[i].Name < expAlerts[j].Name
+		})
+		sort.Slice(gotAlerts, func(i, j int) bool {
+			return gotAlerts[i].Name < gotAlerts[j].Name
+		})
+		for i, exp := range expAlerts {
+			got := gotAlerts[i]
+			if got.LastSent != exp.LastSent {
+				t.Fatalf("expected LastSent to be %v; got %v", exp.LastSent, got.LastSent)
+			}
+			if got.End != exp.End {
+				t.Fatalf("expected End to be %v; got %v", exp.End, got.End)
+			}
+		}
+	}
+
+	f( // send firing alert with custom resolve time
+		[]*notifier.Alert{{State: notifier.StateFiring}},
+		[]*notifier.Alert{{LastSent: ts, End: ts.Add(5 * time.Minute)}},
+		5*time.Minute, time.Minute,
+	)
+	f( // resolve inactive alert at the current timestamp
+		[]*notifier.Alert{{State: notifier.StateInactive}},
+		[]*notifier.Alert{{LastSent: ts, End: ts}},
+		time.Minute, time.Minute,
+	)
+	f( // mixed case of firing and resolved alerts. Names are added for deterministic sorting
+		[]*notifier.Alert{{Name: "a", State: notifier.StateFiring}, {Name: "b", State: notifier.StateInactive}},
+		[]*notifier.Alert{{Name: "a", LastSent: ts, End: ts.Add(5 * time.Minute)}, {Name: "b", LastSent: ts, End: ts}},
+		5*time.Minute, time.Minute,
+	)
+	f( // mixed case of pending and resolved alerts. Names are added for deterministic sorting
+		[]*notifier.Alert{{Name: "a", State: notifier.StatePending}, {Name: "b", State: notifier.StateInactive}},
+		[]*notifier.Alert{{Name: "b", LastSent: ts, End: ts}},
+		5*time.Minute, time.Minute,
+	)
+	f( // attempt to send alert that was already sent in the resendDelay interval
+		[]*notifier.Alert{{State: notifier.StateFiring, LastSent: ts.Add(-time.Second)}},
+		nil,
+		time.Minute, time.Minute,
+	)
+	f( // attempt to send alert that was sent out of the resendDelay interval
+		[]*notifier.Alert{{State: notifier.StateFiring, LastSent: ts.Add(-2 * time.Minute)}},
+		[]*notifier.Alert{{LastSent: ts, End: ts.Add(time.Minute)}},
+		time.Minute, time.Minute,
+	)
+	f( // alert must be sent even if resendDelay interval is 0
+		[]*notifier.Alert{{State: notifier.StateFiring, LastSent: ts.Add(-time.Second)}},
+		[]*notifier.Alert{{LastSent: ts, End: ts.Add(time.Minute)}},
+		time.Minute, 0,
+	)
+}
+
 func newTestRuleWithLabels(name string, labels ...string) *AlertingRule {
 	r := newTestAlertingRule(name, 0)
 	r.Labels = make(map[string]string)
app/vmalert/config/testdata/rules_interval_good.rules (new file, 12 lines, vendored)

@@ -0,0 +1,12 @@
+groups:
+  - name: groupTest
+    interval: 1s
+    rules:
+      - alert: VMRows
+        for: 2s
+        expr: sum(rate(vm_http_request_errors_total[2s])) > 0
+        labels:
+          label: bar
+          host: "{{ $labels.instance }}"
+        annotations:
+          summary: "{{ $value }}"
@@ -277,8 +277,7 @@ func (g *Group) start(ctx context.Context, nts func() []notifier.Notifier, rw *r
 		g.metrics.iterationTotal.Inc()
 		iterationStart := time.Now()
 		if len(g.Rules) > 0 {
-			resolveDuration := getResolveDuration(g.Interval)
-			errs := e.execConcurrently(ctx, g.Rules, g.Concurrency, resolveDuration)
+			errs := e.execConcurrently(ctx, g.Rules, g.Concurrency, getResolveDuration(g.Interval))
 			for err := range errs {
 				if err != nil {
 					logger.Errorf("group %q: %s", g.Name, err)
@@ -291,15 +290,18 @@ func (g *Group) start(ctx context.Context, nts func() []notifier.Notifier, rw *r
 	}
 }
 
-// resolveDuration for alerts is equal to 3 interval evaluations
-// so in case if vmalert stops sending updates for some reason,
-// notifier could automatically resolve the alert.
+// getResolveDuration returns the duration after which firing alert
+// can be considered as resolved.
 func getResolveDuration(groupInterval time.Duration) time.Duration {
-	resolveInterval := groupInterval * 3
-	if *maxResolveDuration > 0 && (resolveInterval > *maxResolveDuration) {
-		return *maxResolveDuration
+	delta := *resendDelay
+	if groupInterval > delta {
+		delta = groupInterval
 	}
-	return resolveInterval
+	resolveDuration := delta * 4
+	if *maxResolveDuration > 0 && resolveDuration > *maxResolveDuration {
+		resolveDuration = *maxResolveDuration
+	}
+	return resolveDuration
 }
 
 type executor struct {
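The rewritten `getResolveDuration` replaces the old `3 × groupInterval` window with `4 × max(groupInterval, resendDelay)`, still capped by `-rule.maxResolveDuration` when that flag is set. A standalone sketch with the flag globals swapped for plain parameters, for illustration only; the printed values mirror rows of `TestResolveDuration` further down:

```go
package main

import (
	"fmt"
	"time"
)

// getResolveDuration computes 4 x max(groupInterval, resendDelay),
// capped by maxResolveDuration when the cap is set (> 0).
func getResolveDuration(groupInterval, resendDelay, maxResolveDuration time.Duration) time.Duration {
	delta := resendDelay
	if groupInterval > delta {
		delta = groupInterval
	}
	resolveDuration := delta * 4
	if maxResolveDuration > 0 && resolveDuration > maxResolveDuration {
		resolveDuration = maxResolveDuration
	}
	return resolveDuration
}

func main() {
	fmt.Println(getResolveDuration(time.Minute, 0, 0))                         // 4m0s
	fmt.Println(getResolveDuration(time.Minute, 2*time.Minute, 0))             // 8m0s
	fmt.Println(getResolveDuration(time.Minute, 4*time.Minute, 4*time.Minute)) // 4m0s (capped)
}
```

The widened window gives the notifier headroom: even with a long resend delay, a firing alert's `End` timestamp always lies several resend intervals in the future, so the alert does not auto-resolve between updates.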
@@ -370,19 +372,8 @@ func (e *executor) exec(ctx context.Context, rule Rule, resolveDuration time.Dur
 	if !ok {
 		return nil
 	}
-	var alerts []notifier.Alert
-	for _, a := range ar.alerts {
-		switch a.State {
-		case notifier.StateFiring:
-			a.End = now.Add(resolveDuration)
-			alerts = append(alerts, *a)
-		case notifier.StateInactive:
-			// set End to execStart to notify
-			// that it was just resolved
-			a.End = now
-			alerts = append(alerts, *a)
-		}
-	}
+	alerts := ar.alertsToSend(now, resolveDuration, *resendDelay)
 	if len(alerts) < 1 {
 		return nil
 	}
@@ -158,10 +158,11 @@ func TestGroupStart(t *testing.T) {
 	if err != nil {
 		t.Fatalf("failed to parse rules: %s", err)
 	}
-	const evalInterval = time.Millisecond
 	fs := &fakeQuerier{}
 	fn := &fakeNotifier{}
 
+	const evalInterval = time.Millisecond
 	g := newGroup(groups[0], fs, evalInterval, map[string]string{"cluster": "east-1"})
 	g.Concurrency = 2
 
@@ -223,6 +224,12 @@ func TestGroupStart(t *testing.T) {
 	expectedAlerts := []notifier.Alert{*alert1, *alert2}
 	compareAlerts(t, expectedAlerts, gotAlerts)
 
+	gotAlertsNum := fn.getCounter()
+	if gotAlertsNum < len(expectedAlerts)*2 {
+		t.Fatalf("expected to receive at least %d alerts; got %d instead",
+			len(expectedAlerts)*2, gotAlertsNum)
+	}
+
 	// reset previous data
 	fs.reset()
 	// and set only one datapoint for response
@@ -243,18 +250,29 @@ func TestResolveDuration(t *testing.T) {
 	testCases := []struct {
 		groupInterval time.Duration
 		maxDuration   time.Duration
+		resendDelay   time.Duration
 		expected      time.Duration
 	}{
-		{time.Minute, 0, 3 * time.Minute},
-		{3 * time.Minute, 0, 9 * time.Minute},
-		{time.Minute, 2 * time.Minute, 2 * time.Minute},
-		{0, 0, 0},
+		{time.Minute, 0, 0, 4 * time.Minute},
+		{time.Minute, 0, 2 * time.Minute, 8 * time.Minute},
+		{time.Minute, 4 * time.Minute, 4 * time.Minute, 4 * time.Minute},
+		{2 * time.Minute, time.Minute, 2 * time.Minute, time.Minute},
+		{time.Minute, 2 * time.Minute, 1 * time.Minute, 2 * time.Minute},
+		{2 * time.Minute, 0, 1 * time.Minute, 8 * time.Minute},
+		{0, 0, 0, 0},
 	}
 
 	defaultResolveDuration := *maxResolveDuration
-	defer func() { *maxResolveDuration = defaultResolveDuration }()
+	defaultResendDelay := *resendDelay
+	defer func() {
+		*maxResolveDuration = defaultResolveDuration
+		*resendDelay = defaultResendDelay
+	}()
 
 	for _, tc := range testCases {
 		t.Run(fmt.Sprintf("%v-%v-%v", tc.groupInterval, tc.expected, tc.maxDuration), func(t *testing.T) {
 			*maxResolveDuration = tc.maxDuration
+			*resendDelay = tc.resendDelay
 			got := getResolveDuration(tc.groupInterval)
 			if got != tc.expected {
 				t.Errorf("expected to have %v; got %v", tc.expected, got)
@@ -61,6 +61,8 @@ func (fq *fakeQuerier) Query(_ context.Context, _ string) ([]datasource.Metric,
 type fakeNotifier struct {
 	sync.Mutex
 	alerts []notifier.Alert
+	// records number of received alerts in total
+	counter int
 }
 
 func (*fakeNotifier) Close() {}
@@ -68,10 +70,17 @@ func (*fakeNotifier) Addr() string { return "" }
 func (fn *fakeNotifier) Send(_ context.Context, alerts []notifier.Alert) error {
 	fn.Lock()
 	defer fn.Unlock()
+	fn.counter += len(alerts)
 	fn.alerts = alerts
 	return nil
 }
 
+func (fn *fakeNotifier) getCounter() int {
+	fn.Lock()
+	defer fn.Unlock()
+	return fn.counter
+}
+
 func (fn *fakeNotifier) getAlerts() []notifier.Alert {
 	fn.Lock()
 	defer fn.Unlock()
@@ -47,6 +47,8 @@ Rule files may contain %{ENV_VAR} placeholders, which are substituted by the cor
 	validateExpressions = flag.Bool("rule.validateExpressions", true, "Whether to validate rules expressions via MetricsQL engine")
 	maxResolveDuration  = flag.Duration("rule.maxResolveDuration", 0, "Limits the maximum duration for automatic alert expiration, "+
 		"which is by default equal to 3 evaluation intervals of the parent group.")
+	resendDelay = flag.Duration("rule.resendDelay", 0, "Minimum amount of time to wait before resending an alert to notifier")
+
 	externalURL         = flag.String("external.url", "", "External URL is used as alert's source for sent alerts to the notifier")
 	externalAlertSource = flag.String("external.alert.source", "", `External Alert Source allows to override the Source link for alerts sent to AlertManager for cases where you want to build a custom link to Grafana, Prometheus or any other service.
 eg. 'explore?orgId=1&left=[\"now-1h\",\"now\",\"VictoriaMetrics\",{\"expr\": \"{{$expr|quotesEscape|crlfEscape|queryEscape}}\"},{\"mode\":\"Metrics\"},{\"ui\":[true,true,true,\"none\"]}]'.If empty '/api/v1/:groupID/alertID/status' is used`)
@@ -30,6 +30,8 @@ type Alert struct {
 	Start time.Time
 	// End defines the moment of time when Alert supposed to expire
 	End time.Time
+	// LastSent defines the moment when Alert was sent last time
+	LastSent time.Time
 	// Value stores the value returned from evaluating expression from Expr field
 	Value float64
 	// ID is the unique identifer for the Alert
@@ -4,7 +4,7 @@ DOCKER_NAMESPACE := victoriametrics
 
 ROOT_IMAGE ?= alpine:3.15.0
 CERTS_IMAGE := alpine:3.15.0
-GO_BUILDER_IMAGE := golang:1.17.7-alpine
+GO_BUILDER_IMAGE := golang:1.18.0-alpine
 BUILDER_IMAGE := local/builder:2.0.0-$(shell echo $(GO_BUILDER_IMAGE) | tr :/ __)-1
 BASE_IMAGE := local/base:1.1.3-$(shell echo $(ROOT_IMAGE) | tr :/ __)-$(shell echo $(CERTS_IMAGE) | tr :/ __)
 
@@ -186,4 +186,4 @@ package-via-docker-386:
 	GOARCH=386 $(MAKE) package-via-docker-goarch-nocgo
 
 remove-docker-images:
-	docker image ls --format '{{.Repository}}\t{{.ID}}' | grep $(DOCKER_NAMESPACE)/ | awk '{print $$2}' | xargs docker image rm -f
+	docker image ls --format '{{.Repository}}\t{{.ID}}' | awk '{print $$2}' | xargs docker image rm -f
@@ -1,9 +1,9 @@
 ARG go_builder_image
 FROM $go_builder_image
 STOPSIGNAL SIGINT
-RUN apk add gcc musl-dev make wget --no-cache && \
+RUN apk add git gcc musl-dev make wget --no-cache && \
     mkdir /opt/cross-builder && \
     wget https://musl.cc/aarch64-linux-musl-cross.tgz -O /opt/cross-builder/aarch64-musl.tgz && \
     cd /opt/cross-builder && \
     tar zxf aarch64-musl.tgz -C ./ && \
     rm /opt/cross-builder/aarch64-musl.tgz
@@ -14,10 +14,14 @@ The following tip changes can be tested by building VictoriaMetrics components f
 
 ## tip
 
+* FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): add support for mTLS communications between cluster components. See [these docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/550).
 * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add ability to use OAuth2 for `-datasource.url`, `-notifier.url` and `-remoteRead.url`. See the corresponding command-line flags containing `oauth2` in their names [here](https://docs.victoriametrics.com/vmalert.html#flags).
 * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add ability to use Bearer Token for `-notifier.url` via `-notifier.bearerToken` and `-notifier.bearerTokenFile` command-line flags. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1824).
 * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `sortByLabel` template function in order to be consistent with Prometheus. See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/template_reference/#functions) for more details.
 * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): improve compliance with [Prometheus Alert Generator Specification](https://github.com/prometheus/compliance/blob/main/alert_generator/specification.md).
+* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `-rule.resendDelay` command-line flag, which specifies the minimum amount of time to wait before resending an alert to Alertmanager (equivalent to the `-rules.alert.resend-delay` option from Prometheus). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1665).
+
+* BUGFIX: [Graphite Render API](https://docs.victoriametrics.com/#graphite-render-api-usage): return an additional point after `until` timestamp in the same way as Graphite does. Previously VictoriaMetrics didn't return this point, which could result in missing last point on the graph.
 
 
 ## [v1.74.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.74.0)
@@ -158,6 +158,11 @@ It is possible manualy setting up a toy cluster on a single host. In this case e
 * `-vmselectAddr` - every `vmstorage` node must listen for a distinct tcp address for accepting requests from `vmselect` nodes.
 
+## mTLS protection
+
+By default `vminsert` and `vmselect` nodes use unencrypted connections to `vmstorage` nodes, since it is assumed that all the cluster components run in a protected environment. [Enterprise version of VictoriaMetrics](https://victoriametrics.com/products/enterprise/) provides optional support for [mTLS connections](https://en.wikipedia.org/wiki/Mutual_authentication#mTLS) between cluster components. Pass `-cluster.tls=true` command-line flag to `vminsert`, `vmselect` and `vmstorage` nodes in order to enable mTLS protection. Additionally, `vminsert` and `vmselect` must be configured with client-side certificates via `-cluster.tlsCertFile`, `-cluster.tlsKeyFile` command-line options. These certificates are verified by `vmstorage` when `vminsert` and `vmselect` dial `vmstorage`. An optional `-cluster.tlsCAFile` command-line flag can be set at `vminsert`, `vmselect` and `vmstorage` for verifying peer certificates issued with custom [certificate authority](https://en.wikipedia.org/wiki/Certificate_authority).
+
 ### Environment variables
 
 Each flag values can be set thru environment variables by following these rules:
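To make the new mTLS section concrete, a sketch of the corresponding invocations is shown below; every certificate path is an illustrative placeholder, and only the `-cluster.tls*` flags themselves come from this commit:

```
# vmstorage verifies client certificates presented by vminsert and vmselect
/path/to/vmstorage -cluster.tls=true \
  -cluster.tlsCertFile=/path/to/server.crt \
  -cluster.tlsKeyFile=/path/to/server.key \
  -cluster.tlsCAFile=/path/to/ca.crt

# vminsert and vmselect present client-side certificates when dialing vmstorage
/path/to/vminsert -cluster.tls=true \
  -cluster.tlsCertFile=/path/to/client.crt \
  -cluster.tlsKeyFile=/path/to/client.key \
  -cluster.tlsCAFile=/path/to/ca.crt
```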
@@ -482,6 +487,14 @@ Report bugs and propose new features [here](https://github.com/VictoriaMetrics/V
 Below is the output for `/path/to/vminsert -help`:
 
 ```
+  -cluster.tls
+	Whether to use TLS for connections to -storageNode. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection
+  -cluster.tlsCAFile string
+	Path to TLS CA file to use for verifying certificates provided by -storageNode. By default system CA is used. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection
+  -cluster.tlsCertFile string
+	Path to client-side TLS certificate file to use when connecting to -storageNode. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection
+  -cluster.tlsKeyFile string
+	Path to client-side TLS key file to use when connecting to -storageNode. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection
   -clusternativeListenAddr string
 	TCP address to listen for data from other vminsert nodes in multi-level cluster setup. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multi-level-cluster-setup . Usually :8400 must be set. Doesn't work if empty
   -csvTrimTimestamp duration
@@ -530,6 +543,8 @@ Below is the output for `/path/to/vminsert -help`:
   -influx.maxLineSize size
 	The maximum size in bytes for a single InfluxDB line during parsing
 	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 262144)
+  -influxDBLabel string
+	Default label for the DB name sent over '?db={db_name}' query parameter (default "db")
   -influxListenAddr string
 	TCP and UDP address to listen for InfluxDB line protocol data. Usually :8189 must be set. Doesn't work if empty. This flag isn't needed when ingesting data over HTTP - just send it to http://<victoriametrics>:8428/write
   -influxMeasurementFieldSeparator string
@@ -611,6 +626,14 @@ Below is the output for `/path/to/vmselect -help`:
 ```
   -cacheDataPath string
 	Path to directory for cache files. Cache isn't saved if empty
+  -cluster.tls
+	Whether to use TLS for connections to -storageNode. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection
+  -cluster.tlsCAFile string
+	Path to TLS CA file to use for verifying certificates provided by -storageNode. By default system CA is used. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection
+  -cluster.tlsCertFile string
+	Path to client-side TLS certificate file to use when connecting to -storageNode. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection
+  -cluster.tlsKeyFile string
+	Path to client-side TLS key file to use when connecting to -storageNode. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection
   -dedup.minScrapeInterval duration
 	Leave only the first sample in every time series per each discrete interval equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication for details
   -downsampling.period array
@@ -737,6 +760,14 @@ Below is the output for `/path/to/vmstorage -help`:
 ```
   -bigMergeConcurrency int
 	The maximum number of CPU cores to use for big merges. Default value is used if set to 0
+  -cluster.tls
+	Whether to use TLS when accepting connections from vminsert and vmselect. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection
+  -cluster.tlsCAFile string
+	Path to TLS CA file to use for verifying certificates provided by vminsert and vmselect. By default system CA is used. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection
+  -cluster.tlsCertFile string
+	Path to server-side TLS certificate file to use when accepting connections from vminsert and vmselect. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection
+  -cluster.tlsKeyFile string
+	Path to server-side TLS key file to use when accepting connections from vminsert and vmselect. See https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#mtls-protection
   -dedup.minScrapeInterval duration
 	Leave only the first sample in every time series per each discrete interval equal to -dedup.minScrapeInterval > 0. See https://docs.victoriametrics.com/#deduplication for details
   -denyQueriesOutsideRetention
@@ -814,6 +845,15 @@ Below is the output for `/path/to/vmstorage -help`:
 	The maximum number of CPU cores to use for small merges. Default value is used if set to 0
   -snapshotAuthKey string
 	authKey, which must be passed in query string to /snapshot* pages
+  -storage.cacheSizeIndexDBDataBlocks size
+	Overrides max size for indexdb/dataBlocks cache. See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#cache-tuning
+	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
+  -storage.cacheSizeIndexDBIndexBlocks size
+	Overrides max size for indexdb/indexBlocks cache. See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#cache-tuning
+	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
+  -storage.cacheSizeStorageTSID size
+	Overrides max size for storage/tsid cache. See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#cache-tuning
+	Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)
   -storage.maxDailySeries int
 	The maximum number of unique series can be added to the storage during the last 24 hours. Excess series are logged and dropped. This can be useful for limiting series churn rate. See also -storage.maxHourlySeries
   -storage.maxHourlySeries int
@@ -840,7 +880,7 @@ Below is the output for `/path/to/vmstorage -help`:
 
 ## VictoriaMetrics Logo
 
-[Zip](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/VM_logo.zip) contains three folders with different image orientation (main color and inverted version).
+[Zip](VM_logo.zip) contains three folders with different image orientation (main color and inverted version).
 
 Files included in each folder:
 
go.mod (16 lines changed)
@@ -11,7 +11,7 @@ require (
 	github.com/VictoriaMetrics/fasthttp v1.1.0
 	github.com/VictoriaMetrics/metrics v1.18.1
 	github.com/VictoriaMetrics/metricsql v0.40.0
-	github.com/aws/aws-sdk-go v1.43.10
+	github.com/aws/aws-sdk-go v1.43.19
 	github.com/cespare/xxhash/v2 v2.1.2
 	github.com/cheggaaa/pb/v3 v3.0.8
 	github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
@@ -19,22 +19,22 @@ require (
 	github.com/go-kit/kit v0.12.0
 	github.com/golang/snappy v0.0.4
 	github.com/influxdata/influxdb v1.9.6
-	github.com/klauspost/compress v1.14.4
+	github.com/klauspost/compress v1.15.1
 	github.com/mattn/go-colorable v0.1.12 // indirect
 	github.com/mattn/go-runewidth v0.0.13 // indirect
 	github.com/oklog/ulid v1.3.1
 	github.com/prometheus/common v0.32.1 // indirect
 	github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
-	github.com/urfave/cli/v2 v2.3.0
+	github.com/urfave/cli/v2 v2.4.0
 	github.com/valyala/fastjson v1.6.3
 	github.com/valyala/fastrand v1.1.0
 	github.com/valyala/fasttemplate v1.2.1
 	github.com/valyala/gozstd v1.16.0
 	github.com/valyala/quicktemplate v1.7.0
 	golang.org/x/net v0.0.0-20220225172249-27dd8689420f
-	golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b
-	golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9
-	google.golang.org/api v0.70.0
+	golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a
+	golang.org/x/sys v0.0.0-20220315194320-039c03cc5b86
+	google.golang.org/api v0.73.0
 	gopkg.in/yaml.v2 v2.4.0
 )
@@ -68,8 +68,8 @@ require (
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20220302033224-9aa15565e42a // indirect
-	google.golang.org/grpc v1.44.0 // indirect
+	google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106 // indirect
+	google.golang.org/grpc v1.45.0 // indirect
 	google.golang.org/protobuf v1.27.1 // indirect
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 )
go.sum (32 lines changed)
@@ -165,8 +165,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
-github.com/aws/aws-sdk-go v1.43.10 h1:lFX6gzTBltYBnlJBjd2DWRCmqn2CbTcs6PW99/Dme7k=
-github.com/aws/aws-sdk-go v1.43.10/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
+github.com/aws/aws-sdk-go v1.43.19 h1:n7YAreaCpcstusW7F0+XiocZxh7rwmcAPO4HTEPJ6mE=
+github.com/aws/aws-sdk-go v1.43.19/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
 github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=
@@ -661,8 +661,8 @@ github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8
 github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
 github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.14.4 h1:eijASRJcobkVtSt81Olfh7JX43osYLwy5krOJo6YEu4=
-github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
+github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
 github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -967,8 +967,8 @@ github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6
 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
 github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
-github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
+github.com/urfave/cli/v2 v2.4.0 h1:m2pxjjDFgDxSPtO8WSdbndj17Wu2y8vOT86wE/tjr+I=
+github.com/urfave/cli/v2 v2.4.0/go.mod h1:NX9W0zmTvedE5oDoOMs2RTC8RvdK98NTYZE5LbaEYPg=
 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
 github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
 github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
@@ -1196,8 +1196,8 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b h1:clP8eMhB30EHdc0bd2Twtq6kgU7yl5ub2cQLSdrv1Dg=
-golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a h1:qfl7ob3DIEs3Ml9oLuPwY2N04gymzAW04WsUQHIClgM=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1312,8 +1312,9 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9 h1:nhht2DYV/Sn3qOayu8lM+cU1ii9sTLUeBQwQQfUHtrs=
-golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220315194320-039c03cc5b86 h1:A9i04dxx7Cribqbs8jf3FQLogkL/CV2YN7hj9KWJCkc=
+golang.org/x/sys v0.0.0-20220315194320-039c03cc5b86/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -1467,8 +1468,9 @@ google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSim
 google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M=
 google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
 google.golang.org/api v0.69.0/go.mod h1:boanBiw+h5c3s+tBPgEzLDRHfFLWV0qXxRHz3ws7C80=
-google.golang.org/api v0.70.0 h1:67zQnAE0T2rB0A3CwLSas0K+SbVzSxP+zTLkQLexeiw=
 google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.73.0 h1:O9bThUh35K1rvUrQwTUQ1eqLC/IYyzUpWavYIO2EXvo=
+google.golang.org/api v0.73.0/go.mod h1:lbd/q6BRFJbdpV6OUCXstVeiI5mL/d3/WifG7iNKnjI=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1558,8 +1560,9 @@ google.golang.org/genproto v0.0.0-20220211171837-173942840c17/go.mod h1:kGP+zUP2
 google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
 google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
 google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
-google.golang.org/genproto v0.0.0-20220302033224-9aa15565e42a h1:uqouglH745GoGeZ1YFZbPBiu961tgi/9Qm5jaorajjQ=
-google.golang.org/genproto v0.0.0-20220302033224-9aa15565e42a/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106 h1:ErU+UA6wxadoU8nWrsy5MZUVBs75K17zUCsUCIfrXCE=
+google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@@ -1593,8 +1596,9 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
 google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
 google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
 google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg=
 google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (generated, vendored, 184 lines changed)
@@ -2293,15 +2293,60 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "eu-west-1",
 			}: endpoint{},
+			endpointKey{
+				Region: "fips-us-east-1",
+			}: endpoint{
+				Hostname: "apprunner-fips.us-east-1.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-east-1",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-east-2",
+			}: endpoint{
+				Hostname: "apprunner-fips.us-east-2.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-east-2",
+				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "fips-us-west-2",
+			}: endpoint{
+				Hostname: "apprunner-fips.us-west-2.amazonaws.com",
+				CredentialScope: credentialScope{
+					Region: "us-west-2",
+				},
+				Deprecated: boxedTrue,
+			},
 			endpointKey{
 				Region: "us-east-1",
 			}: endpoint{},
+			endpointKey{
+				Region:  "us-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "apprunner-fips.us-east-1.amazonaws.com",
+			},
 			endpointKey{
 				Region: "us-east-2",
 			}: endpoint{},
+			endpointKey{
+				Region:  "us-east-2",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "apprunner-fips.us-east-2.amazonaws.com",
+			},
 			endpointKey{
 				Region: "us-west-2",
 			}: endpoint{},
+			endpointKey{
+				Region:  "us-west-2",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "apprunner-fips.us-west-2.amazonaws.com",
+			},
 		},
 	},
 	"appstream2": service{
@@ -2828,6 +2873,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-2",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-3",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -3588,6 +3636,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-northeast-2",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-northeast-3",
+			}: endpoint{},
 			endpointKey{
 				Region: "ap-south-1",
 			}: endpoint{},
@@ -3597,6 +3648,9 @@ var awsPartition = partition{
 			endpointKey{
 				Region: "ap-southeast-2",
 			}: endpoint{},
+			endpointKey{
+				Region: "ap-southeast-3",
+			}: endpoint{},
 			endpointKey{
 				Region: "ca-central-1",
 			}: endpoint{},
@@ -11021,6 +11075,18 @@ var awsPartition = partition{
 	},
 	"ivs": service{
 		Endpoints: serviceEndpoints{
+			endpointKey{
+				Region: "ap-northeast-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "ap-northeast-2",
+			}: endpoint{},
+			endpointKey{
+				Region: "ap-south-1",
+			}: endpoint{},
+			endpointKey{
+				Region: "eu-central-1",
+			}: endpoint{},
 			endpointKey{
 				Region: "eu-west-1",
 			}: endpoint{},
@@ -24172,6 +24238,14 @@ var awsusgovPartition = partition{
 		},
 	},
 	"autoscaling": service{
+		Defaults: endpointDefaults{
+			defaultKey{}: endpoint{},
+			defaultKey{
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "autoscaling.{region}.{dnsSuffix}",
+			},
+		},
 		Endpoints: serviceEndpoints{
 			endpointKey{
 				Region: "us-gov-east-1",
@@ -24356,20 +24430,40 @@ var awsusgovPartition = partition{
 	"cloudtrail": service{
 		Endpoints: serviceEndpoints{
 			endpointKey{
-				Region: "us-gov-east-1",
+				Region: "fips-us-gov-east-1",
 			}: endpoint{
 				Hostname: "cloudtrail.us-gov-east-1.amazonaws.com",
 				CredentialScope: credentialScope{
 					Region: "us-gov-east-1",
 				},
+				Deprecated: boxedTrue,
 			},
 			endpointKey{
-				Region: "us-gov-west-1",
+				Region: "fips-us-gov-west-1",
 			}: endpoint{
 				Hostname: "cloudtrail.us-gov-west-1.amazonaws.com",
 				CredentialScope: credentialScope{
 					Region: "us-gov-west-1",
 				},
+				Deprecated: boxedTrue,
+			},
+			endpointKey{
+				Region: "us-gov-east-1",
+			}: endpoint{},
+			endpointKey{
+				Region:  "us-gov-east-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "cloudtrail.us-gov-east-1.amazonaws.com",
+			},
+			endpointKey{
+				Region: "us-gov-west-1",
+			}: endpoint{},
+			endpointKey{
+				Region:  "us-gov-west-1",
+				Variant: fipsVariant,
+			}: endpoint{
+				Hostname: "cloudtrail.us-gov-west-1.amazonaws.com",
 			},
 		},
 	},
@ -25361,20 +25455,40 @@ var awsusgovPartition = partition{
|
||||||
"events": service{
|
"events": service{
|
||||||
Endpoints: serviceEndpoints{
|
Endpoints: serviceEndpoints{
|
||||||
endpointKey{
|
endpointKey{
|
||||||
Region: "us-gov-east-1",
|
Region: "fips-us-gov-east-1",
|
||||||
}: endpoint{
|
}: endpoint{
|
||||||
Hostname: "events.us-gov-east-1.amazonaws.com",
|
Hostname: "events.us-gov-east-1.amazonaws.com",
|
||||||
CredentialScope: credentialScope{
|
CredentialScope: credentialScope{
|
||||||
Region: "us-gov-east-1",
|
Region: "us-gov-east-1",
|
||||||
},
|
},
|
||||||
|
Deprecated: boxedTrue,
|
||||||
},
|
},
|
||||||
endpointKey{
|
endpointKey{
|
||||||
Region: "us-gov-west-1",
|
Region: "fips-us-gov-west-1",
|
||||||
}: endpoint{
|
}: endpoint{
|
||||||
Hostname: "events.us-gov-west-1.amazonaws.com",
|
Hostname: "events.us-gov-west-1.amazonaws.com",
|
||||||
CredentialScope: credentialScope{
|
CredentialScope: credentialScope{
|
||||||
Region: "us-gov-west-1",
|
Region: "us-gov-west-1",
|
||||||
},
|
},
|
||||||
|
Deprecated: boxedTrue,
|
||||||
|
},
|
||||||
|
endpointKey{
|
||||||
|
Region: "us-gov-east-1",
|
||||||
|
}: endpoint{},
|
||||||
|
endpointKey{
|
||||||
|
Region: "us-gov-east-1",
|
||||||
|
Variant: fipsVariant,
|
||||||
|
}: endpoint{
|
||||||
|
Hostname: "events.us-gov-east-1.amazonaws.com",
|
||||||
|
},
|
||||||
|
endpointKey{
|
||||||
|
Region: "us-gov-west-1",
|
||||||
|
}: endpoint{},
|
||||||
|
endpointKey{
|
||||||
|
Region: "us-gov-west-1",
|
||||||
|
Variant: fipsVariant,
|
||||||
|
}: endpoint{
|
||||||
|
Hostname: "events.us-gov-west-1.amazonaws.com",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -26245,20 +26359,40 @@ var awsusgovPartition = partition{
|
||||||
"logs": service{
|
"logs": service{
|
||||||
Endpoints: serviceEndpoints{
|
Endpoints: serviceEndpoints{
|
||||||
endpointKey{
|
endpointKey{
|
||||||
Region: "us-gov-east-1",
|
Region: "fips-us-gov-east-1",
|
||||||
}: endpoint{
|
}: endpoint{
|
||||||
Hostname: "logs.us-gov-east-1.amazonaws.com",
|
Hostname: "logs.us-gov-east-1.amazonaws.com",
|
||||||
CredentialScope: credentialScope{
|
CredentialScope: credentialScope{
|
||||||
Region: "us-gov-east-1",
|
Region: "us-gov-east-1",
|
||||||
},
|
},
|
||||||
|
Deprecated: boxedTrue,
|
||||||
},
|
},
|
||||||
endpointKey{
|
endpointKey{
|
||||||
Region: "us-gov-west-1",
|
Region: "fips-us-gov-west-1",
|
||||||
}: endpoint{
|
}: endpoint{
|
||||||
Hostname: "logs.us-gov-west-1.amazonaws.com",
|
Hostname: "logs.us-gov-west-1.amazonaws.com",
|
||||||
CredentialScope: credentialScope{
|
CredentialScope: credentialScope{
|
||||||
Region: "us-gov-west-1",
|
Region: "us-gov-west-1",
|
||||||
},
|
},
|
||||||
|
Deprecated: boxedTrue,
|
||||||
|
},
|
||||||
|
endpointKey{
|
||||||
|
Region: "us-gov-east-1",
|
||||||
|
}: endpoint{},
|
||||||
|
endpointKey{
|
||||||
|
Region: "us-gov-east-1",
|
||||||
|
Variant: fipsVariant,
|
||||||
|
}: endpoint{
|
||||||
|
Hostname: "logs.us-gov-east-1.amazonaws.com",
|
||||||
|
},
|
||||||
|
endpointKey{
|
||||||
|
Region: "us-gov-west-1",
|
||||||
|
}: endpoint{},
|
||||||
|
endpointKey{
|
||||||
|
Region: "us-gov-west-1",
|
||||||
|
Variant: fipsVariant,
|
||||||
|
}: endpoint{
|
||||||
|
Hostname: "logs.us-gov-west-1.amazonaws.com",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@ -27594,25 +27728,55 @@ var awsusgovPartition = partition{
|
||||||
"sns": service{
|
"sns": service{
|
||||||
Endpoints: serviceEndpoints{
|
Endpoints: serviceEndpoints{
|
||||||
endpointKey{
|
endpointKey{
|
||||||
Region: "us-gov-east-1",
|
Region: "fips-us-gov-east-1",
|
||||||
}: endpoint{
|
}: endpoint{
|
||||||
Hostname: "sns.us-gov-east-1.amazonaws.com",
|
Hostname: "sns.us-gov-east-1.amazonaws.com",
|
||||||
CredentialScope: credentialScope{
|
CredentialScope: credentialScope{
|
||||||
Region: "us-gov-east-1",
|
Region: "us-gov-east-1",
|
||||||
},
|
},
|
||||||
|
Deprecated: boxedTrue,
|
||||||
|
},
|
||||||
|
endpointKey{
|
||||||
|
Region: "fips-us-gov-west-1",
|
||||||
|
}: endpoint{
|
||||||
|
Hostname: "sns.us-gov-west-1.amazonaws.com",
|
||||||
|
CredentialScope: credentialScope{
|
||||||
|
Region: "us-gov-west-1",
|
||||||
|
},
|
||||||
|
Deprecated: boxedTrue,
|
||||||
|
},
|
||||||
|
endpointKey{
|
||||||
|
Region: "us-gov-east-1",
|
||||||
|
}: endpoint{},
|
||||||
|
endpointKey{
|
||||||
|
Region: "us-gov-east-1",
|
||||||
|
Variant: fipsVariant,
|
||||||
|
}: endpoint{
|
||||||
|
Hostname: "sns.us-gov-east-1.amazonaws.com",
|
||||||
},
|
},
|
||||||
endpointKey{
|
endpointKey{
|
||||||
Region: "us-gov-west-1",
|
Region: "us-gov-west-1",
|
||||||
|
}: endpoint{
|
||||||
|
Protocols: []string{"http", "https"},
|
||||||
|
},
|
||||||
|
endpointKey{
|
||||||
|
Region: "us-gov-west-1",
|
||||||
|
Variant: fipsVariant,
|
||||||
}: endpoint{
|
}: endpoint{
|
||||||
Hostname: "sns.us-gov-west-1.amazonaws.com",
|
Hostname: "sns.us-gov-west-1.amazonaws.com",
|
||||||
Protocols: []string{"http", "https"},
|
Protocols: []string{"http", "https"},
|
||||||
CredentialScope: credentialScope{
|
|
||||||
Region: "us-gov-west-1",
|
|
||||||
},
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"sqs": service{
|
"sqs": service{
|
||||||
|
Defaults: endpointDefaults{
|
||||||
|
defaultKey{}: endpoint{},
|
||||||
|
defaultKey{
|
||||||
|
Variant: fipsVariant,
|
||||||
|
}: endpoint{
|
||||||
|
Hostname: "sqs.{region}.{dnsSuffix}",
|
||||||
|
},
|
||||||
|
},
|
||||||
Endpoints: serviceEndpoints{
|
Endpoints: serviceEndpoints{
|
||||||
endpointKey{
|
endpointKey{
|
||||||
Region: "us-gov-east-1",
|
Region: "us-gov-east-1",
|
||||||
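The hunks above all follow one pattern: each service gains fipsVariant endpoint keys, while the old region-named FIPS entries are kept but marked Deprecated: boxedTrue. A minimal sketch of how a caller might resolve one of these variants, assuming the endpoints.Options.UseFIPSEndpoint field is present in this SDK version:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// Ask the default resolver for the FIPS variant of cloudtrail in
	// us-gov-east-1; with the table above this should resolve via the
	// fipsVariant key rather than the deprecated fips- pseudo-region.
	ep, err := endpoints.DefaultResolver().EndpointFor(
		"cloudtrail", "us-gov-east-1",
		func(o *endpoints.Options) {
			o.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
		},
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(ep.URL) // expected: https://cloudtrail.us-gov-east-1.amazonaws.com
}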
4  vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go  generated  vendored
@@ -15,8 +15,8 @@ import (
 // and determine if a request API error should be retried.
 //
 // client.DefaultRetryer is the SDK's default implementation of the Retryer. It
-// uses the which uses the Request.IsErrorRetryable and Request.IsErrorThrottle
-// methods to determine if the request is retried.
+// uses the Request.IsErrorRetryable and Request.IsErrorThrottle methods to
+// determine if the request is retried.
 type Retryer interface {
 	// RetryRules return the retry delay that should be used by the SDK before
 	// making another request attempt for the failed request.
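The corrected comment describes how DefaultRetryer plugs into the Retryer interface. A common pattern, sketched here under the assumption that only the backoff needs changing, is to embed client.DefaultRetryer and override RetryRules:

package retryx

import (
	"time"

	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/request"
)

// cappedRetryer reuses DefaultRetryer's retry/throttle classification and
// only caps the computed backoff delay.
type cappedRetryer struct {
	client.DefaultRetryer
}

// RetryRules caps the delay suggested by the default implementation.
func (r cappedRetryer) RetryRules(req *request.Request) time.Duration {
	d := r.DefaultRetryer.RetryRules(req)
	if d > 10*time.Second {
		d = 10 * time.Second
	}
	return d
}

// Compile-time check that the sketch still satisfies the interface.
var _ request.Retryer = cappedRetryer{}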
2  vendor/github.com/aws/aws-sdk-go/aws/version.go  generated  vendored
@@ -5,4 +5,4 @@ package aws
 const SDKName = "aws-sdk-go"
 
 // SDKVersion is the version of this SDK
-const SDKVersion = "1.43.10"
+const SDKVersion = "1.43.19"
16  vendor/github.com/aws/aws-sdk-go/service/sts/api.go  generated  vendored
@@ -507,8 +507,9 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
 //
 // Returns a set of temporary security credentials for users who have been authenticated
 // in a mobile or web application with a web identity provider. Example providers
-// include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID
-// Connect-compatible identity provider.
+// include the OAuth 2.0 providers Login with Amazon and Facebook, or any OpenID
+// Connect-compatible identity provider such as Google or Amazon Cognito federated
+// identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html).
 //
 // For mobile applications, we recommend that you use Amazon Cognito. You can
 // use Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide
@@ -1537,7 +1538,7 @@ type AssumeRoleInput struct {
 	// the new session inherits any transitive session tags from the calling session.
 	// If you pass a session tag with the same key as an inherited tag, the operation
 	// fails. To view the inherited tags for a session, see the CloudTrail logs.
-	// For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/session-tags.html#id_session-tags_ctlogs)
+	// For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs)
 	// in the IAM User Guide.
 	Tags []*Tag `type:"list"`
 
@@ -2220,11 +2221,12 @@ type AssumeRoleWithWebIdentityInput struct {
 	// in the IAM User Guide.
 	PolicyArns []*PolicyDescriptorType `type:"list"`
 
-	// The fully qualified host component of the domain name of the identity provider.
+	// The fully qualified host component of the domain name of the OAuth 2.0 identity
+	// provider. Do not specify this value for an OpenID Connect identity provider.
 	//
-	// Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com
-	// and graph.facebook.com are the only supported identity providers for OAuth
-	// 2.0 access tokens. Do not include URL schemes and port numbers.
+	// Currently www.amazon.com and graph.facebook.com are the only supported identity
+	// providers for OAuth 2.0 access tokens. Do not include URL schemes and port
+	// numbers.
 	//
 	// Do not specify this value for OpenID Connect ID tokens.
 	ProviderId *string `min:"4" type:"string"`
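The reworded ProviderId doc boils down to one rule: set the field only for OAuth 2.0 access tokens, never for OIDC ID tokens. A hedged sketch of a caller following that rule (the role ARN is a placeholder):

package stsx

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/sts"
)

// assumeWithOAuthToken sets ProviderId only because this is an OAuth 2.0
// access token from one of the two supported providers; for an OIDC ID
// token the field would simply be omitted.
func assumeWithOAuthToken(svc *sts.STS, accessToken string) (*sts.AssumeRoleWithWebIdentityOutput, error) {
	return svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
		RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity"), // placeholder
		RoleSessionName:  aws.String("app-session"),
		WebIdentityToken: aws.String(accessToken),
		ProviderId:       aws.String("graph.facebook.com"), // no scheme, no port
	})
}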
23  vendor/github.com/klauspost/compress/README.md  generated  vendored
@@ -17,6 +17,29 @@ This package provides various compression algorithms.
 
 # changelog
 
+* Mar 3, 2022 (v1.15.0)
+	* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
+	* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+	* huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
+	* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
+	* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
+	* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+
+<details>
+	<summary>See Details</summary>
+Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
+
+Stream decompression is now faster on asynchronous streams, since the goroutine allocation much more effectively splits the workload. On typical streams this will use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.
+
+While the release has been extensively tested, it is recommended to test when upgrading.
+</details>
+
+* Feb 22, 2022 (v1.14.4)
+	* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
+	* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
+	* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501)
+	* huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+
 * Feb 17, 2022 (v1.14.3)
 	* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
 	* flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
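The v1.15.0 note about "synchronous" streams means that an encoder built with concurrency 1 runs without spawning goroutines, which makes pooling straightforward. A minimal sketch using the documented zstd options:

package zstdx

import (
	"bytes"

	"github.com/klauspost/compress/zstd"
)

// compressSync compresses src with encoder concurrency 1, which per the
// changelog entry above keeps the whole stream on the calling goroutine.
func compressSync(src []byte) ([]byte, error) {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf, zstd.WithEncoderConcurrency(1))
	if err != nil {
		return nil, err
	}
	if _, err := enc.Write(src); err != nil {
		enc.Close()
		return nil, err
	}
	if err := enc.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}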
17  vendor/github.com/klauspost/compress/flate/level1.go  generated  vendored
@@ -154,7 +154,15 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 				l++
 			}
 			if nextEmit < s {
-				emitLiteral(dst, src[nextEmit:s])
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
 			}
 
 			// Save the match found
@@ -169,8 +177,11 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 			for xlength > 0 {
 				xl := xlength
 				if xl > 258 {
-					// We need to have at least baseMatchLength left over for next loop.
-					xl = 258 - baseMatchLength
+					if xl > 258+baseMatchLength {
+						xl = 258
+					} else {
+						xl = 258 - baseMatchLength
+					}
 				}
 				xlength -= xl
 				xl -= baseMatchLength
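The second hunk fixes the match-length chunking: deflate can encode at most 258 bytes per match, but the loop must also leave at least baseMatchLength (3 bytes in flate) for its next iteration. The old code therefore always chopped long matches to 255-byte chunks; the new code emits a full 258 whenever more than 261 bytes remain. A standalone sketch of the rule:

package flatex

// chunkLengths mirrors the new clamping rule: emit a full 258 when more than
// 258+baseMatchLength bytes remain, otherwise 258-baseMatchLength, so the
// final chunk is never shorter than baseMatchLength.
func chunkLengths(xlength int32) []int32 {
	const baseMatchLength = 3 // smallest match flate can encode
	var chunks []int32
	for xlength > 0 {
		xl := xlength
		if xl > 258 {
			if xl > 258+baseMatchLength {
				xl = 258
			} else {
				xl = 258 - baseMatchLength
			}
		}
		xlength -= xl
		chunks = append(chunks, xl)
	}
	return chunks
}

// chunkLengths(260) -> [255, 5]; chunkLengths(600) -> [258, 258, 84],
// where the old rule would have produced [255, 255, 90] for 600.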
10  vendor/github.com/klauspost/compress/flate/level2.go  generated  vendored
@@ -134,7 +134,15 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
 				l++
 			}
 			if nextEmit < s {
-				emitLiteral(dst, src[nextEmit:s])
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
 			}
 
 			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
10  vendor/github.com/klauspost/compress/flate/level3.go  generated  vendored
@@ -143,7 +143,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
 				l++
 			}
 			if nextEmit < s {
-				emitLiteral(dst, src[nextEmit:s])
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
 			}
 
 			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
10  vendor/github.com/klauspost/compress/flate/level4.go  generated  vendored
@@ -135,7 +135,15 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
 				l++
 			}
 			if nextEmit < s {
-				emitLiteral(dst, src[nextEmit:s])
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
 			}
 			if debugDeflate {
 				if t >= s {
10  vendor/github.com/klauspost/compress/flate/level5.go  generated  vendored
@@ -210,7 +210,15 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
 				l++
 			}
 			if nextEmit < s {
-				emitLiteral(dst, src[nextEmit:s])
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
 			}
 			if debugDeflate {
 				if t >= s {
10  vendor/github.com/klauspost/compress/flate/level6.go  generated  vendored
@@ -243,7 +243,15 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
 				l++
 			}
 			if nextEmit < s {
-				emitLiteral(dst, src[nextEmit:s])
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
 			}
 			if false {
 				if t >= s {
10  vendor/github.com/klauspost/compress/flate/stateless.go  generated  vendored
@@ -249,7 +249,15 @@ func statelessEnc(dst *tokens, src []byte, startAt int16) {
 			l++
 		}
 		if nextEmit < s {
-			emitLiteral(dst, src[nextEmit:s])
+			if false {
+				emitLiteral(dst, src[nextEmit:s])
+			} else {
+				for _, v := range src[nextEmit:s] {
+					dst.tokens[dst.n] = token(v)
+					dst.litHist[v]++
+					dst.n++
+				}
+			}
 		}
 
 		// Save the match found
13  vendor/github.com/klauspost/compress/flate/token.go  generated  vendored
@@ -195,12 +195,11 @@ func (t *tokens) indexTokens(in []token) {
 
 // emitLiteral writes a literal chunk and returns the number of bytes written.
 func emitLiteral(dst *tokens, lit []byte) {
-	ol := int(dst.n)
-	for i, v := range lit {
-		dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
+	for _, v := range lit {
+		dst.tokens[dst.n] = token(v)
 		dst.litHist[v]++
+		dst.n++
 	}
-	dst.n += uint16(len(lit))
 }
 
 func (t *tokens) AddLiteral(lit byte) {
@@ -294,7 +293,11 @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
 		xl := xlength
 		if xl > 258 {
 			// We need to have at least baseMatchLength left over for next loop.
-			xl = 258 - baseMatchLength
+			if xl > 258+baseMatchLength {
+				xl = 258
+			} else {
+				xl = 258 - baseMatchLength
+			}
 		}
 		xlength -= xl
 		xl -= baseMatchLength
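Both emitLiteral bodies do the same work: append len(lit) literal tokens and update the byte histogram. The old version masked indices and deferred the dst.n update as a bounds-check workaround; the new version is the plain loop that the level1–level6 encoders now inline. A standalone sketch of that loop, with the field types assumed from the surrounding diff:

package flatex

// emitLiteralSketch appends literal tokens the way the new emitLiteral and
// the inlined encoder loops do; token(v) is just the literal byte value here.
func emitLiteralSketch(tokens []uint32, litHist *[256]uint16, n uint16, lit []byte) uint16 {
	for _, v := range lit {
		tokens[n] = uint32(v) // store the literal token
		litHist[v]++          // track byte frequency for Huffman coding
		n++
	}
	return n
}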
5  vendor/github.com/klauspost/compress/huff0/autogen.go  generated  vendored  Normal file
@@ -0,0 +1,5 @@
+package huff0
+
+//go:generate go run generate.go
+//go:generate asmfmt -w decompress_amd64.s
+//go:generate asmfmt -w decompress_8b_amd64.s
126  vendor/github.com/klauspost/compress/huff0/bitreader.go  generated  vendored
@@ -8,115 +8,10 @@ package huff0
 import (
 	"encoding/binary"
 	"errors"
+	"fmt"
 	"io"
 )
 
-// bitReader reads a bitstream in reverse.
-// The last set bit indicates the start of the stream and is used
-// for aligning the input.
-type bitReader struct {
-	in       []byte
-	off      uint // next byte to read is at in[off - 1]
-	value    uint64
-	bitsRead uint8
-}
-
-// init initializes and resets the bit reader.
-func (b *bitReader) init(in []byte) error {
-	if len(in) < 1 {
-		return errors.New("corrupt stream: too short")
-	}
-	b.in = in
-	b.off = uint(len(in))
-	// The highest bit of the last byte indicates where to start
-	v := in[len(in)-1]
-	if v == 0 {
-		return errors.New("corrupt stream, did not find end of stream")
-	}
-	b.bitsRead = 64
-	b.value = 0
-	if len(in) >= 8 {
-		b.fillFastStart()
-	} else {
-		b.fill()
-		b.fill()
-	}
-	b.bitsRead += 8 - uint8(highBit32(uint32(v)))
-	return nil
-}
-
-// peekBitsFast requires that at least one bit is requested every time.
-// There are no checks if the buffer is filled.
-func (b *bitReader) peekBitsFast(n uint8) uint16 {
-	const regMask = 64 - 1
-	v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
-	return v
-}
-
-// fillFast() will make sure at least 32 bits are available.
-// There must be at least 4 bytes available.
-func (b *bitReader) fillFast() {
-	if b.bitsRead < 32 {
-		return
-	}
-
-	// 2 bounds checks.
-	v := b.in[b.off-4 : b.off]
-	v = v[:4]
-	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-	b.value = (b.value << 32) | uint64(low)
-	b.bitsRead -= 32
-	b.off -= 4
-}
-
-func (b *bitReader) advance(n uint8) {
-	b.bitsRead += n
-}
-
-// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
-func (b *bitReader) fillFastStart() {
-	// Do single re-slice to avoid bounds checks.
-	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
-	b.bitsRead = 0
-	b.off -= 8
-}
-
-// fill() will make sure at least 32 bits are available.
-func (b *bitReader) fill() {
-	if b.bitsRead < 32 {
-		return
-	}
-	if b.off > 4 {
-		v := b.in[b.off-4:]
-		v = v[:4]
-		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-		b.value = (b.value << 32) | uint64(low)
-		b.bitsRead -= 32
-		b.off -= 4
-		return
-	}
-	for b.off > 0 {
-		b.value = (b.value << 8) | uint64(b.in[b.off-1])
-		b.bitsRead -= 8
-		b.off--
-	}
-}
-
-// finished returns true if all bits have been read from the bit stream.
-func (b *bitReader) finished() bool {
-	return b.off == 0 && b.bitsRead >= 64
-}
-
-// close the bitstream and returns an error if out-of-buffer reads occurred.
-func (b *bitReader) close() error {
-	// Release reference.
-	b.in = nil
-	if b.bitsRead > 64 {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-
 // bitReader reads a bitstream in reverse.
 // The last set bit indicates the start of the stream and is used
 // for aligning the input.
@@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool {
 	return b.off == 0 && b.bitsRead >= 64
 }
 
+func (b *bitReaderBytes) remaining() uint {
+	return b.off*8 + uint(64-b.bitsRead)
+}
+
 // close the bitstream and returns an error if out-of-buffer reads occurred.
 func (b *bitReaderBytes) close() error {
 	// Release reference.
 	b.in = nil
+	if b.remaining() > 0 {
+		return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
+	}
 	if b.bitsRead > 64 {
 		return io.ErrUnexpectedEOF
 	}
@@ -263,6 +165,11 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
 	return uint16(b.value >> ((64 - n) & 63))
 }
 
+// peekTopBits(n) is equivalent to peekBitsFast(64 - n)
+func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
+	return uint16(b.value >> n)
+}
+
 func (b *bitReaderShifted) advance(n uint8) {
 	b.bitsRead += n
 	b.value <<= n & 63
@@ -318,10 +225,17 @@ func (b *bitReaderShifted) finished() bool {
 	return b.off == 0 && b.bitsRead >= 64
 }
 
+func (b *bitReaderShifted) remaining() uint {
+	return b.off*8 + uint(64-b.bitsRead)
+}
+
 // close the bitstream and returns an error if out-of-buffer reads occurred.
 func (b *bitReaderShifted) close() error {
 	// Release reference.
 	b.in = nil
+	if b.remaining() > 0 {
+		return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
+	}
 	if b.bitsRead > 64 {
 		return io.ErrUnexpectedEOF
 	}
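The new remaining() helpers count unread bits as the unread input bytes plus whatever is still buffered in the 64-bit shift register; that is what lets close() flag corrupt streams that end with bits left over. As a standalone sketch:

package huff0x

// remainingBits mirrors the new remaining() method: off unread bytes in the
// input slice plus the unconsumed part of the 64-bit shift register.
func remainingBits(off uint, bitsRead uint8) uint {
	return off*8 + uint(64-bitsRead)
}

// Example: with 3 input bytes left and 40 of the 64 buffered bits consumed,
// remainingBits(3, 40) == 24 + 24 == 48 bits still available to the decoder.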
9  vendor/github.com/klauspost/compress/huff0/compress.go  generated  vendored
@@ -2,6 +2,7 @@ package huff0
 
 import (
 	"fmt"
+	"math"
 	"runtime"
 	"sync"
 )
@@ -289,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
 		if err != nil {
 			return nil, err
 		}
+		if len(s.Out)-idx > math.MaxUint16 {
+			// We cannot store the size in the jump table
+			return nil, ErrIncompressible
+		}
 		// Write compressed length as little endian before block.
 		if i < 3 {
 			// Last length is not written.
@@ -332,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
 			return nil, errs[i]
 		}
 		o := s.tmpOut[i]
+		if len(o) > math.MaxUint16 {
+			// We cannot store the size in the jump table
+			return nil, ErrIncompressible
+		}
 		// Write compressed length as little endian before block.
 		if i < 3 {
 			// Last length is not written.
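The two guards exist because the 4X container stores the first three compressed-stream lengths as little-endian uint16 values, so a stream longer than 65535 bytes cannot be described in the jump table; returning ErrIncompressible makes the caller store the block uncompressed instead. A sketch of the header constraint (errIncompressible is a stand-in for the package's exported error):

package huff0x

import (
	"encoding/binary"
	"errors"
	"math"
)

var errIncompressible = errors.New("input is not compressible enough")

// appendStreamLen writes one uint16 jump-table entry, failing exactly where
// the new guards in compress4X/compress4Xp do.
func appendStreamLen(dst []byte, streamLen int) ([]byte, error) {
	if streamLen > math.MaxUint16 {
		return nil, errIncompressible // size does not fit the jump table
	}
	var b [2]byte
	binary.LittleEndian.PutUint16(b[:], uint16(streamLen))
	return append(dst, b[:]...), nil
}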
222  vendor/github.com/klauspost/compress/huff0/decompress.go  generated  vendored
@@ -725,196 +725,6 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
 	return dst, br.close()
 }
 
-// Decompress4X will decompress a 4X encoded stream.
-// The length of the supplied input must match the end of a block exactly.
-// The *capacity* of the dst slice must match the destination size of
-// the uncompressed data exactly.
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
-	if len(d.dt.single) == 0 {
-		return nil, errors.New("no table loaded")
-	}
-	if len(src) < 6+(4*1) {
-		return nil, errors.New("input too small")
-	}
-	if use8BitTables && d.actualTableLog <= 8 {
-		return d.decompress4X8bit(dst, src)
-	}
-
-	var br [4]bitReaderShifted
-	start := 6
-	for i := 0; i < 3; i++ {
-		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
-		if start+length >= len(src) {
-			return nil, errors.New("truncated input (or invalid offset)")
-		}
-		err := br[i].init(src[start : start+length])
-		if err != nil {
-			return nil, err
-		}
-		start += length
-	}
-	err := br[3].init(src[start:])
-	if err != nil {
-		return nil, err
-	}
-
-	// destination, offset to match first output
-	dstSize := cap(dst)
-	dst = dst[:dstSize]
-	out := dst
-	dstEvery := (dstSize + 3) / 4
-
-	const tlSize = 1 << tableLogMax
-	const tlMask = tlSize - 1
-	single := d.dt.single[:tlSize]
-
-	// Use temp table to avoid bound checks/append penalty.
-	buf := d.buffer()
-	var off uint8
-	var decoded int
-
-	// Decode 2 values from each decoder/loop.
-	const bufoff = 256
-	for {
-		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
-			break
-		}
-
-		{
-			const stream = 0
-			const stream2 = 1
-			br[stream].fillFast()
-			br[stream2].fillFast()
-
-			val := br[stream].peekBitsFast(d.actualTableLog)
-			val2 := br[stream2].peekBitsFast(d.actualTableLog)
-			v := single[val&tlMask]
-			v2 := single[val2&tlMask]
-			br[stream].advance(uint8(v.entry))
-			br[stream2].advance(uint8(v2.entry))
-			buf[stream][off] = uint8(v.entry >> 8)
-			buf[stream2][off] = uint8(v2.entry >> 8)
-
-			val = br[stream].peekBitsFast(d.actualTableLog)
-			val2 = br[stream2].peekBitsFast(d.actualTableLog)
-			v = single[val&tlMask]
-			v2 = single[val2&tlMask]
-			br[stream].advance(uint8(v.entry))
-			br[stream2].advance(uint8(v2.entry))
-			buf[stream][off+1] = uint8(v.entry >> 8)
-			buf[stream2][off+1] = uint8(v2.entry >> 8)
-		}
-
-		{
-			const stream = 2
-			const stream2 = 3
-			br[stream].fillFast()
-			br[stream2].fillFast()
-
-			val := br[stream].peekBitsFast(d.actualTableLog)
-			val2 := br[stream2].peekBitsFast(d.actualTableLog)
-			v := single[val&tlMask]
-			v2 := single[val2&tlMask]
-			br[stream].advance(uint8(v.entry))
-			br[stream2].advance(uint8(v2.entry))
-			buf[stream][off] = uint8(v.entry >> 8)
-			buf[stream2][off] = uint8(v2.entry >> 8)
-
-			val = br[stream].peekBitsFast(d.actualTableLog)
-			val2 = br[stream2].peekBitsFast(d.actualTableLog)
-			v = single[val&tlMask]
-			v2 = single[val2&tlMask]
-			br[stream].advance(uint8(v.entry))
-			br[stream2].advance(uint8(v2.entry))
-			buf[stream][off+1] = uint8(v.entry >> 8)
-			buf[stream2][off+1] = uint8(v2.entry >> 8)
-		}
-
-		off += 2
-
-		if off == 0 {
-			if bufoff > dstEvery {
-				d.bufs.Put(buf)
-				return nil, errors.New("corruption detected: stream overrun 1")
-			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
-			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
-				d.bufs.Put(buf)
-				return nil, errors.New("corruption detected: stream overrun 2")
-			}
-		}
-	}
-	if off > 0 {
-		ioff := int(off)
-		if len(out) < dstEvery*3+ioff {
-			d.bufs.Put(buf)
-			return nil, errors.New("corruption detected: stream overrun 3")
-		}
-		copy(out, buf[0][:off])
-		copy(out[dstEvery:], buf[1][:off])
-		copy(out[dstEvery*2:], buf[2][:off])
-		copy(out[dstEvery*3:], buf[3][:off])
-		decoded += int(off) * 4
-		out = out[off:]
-	}
-
-	// Decode remaining.
-	for i := range br {
-		offset := dstEvery * i
-		br := &br[i]
-		bitsLeft := br.off*8 + uint(64-br.bitsRead)
-		for bitsLeft > 0 {
-			br.fill()
-			if false && br.bitsRead >= 32 {
-				if br.off >= 4 {
-					v := br.in[br.off-4:]
-					v = v[:4]
-					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-					br.value = (br.value << 32) | uint64(low)
-					br.bitsRead -= 32
-					br.off -= 4
-				} else {
-					for br.off > 0 {
-						br.value = (br.value << 8) | uint64(br.in[br.off-1])
-						br.bitsRead -= 8
-						br.off--
-					}
-				}
-			}
-			// end inline...
-			if offset >= len(out) {
-				d.bufs.Put(buf)
-				return nil, errors.New("corruption detected: stream overrun 4")
-			}
-
-			// Read value and increment offset.
-			val := br.peekBitsFast(d.actualTableLog)
-			v := single[val&tlMask].entry
-			nBits := uint8(v)
-			br.advance(nBits)
-			bitsLeft -= uint(nBits)
-			out[offset] = uint8(v >> 8)
-			offset++
-		}
-		decoded += offset - dstEvery*i
-		err = br.close()
-		if err != nil {
-			return nil, err
-		}
-	}
-	d.bufs.Put(buf)
-	if dstSize != decoded {
-		return nil, errors.New("corruption detected: short output block")
-	}
-	return dst, nil
-}
-
 // Decompress4X will decompress a 4X encoded stream.
 // The length of the supplied input must match the end of a block exactly.
 // The *capacity* of the dst slice must match the destination size of
@@ -1091,10 +901,16 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 	}
 
 	// Decode remaining.
+	// Decode remaining.
+	remainBytes := dstEvery - (decoded / 4)
 	for i := range br {
 		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
 		br := &br[i]
-		bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+		bitsLeft := br.remaining()
 		for bitsLeft > 0 {
 			if br.finished() {
 				d.bufs.Put(buf)
@@ -1117,7 +933,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 				}
 			}
 			// end inline...
-			if offset >= len(out) {
+			if offset >= endsAt {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 4")
 			}
@@ -1126,10 +942,14 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
 			v := single[uint8(br.value>>shift)].entry
 			nBits := uint8(v)
 			br.advance(nBits)
-			bitsLeft -= int(nBits)
+			bitsLeft -= uint(nBits)
 			out[offset] = uint8(v >> 8)
 			offset++
 		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
 		decoded += offset - dstEvery*i
 		err = br.close()
 		if err != nil {
@@ -1315,10 +1135,15 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 	}
 
 	// Decode remaining.
+	remainBytes := dstEvery - (decoded / 4)
 	for i := range br {
 		offset := dstEvery * i
+		endsAt := offset + remainBytes
+		if endsAt > len(out) {
+			endsAt = len(out)
+		}
 		br := &br[i]
-		bitsLeft := int(br.off*8) + int(64-br.bitsRead)
+		bitsLeft := br.remaining()
 		for bitsLeft > 0 {
 			if br.finished() {
 				d.bufs.Put(buf)
@@ -1341,7 +1166,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 				}
 			}
 			// end inline...
-			if offset >= len(out) {
+			if offset >= endsAt {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 4")
 			}
@@ -1350,10 +1175,15 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
 			v := single[br.peekByteFast()].entry
 			nBits := uint8(v)
 			br.advance(nBits)
-			bitsLeft -= int(nBits)
+			bitsLeft -= uint(nBits)
 			out[offset] = uint8(v >> 8)
 			offset++
 		}
+		if offset != endsAt {
+			d.bufs.Put(buf)
+			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
+		}
 
 		decoded += offset - dstEvery*i
 		err = br.close()
 		if err != nil {
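The endsAt changes tighten the tail loop of the 4X decoders: each of the four streams owns a dstEvery-sized quarter of the output, and a corrupt stream must neither write past its own quarter nor stop short of it. A sketch of the bounds involved:

package huff0x

// tailBounds computes the per-stream output window used by the fixed tail
// loops above: stream i may only write inside [offset, endsAt).
func tailBounds(dstSize, decoded, i int) (offset, endsAt int) {
	dstEvery := (dstSize + 3) / 4       // quarter owned by each stream
	remainBytes := dstEvery - decoded/4 // bytes each stream still owes
	offset = dstEvery * i
	endsAt = offset + remainBytes
	if endsAt > dstSize {
		endsAt = dstSize // the last quarter may be shorter
	}
	return offset, endsAt
}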
488  vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s  generated  vendored  Normal file
@@ -0,0 +1,488 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+#include "funcdata.h"
+#include "go_asm.h"
+
+#define bufoff 256 // see decompress.go, we're using [4][256]byte table
+
+// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
+//	peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
+TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
+#define off R8
+#define buffer DI
+#define table SI
+
+#define br_bits_read R9
+#define br_value R10
+#define br_offset R11
+#define peek_bits R12
+#define exhausted DX
+
+#define br0 R13
+#define br1 R14
+#define br2 R15
+#define br3 BP
+
+	MOVQ BP, 0(SP)
+
+	XORQ exhausted, exhausted // exhausted = false
+	XORQ off, off             // off = 0
+
+	MOVBQZX peekBits+32(FP), peek_bits
+	MOVQ    buf+40(FP), buffer
+	MOVQ    tbl+48(FP), table
+
+	MOVQ pbr0+0(FP), br0
+	MOVQ pbr1+8(FP), br1
+	MOVQ pbr2+16(FP), br2
+	MOVQ pbr3+24(FP), br3
+
+main_loop:
+
+	// const stream = 0
+	// br0.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
+	MOVQ    bitReaderShifted_value(br0), br_value
+	MOVQ    bitReaderShifted_off(br0), br_offset
+
+	// if b.bitsRead >= 32 {
+	CMPQ br_bits_read, $32
+	JB   skip_fill0
+
+	SUBQ $32, br_bits_read // b.bitsRead -= 32
+	SUBQ $4, br_offset     // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br0), AX
+	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+	ORQ  AX, br_value
+
+	// exhausted = exhausted || (br0.off < 4)
+	CMPQ  br_offset, $4
+	SETLT DL
+	ORB   DL, DH
+
+	// }
+skip_fill0:
+
+	// val0 := br0.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br0.advance(uint8(v0.entry))
+	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val1 := br0.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br0.advance(uint8(v1.entry))
+	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 0(buffer)(off*1)
+
+	// SECOND PART:
+	// val2 := br0.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br0.advance(uint8(v0.entry))
+	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val3 := br0.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br0.advance(uint8(v1.entry))
+	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off+2] = uint8(v2.entry >> 8)
+	// buf[stream][off+3] = uint8(v3.entry >> 8)
+	MOVW BX, 0+2(buffer)(off*1)
+
+	// update the bitreader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
+	MOVQ br_value, bitReaderShifted_value(br0)
+	MOVQ br_offset, bitReaderShifted_off(br0)
+
+	// const stream = 1
+	// br1.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
+	MOVQ    bitReaderShifted_value(br1), br_value
+	MOVQ    bitReaderShifted_off(br1), br_offset
+
+	// if b.bitsRead >= 32 {
+	CMPQ br_bits_read, $32
+	JB   skip_fill1
+
+	SUBQ $32, br_bits_read // b.bitsRead -= 32
+	SUBQ $4, br_offset     // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br1), AX
+	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+	ORQ  AX, br_value
+
+	// exhausted = exhausted || (br1.off < 4)
+	CMPQ  br_offset, $4
+	SETLT DL
+	ORB   DL, DH
+
+	// }
+skip_fill1:
+
+	// val0 := br1.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br1.advance(uint8(v0.entry))
+	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val1 := br1.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br1.advance(uint8(v1.entry))
+	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 256(buffer)(off*1)
+
+	// SECOND PART:
+	// val2 := br1.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br1.advance(uint8(v0.entry))
+	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val3 := br1.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br1.advance(uint8(v1.entry))
+	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off+2] = uint8(v2.entry >> 8)
+	// buf[stream][off+3] = uint8(v3.entry >> 8)
+	MOVW BX, 256+2(buffer)(off*1)
+
+	// update the bitreader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
+	MOVQ br_value, bitReaderShifted_value(br1)
+	MOVQ br_offset, bitReaderShifted_off(br1)
+
+	// const stream = 2
+	// br2.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
+	MOVQ    bitReaderShifted_value(br2), br_value
+	MOVQ    bitReaderShifted_off(br2), br_offset
+
+	// if b.bitsRead >= 32 {
+	CMPQ br_bits_read, $32
+	JB   skip_fill2
+
+	SUBQ $32, br_bits_read // b.bitsRead -= 32
+	SUBQ $4, br_offset     // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br2), AX
+	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+	ORQ  AX, br_value
+
+	// exhausted = exhausted || (br2.off < 4)
+	CMPQ  br_offset, $4
+	SETLT DL
+	ORB   DL, DH
+
+	// }
+skip_fill2:
+
+	// val0 := br2.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br2.advance(uint8(v0.entry))
+	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val1 := br2.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br2.advance(uint8(v1.entry))
+	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 512(buffer)(off*1)
+
+	// SECOND PART:
+	// val2 := br2.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br2.advance(uint8(v0.entry))
+	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val3 := br2.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br2.advance(uint8(v1.entry))
+	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off+2] = uint8(v2.entry >> 8)
+	// buf[stream][off+3] = uint8(v3.entry >> 8)
+	MOVW BX, 512+2(buffer)(off*1)
+
+	// update the bitreader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
+	MOVQ br_value, bitReaderShifted_value(br2)
+	MOVQ br_offset, bitReaderShifted_off(br2)
+
+	// const stream = 3
+	// br3.fillFast()
+	MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
+	MOVQ    bitReaderShifted_value(br3), br_value
+	MOVQ    bitReaderShifted_off(br3), br_offset
+
+	// if b.bitsRead >= 32 {
+	CMPQ br_bits_read, $32
+	JB   skip_fill3
+
+	SUBQ $32, br_bits_read // b.bitsRead -= 32
+	SUBQ $4, br_offset     // b.off -= 4
+
+	// v := b.in[b.off-4 : b.off]
+	// v = v[:4]
+	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	MOVQ bitReaderShifted_in(br3), AX
+	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
+
+	// b.value |= uint64(low) << (b.bitsRead & 63)
+	MOVQ br_bits_read, CX
+	SHLQ CL, AX
+	ORQ  AX, br_value
+
+	// exhausted = exhausted || (br3.off < 4)
+	CMPQ  br_offset, $4
+	SETLT DL
+	ORB   DL, DH
+
+	// }
+skip_fill3:
+
+	// val0 := br3.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v0 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br3.advance(uint8(v0.entry))
+	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val1 := br3.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v1 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br3.advance(uint8(v1.entry))
+	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off] = uint8(v0.entry >> 8)
+	// buf[stream][off+1] = uint8(v1.entry >> 8)
+	MOVW BX, 768(buffer)(off*1)
+
+	// SECOND PART:
+	// val2 := br3.peekTopBits(peekBits)
+	MOVQ br_value, AX
+	MOVQ peek_bits, CX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v2 := table[val0&mask]
+	MOVW 0(table)(AX*2), AX // AX - v0
+
+	// br3.advance(uint8(v0.entry))
+	MOVB    AH, BL // BL = uint8(v0.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CL, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// val3 := br3.peekTopBits(peekBits)
+	MOVQ peek_bits, CX
+	MOVQ br_value, AX
+	SHRQ CL, AX // AX = (value >> peek_bits) & mask
+
+	// v3 := table[val1&mask]
+	MOVW 0(table)(AX*2), AX // AX - v1
+
+	// br3.advance(uint8(v1.entry))
+	MOVB    AH, BH // BH = uint8(v1.entry >> 8)
+	MOVBQZX AL, CX
+	SHLQ    CX, br_value     // value <<= n
+	ADDQ    CX, br_bits_read // bits_read += n
+
+	// these two writes get coalesced
+	// buf[stream][off+2] = uint8(v2.entry >> 8)
+	// buf[stream][off+3] = uint8(v3.entry >> 8)
+	MOVW BX, 768+2(buffer)(off*1)
+
+	// update the bitreader structure
+	MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
+	MOVQ br_value, bitReaderShifted_value(br3)
+	MOVQ br_offset, bitReaderShifted_off(br3)
+
+	ADDQ $4, off // off += 4
+
+	TESTB DH, DH // any br[i].off < 4?
+	JNZ   end
+
+	CMPQ off, $bufoff
+	JL   main_loop
+
+end:
+	MOVQ 0(SP), BP
+
+	MOVB off, ret+56(FP)
+	RET
+
+#undef off
+#undef buffer
+#undef table
+
+#undef br_bits_read
+#undef br_value
+#undef br_offset
+#undef peek_bits
+#undef exhausted
+
+#undef br0
+#undef br1
+#undef br2
+#undef br3
197 vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in generated vendored Normal file
@@ -0,0 +1,197 @@
// +build !appengine
// +build gc
// +build !noasm

#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"

#define bufoff 256 // see decompress.go, we're using [4][256]byte table

// func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
//	peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI

#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX

#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP

	MOVQ BP, 0(SP)

	XORQ exhausted, exhausted // exhausted = false
	XORQ off, off             // off = 0

	MOVBQZX peekBits+32(FP), peek_bits
	MOVQ    buf+40(FP), buffer
	MOVQ    tbl+48(FP), table

	MOVQ pbr0+0(FP), br0
	MOVQ pbr1+8(FP), br1
	MOVQ pbr2+16(FP), br2
	MOVQ pbr3+24(FP), br3

main_loop:
{{ define "decode_2_values_x86" }}
	// const stream = {{ var "id" }}
	// br{{ var "id"}}.fillFast()
	MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
	MOVQ    bitReaderShifted_value(br{{ var "id" }}), br_value
	MOVQ    bitReaderShifted_off(br{{ var "id" }}), br_offset

	// if b.bitsRead >= 32 {
	CMPQ br_bits_read, $32
	JB   skip_fill{{ var "id" }}

	SUBQ $32, br_bits_read // b.bitsRead -= 32
	SUBQ $4, br_offset     // b.off -= 4

	// v := b.in[b.off-4 : b.off]
	// v = v[:4]
	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
	MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])

	// b.value |= uint64(low) << (b.bitsRead & 63)
	MOVQ br_bits_read, CX
	SHLQ CL, AX
	ORQ  AX, br_value

	// exhausted = exhausted || (br{{ var "id"}}.off < 4)
	CMPQ  br_offset, $4
	SETLT DL
	ORB   DL, DH
	// }
skip_fill{{ var "id" }}:

	// val0 := br{{ var "id"}}.peekTopBits(peekBits)
	MOVQ br_value, AX
	MOVQ peek_bits, CX
	SHRQ CL, AX // AX = (value >> peek_bits) & mask

	// v0 := table[val0&mask]
	MOVW 0(table)(AX*2), AX // AX - v0

	// br{{ var "id"}}.advance(uint8(v0.entry))
	MOVB    AH, BL            // BL = uint8(v0.entry >> 8)
	MOVBQZX AL, CX
	SHLQ    CL, br_value      // value <<= n
	ADDQ    CX, br_bits_read  // bits_read += n

	// val1 := br{{ var "id"}}.peekTopBits(peekBits)
	MOVQ peek_bits, CX
	MOVQ br_value, AX
	SHRQ CL, AX // AX = (value >> peek_bits) & mask

	// v1 := table[val1&mask]
	MOVW 0(table)(AX*2), AX // AX - v1

	// br{{ var "id"}}.advance(uint8(v1.entry))
	MOVB    AH, BH            // BH = uint8(v1.entry >> 8)
	MOVBQZX AL, CX
	SHLQ    CX, br_value      // value <<= n
	ADDQ    CX, br_bits_read  // bits_read += n

	// these two writes get coalesced
	// buf[stream][off] = uint8(v0.entry >> 8)
	// buf[stream][off+1] = uint8(v1.entry >> 8)
	MOVW BX, {{ var "bufofs" }}(buffer)(off*1)

	// SECOND PART:
	// val2 := br{{ var "id"}}.peekTopBits(peekBits)
	MOVQ br_value, AX
	MOVQ peek_bits, CX
	SHRQ CL, AX // AX = (value >> peek_bits) & mask

	// v2 := table[val0&mask]
	MOVW 0(table)(AX*2), AX // AX - v0

	// br{{ var "id"}}.advance(uint8(v0.entry))
	MOVB    AH, BL            // BL = uint8(v0.entry >> 8)
	MOVBQZX AL, CX
	SHLQ    CL, br_value      // value <<= n
	ADDQ    CX, br_bits_read  // bits_read += n

	// val3 := br{{ var "id"}}.peekTopBits(peekBits)
	MOVQ peek_bits, CX
	MOVQ br_value, AX
	SHRQ CL, AX // AX = (value >> peek_bits) & mask

	// v3 := table[val1&mask]
	MOVW 0(table)(AX*2), AX // AX - v1

	// br{{ var "id"}}.advance(uint8(v1.entry))
	MOVB    AH, BH            // BH = uint8(v1.entry >> 8)
	MOVBQZX AL, CX
	SHLQ    CX, br_value      // value <<= n
	ADDQ    CX, br_bits_read  // bits_read += n

	// these two writes get coalesced
	// buf[stream][off+2] = uint8(v2.entry >> 8)
	// buf[stream][off+3] = uint8(v3.entry >> 8)
	MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)

	// update the bit reader structure
	MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
	MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
	MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}

{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}

{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}

{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}

{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}

	ADDQ $4, off // off += 4

	TESTB DH, DH // any br[i].ofs < 4?
	JNZ   end

	CMPQ off, $bufoff
	JL   main_loop

end:
	MOVQ 0(SP), BP

	MOVB off, ret+56(FP)
	RET

#undef off
#undef buffer
#undef table

#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted

#undef br0
#undef br1
#undef br2
#undef br3
181 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go generated vendored Normal file
@@ -0,0 +1,181 @@
//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc

// This file contains the specialisation of Decoder.Decompress4X
// that uses an asm implementation of its main loop.
package huff0

import (
	"errors"
	"fmt"
)

// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
//go:noescape
func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8

// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
//go:noescape
func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
	peekBits uint8, buf *byte, tbl *dEntrySingle) uint8

// fallback8BitSize is the size where using Go version is faster.
const fallback8BitSize = 800

// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
	if len(d.dt.single) == 0 {
		return nil, errors.New("no table loaded")
	}
	if len(src) < 6+(4*1) {
		return nil, errors.New("input too small")
	}

	use8BitTables := d.actualTableLog <= 8
	if cap(dst) < fallback8BitSize && use8BitTables {
		return d.decompress4X8bit(dst, src)
	}
	var br [4]bitReaderShifted
	// Decode "jump table"
	start := 6
	for i := 0; i < 3; i++ {
		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
		if start+length >= len(src) {
			return nil, errors.New("truncated input (or invalid offset)")
		}
		err := br[i].init(src[start : start+length])
		if err != nil {
			return nil, err
		}
		start += length
	}
	err := br[3].init(src[start:])
	if err != nil {
		return nil, err
	}

	// destination, offset to match first output
	dstSize := cap(dst)
	dst = dst[:dstSize]
	out := dst
	dstEvery := (dstSize + 3) / 4

	const tlSize = 1 << tableLogMax
	const tlMask = tlSize - 1
	single := d.dt.single[:tlSize]

	// Use temp table to avoid bound checks/append penalty.
	buf := d.buffer()
	var off uint8
	var decoded int

	const debug = false

	// see: bitReaderShifted.peekBitsFast()
	peekBits := uint8((64 - d.actualTableLog) & 63)

	// Decode 2 values from each decoder/loop.
	const bufoff = 256
	for {
		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
			break
		}

		if use8BitTables {
			off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
		} else {
			off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
		}
		if debug {
			fmt.Print("DEBUG: ")
			fmt.Printf("off=%d,", off)
			for i := 0; i < 4; i++ {
				fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
					i, br[i].bitsRead, br[i].value, br[i].off)
			}
			fmt.Println("")
		}

		if off != 0 {
			break
		}

		if bufoff > dstEvery {
			d.bufs.Put(buf)
			return nil, errors.New("corruption detected: stream overrun 1")
		}
		copy(out, buf[0][:])
		copy(out[dstEvery:], buf[1][:])
		copy(out[dstEvery*2:], buf[2][:])
		copy(out[dstEvery*3:], buf[3][:])
		out = out[bufoff:]
		decoded += bufoff * 4
		// There must at least be 3 buffers left.
		if len(out) < dstEvery*3 {
			d.bufs.Put(buf)
			return nil, errors.New("corruption detected: stream overrun 2")
		}
	}
	if off > 0 {
		ioff := int(off)
		if len(out) < dstEvery*3+ioff {
			d.bufs.Put(buf)
			return nil, errors.New("corruption detected: stream overrun 3")
		}
		copy(out, buf[0][:off])
		copy(out[dstEvery:], buf[1][:off])
		copy(out[dstEvery*2:], buf[2][:off])
		copy(out[dstEvery*3:], buf[3][:off])
		decoded += int(off) * 4
		out = out[off:]
	}

	// Decode remaining.
	remainBytes := dstEvery - (decoded / 4)
	for i := range br {
		offset := dstEvery * i
		endsAt := offset + remainBytes
		if endsAt > len(out) {
			endsAt = len(out)
		}
		br := &br[i]
		bitsLeft := br.remaining()
		for bitsLeft > 0 {
			br.fill()
			if offset >= endsAt {
				d.bufs.Put(buf)
				return nil, errors.New("corruption detected: stream overrun 4")
			}

			// Read value and increment offset.
			val := br.peekBitsFast(d.actualTableLog)
			v := single[val&tlMask].entry
			nBits := uint8(v)
			br.advance(nBits)
			bitsLeft -= uint(nBits)
			out[offset] = uint8(v >> 8)
			offset++
		}
		if offset != endsAt {
			d.bufs.Put(buf)
			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
		}
		decoded += offset - dstEvery*i
		err = br.close()
		if err != nil {
			return nil, err
		}
	}
	d.bufs.Put(buf)
	if dstSize != decoded {
		return nil, errors.New("corruption detected: short output block")
	}
	return dst, nil
}
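The doc comment above is strict about `dst`: its *capacity*, not its length, must equal the uncompressed size. As a minimal sketch (not part of this commit) of how this entry point is typically reached through huff0's public API, assuming the block was produced by `Compress4X` and the uncompressed size `dstSize` is known out of band:

```Go
package main

import "github.com/klauspost/compress/huff0"

// decode4X reads the Huffman table from the front of the compressed
// data, then decodes the four interleaved streams into a dst slice
// with exactly dstSize capacity.
func decode4X(compressed []byte, dstSize int) ([]byte, error) {
	s, data, err := huff0.ReadTable(compressed, nil)
	if err != nil {
		return nil, err
	}
	// cap(dst) must match the uncompressed size exactly.
	dst := make([]byte, 0, dstSize)
	return s.Decoder().Decompress4X(dst, data)
}
```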
506 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s generated vendored Normal file
@@ -0,0 +1,506 @@
// +build !appengine
// +build gc
// +build !noasm

#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"

#ifdef GOAMD64_v4
#ifndef GOAMD64_v3
#define GOAMD64_v3
#endif
#endif

#define bufoff 256 // see decompress.go, we're using [4][256]byte table

// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
//	peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI

#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX

#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP

	MOVQ BP, 0(SP)

	XORQ exhausted, exhausted // exhausted = false
	XORQ off, off             // off = 0

	MOVBQZX peekBits+32(FP), peek_bits
	MOVQ    buf+40(FP), buffer
	MOVQ    tbl+48(FP), table

	MOVQ pbr0+0(FP), br0
	MOVQ pbr1+8(FP), br1
	MOVQ pbr2+16(FP), br2
	MOVQ pbr3+24(FP), br3

main_loop:

	// const stream = 0
	// br0.fillFast()
	MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
	MOVQ    bitReaderShifted_value(br0), br_value
	MOVQ    bitReaderShifted_off(br0), br_offset

	// We must have at least 2 * max tablelog left
	CMPQ br_bits_read, $64-22
	JBE  skip_fill0

	SUBQ $32, br_bits_read // b.bitsRead -= 32
	SUBQ $4, br_offset     // b.off -= 4

	// v := b.in[b.off-4 : b.off]
	// v = v[:4]
	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
	MOVQ bitReaderShifted_in(br0), AX

	// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
	SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
	MOVQ br_bits_read, CX
	SHLQ CL, AX
#endif

	ORQ AX, br_value

	// exhausted = exhausted || (br0.off < 4)
	CMPQ  br_offset, $4
	SETLT DL
	ORB   DL, DH

	// }
skip_fill0:

	// val0 := br0.peekTopBits(peekBits)
#ifdef GOAMD64_v3
	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
	MOVQ br_value, AX
	MOVQ peek_bits, CX
	SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif

	// v0 := table[val0&mask]
	MOVW 0(table)(AX*2), AX // AX - v0

	// br0.advance(uint8(v0.entry))
	MOVB AH, BL // BL = uint8(v0.entry >> 8)

#ifdef GOAMD64_v3
	MOVBQZX AL, CX
	SHLXQ   AX, br_value, br_value // value <<= n
#else
	MOVBQZX AL, CX
	SHLQ    CL, br_value // value <<= n
#endif

	ADDQ CX, br_bits_read // bits_read += n

#ifdef GOAMD64_v3
	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
	// val1 := br0.peekTopBits(peekBits)
	MOVQ peek_bits, CX
	MOVQ br_value, AX
	SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif

	// v1 := table[val1&mask]
	MOVW 0(table)(AX*2), AX // AX - v1

	// br0.advance(uint8(v1.entry))
	MOVB AH, BH // BH = uint8(v1.entry >> 8)

#ifdef GOAMD64_v3
	MOVBQZX AL, CX
	SHLXQ   AX, br_value, br_value // value <<= n
#else
	MOVBQZX AL, CX
	SHLQ    CL, br_value // value <<= n
#endif

	ADDQ CX, br_bits_read // bits_read += n

	// these two writes get coalesced
	// buf[stream][off] = uint8(v0.entry >> 8)
	// buf[stream][off+1] = uint8(v1.entry >> 8)
	MOVW BX, 0(buffer)(off*1)

	// update the bit reader structure
	MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
	MOVQ br_value, bitReaderShifted_value(br0)
	MOVQ br_offset, bitReaderShifted_off(br0)

	// const stream = 1
	// br1.fillFast()
	MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
	MOVQ    bitReaderShifted_value(br1), br_value
	MOVQ    bitReaderShifted_off(br1), br_offset

	// We must have at least 2 * max tablelog left
	CMPQ br_bits_read, $64-22
	JBE  skip_fill1

	SUBQ $32, br_bits_read // b.bitsRead -= 32
	SUBQ $4, br_offset     // b.off -= 4

	// v := b.in[b.off-4 : b.off]
	// v = v[:4]
	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
	MOVQ bitReaderShifted_in(br1), AX

	// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
	SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
	MOVQ br_bits_read, CX
	SHLQ CL, AX
#endif

	ORQ AX, br_value

	// exhausted = exhausted || (br1.off < 4)
	CMPQ  br_offset, $4
	SETLT DL
	ORB   DL, DH

	// }
skip_fill1:

	// val0 := br1.peekTopBits(peekBits)
#ifdef GOAMD64_v3
	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
	MOVQ br_value, AX
	MOVQ peek_bits, CX
	SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif

	// v0 := table[val0&mask]
	MOVW 0(table)(AX*2), AX // AX - v0

	// br1.advance(uint8(v0.entry))
	MOVB AH, BL // BL = uint8(v0.entry >> 8)

#ifdef GOAMD64_v3
	MOVBQZX AL, CX
	SHLXQ   AX, br_value, br_value // value <<= n
#else
	MOVBQZX AL, CX
	SHLQ    CL, br_value // value <<= n
#endif

	ADDQ CX, br_bits_read // bits_read += n

#ifdef GOAMD64_v3
	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
	// val1 := br1.peekTopBits(peekBits)
	MOVQ peek_bits, CX
	MOVQ br_value, AX
	SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif

	// v1 := table[val1&mask]
	MOVW 0(table)(AX*2), AX // AX - v1

	// br1.advance(uint8(v1.entry))
	MOVB AH, BH // BH = uint8(v1.entry >> 8)

#ifdef GOAMD64_v3
	MOVBQZX AL, CX
	SHLXQ   AX, br_value, br_value // value <<= n
#else
	MOVBQZX AL, CX
	SHLQ    CL, br_value // value <<= n
#endif

	ADDQ CX, br_bits_read // bits_read += n

	// these two writes get coalesced
	// buf[stream][off] = uint8(v0.entry >> 8)
	// buf[stream][off+1] = uint8(v1.entry >> 8)
	MOVW BX, 256(buffer)(off*1)

	// update the bit reader structure
	MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
	MOVQ br_value, bitReaderShifted_value(br1)
	MOVQ br_offset, bitReaderShifted_off(br1)

	// const stream = 2
	// br2.fillFast()
	MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
	MOVQ    bitReaderShifted_value(br2), br_value
	MOVQ    bitReaderShifted_off(br2), br_offset

	// We must have at least 2 * max tablelog left
	CMPQ br_bits_read, $64-22
	JBE  skip_fill2

	SUBQ $32, br_bits_read // b.bitsRead -= 32
	SUBQ $4, br_offset     // b.off -= 4

	// v := b.in[b.off-4 : b.off]
	// v = v[:4]
	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
	MOVQ bitReaderShifted_in(br2), AX

	// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
	SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
	MOVQ br_bits_read, CX
	SHLQ CL, AX
#endif

	ORQ AX, br_value

	// exhausted = exhausted || (br2.off < 4)
	CMPQ  br_offset, $4
	SETLT DL
	ORB   DL, DH

	// }
skip_fill2:

	// val0 := br2.peekTopBits(peekBits)
#ifdef GOAMD64_v3
	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
	MOVQ br_value, AX
	MOVQ peek_bits, CX
	SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif

	// v0 := table[val0&mask]
	MOVW 0(table)(AX*2), AX // AX - v0

	// br2.advance(uint8(v0.entry))
	MOVB AH, BL // BL = uint8(v0.entry >> 8)

#ifdef GOAMD64_v3
	MOVBQZX AL, CX
	SHLXQ   AX, br_value, br_value // value <<= n
#else
	MOVBQZX AL, CX
	SHLQ    CL, br_value // value <<= n
#endif

	ADDQ CX, br_bits_read // bits_read += n

#ifdef GOAMD64_v3
	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
	// val1 := br2.peekTopBits(peekBits)
	MOVQ peek_bits, CX
	MOVQ br_value, AX
	SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif

	// v1 := table[val1&mask]
	MOVW 0(table)(AX*2), AX // AX - v1

	// br2.advance(uint8(v1.entry))
	MOVB AH, BH // BH = uint8(v1.entry >> 8)

#ifdef GOAMD64_v3
	MOVBQZX AL, CX
	SHLXQ   AX, br_value, br_value // value <<= n
#else
	MOVBQZX AL, CX
	SHLQ    CL, br_value // value <<= n
#endif

	ADDQ CX, br_bits_read // bits_read += n

	// these two writes get coalesced
	// buf[stream][off] = uint8(v0.entry >> 8)
	// buf[stream][off+1] = uint8(v1.entry >> 8)
	MOVW BX, 512(buffer)(off*1)

	// update the bit reader structure
	MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
	MOVQ br_value, bitReaderShifted_value(br2)
	MOVQ br_offset, bitReaderShifted_off(br2)

	// const stream = 3
	// br3.fillFast()
	MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
	MOVQ    bitReaderShifted_value(br3), br_value
	MOVQ    bitReaderShifted_off(br3), br_offset

	// We must have at least 2 * max tablelog left
	CMPQ br_bits_read, $64-22
	JBE  skip_fill3

	SUBQ $32, br_bits_read // b.bitsRead -= 32
	SUBQ $4, br_offset     // b.off -= 4

	// v := b.in[b.off-4 : b.off]
	// v = v[:4]
	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
	MOVQ bitReaderShifted_in(br3), AX

	// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
	SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
	MOVQ br_bits_read, CX
	SHLQ CL, AX
#endif

	ORQ AX, br_value

	// exhausted = exhausted || (br3.off < 4)
	CMPQ  br_offset, $4
	SETLT DL
	ORB   DL, DH

	// }
skip_fill3:

	// val0 := br3.peekTopBits(peekBits)
#ifdef GOAMD64_v3
	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
	MOVQ br_value, AX
	MOVQ peek_bits, CX
	SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif

	// v0 := table[val0&mask]
	MOVW 0(table)(AX*2), AX // AX - v0

	// br3.advance(uint8(v0.entry))
	MOVB AH, BL // BL = uint8(v0.entry >> 8)

#ifdef GOAMD64_v3
	MOVBQZX AL, CX
	SHLXQ   AX, br_value, br_value // value <<= n
#else
	MOVBQZX AL, CX
	SHLQ    CL, br_value // value <<= n
#endif

	ADDQ CX, br_bits_read // bits_read += n

#ifdef GOAMD64_v3
	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
	// val1 := br3.peekTopBits(peekBits)
	MOVQ peek_bits, CX
	MOVQ br_value, AX
	SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif

	// v1 := table[val1&mask]
	MOVW 0(table)(AX*2), AX // AX - v1

	// br3.advance(uint8(v1.entry))
	MOVB AH, BH // BH = uint8(v1.entry >> 8)

#ifdef GOAMD64_v3
	MOVBQZX AL, CX
	SHLXQ   AX, br_value, br_value // value <<= n
#else
	MOVBQZX AL, CX
	SHLQ    CL, br_value // value <<= n
#endif

	ADDQ CX, br_bits_read // bits_read += n

	// these two writes get coalesced
	// buf[stream][off] = uint8(v0.entry >> 8)
	// buf[stream][off+1] = uint8(v1.entry >> 8)
	MOVW BX, 768(buffer)(off*1)

	// update the bit reader structure
	MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
	MOVQ br_value, bitReaderShifted_value(br3)
	MOVQ br_offset, bitReaderShifted_off(br3)

	ADDQ $2, off // off += 2

	TESTB DH, DH // any br[i].ofs < 4?
	JNZ   end

	CMPQ off, $bufoff
	JL   main_loop

end:
	MOVQ 0(SP), BP

	MOVB off, ret+56(FP)
	RET

#undef off
#undef buffer
#undef table

#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted

#undef br0
#undef br1
#undef br2
#undef br3
195 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in generated vendored Normal file
@@ -0,0 +1,195 @@
// +build !appengine
// +build gc
// +build !noasm

#include "textflag.h"
#include "funcdata.h"
#include "go_asm.h"

#ifdef GOAMD64_v4
#ifndef GOAMD64_v3
#define GOAMD64_v3
#endif
#endif

#define bufoff 256 // see decompress.go, we're using [4][256]byte table

// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
//	peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
#define off R8
#define buffer DI
#define table SI

#define br_bits_read R9
#define br_value R10
#define br_offset R11
#define peek_bits R12
#define exhausted DX

#define br0 R13
#define br1 R14
#define br2 R15
#define br3 BP

	MOVQ BP, 0(SP)

	XORQ exhausted, exhausted // exhausted = false
	XORQ off, off             // off = 0

	MOVBQZX peekBits+32(FP), peek_bits
	MOVQ    buf+40(FP), buffer
	MOVQ    tbl+48(FP), table

	MOVQ pbr0+0(FP), br0
	MOVQ pbr1+8(FP), br1
	MOVQ pbr2+16(FP), br2
	MOVQ pbr3+24(FP), br3

main_loop:
{{ define "decode_2_values_x86" }}
	// const stream = {{ var "id" }}
	// br{{ var "id"}}.fillFast()
	MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
	MOVQ    bitReaderShifted_value(br{{ var "id" }}), br_value
	MOVQ    bitReaderShifted_off(br{{ var "id" }}), br_offset

	// We must have at least 2 * max tablelog left
	CMPQ br_bits_read, $64-22
	JBE  skip_fill{{ var "id" }}

	SUBQ $32, br_bits_read // b.bitsRead -= 32
	SUBQ $4, br_offset     // b.off -= 4

	// v := b.in[b.off-4 : b.off]
	// v = v[:4]
	// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
	MOVQ bitReaderShifted_in(br{{ var "id" }}), AX

	// b.value |= uint64(low) << (b.bitsRead & 63)
#ifdef GOAMD64_v3
	SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
#else
	MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
	MOVQ br_bits_read, CX
	SHLQ CL, AX
#endif

	ORQ AX, br_value

	// exhausted = exhausted || (br{{ var "id"}}.off < 4)
	CMPQ  br_offset, $4
	SETLT DL
	ORB   DL, DH
	// }
skip_fill{{ var "id" }}:

	// val0 := br{{ var "id"}}.peekTopBits(peekBits)
#ifdef GOAMD64_v3
	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
	MOVQ br_value, AX
	MOVQ peek_bits, CX
	SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif

	// v0 := table[val0&mask]
	MOVW 0(table)(AX*2), AX // AX - v0

	// br{{ var "id"}}.advance(uint8(v0.entry))
	MOVB AH, BL // BL = uint8(v0.entry >> 8)

#ifdef GOAMD64_v3
	MOVBQZX AL, CX
	SHLXQ   AX, br_value, br_value // value <<= n
#else
	MOVBQZX AL, CX
	SHLQ    CL, br_value // value <<= n
#endif

	ADDQ CX, br_bits_read // bits_read += n

#ifdef GOAMD64_v3
	SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
#else
	// val1 := br{{ var "id"}}.peekTopBits(peekBits)
	MOVQ peek_bits, CX
	MOVQ br_value, AX
	SHRQ CL, AX // AX = (value >> peek_bits) & mask
#endif

	// v1 := table[val1&mask]
	MOVW 0(table)(AX*2), AX // AX - v1

	// br{{ var "id"}}.advance(uint8(v1.entry))
	MOVB AH, BH // BH = uint8(v1.entry >> 8)

#ifdef GOAMD64_v3
	MOVBQZX AL, CX
	SHLXQ   AX, br_value, br_value // value <<= n
#else
	MOVBQZX AL, CX
	SHLQ    CL, br_value // value <<= n
#endif

	ADDQ CX, br_bits_read // bits_read += n

	// these two writes get coalesced
	// buf[stream][off] = uint8(v0.entry >> 8)
	// buf[stream][off+1] = uint8(v1.entry >> 8)
	MOVW BX, {{ var "bufofs" }}(buffer)(off*1)

	// update the bit reader structure
	MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
	MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
	MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
{{ end }}

{{ set "id" "0" }}
{{ set "ofs" "0" }}
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
{{ template "decode_2_values_x86" . }}

{{ set "id" "1" }}
{{ set "ofs" "8" }}
{{ set "bufofs" "256" }}
{{ template "decode_2_values_x86" . }}

{{ set "id" "2" }}
{{ set "ofs" "16" }}
{{ set "bufofs" "512" }}
{{ template "decode_2_values_x86" . }}

{{ set "id" "3" }}
{{ set "ofs" "24" }}
{{ set "bufofs" "768" }}
{{ template "decode_2_values_x86" . }}

	ADDQ $2, off // off += 2

	TESTB DH, DH // any br[i].ofs < 4?
	JNZ   end

	CMPQ off, $bufoff
	JL   main_loop

end:
	MOVQ 0(SP), BP

	MOVB off, ret+56(FP)
	RET

#undef off
#undef buffer
#undef table

#undef br_bits_read
#undef br_value
#undef br_offset
#undef peek_bits
#undef exhausted

#undef br0
#undef br1
#undef br2
#undef br3
193 vendor/github.com/klauspost/compress/huff0/decompress_generic.go generated vendored Normal file
@@ -0,0 +1,193 @@
//go:build !amd64 || appengine || !gc || noasm
// +build !amd64 appengine !gc noasm

// This file contains a generic implementation of Decoder.Decompress4X.
package huff0

import (
	"errors"
	"fmt"
)

// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
	if len(d.dt.single) == 0 {
		return nil, errors.New("no table loaded")
	}
	if len(src) < 6+(4*1) {
		return nil, errors.New("input too small")
	}
	if use8BitTables && d.actualTableLog <= 8 {
		return d.decompress4X8bit(dst, src)
	}

	var br [4]bitReaderShifted
	// Decode "jump table"
	start := 6
	for i := 0; i < 3; i++ {
		length := int(src[i*2]) | (int(src[i*2+1]) << 8)
		if start+length >= len(src) {
			return nil, errors.New("truncated input (or invalid offset)")
		}
		err := br[i].init(src[start : start+length])
		if err != nil {
			return nil, err
		}
		start += length
	}
	err := br[3].init(src[start:])
	if err != nil {
		return nil, err
	}

	// destination, offset to match first output
	dstSize := cap(dst)
	dst = dst[:dstSize]
	out := dst
	dstEvery := (dstSize + 3) / 4

	const tlSize = 1 << tableLogMax
	const tlMask = tlSize - 1
	single := d.dt.single[:tlSize]

	// Use temp table to avoid bound checks/append penalty.
	buf := d.buffer()
	var off uint8
	var decoded int

	// Decode 2 values from each decoder/loop.
	const bufoff = 256
	for {
		if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
			break
		}

		{
			const stream = 0
			const stream2 = 1
			br[stream].fillFast()
			br[stream2].fillFast()

			val := br[stream].peekBitsFast(d.actualTableLog)
			val2 := br[stream2].peekBitsFast(d.actualTableLog)
			v := single[val&tlMask]
			v2 := single[val2&tlMask]
			br[stream].advance(uint8(v.entry))
			br[stream2].advance(uint8(v2.entry))
			buf[stream][off] = uint8(v.entry >> 8)
			buf[stream2][off] = uint8(v2.entry >> 8)

			val = br[stream].peekBitsFast(d.actualTableLog)
			val2 = br[stream2].peekBitsFast(d.actualTableLog)
			v = single[val&tlMask]
			v2 = single[val2&tlMask]
			br[stream].advance(uint8(v.entry))
			br[stream2].advance(uint8(v2.entry))
			buf[stream][off+1] = uint8(v.entry >> 8)
			buf[stream2][off+1] = uint8(v2.entry >> 8)
		}

		{
			const stream = 2
			const stream2 = 3
			br[stream].fillFast()
			br[stream2].fillFast()

			val := br[stream].peekBitsFast(d.actualTableLog)
			val2 := br[stream2].peekBitsFast(d.actualTableLog)
			v := single[val&tlMask]
			v2 := single[val2&tlMask]
			br[stream].advance(uint8(v.entry))
			br[stream2].advance(uint8(v2.entry))
			buf[stream][off] = uint8(v.entry >> 8)
			buf[stream2][off] = uint8(v2.entry >> 8)

			val = br[stream].peekBitsFast(d.actualTableLog)
			val2 = br[stream2].peekBitsFast(d.actualTableLog)
			v = single[val&tlMask]
			v2 = single[val2&tlMask]
			br[stream].advance(uint8(v.entry))
			br[stream2].advance(uint8(v2.entry))
			buf[stream][off+1] = uint8(v.entry >> 8)
			buf[stream2][off+1] = uint8(v2.entry >> 8)
		}

		off += 2

		if off == 0 {
			if bufoff > dstEvery {
				d.bufs.Put(buf)
				return nil, errors.New("corruption detected: stream overrun 1")
			}
			copy(out, buf[0][:])
			copy(out[dstEvery:], buf[1][:])
			copy(out[dstEvery*2:], buf[2][:])
			copy(out[dstEvery*3:], buf[3][:])
			out = out[bufoff:]
			decoded += bufoff * 4
			// There must at least be 3 buffers left.
			if len(out) < dstEvery*3 {
				d.bufs.Put(buf)
				return nil, errors.New("corruption detected: stream overrun 2")
			}
		}
	}
	if off > 0 {
		ioff := int(off)
		if len(out) < dstEvery*3+ioff {
			d.bufs.Put(buf)
			return nil, errors.New("corruption detected: stream overrun 3")
		}
		copy(out, buf[0][:off])
		copy(out[dstEvery:], buf[1][:off])
		copy(out[dstEvery*2:], buf[2][:off])
		copy(out[dstEvery*3:], buf[3][:off])
		decoded += int(off) * 4
		out = out[off:]
	}

	// Decode remaining.
	remainBytes := dstEvery - (decoded / 4)
	for i := range br {
		offset := dstEvery * i
		endsAt := offset + remainBytes
		if endsAt > len(out) {
			endsAt = len(out)
		}
		br := &br[i]
		bitsLeft := br.remaining()
		for bitsLeft > 0 {
			br.fill()
			if offset >= endsAt {
				d.bufs.Put(buf)
				return nil, errors.New("corruption detected: stream overrun 4")
			}

			// Read value and increment offset.
			val := br.peekBitsFast(d.actualTableLog)
			v := single[val&tlMask].entry
			nBits := uint8(v)
			br.advance(nBits)
			bitsLeft -= uint(nBits)
			out[offset] = uint8(v >> 8)
			offset++
		}
		if offset != endsAt {
			d.bufs.Put(buf)
			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
		}
		decoded += offset - dstEvery*i
		err = br.close()
		if err != nil {
			return nil, err
		}
	}
	d.bufs.Put(buf)
	if dstSize != decoded {
		return nil, errors.New("corruption detected: short output block")
	}
	return dst, nil
}
112 vendor/github.com/klauspost/compress/zstd/README.md generated vendored
@@ -78,6 +78,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is
 in the future. So if you want to limit concurrency for future updates, specify the concurrency
 you would like.
 
+If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`
+which will compress input as each block is completed, blocking on writes until each has completed.
+
 You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined
 compression settings can be specified.
 
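For illustration (not part of the diff), a minimal sketch of the synchronous mode the added lines describe, assuming the public `zstd` API of this vendored version:

```Go
package main

import (
	"io"

	"github.com/klauspost/compress/zstd"
)

// CompressSync compresses src to dst without async goroutines:
// WithEncoderConcurrency(1) makes each block compress on the
// calling goroutine, blocking writes until it completes.
func CompressSync(dst io.Writer, src io.Reader) error {
	enc, err := zstd.NewWriter(dst, zstd.WithEncoderConcurrency(1))
	if err != nil {
		return err
	}
	if _, err := io.Copy(enc, src); err != nil {
		enc.Close()
		return err
	}
	return enc.Close()
}
```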
@@ -104,7 +107,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h
 For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
 
 `EncodeAll` will encode all input in src and append it to dst.
-This function can be called concurrently, but each call will only run on a single goroutine.
+This function can be called concurrently.
+Each call will only run on the same goroutine as the caller.
 
 Encoded blocks can be concatenated and the result will be the combined input stream.
 Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
|
||||||
|
|
||||||
This package:
|
This package:
|
||||||
file out level insize outsize millis mb/s
|
file out level insize outsize millis mb/s
|
||||||
silesia.tar zskp 1 211947520 73101992 643 313.87
|
silesia.tar zskp 1 211947520 73821326 634 318.47
|
||||||
silesia.tar zskp 2 211947520 67504318 969 208.38
|
silesia.tar zskp 2 211947520 67655404 1508 133.96
|
||||||
silesia.tar zskp 3 211947520 64595893 2007 100.68
|
silesia.tar zskp 3 211947520 64746933 3000 67.37
|
||||||
silesia.tar zskp 4 211947520 60995370 8825 22.90
|
silesia.tar zskp 4 211947520 60073508 16926 11.94
|
||||||
|
|
||||||
cgo zstd:
|
cgo zstd:
|
||||||
silesia.tar zstd 1 211947520 73605392 543 371.56
|
silesia.tar zstd 1 211947520 73605392 543 371.56
|
||||||
|
@ -161,94 +165,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66
|
||||||
silesia.tar zstd 9 211947520 60212393 5063 39.92
|
silesia.tar zstd 9 211947520 60212393 5063 39.92
|
||||||
|
|
||||||
gzip, stdlib/this package:
|
gzip, stdlib/this package:
|
||||||
silesia.tar gzstd 1 211947520 80007735 1654 122.21
|
silesia.tar gzstd 1 211947520 80007735 1498 134.87
|
||||||
silesia.tar gzkp 1 211947520 80136201 1152 175.45
|
silesia.tar gzkp 1 211947520 80088272 1009 200.31
|
||||||
|
|
||||||
GOB stream of binary data. Highly compressible.
|
GOB stream of binary data. Highly compressible.
|
||||||
https://files.klauspost.com/compress/gob-stream.7z
|
https://files.klauspost.com/compress/gob-stream.7z
|
||||||
|
|
||||||
file out level insize outsize millis mb/s
|
file out level insize outsize millis mb/s
|
||||||
gob-stream zskp 1 1911399616 235022249 3088 590.30
|
gob-stream zskp 1 1911399616 233948096 3230 564.34
|
||||||
gob-stream zskp 2 1911399616 205669791 3786 481.34
|
gob-stream zskp 2 1911399616 203997694 4997 364.73
|
||||||
gob-stream zskp 3 1911399616 175034659 9636 189.17
|
gob-stream zskp 3 1911399616 173526523 13435 135.68
|
||||||
gob-stream zskp 4 1911399616 165609838 50369 36.19
|
gob-stream zskp 4 1911399616 162195235 47559 38.33
|
||||||
|
|
||||||
gob-stream zstd 1 1911399616 249810424 2637 691.26
|
gob-stream zstd 1 1911399616 249810424 2637 691.26
|
||||||
gob-stream zstd 3 1911399616 208192146 3490 522.31
|
gob-stream zstd 3 1911399616 208192146 3490 522.31
|
||||||
gob-stream zstd 6 1911399616 193632038 6687 272.56
|
gob-stream zstd 6 1911399616 193632038 6687 272.56
|
||||||
gob-stream zstd 9 1911399616 177620386 16175 112.70
|
gob-stream zstd 9 1911399616 177620386 16175 112.70
|
||||||
|
|
||||||
gob-stream gzstd 1 1911399616 357382641 10251 177.82
|
gob-stream gzstd 1 1911399616 357382013 9046 201.49
|
||||||
gob-stream gzkp 1 1911399616 359753026 5438 335.20
|
gob-stream gzkp 1 1911399616 359136669 4885 373.08
|
||||||
|
|
||||||
The test data for the Large Text Compression Benchmark is the first
|
The test data for the Large Text Compression Benchmark is the first
|
||||||
10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
|
10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
|
||||||
http://mattmahoney.net/dc/textdata.html
|
http://mattmahoney.net/dc/textdata.html
|
||||||
|
|
||||||
file out level insize outsize millis mb/s
|
file out level insize outsize millis mb/s
|
||||||
enwik9 zskp 1 1000000000 343848582 3609 264.18
|
enwik9 zskp 1 1000000000 343833605 3687 258.64
|
||||||
enwik9 zskp 2 1000000000 317276632 5746 165.97
|
enwik9 zskp 2 1000000000 317001237 7672 124.29
|
||||||
enwik9 zskp 3 1000000000 292243069 12162 78.41
|
enwik9 zskp 3 1000000000 291915823 15923 59.89
|
||||||
enwik9 zskp 4 1000000000 262183768 82837 11.51
|
enwik9 zskp 4 1000000000 261710291 77697 12.27
|
||||||
|
|
||||||
enwik9 zstd 1 1000000000 358072021 3110 306.65
|
enwik9 zstd 1 1000000000 358072021 3110 306.65
|
||||||
enwik9 zstd 3 1000000000 313734672 4784 199.35
|
enwik9 zstd 3 1000000000 313734672 4784 199.35
|
||||||
enwik9 zstd 6 1000000000 295138875 10290 92.68
|
enwik9 zstd 6 1000000000 295138875 10290 92.68
|
||||||
enwik9 zstd 9 1000000000 278348700 28549 33.40
|
enwik9 zstd 9 1000000000 278348700 28549 33.40
|
||||||
|
|
||||||
enwik9 gzstd 1 1000000000 382578136 9604 99.30
|
enwik9 gzstd 1 1000000000 382578136 8608 110.78
|
||||||
enwik9 gzkp 1 1000000000 383825945 6544 145.73
|
enwik9 gzkp 1 1000000000 382781160 5628 169.45
|
||||||
|
|
||||||
Highly compressible JSON file.
|
Highly compressible JSON file.
|
||||||
https://files.klauspost.com/compress/github-june-2days-2019.json.zst
|
https://files.klauspost.com/compress/github-june-2days-2019.json.zst
|
||||||
|
|
||||||
file out level insize outsize millis mb/s
|
 file out level insize outsize millis mb/s
-github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
+github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
-github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
+github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
-github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75
+github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
-github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16
+github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
 
 github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
 github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
 github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
 github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
 
-github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
+github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
-github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61
+github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
 
 VM Image, Linux mint with a few installed applications:
 https://files.klauspost.com/compress/rawstudio-mint14.7z
 
 file out level insize outsize millis mb/s
-rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
+rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
-rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
+rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
-rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08
+rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
-rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52
+rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
 
 rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
 rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
 rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
 rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
 
-rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
+rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
-rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92
+rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
 
 CSV data:
 https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
 
 file out level insize outsize millis mb/s
-nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
+nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
-nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
+nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
-nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66
+nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
-nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33
+nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
 
 nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
 nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
 nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
 nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
 
-nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
+nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
-nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00
+nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
 ```
 ## Decompressor
 
@@ -283,8 +287,13 @@ func Decompress(in io.Reader, out io.Writer) error {
 }
 ```
 
-It is important to use the "Close" function when you no longer need the Reader to stop running goroutines.
-See "Allocation-less operation" below.
+It is important to use the "Close" function when you no longer need the Reader to stop running goroutines,
+when running with default settings.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data
+as it is being requested only.
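To make the new synchronous mode concrete, here is a minimal sketch of our own (the stdin/stdout plumbing is illustrative, not from the diff); only `NewReader`, `WithDecoderConcurrency` and `Close` are the library's documented API:

```go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// WithDecoderConcurrency(1) keeps decompression on the calling
	// goroutine, so data should only be decoded as it is requested.
	dec, err := zstd.NewReader(os.Stdin, zstd.WithDecoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	// Close releases the decoder's internal state and stops any helper
	// goroutines that may exist.
	defer dec.Close()

	if _, err := io.Copy(os.Stdout, dec); err != nil {
		panic(err)
	}
}
```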
 
 For decoding buffers, it could look something like this:
 
@@ -293,7 +302,7 @@ import "github.com/klauspost/compress/zstd"
 
 // Create a reader that caches decompressors.
 // For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil)
+var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0))
 
 // Decompress a buffer. We don't supply a destination buffer,
 // so it will be allocated by the decoder.
@@ -303,9 +312,12 @@ func Decompress(src []byte) ([]byte, error) {
 ```
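For reference, a runnable completion of that buffer example might look as follows; the `Decompress` body is elided between the hunks above, so the `DecodeAll` call and the `payload.zst` file name are our reconstruction, not part of the diff:

```go
package main

import (
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

// Shared, cached decoder as in the README example; WithDecoderConcurrency(0)
// sizes it to GOMAXPROCS.
var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))

// Decompress a buffer, letting the decoder allocate the destination.
func Decompress(src []byte) ([]byte, error) {
	return decoder.DecodeAll(src, nil)
}

func main() {
	data, err := os.ReadFile("payload.zst") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	plain, err := Decompress(data)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decompressed %d -> %d bytes", len(data), len(plain))
}
```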
 Both of these cases should provide the functionality needed.
 The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
 
 It will only allow a certain number of concurrent operations to run.
 To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
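A sketch of what concurrent buffer decompression can look like with one shared decoder; `bufs` and the error handling are illustrative assumptions:

```go
package main

import (
	"log"
	"sync"

	"github.com/klauspost/compress/zstd"
)

func main() {
	dec, err := zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	bufs := [][]byte{ /* compressed payloads, e.g. loaded from disk */ }

	var wg sync.WaitGroup
	for _, b := range bufs {
		wg.Add(1)
		go func(src []byte) {
			defer wg.Done()
			// DecodeAll may be called concurrently; the decoder limits
			// how many operations run at once.
			out, err := dec.DecodeAll(src, nil)
			if err != nil {
				log.Println("decode:", err)
				return
			}
			_ = out // use the decompressed data here
		}(b)
	}
	wg.Wait()
}
```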
 ### Dictionaries
 
@@ -357,19 +369,21 @@ In this case no unneeded allocations should be made.
 The buffer decoder does everything on the same goroutine and does nothing concurrently.
 It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
 
-The stream decoder operates on
+The stream decoder will create goroutines that:
 
-* One goroutine reads input and splits the input to several block decoders.
-* A number of decoders will decode blocks.
-* A goroutine coordinates these blocks and sends history from one to the next.
+1) Reads input and splits the input into blocks.
+2) Decompression of literals.
+3) Decompression of sequences.
+4) Reconstruction of output stream.
 
 So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
 
+The concurrency level will, for streams, determine how many blocks ahead the compression will start.
+
 Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency.
 
-In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
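As a usage sketch of the read-ahead behaviour described above (the file name and plumbing are ours, not the library's):

```go
package main

import (
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	in, err := os.Open("stream.zst") // hypothetical input
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	// With default concurrency, blocks are fetched and decoded ahead of
	// the consumer at the cost of a few background goroutines.
	dec, err := zstd.NewReader(in)
	if err != nil {
		log.Fatal(err)
	}
	defer dec.Close()

	// WriteTo drains the decoder without an extra copy loop.
	if _, err := dec.WriteTo(os.Stdout); err != nil {
		log.Fatal(err)
	}
}
```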
 ### Benchmarks
 
 These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
vendor/github.com/klauspost/compress/zstd/bitreader.go (4 changes, generated, vendored)
@@ -7,6 +7,7 @@ package zstd
 import (
 	"encoding/binary"
 	"errors"
+	"fmt"
 	"io"
 	"math/bits"
 )
@@ -132,6 +133,9 @@ func (b *bitReader) remain() uint {
 func (b *bitReader) close() error {
 	// Release reference.
 	b.in = nil
+	if !b.finished() {
+		return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
+	}
 	if b.bitsRead > 64 {
 		return io.ErrUnexpectedEOF
 	}
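The added check turns leftover bits at the end of a block into a hard error instead of silently accepting them. A stand-alone illustration of the same invariant, with invented names (`bitsLeft` stands in for `bitReader.remain()`):

```go
package main

import "fmt"

// closeBits mirrors the new close() rule: any unconsumed bits mean the
// block was malformed.
func closeBits(bitsLeft uint) error {
	if bitsLeft != 0 {
		return fmt.Errorf("%d extra bits on block, should be 0", bitsLeft)
	}
	return nil
}

func main() {
	fmt.Println(closeBits(0)) // <nil>
	fmt.Println(closeBits(3)) // 3 extra bits on block, should be 0
}
```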
vendor/github.com/klauspost/compress/zstd/blockdec.go (448 changes, generated, vendored)
@@ -76,16 +76,25 @@ type blockDec struct {
 	// Window size of the block.
 	WindowSize uint64
 
-	history chan *history
-	input   chan struct{}
-	result  chan decodeOutput
-	err     error
-	decWG   sync.WaitGroup
+	err error
+
+	// Check against this crc
+	checkCRC []byte
 
 	// Frame to use for singlethreaded decoding.
 	// Should not be used by the decoder itself since parent may be another frame.
 	localFrame *frameDec
 
+	sequence []seqVals
+
+	async struct {
+		newHist  *history
+		literals []byte
+		seqData  []byte
+		seqSize  int // Size of uncompressed sequences
+		fcs      uint64
+	}
+
 	// Block is RLE, this is the size.
 	RLESize uint32
 	tmp     [4]byte
@@ -108,13 +117,8 @@ func (b *blockDec) String() string {
 
 func newBlockDec(lowMem bool) *blockDec {
 	b := blockDec{
 		lowMem: lowMem,
-		result:  make(chan decodeOutput, 1),
-		input:   make(chan struct{}, 1),
-		history: make(chan *history, 1),
 	}
-	b.decWG.Add(1)
-	go b.startDecoder()
 	return &b
 }
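The new `async` struct above stages per-block intermediate results between pipeline goroutines, replacing the per-block channels that were removed. A toy version of that hand-off pattern, with all names invented for illustration:

```go
package main

import "fmt"

type block struct {
	data []byte
	// Staged intermediate results: filled by one stage, read by the next.
	async struct {
		literals []byte
		seqData  []byte
	}
}

func main() {
	prepare := make(chan *block, 4)
	execute := make(chan *block, 4)

	// Stage 1: split input into "literal" and "sequence" halves.
	go func() {
		for b := range prepare {
			mid := len(b.data) / 2
			b.async.literals = b.data[:mid]
			b.async.seqData = b.data[mid:]
			execute <- b
		}
		close(execute)
	}()

	// Feed two blocks, then signal end of input.
	go func() {
		for _, s := range []string{"abcd", "efgh"} {
			prepare <- &block{data: []byte(s)}
		}
		close(prepare)
	}()

	// Stage 2: consume staged results in order.
	for b := range execute {
		fmt.Printf("literals=%q seqs=%q\n", b.async.literals, b.async.seqData)
	}
}
```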
@@ -137,6 +141,12 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 	case blockTypeReserved:
 		return ErrReservedBlockType
 	case blockTypeRLE:
+		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+			if debugDecoder {
+				printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrWindowSizeExceeded
+		}
 		b.RLESize = uint32(cSize)
 		if b.lowMem {
 			maxSize = cSize
@@ -157,7 +167,19 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 			}
 			return ErrCompressedSizeTooBig
 		}
+		// Empty compressed blocks must at least be 2 bytes
+		// for Literals_Block_Type and one for Sequences_Section_Header.
+		if cSize < 2 {
+			return ErrBlockTooSmall
+		}
 	case blockTypeRaw:
+		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+			if debugDecoder {
+				printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrWindowSizeExceeded
+		}
+
 		b.RLESize = 0
 		// We do not need a destination for raw blocks.
 		maxSize = -1
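Both the RLE and raw cases now validate the untrusted block size against a fixed limit and the per-stream window before allocating anything. A minimal sketch of that guard; the limit value here is an assumption for illustration:

```go
package main

import (
	"errors"
	"fmt"
)

// Assumed fixed limit for the sketch; the real package defines its own.
const maxCompressedBlockSize = 128 << 10

var errWindowSizeExceeded = errors.New("window size exceeded")

// checkBlockSize rejects a (non-negative) size field before any allocation
// happens, so corrupt input cannot force huge buffers.
func checkBlockSize(cSize int, windowSize uint64) error {
	if cSize > maxCompressedBlockSize || uint64(cSize) > windowSize {
		return errWindowSizeExceeded
	}
	return nil
}

func main() {
	fmt.Println(checkBlockSize(1024, 1<<20))   // <nil>
	fmt.Println(checkBlockSize(1<<20, 64<<10)) // window size exceeded
}
```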
@@ -192,85 +214,14 @@ func (b *blockDec) sendErr(err error) {
 	b.Last = true
 	b.Type = blockTypeReserved
 	b.err = err
-	b.input <- struct{}{}
 }
 
 // Close will release resources.
 // Closed blockDec cannot be reset.
 func (b *blockDec) Close() {
-	close(b.input)
-	close(b.history)
-	close(b.result)
-	b.decWG.Wait()
 }
 
-// decodeAsync will prepare decoding the block when it receives input.
-// This will separate output and history.
-func (b *blockDec) startDecoder() {
-	defer b.decWG.Done()
-	for range b.input {
-		//println("blockDec: Got block input")
-		switch b.Type {
-		case blockTypeRLE:
-			if cap(b.dst) < int(b.RLESize) {
-				if b.lowMem {
-					b.dst = make([]byte, b.RLESize)
-				} else {
-					b.dst = make([]byte, maxBlockSize)
-				}
-			}
-			o := decodeOutput{
-				d:   b,
-				b:   b.dst[:b.RLESize],
-				err: nil,
-			}
-			v := b.data[0]
-			for i := range o.b {
-				o.b[i] = v
-			}
-			hist := <-b.history
-			hist.append(o.b)
-			b.result <- o
-		case blockTypeRaw:
-			o := decodeOutput{
-				d:   b,
-				b:   b.data,
-				err: nil,
-			}
-			hist := <-b.history
-			hist.append(o.b)
-			b.result <- o
-		case blockTypeCompressed:
-			b.dst = b.dst[:0]
-			err := b.decodeCompressed(nil)
-			o := decodeOutput{
-				d:   b,
-				b:   b.dst,
-				err: err,
-			}
-			if debugDecoder {
-				println("Decompressed to", len(b.dst), "bytes, error:", err)
-			}
-			b.result <- o
-		case blockTypeReserved:
-			// Used for returning errors.
-			<-b.history
-			b.result <- decodeOutput{
-				d:   b,
-				b:   nil,
-				err: b.err,
-			}
-		default:
-			panic("Invalid block type")
-		}
-		if debugDecoder {
-			println("blockDec: Finished block")
-		}
-	}
-}
-
-// decodeAsync will prepare decoding the block when it receives the history.
-// If history is provided, it will not fetch it from the channel.
+// decodeBuf
 func (b *blockDec) decodeBuf(hist *history) error {
 	switch b.Type {
 	case blockTypeRLE:
@@ -293,14 +244,23 @@ func (b *blockDec) decodeBuf(hist *history) error {
 		return nil
 	case blockTypeCompressed:
 		saved := b.dst
-		b.dst = hist.b
-		hist.b = nil
+		// Append directly to history
+		if hist.ignoreBuffer == 0 {
+			b.dst = hist.b
+			hist.b = nil
+		} else {
+			b.dst = b.dst[:0]
+		}
 		err := b.decodeCompressed(hist)
 		if debugDecoder {
 			println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
 		}
-		hist.b = b.dst
-		b.dst = saved
+		if hist.ignoreBuffer == 0 {
+			hist.b = b.dst
+			b.dst = saved
+		} else {
+			hist.appendKeep(b.dst)
+		}
 		return err
 	case blockTypeReserved:
 		// Used for returning errors.
@@ -310,30 +270,18 @@ func (b *blockDec) decodeBuf(hist *history) error {
 	}
 }
 
-// decodeCompressed will start decompressing a block.
-// If no history is supplied the decoder will decodeAsync as much as possible
-// before fetching from blockDec.history
-func (b *blockDec) decodeCompressed(hist *history) error {
-	in := b.data
-	delayedHistory := hist == nil
-
-	if delayedHistory {
-		// We must always grab history.
-		defer func() {
-			if hist == nil {
-				<-b.history
-			}
-		}()
-	}
+func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
 	// There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
 	if len(in) < 2 {
-		return ErrBlockTooSmall
+		return in, ErrBlockTooSmall
 	}
 
 	litType := literalsBlockType(in[0] & 3)
 	var litRegenSize int
 	var litCompSize int
 	sizeFormat := (in[0] >> 2) & 3
 	var fourStreams bool
+	var literals []byte
 	switch litType {
 	case literalsBlockRaw, literalsBlockRLE:
 		switch sizeFormat {
@@ -349,7 +297,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 			// Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
 			if len(in) < 3 {
 				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
-				return ErrBlockTooSmall
+				return in, ErrBlockTooSmall
 			}
 			litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
 			in = in[3:]
@@ -360,7 +308,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 			// Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
 			if len(in) < 3 {
 				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
-				return ErrBlockTooSmall
+				return in, ErrBlockTooSmall
 			}
 			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
 			litRegenSize = int(n & 1023)
@@ -371,7 +319,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 			fourStreams = true
 			if len(in) < 4 {
 				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
-				return ErrBlockTooSmall
+				return in, ErrBlockTooSmall
 			}
 			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20)
 			litRegenSize = int(n & 16383)
@@ -381,7 +329,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 			fourStreams = true
 			if len(in) < 5 {
 				println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
-				return ErrBlockTooSmall
+				return in, ErrBlockTooSmall
 			}
 			n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28)
 			litRegenSize = int(n & 262143)
@@ -392,13 +340,15 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 	if debugDecoder {
 		println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
 	}
-	var literals []byte
-	var huff *huff0.Scratch
+	if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize {
+		return in, ErrWindowSizeExceeded
+	}
+
 	switch litType {
 	case literalsBlockRaw:
 		if len(in) < litRegenSize {
 			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize)
-			return ErrBlockTooSmall
+			return in, ErrBlockTooSmall
 		}
 		literals = in[:litRegenSize]
 		in = in[litRegenSize:]
@@ -406,7 +356,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 	case literalsBlockRLE:
 		if len(in) < 1 {
 			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1)
-			return ErrBlockTooSmall
+			return in, ErrBlockTooSmall
 		}
 		if cap(b.literalBuf) < litRegenSize {
 			if b.lowMem {
@@ -417,7 +367,6 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 				b.literalBuf = make([]byte, litRegenSize)
 			} else {
 				b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
-
 			}
 		}
 	}
@@ -433,7 +382,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 	case literalsBlockTreeless:
 		if len(in) < litCompSize {
 			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
-			return ErrBlockTooSmall
+			return in, ErrBlockTooSmall
 		}
 		// Store compressed literals, so we defer decoding until we get history.
 		literals = in[:litCompSize]
@@ -441,15 +390,10 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 		if debugDecoder {
 			printf("Found %d compressed literals\n", litCompSize)
 		}
-	case literalsBlockCompressed:
-		if len(in) < litCompSize {
-			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
-			return ErrBlockTooSmall
+		huff := hist.huffTree
+		if huff == nil {
+			return in, errors.New("literal block was treeless, but no history was defined")
 		}
-		literals = in[:litCompSize]
-		in = in[litCompSize:]
-		huff = huffDecoderPool.Get().(*huff0.Scratch)
-		var err error
 		// Ensure we have space to store it.
 		if cap(b.literalBuf) < litRegenSize {
 			if b.lowMem {
@@ -458,14 +402,53 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 				b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
 			}
 		}
-		if huff == nil {
-			huff = &huff0.Scratch{}
+		var err error
+		// Use our out buffer.
+		huff.MaxDecodedSize = maxCompressedBlockSize
+		if fourStreams {
+			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
+		} else {
+			literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
 		}
+		// Make sure we don't leak our literals buffer
+		if err != nil {
+			println("decompressing literals:", err)
+			return in, err
+		}
+		if len(literals) != litRegenSize {
+			return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+		}
+
+	case literalsBlockCompressed:
+		if len(in) < litCompSize {
+			println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
+			return in, ErrBlockTooSmall
+		}
+		literals = in[:litCompSize]
+		in = in[litCompSize:]
+		// Ensure we have space to store it.
+		if cap(b.literalBuf) < litRegenSize {
+			if b.lowMem {
+				b.literalBuf = make([]byte, 0, litRegenSize)
+			} else {
+				b.literalBuf = make([]byte, 0, maxCompressedBlockSize)
+			}
+		}
+		huff := hist.huffTree
+		if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) {
+			huff = huffDecoderPool.Get().(*huff0.Scratch)
+			if huff == nil {
+				huff = &huff0.Scratch{}
+			}
+		}
+		var err error
 		huff, literals, err = huff0.ReadTable(literals, huff)
 		if err != nil {
 			println("reading huffman table:", err)
-			return err
+			return in, err
 		}
+		hist.huffTree = huff
+		huff.MaxDecodedSize = maxCompressedBlockSize
 		// Use our out buffer.
 		if fourStreams {
 			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
@@ -474,27 +457,56 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 		}
 		if err != nil {
 			println("decoding compressed literals:", err)
-			return err
+			return in, err
 		}
 		// Make sure we don't leak our literals buffer
 		if len(literals) != litRegenSize {
-			return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
+			return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
 		}
 		if debugDecoder {
 			printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
 		}
 	}
+	hist.decoders.literals = literals
+	return in, nil
+}
+
+// decodeCompressed will start decompressing a block.
+func (b *blockDec) decodeCompressed(hist *history) error {
+	in := b.data
+	in, err := b.decodeLiterals(in, hist)
+	if err != nil {
+		return err
+	}
+	err = b.prepareSequences(in, hist)
+	if err != nil {
+		return err
+	}
+	if hist.decoders.nSeqs == 0 {
+		b.dst = append(b.dst, hist.decoders.literals...)
+		return nil
+	}
+	err = hist.decoders.decodeSync(hist)
+	if err != nil {
+		return err
+	}
+	b.dst = hist.decoders.out
+	hist.recentOffsets = hist.decoders.prevOffset
+	return nil
+}
+
+func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
+	if debugDecoder {
+		printf("prepareSequences: %d byte(s) input\n", len(in))
+	}
 	// Decode Sequences
 	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
 	if len(in) < 1 {
 		return ErrBlockTooSmall
 	}
+	var nSeqs int
 	seqHeader := in[0]
-	nSeqs := 0
 	switch {
-	case seqHeader == 0:
-		in = in[1:]
 	case seqHeader < 128:
 		nSeqs = int(seqHeader)
 		in = in[1:]
@@ -511,8 +523,16 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 		nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
 		in = in[3:]
 	}
+	if nSeqs == 0 && len(in) != 0 {
+		// When no sequences, there should not be any more data...
+		if debugDecoder {
+			printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in))
+		}
+		return ErrUnexpectedBlockSize
+	}
 
-	var seqs = &sequenceDecs{}
+	var seqs = &hist.decoders
+	seqs.nSeqs = nSeqs
 	if nSeqs > 0 {
 		if len(in) < 1 {
 			return ErrBlockTooSmall
@@ -541,6 +561,9 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 		}
 		switch mode {
 		case compModePredefined:
+			if seq.fse != nil && !seq.fse.preDefined {
+				fseDecoderPool.Put(seq.fse)
+			}
 			seq.fse = &fsePredef[i]
 		case compModeRLE:
 			if br.remain() < 1 {
@@ -548,34 +571,36 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 			}
 			v := br.Uint8()
 			br.advance(1)
-			dec := fseDecoderPool.Get().(*fseDecoder)
+			if seq.fse == nil || seq.fse.preDefined {
+				seq.fse = fseDecoderPool.Get().(*fseDecoder)
+			}
 			symb, err := decSymbolValue(v, symbolTableX[i])
 			if err != nil {
 				printf("RLE Transform table (%v) error: %v", tableIndex(i), err)
 				return err
 			}
-			dec.setRLE(symb)
-			seq.fse = dec
+			seq.fse.setRLE(symb)
 			if debugDecoder {
 				printf("RLE set to %+v, code: %v", symb, v)
 			}
 		case compModeFSE:
 			println("Reading table for", tableIndex(i))
-			dec := fseDecoderPool.Get().(*fseDecoder)
-			err := dec.readNCount(&br, uint16(maxTableSymbol[i]))
+			if seq.fse == nil || seq.fse.preDefined {
+				seq.fse = fseDecoderPool.Get().(*fseDecoder)
+			}
+			err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i]))
 			if err != nil {
 				println("Read table error:", err)
 				return err
 			}
-			err = dec.transform(symbolTableX[i])
+			err = seq.fse.transform(symbolTableX[i])
 			if err != nil {
 				println("Transform table error:", err)
 				return err
 			}
 			if debugDecoder {
-				println("Read table ok", "symbolLen:", dec.symbolLen)
+				println("Read table ok", "symbolLen:", seq.fse.symbolLen)
 			}
-			seq.fse = dec
 		case compModeRepeat:
 			seq.repeat = true
 		}
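The RLE and FSE cases now reuse the existing `fseDecoder` unless it is a shared predefined table, instead of unconditionally fetching a fresh one from the pool. A small sketch of that pool-backed reuse rule, with invented types:

```go
package main

import (
	"fmt"
	"sync"
)

type fseDecoder struct {
	preDefined bool
	table      string
}

var fseDecoderPool = sync.Pool{New: func() interface{} { return new(fseDecoder) }}

// ensureWritable only fetches a fresh object when the current one is nil
// or is a shared predefined table that must never be overwritten in place.
func ensureWritable(cur *fseDecoder) *fseDecoder {
	if cur == nil || cur.preDefined {
		return fseDecoderPool.Get().(*fseDecoder)
	}
	return cur
}

func main() {
	shared := &fseDecoder{preDefined: true, table: "predef"}
	d := ensureWritable(shared)
	d.table = "custom"
	fmt.Println(shared.table, d.table) // predef custom
}
```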
@@ -585,140 +610,89 @@ func (b *blockDec) decodeCompressed(hist *history) error {
 		}
 		in = br.unread()
 	}
 
-	// Wait for history.
-	// All time spent after this is critical since it is strictly sequential.
-	if hist == nil {
-		hist = <-b.history
-		if hist.error {
-			return ErrDecoderClosed
-		}
-	}
-
-	// Decode treeless literal block.
-	if litType == literalsBlockTreeless {
-		// TODO: We could send the history early WITHOUT the stream history.
-		// This would allow decoding treeless literals before the byte history is available.
-		// Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless.
-		// So not much obvious gain here.
-
-		if hist.huffTree == nil {
-			return errors.New("literal block was treeless, but no history was defined")
-		}
-		// Ensure we have space to store it.
-		if cap(b.literalBuf) < litRegenSize {
-			if b.lowMem {
-				b.literalBuf = make([]byte, 0, litRegenSize)
-			} else {
-				b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
-			}
-		}
-		var err error
-		// Use our out buffer.
-		huff = hist.huffTree
-		if fourStreams {
-			literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
-		} else {
-			literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
-		}
-		// Make sure we don't leak our literals buffer
-		if err != nil {
-			println("decompressing literals:", err)
-			return err
-		}
-		if len(literals) != litRegenSize {
-			return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
-		}
-	} else {
-		if hist.huffTree != nil && huff != nil {
-			if hist.dict == nil || hist.dict.litEnc != hist.huffTree {
-				huffDecoderPool.Put(hist.huffTree)
-			}
-			hist.huffTree = nil
-		}
-	}
-	if huff != nil {
-		hist.huffTree = huff
-	}
 	if debugDecoder {
-		println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
+		println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.")
 	}
 
 	if nSeqs == 0 {
-		// Decompressed content is defined entirely as Literals Section content.
-		b.dst = append(b.dst, literals...)
-		if delayedHistory {
-			hist.append(literals)
+		if len(b.sequence) > 0 {
+			b.sequence = b.sequence[:0]
 		}
 		return nil
 	}
-
-	seqs, err := seqs.mergeHistory(&hist.decoders)
-	if err != nil {
-		return err
-	}
-	if debugDecoder {
-		println("History merged ok")
+	br := seqs.br
+	if br == nil {
+		br = &bitReader{}
 	}
-	br := &bitReader{}
 	if err := br.init(in); err != nil {
 		return err
 	}
 
-	// TODO: Investigate if sending history without decoders are faster.
-	// This would allow the sequences to be decoded async and only have to construct stream history.
-	// If only recent offsets were not transferred, this would be an obvious win.
-	// Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded.
+	if err := seqs.initialize(br, hist, b.dst); err != nil {
+		println("initializing sequences:", err)
+		return err
+	}
+	return nil
+}
+
+func (b *blockDec) decodeSequences(hist *history) error {
+	if cap(b.sequence) < hist.decoders.nSeqs {
+		if b.lowMem {
+			b.sequence = make([]seqVals, 0, hist.decoders.nSeqs)
+		} else {
+			b.sequence = make([]seqVals, 0, 0x7F00+0xffff)
+		}
+	}
+	b.sequence = b.sequence[:hist.decoders.nSeqs]
+	if hist.decoders.nSeqs == 0 {
+		hist.decoders.seqSize = len(hist.decoders.literals)
+		return nil
+	}
+	hist.decoders.windowSize = hist.windowSize
+	hist.decoders.prevOffset = hist.recentOffsets
+	err := hist.decoders.decode(b.sequence)
+	hist.recentOffsets = hist.decoders.prevOffset
+	return err
+}
 
+func (b *blockDec) executeSequences(hist *history) error {
 	hbytes := hist.b
 	if len(hbytes) > hist.windowSize {
 		hbytes = hbytes[len(hbytes)-hist.windowSize:]
-		// We do not need history any more.
+		// We do not need history anymore.
 		if hist.dict != nil {
 			hist.dict.content = nil
 		}
 	}
-	if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
-		println("initializing sequences:", err)
-		return err
-	}
-
-	err = seqs.decode(nSeqs, br, hbytes)
+	hist.decoders.windowSize = hist.windowSize
+	hist.decoders.out = b.dst[:0]
+	err := hist.decoders.execute(b.sequence, hbytes)
 	if err != nil {
 		return err
 	}
-	if !br.finished() {
-		return fmt.Errorf("%d extra bits on block, should be 0", br.remain())
-	}
-
-	err = br.close()
-	if err != nil {
-		printf("Closing sequences: %v, %+v\n", err, *br)
-	}
+	return b.updateHistory(hist)
+}
+
+func (b *blockDec) updateHistory(hist *history) error {
 	if len(b.data) > maxCompressedBlockSize {
 		return fmt.Errorf("compressed block size too large (%d)", len(b.data))
 	}
 	// Set output and release references.
-	b.dst = seqs.out
-	seqs.out, seqs.literals, seqs.hist = nil, nil, nil
+	b.dst = hist.decoders.out
+	hist.recentOffsets = hist.decoders.prevOffset
 
-	if !delayedHistory {
-		// If we don't have delayed history, no need to update.
-		hist.recentOffsets = seqs.prevOffset
-		return nil
-	}
 	if b.Last {
 		// if last block we don't care about history.
 		println("Last block, no history returned")
 		hist.b = hist.b[:0]
 		return nil
+	} else {
+		hist.append(b.dst)
+		if debugDecoder {
+			println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b))
+		}
 	}
-	hist.append(b.dst)
-	hist.recentOffsets = seqs.prevOffset
-	if debugDecoder {
-		println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.")
-	}
-
+	hist.decoders.out, hist.decoders.literals = nil, nil
 	return nil
 }
vendor/github.com/klauspost/compress/zstd/bytebuf.go (3 changes, generated, vendored)
@@ -113,6 +113,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
 func (r *readerWrapper) readByte() (byte, error) {
 	n2, err := r.r.Read(r.tmp[:1])
 	if err != nil {
+		if err == io.EOF {
+			err = io.ErrUnexpectedEOF
+		}
 		return 0, err
 	}
 	if n2 != 1 {
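A stand-alone illustration of the EOF-normalization idiom added above: when the caller knows more bytes must follow, a clean `io.EOF` is rewritten to `io.ErrUnexpectedEOF` so truncation is reported as corruption rather than a normal end of stream:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// readByteStrict reads one required byte; running out of input here is an
// error, not a clean EOF.
func readByteStrict(r io.Reader) (byte, error) {
	var tmp [1]byte
	_, err := io.ReadFull(r, tmp[:])
	if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	return tmp[0], err
}

func main() {
	_, err := readByteStrict(bytes.NewReader(nil))
	fmt.Println(err) // unexpected EOF
}
```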
vendor/github.com/klauspost/compress/zstd/decoder.go (649 changes, generated, vendored)
@@ -5,9 +5,13 @@
 package zstd
 
 import (
-	"errors"
+	"bytes"
+	"context"
+	"encoding/binary"
 	"io"
 	"sync"
+
+	"github.com/klauspost/compress/zstd/internal/xxhash"
 )
 
 // Decoder provides decoding of zstandard streams.
@@ -22,12 +26,19 @@ type Decoder struct {
 	// Unreferenced decoders, ready for use.
 	decoders chan *blockDec
 
-	// Streams ready to be decoded.
-	stream chan decodeStream
-
 	// Current read position used for Reader functionality.
 	current decoderState
 
+	// sync stream decoding
+	syncStream struct {
+		decodedFrame uint64
+		br           readerWrapper
+		enabled      bool
+		inFrame      bool
+	}
+
+	frame *frameDec
+
 	// Custom dictionaries.
 	// Always uses copies.
 	dicts map[uint32]dict
@@ -46,7 +57,10 @@ type decoderState struct {
 	output chan decodeOutput
 
 	// cancel remaining output.
-	cancel chan struct{}
+	cancel context.CancelFunc
+
+	// crc of current frame
+	crc *xxhash.Digest
 
 	flushed bool
 }
@@ -81,7 +95,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
 			return nil, err
 		}
 	}
-	d.current.output = make(chan decodeOutput, d.o.concurrent)
+	d.current.crc = xxhash.New()
 	d.current.flushed = true
 
 	if r == nil {
@@ -130,7 +144,7 @@ func (d *Decoder) Read(p []byte) (int, error) {
 				break
 			}
 			if !d.nextBlock(n == 0) {
-				return n, nil
+				return n, d.current.err
 			}
 		}
 	}
@@ -162,6 +176,7 @@ func (d *Decoder) Reset(r io.Reader) error {
 
 	d.drainOutput()
 
+	d.syncStream.br.r = nil
 	if r == nil {
 		d.current.err = ErrDecoderNilInput
 		if len(d.current.b) > 0 {
@@ -195,33 +210,39 @@ func (d *Decoder) Reset(r io.Reader) error {
 		}
 		return nil
 	}
 
-	if d.stream == nil {
-		d.stream = make(chan decodeStream, 1)
-		d.streamWg.Add(1)
-		go d.startStreamDecoder(d.stream)
-	}
-
 	// Remove current block.
+	d.stashDecoder()
 	d.current.decodeOutput = decodeOutput{}
 	d.current.err = nil
-	d.current.cancel = make(chan struct{})
 	d.current.flushed = false
 	d.current.d = nil
 
-	d.stream <- decodeStream{
-		r:      r,
-		output: d.current.output,
-		cancel: d.current.cancel,
+	// Ensure no-one else is still running...
+	d.streamWg.Wait()
+	if d.frame == nil {
+		d.frame = newFrameDec(d.o)
 	}
+
+	if d.o.concurrent == 1 {
+		return d.startSyncDecoder(r)
+	}
+
+	d.current.output = make(chan decodeOutput, d.o.concurrent)
+	ctx, cancel := context.WithCancel(context.Background())
+	d.current.cancel = cancel
+	d.streamWg.Add(1)
+	go d.startStreamDecoder(ctx, r, d.current.output)
+
 	return nil
 }
 
 // drainOutput will drain the output until errEndOfStream is sent.
 func (d *Decoder) drainOutput() {
 	if d.current.cancel != nil {
-		println("cancelling current")
-		close(d.current.cancel)
+		if debugDecoder {
+			println("cancelling current")
+		}
+		d.current.cancel()
 		d.current.cancel = nil
 	}
 	if d.current.d != nil {
@@ -243,12 +264,9 @@ func (d *Decoder) drainOutput() {
 			}
 			d.decoders <- v.d
 		}
-		if v.err == errEndOfStream {
-			println("current flushed")
-			d.current.flushed = true
-			return
-		}
 	}
+	d.current.output = nil
+	d.current.flushed = true
 }
 
 // WriteTo writes data to w until there's no more data to write or when an error occurs.
@@ -287,7 +305,7 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
 // DecodeAll can be used concurrently.
 // The Decoder concurrency limits will be respected.
 func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
-	if d.current.err == ErrDecoderClosed {
+	if d.decoders == nil {
 		return dst, ErrDecoderClosed
 	}
 
@@ -300,6 +318,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 		}
 		frame.rawInput = nil
 		frame.bBuf = nil
+		if frame.history.decoders.br != nil {
+			frame.history.decoders.br.in = nil
+		}
 		d.decoders <- block
 	}()
 	frame.bBuf = input
@@ -307,27 +328,31 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 	for {
 		frame.history.reset()
 		err := frame.reset(&frame.bBuf)
-		if err == io.EOF {
-			if debugDecoder {
-				println("frame reset return EOF")
+		if err != nil {
+			if err == io.EOF {
+				if debugDecoder {
+					println("frame reset return EOF")
+				}
+				return dst, nil
 			}
-			return dst, nil
+			return dst, err
 		}
 		if frame.DictionaryID != nil {
 			dict, ok := d.dicts[*frame.DictionaryID]
 			if !ok {
 				return nil, ErrUnknownDictionary
 			}
+			if debugDecoder {
+				println("setting dict", frame.DictionaryID)
+			}
 			frame.history.setDict(&dict)
 		}
-		if err != nil {
-			return dst, err
-		}
-		if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+		if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
 			return dst, ErrDecoderSizeExceeded
 		}
-		if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
-			// Never preallocate moe than 1 GB up front.
+		if frame.FrameContentSize < 1<<30 {
+			// Never preallocate more than 1 GB up front.
 			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
 				dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
 				copy(dst2, dst)
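A sketch of the grow-before-append pattern used above: when the final output size is known (and passes the sanity limit), the destination is allocated once instead of growing repeatedly during decoding:

```go
package main

import "fmt"

// growFor ensures dst has room for want more bytes without changing its
// current contents, copying at most once.
func growFor(dst []byte, want int) []byte {
	if cap(dst)-len(dst) < want {
		dst2 := make([]byte, len(dst), len(dst)+want)
		copy(dst2, dst)
		dst = dst2
	}
	return dst
}

func main() {
	dst := make([]byte, 3, 4)
	dst = growFor(dst, 100)
	fmt.Println(len(dst), cap(dst) >= 103) // 3 true
}
```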
@@ -368,6 +393,161 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 // If non-blocking mode is used the returned boolean will be false
 // if no data was available without blocking.
 func (d *Decoder) nextBlock(blocking bool) (ok bool) {
+	if d.current.err != nil {
+		// Keep error state.
+		return false
+	}
+	d.current.b = d.current.b[:0]
+
+	// SYNC:
+	if d.syncStream.enabled {
+		if !blocking {
+			return false
+		}
+		ok = d.nextBlockSync()
+		if !ok {
+			d.stashDecoder()
+		}
+		return ok
+	}
+
+	//ASYNC:
+	d.stashDecoder()
+	if blocking {
+		d.current.decodeOutput, ok = <-d.current.output
+	} else {
+		select {
+		case d.current.decodeOutput, ok = <-d.current.output:
+		default:
+			return false
+		}
+	}
+	if !ok {
+		// This should not happen, so signal error state...
+		d.current.err = io.ErrUnexpectedEOF
+		return false
+	}
+	next := d.current.decodeOutput
+	if next.d != nil && next.d.async.newHist != nil {
+		d.current.crc.Reset()
+	}
+	if debugDecoder {
+		var tmp [4]byte
+		binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b)))
+		println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
+	}
+
+	if len(next.b) > 0 {
+		n, err := d.current.crc.Write(next.b)
+		if err == nil {
+			if n != len(next.b) {
+				d.current.err = io.ErrShortWrite
+			}
+		}
+	}
+	if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
+		got := d.current.crc.Sum64()
+		var tmp [4]byte
+		binary.LittleEndian.PutUint32(tmp[:], uint32(got))
+		if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC {
+			if debugDecoder {
+				println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
+			}
+			d.current.err = ErrCRCMismatch
+		} else {
+			if debugDecoder {
+				println("CRC ok", tmp[:])
+			}
+		}
+	}
+
+	return true
+}
+
+func (d *Decoder) nextBlockSync() (ok bool) {
+	if d.current.d == nil {
+		d.current.d = <-d.decoders
+	}
+	for len(d.current.b) == 0 {
+		if !d.syncStream.inFrame {
+			d.frame.history.reset()
+			d.current.err = d.frame.reset(&d.syncStream.br)
+			if d.current.err != nil {
+				return false
+			}
+			if d.frame.DictionaryID != nil {
+				dict, ok := d.dicts[*d.frame.DictionaryID]
+				if !ok {
+					d.current.err = ErrUnknownDictionary
+					return false
+				} else {
+					d.frame.history.setDict(&dict)
+				}
+			}
+			if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
+				d.current.err = ErrDecoderSizeExceeded
+				return false
+			}
+
+			d.syncStream.decodedFrame = 0
+			d.syncStream.inFrame = true
+		}
+		d.current.err = d.frame.next(d.current.d)
+		if d.current.err != nil {
+			return false
+		}
+		d.frame.history.ensureBlock()
+		if debugDecoder {
+			println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame)
+		}
+		histBefore := len(d.frame.history.b)
+		d.current.err = d.current.d.decodeBuf(&d.frame.history)
+
+		if d.current.err != nil {
+			println("error after:", d.current.err)
+			return false
+		}
+		d.current.b = d.frame.history.b[histBefore:]
+		if debugDecoder {
+			println("history after:", len(d.frame.history.b))
+		}
+
+		// Check frame size (before CRC)
+		d.syncStream.decodedFrame += uint64(len(d.current.b))
+		if d.syncStream.decodedFrame > d.frame.FrameContentSize {
+			if debugDecoder {
+				printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
+			}
+			d.current.err = ErrFrameSizeExceeded
+			return false
+		}
+
+		// Check FCS
+		if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize {
+			if debugDecoder {
+				printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
+			}
+			d.current.err = ErrFrameSizeMismatch
+			return false
+		}
+
+		// Update/Check CRC
+		if d.frame.HasCheckSum {
+			d.frame.crc.Write(d.current.b)
+			if d.current.d.Last {
+				d.current.err = d.frame.checkCRC()
+				if d.current.err != nil {
+					println("CRC error:", d.current.err)
+					return false
+				}
+			}
+		}
+		d.syncStream.inFrame = !d.current.d.Last
+	}
+	return true
+}
+
+func (d *Decoder) stashDecoder() {
 	if d.current.d != nil {
 		if debugDecoder {
 			printf("re-adding current decoder %p", d.current.d)
@@ -375,24 +555,6 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
 		d.decoders <- d.current.d
 		d.current.d = nil
 	}
-	if d.current.err != nil {
-		// Keep error state.
-		return blocking
-	}
-
-	if blocking {
-		d.current.decodeOutput = <-d.current.output
-	} else {
-		select {
-		case d.current.decodeOutput = <-d.current.output:
-		default:
-			return false
-		}
-	}
-	if debugDecoder {
-		println("got", len(d.current.b), "bytes, error:", d.current.err)
-	}
-	return true
 }
 
 // Close will release all resources.
@@ -402,10 +564,10 @@ func (d *Decoder) Close() {
 		return
 	}
 	d.drainOutput()
-	if d.stream != nil {
-		close(d.stream)
+	if d.current.cancel != nil {
+		d.current.cancel()
 		d.streamWg.Wait()
-		d.stream = nil
+		d.current.cancel = nil
 	}
 	if d.decoders != nil {
 		close(d.decoders)
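`Close` now cancels a `context.Context` instead of closing a channel, then waits for the stream goroutines to exit. The same shutdown pattern in isolation:

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup

	wg.Add(1)
	go func() {
		defer wg.Done()
		<-ctx.Done() // worker exits when cancelled
	}()

	// Close(): signal shutdown, then wait until the worker is really gone.
	cancel()
	wg.Wait()
	fmt.Println("shut down cleanly")
}
```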
@@ -456,100 +618,307 @@ type decodeOutput struct {
 	err error
 }
 
-type decodeStream struct {
-	r io.Reader
-
-	// Blocks ready to be written to output.
-	output chan decodeOutput
-
-	// cancel reading from the input
-	cancel chan struct{}
-}
-
-// errEndOfStream indicates that everything from the stream was read.
-var errEndOfStream = errors.New("end-of-stream")
+func (d *Decoder) startSyncDecoder(r io.Reader) error {
+	d.frame.history.reset()
+	d.syncStream.br = readerWrapper{r: r}
+	d.syncStream.inFrame = false
+	d.syncStream.enabled = true
+	d.syncStream.decodedFrame = 0
+	return nil
+}
 
 // Create Decoder:
-// Spawn n block decoders. These accept tasks to decode a block.
-// Create goroutine that handles stream processing, this will send history to decoders as they are available.
-// Decoders update the history as they decode.
-// When a block is returned:
-// 	a) history is sent to the next decoder,
-// 	b) content written to CRC.
-// 	c) return data to WRITER.
-// 	d) wait for next block to return data.
-// Once WRITTEN, the decoders reused by the writer frame decoder for re-use.
-func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
+// ASYNC:
+// Spawn 4 go routines.
+// 0: Read frames and decode blocks.
+// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree.
+// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets.
+// 3: Wait for stream history, execute sequences, send stream history.
+func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
 	defer d.streamWg.Done()
-	frame := newFrameDec(d.o)
-	for stream := range inStream {
-		if debugDecoder {
-			println("got new stream")
-		}
-		br := readerWrapper{r: stream.r}
-	decodeStream:
-		for {
-			frame.history.reset()
-			err := frame.reset(&br)
-			if debugDecoder && err != nil {
-				println("Frame decoder returned", err)
-			}
-			if err == nil && frame.DictionaryID != nil {
-				dict, ok := d.dicts[*frame.DictionaryID]
-				if !ok {
-					err = ErrUnknownDictionary
-				} else {
-					frame.history.setDict(&dict)
-				}
-			}
-			if err != nil {
-				stream.output <- decodeOutput{
-					err: err,
-				}
-				break
-			}
-			if debugDecoder {
-				println("starting frame decoder")
-			}
-
-			// This goroutine will forward history between frames.
-			frame.frameDone.Add(1)
-			frame.initAsync()
-
-			go frame.startDecoder(stream.output)
-		decodeFrame:
-			// Go through all blocks of the frame.
-			for {
-				dec := <-d.decoders
-				select {
-				case <-stream.cancel:
-					if !frame.sendErr(dec, io.EOF) {
-						// To not let the decoder dangle, send it back.
-						stream.output <- decodeOutput{d: dec}
-					}
-					break decodeStream
-				default:
-				}
-				err := frame.next(dec)
-				switch err {
-				case io.EOF:
-					// End of current frame, no error
-					println("EOF on next block")
-					break decodeFrame
-				case nil:
-					continue
-				default:
-					println("block decoder returned", err)
-					break decodeStream
-				}
-			}
-			// All blocks have started decoding, check if there are more frames.
-			println("waiting for done")
-			frame.frameDone.Wait()
-			println("done waiting...")
-		}
-		frame.frameDone.Wait()
-		println("Sending EOS")
-		stream.output <- decodeOutput{err: errEndOfStream}
-	}
+	br := readerWrapper{r: r}
+
+	var seqPrepare = make(chan *blockDec, d.o.concurrent)
+	var seqDecode = make(chan *blockDec, d.o.concurrent)
+	var seqExecute = make(chan *blockDec, d.o.concurrent)
+
+	// Async 1: Prepare blocks...
+	go func() {
+		var hist history
+		var hasErr bool
+		for block := range seqPrepare {
+			if hasErr {
+				if block != nil {
+					seqDecode <- block
+				}
+				continue
+			}
+			if block.async.newHist != nil {
+				if debugDecoder {
+					println("Async 1: new history")
+				}
+				hist.reset()
+				if block.async.newHist.dict != nil {
+					hist.setDict(block.async.newHist.dict)
+				}
+			}
+			if block.err != nil || block.Type != blockTypeCompressed {
+				hasErr = block.err != nil
+				seqDecode <- block
+				continue
+			}
+
+			remain, err := block.decodeLiterals(block.data, &hist)
+			block.err = err
+			hasErr = block.err != nil
+			if err == nil {
+				block.async.literals = hist.decoders.literals
+				block.async.seqData = remain
+			} else if debugDecoder {
+				println("decodeLiterals error:", err)
+			}
+			seqDecode <- block
+		}
+		close(seqDecode)
+	}()
+
+	// Async 2: Decode sequences...
+	go func() {
+		var hist history
+		var hasErr bool
+
+		for block := range seqDecode {
+			if hasErr {
+				if block != nil {
+					seqExecute <- block
+				}
+				continue
+			}
+			if block.async.newHist != nil {
+				if debugDecoder {
+					println("Async 2: new history, recent:", block.async.newHist.recentOffsets)
+				}
+				hist.decoders = block.async.newHist.decoders
+				hist.recentOffsets = block.async.newHist.recentOffsets
+				hist.windowSize = block.async.newHist.windowSize
+				if block.async.newHist.dict != nil {
+					hist.setDict(block.async.newHist.dict)
+				}
+			}
+			if block.err != nil || block.Type != blockTypeCompressed {
+				hasErr = block.err != nil
+				seqExecute <- block
+				continue
+			}
+
+			hist.decoders.literals = block.async.literals
+			block.err = block.prepareSequences(block.async.seqData, &hist)
+			if debugDecoder && block.err != nil {
+				println("prepareSequences returned:", block.err)
+			}
+			hasErr = block.err != nil
+			if block.err == nil {
+				block.err = block.decodeSequences(&hist)
+				if debugDecoder && block.err != nil {
+					println("decodeSequences returned:", block.err)
+				}
+				hasErr = block.err != nil
+				// block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs]
+				block.async.seqSize = hist.decoders.seqSize
+			}
+			seqExecute <- block
+		}
+		close(seqExecute)
+	}()
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+
+	// Async 3: Execute sequences...
+	frameHistCache := d.frame.history.b
+	go func() {
+		var hist history
+		var decodedFrame uint64
+		var fcs uint64
+		var hasErr bool
+		for block := range seqExecute {
+			out := decodeOutput{err: block.err, d: block}
+			if block.err != nil || hasErr {
+				hasErr = true
+				output <- out
+				continue
+			}
+			if block.async.newHist != nil {
+				if debugDecoder {
+					println("Async 3: new history")
+				}
+				hist.windowSize = block.async.newHist.windowSize
+				hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
+				if block.async.newHist.dict != nil {
+					hist.setDict(block.async.newHist.dict)
+				}
+
+				if cap(hist.b) < hist.allocFrameBuffer {
+					if cap(frameHistCache) >= hist.allocFrameBuffer {
+						hist.b = frameHistCache
+					} else {
+						hist.b = make([]byte, 0, hist.allocFrameBuffer)
+						println("Alloc history sized", hist.allocFrameBuffer)
+					}
+				}
+				hist.b = hist.b[:0]
+				fcs = block.async.fcs
+				decodedFrame = 0
+			}
+			do := decodeOutput{err: block.err, d: block}
+			switch block.Type {
+			case blockTypeRLE:
+				if debugDecoder {
+					println("add rle block length:", block.RLESize)
+				}
+
+				if cap(block.dst) < int(block.RLESize) {
+					if block.lowMem {
+						block.dst = make([]byte, block.RLESize)
+					} else {
+						block.dst = make([]byte, maxBlockSize)
+					}
+				}
+				block.dst = block.dst[:block.RLESize]
+				v := block.data[0]
+				for i := range block.dst {
+					block.dst[i] = v
+				}
+				hist.append(block.dst)
+				do.b = block.dst
+			case blockTypeRaw:
+				if debugDecoder {
+					println("add raw block length:", len(block.data))
+				}
+				hist.append(block.data)
+				do.b = block.data
+			case blockTypeCompressed:
+				if debugDecoder {
+					println("execute with history length:", len(hist.b), "window:", hist.windowSize)
+				}
+				hist.decoders.seqSize = block.async.seqSize
+				hist.decoders.literals = block.async.literals
+				do.err = block.executeSequences(&hist)
+				hasErr = do.err != nil
+				if debugDecoder && hasErr {
+					println("executeSequences returned:", do.err)
+				}
+				do.b = block.dst
+			}
+			if !hasErr {
+				decodedFrame += uint64(len(do.b))
+				if decodedFrame > fcs {
+					println("fcs exceeded", block.Last, fcs, decodedFrame)
+					do.err = ErrFrameSizeExceeded
+					hasErr = true
+				} else if block.Last && fcs != fcsUnknown && decodedFrame != fcs {
+					do.err = ErrFrameSizeMismatch
+					hasErr = true
+				} else {
+					if debugDecoder {
+						println("fcs ok", block.Last, fcs, decodedFrame)
+					}
+				}
+			}
+			output <- do
+		}
+		close(output)
+		frameHistCache = hist.b
+		wg.Done()
+		if debugDecoder {
+			println("decoder goroutines finished")
+		}
+	}()
+
+decodeStream:
+	for {
+		frame := d.frame
+		if debugDecoder {
+			println("New frame...")
+		}
+		var historySent bool
+		frame.history.reset()
+		err := frame.reset(&br)
+		if debugDecoder && err != nil {
+			println("Frame decoder returned", err)
+		}
+		if err == nil && frame.DictionaryID != nil {
+			dict, ok := d.dicts[*frame.DictionaryID]
+			if !ok {
+				err = ErrUnknownDictionary
+			} else {
+				frame.history.setDict(&dict)
+			}
+		}
+		if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
+			err = ErrDecoderSizeExceeded
+		}
+		if err != nil {
+			select {
+			case <-ctx.Done():
+			case dec := <-d.decoders:
+				dec.sendErr(err)
+				seqPrepare <- dec
+			}
+			break decodeStream
+		}
+
+		// Go through all blocks of the frame.
+		for {
+			var dec *blockDec
+			select {
+			case <-ctx.Done():
+				break decodeStream
+			case dec = <-d.decoders:
+				// Once we have a decoder, we MUST return it.
+			}
+			err := frame.next(dec)
+			if !historySent {
+				h := frame.history
+				if debugDecoder {
+					println("Alloc History:", h.allocFrameBuffer)
+				}
+				dec.async.newHist = &h
+				dec.async.fcs = frame.FrameContentSize
+				historySent = true
+			} else {
+				dec.async.newHist = nil
+			}
+			if debugDecoder && err != nil {
+				println("next block returned error:", err)
+			}
+			dec.err = err
+			dec.checkCRC = nil
+			if dec.Last && frame.HasCheckSum && err == nil {
+				crc, err := frame.rawInput.readSmall(4)
+				if err != nil {
+					println("CRC missing?", err)
+					dec.err = err
+				}
+				var tmp [4]byte
+				copy(tmp[:], crc)
+				dec.checkCRC = tmp[:]
+				if debugDecoder {
+					println("found crc to check:", dec.checkCRC)
+				}
+			}
+			err = dec.err
+			last := dec.Last
+			seqPrepare <- dec
+			if err != nil {
+				break decodeStream
+			}
+			if last {
+				break
+			}
+		}
+	}
+	close(seqPrepare)
+	wg.Wait()
+	d.frame.history.b = frameHistCache
 }
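Editorial note: the new startStreamDecoder above replaces the old per-frame goroutine handoff with a fixed three-stage channel pipeline (seqPrepare -> seqDecode -> seqExecute). A minimal, self-contained sketch of that pipeline shape follows; the job type, stage names, and counts here are illustrative stand-ins for *blockDec and the library's stages, not the library's own API.

	package main

	import (
		"fmt"
		"sync"
	)

	// job is a hypothetical stand-in for *blockDec: each stage reads it,
	// does its share of the work, and forwards it to the next stage.
	type job struct {
		id  int
		err error
	}

	func main() {
		const buffered = 4 // mirrors make(chan *blockDec, d.o.concurrent)

		prepare := make(chan *job, buffered)
		decode := make(chan *job, buffered)
		execute := make(chan *job, buffered)
		output := make(chan *job, buffered)

		// Stage 1: prepare (literal decoding happens here in the real code).
		go func() {
			for j := range prepare {
				decode <- j
			}
			close(decode) // closing cascades shutdown down the pipeline
		}()

		// Stage 2: decode (sequence decoding in the real code).
		go func() {
			for j := range decode {
				execute <- j
			}
			close(execute)
		}()

		// Stage 3: execute (applying sequences against history) feeds the consumer.
		var wg sync.WaitGroup
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := range execute {
				output <- j
			}
			close(output)
		}()

		// Producer: the frame reader pushes blocks in order.
		go func() {
			for i := 0; i < 8; i++ {
				prepare <- &job{id: i}
			}
			close(prepare)
		}()

		for j := range output {
			fmt.Println("block", j.id, "done, err:", j.err)
		}
		wg.Wait()
	}

Because each stage is a single goroutine draining a FIFO channel, blocks stay in frame order end to end while up to three blocks are in flight at once, which is the property the decoder relies on.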
vendor/github.com/klauspost/compress/zstd/decoder_options.go (24 changes, generated, vendored)

@@ -28,6 +28,9 @@ func (o *decoderOptions) setDefault() {
 		concurrent:    runtime.GOMAXPROCS(0),
 		maxWindowSize: MaxWindowSize,
 	}
+	if o.concurrent > 4 {
+		o.concurrent = 4
+	}
 	o.maxDecodedSize = 1 << 63
 }
 
@@ -37,16 +40,25 @@ func WithDecoderLowmem(b bool) DOption {
 	return func(o *decoderOptions) error { o.lowMem = b; return nil }
 }
 
-// WithDecoderConcurrency will set the concurrency,
-// meaning the maximum number of decoders to run concurrently.
-// The value supplied must be at least 1.
-// By default this will be set to GOMAXPROCS.
+// WithDecoderConcurrency sets the number of created decoders.
+// When decoding block with DecodeAll, this will limit the number
+// of possible concurrently running decodes.
+// When decoding streams, this will limit the number of
+// inflight blocks.
+// When decoding streams and setting maximum to 1,
+// no async decoding will be done.
+// When a value of 0 is provided GOMAXPROCS will be used.
+// By default this will be set to 4 or GOMAXPROCS, whatever is lower.
 func WithDecoderConcurrency(n int) DOption {
 	return func(o *decoderOptions) error {
-		if n <= 0 {
+		if n < 0 {
 			return errors.New("concurrency must be at least 1")
 		}
-		o.concurrent = n
+		if n == 0 {
+			o.concurrent = runtime.GOMAXPROCS(0)
+		} else {
+			o.concurrent = n
+		}
 		return nil
 	}
 }
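Editorial note: under the new semantics, 0 requests GOMAXPROCS workers, 1 forces the synchronous stream path, and the default is now capped at 4. A brief usage sketch using the package's public NewWriter/NewReader and this option; the payload is made up:

	package main

	import (
		"bytes"
		"fmt"
		"io"
		"log"

		"github.com/klauspost/compress/zstd"
	)

	func main() {
		// Produce a small zstd stream to decode.
		var buf bytes.Buffer
		enc, err := zstd.NewWriter(&buf)
		if err != nil {
			log.Fatal(err)
		}
		if _, err := enc.Write([]byte("hello, zstd")); err != nil {
			log.Fatal(err)
		}
		if err := enc.Close(); err != nil {
			log.Fatal(err)
		}

		// Concurrency 1 selects the synchronous stream decoder added above;
		// 0 would request runtime.GOMAXPROCS(0) workers instead.
		dec, err := zstd.NewReader(&buf, zstd.WithDecoderConcurrency(1))
		if err != nil {
			log.Fatal(err)
		}
		defer dec.Close()

		out, err := io.ReadAll(dec)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s\n", out)
	}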
vendor/github.com/klauspost/compress/zstd/encoder.go (66 changes, generated, vendored)

@@ -98,23 +98,25 @@ func (e *Encoder) Reset(w io.Writer) {
 	if cap(s.filling) == 0 {
 		s.filling = make([]byte, 0, e.o.blockSize)
 	}
-	if cap(s.current) == 0 {
-		s.current = make([]byte, 0, e.o.blockSize)
-	}
-	if cap(s.previous) == 0 {
-		s.previous = make([]byte, 0, e.o.blockSize)
+	if e.o.concurrent > 1 {
+		if cap(s.current) == 0 {
+			s.current = make([]byte, 0, e.o.blockSize)
+		}
+		if cap(s.previous) == 0 {
+			s.previous = make([]byte, 0, e.o.blockSize)
+		}
+		s.current = s.current[:0]
+		s.previous = s.previous[:0]
+		if s.writing == nil {
+			s.writing = &blockEnc{lowMem: e.o.lowMem}
+			s.writing.init()
+		}
+		s.writing.initNewEncode()
 	}
 	if s.encoder == nil {
 		s.encoder = e.o.encoder()
 	}
-	if s.writing == nil {
-		s.writing = &blockEnc{lowMem: e.o.lowMem}
-		s.writing.init()
-	}
-	s.writing.initNewEncode()
 	s.filling = s.filling[:0]
-	s.current = s.current[:0]
-	s.previous = s.previous[:0]
 	s.encoder.Reset(e.o.dict, false)
 	s.headerWritten = false
 	s.eofWritten = false

@@ -258,6 +260,46 @@ func (e *Encoder) nextBlock(final bool) error {
 		return s.err
 	}
 
+	// SYNC:
+	if e.o.concurrent == 1 {
+		src := s.filling
+		s.nInput += int64(len(s.filling))
+		if debugEncoder {
+			println("Adding sync block,", len(src), "bytes, final:", final)
+		}
+		enc := s.encoder
+		blk := enc.Block()
+		blk.reset(nil)
+		enc.Encode(blk, src)
+		blk.last = final
+		if final {
+			s.eofWritten = true
+		}
+
+		err := errIncompressible
+		// If we got the exact same number of literals as input,
+		// assume the literals cannot be compressed.
+		if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
+			err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+		}
+		switch err {
+		case errIncompressible:
+			if debugEncoder {
+				println("Storing incompressible block as raw")
+			}
+			blk.encodeRaw(src)
+			// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
+		case nil:
+		default:
+			s.err = err
+			return err
+		}
+		_, s.err = s.w.Write(blk.output)
+		s.nWritten += int64(len(blk.output))
+		s.filling = s.filling[:0]
+		return s.err
+	}
+
 	// Move blocks forward.
 	s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
 	s.nInput += int64(len(s.current))
vendor/github.com/klauspost/compress/zstd/encoder_options.go (1 change, generated, vendored)

@@ -76,6 +76,7 @@ func WithEncoderCRC(b bool) EOption {
 // WithEncoderConcurrency will set the concurrency,
 // meaning the maximum number of encoders to run concurrently.
 // The value supplied must be at least 1.
+// For streams, setting a value of 1 will disable async compression.
 // By default this will be set to GOMAXPROCS.
 func WithEncoderConcurrency(n int) EOption {
 	return func(o *encoderOptions) error {
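Editorial note: per the doc line added above, concurrency 1 makes the writer take the SYNC branch in nextBlock and compress each block inline, with no worker goroutines. A sketch using the public API (the data written is arbitrary):

	package main

	import (
		"bytes"
		"log"

		"github.com/klauspost/compress/zstd"
	)

	func main() {
		var buf bytes.Buffer

		// One encoder goroutine: blocks are compressed and written inline
		// instead of being handed to background workers.
		enc, err := zstd.NewWriter(&buf, zstd.WithEncoderConcurrency(1))
		if err != nil {
			log.Fatal(err)
		}
		if _, err := enc.Write(bytes.Repeat([]byte("abc"), 1024)); err != nil {
			log.Fatal(err)
		}
		if err := enc.Close(); err != nil {
			log.Fatal(err)
		}
		log.Printf("compressed to %d bytes", buf.Len())
	}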
vendor/github.com/klauspost/compress/zstd/framedec.go (203 changes, generated, vendored)

@@ -8,23 +8,17 @@ import (
 	"bytes"
 	"encoding/hex"
 	"errors"
-	"hash"
 	"io"
-	"sync"
 
 	"github.com/klauspost/compress/zstd/internal/xxhash"
 )
 
 type frameDec struct {
 	o decoderOptions
-	crc    hash.Hash64
-	offset int64
+	crc *xxhash.Digest
 
 	WindowSize uint64
 
-	// In order queue of blocks being decoded.
-	decoding chan *blockDec
-
 	// Frame history passed between blocks
 	history history
 
@@ -34,15 +28,10 @@ type frameDec struct {
 	bBuf byteBuf
 
 	FrameContentSize uint64
-	frameDone        sync.WaitGroup
 
 	DictionaryID  *uint32
 	HasCheckSum   bool
 	SingleSegment bool
-
-	// asyncRunning indicates whether the async routine processes input on 'decoding'.
-	asyncRunningMu sync.Mutex
-	asyncRunning   bool
 }
 
 const (

@@ -208,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error {
 	default:
 		fcsSize = 1 << v
 	}
-	d.FrameContentSize = 0
+	d.FrameContentSize = fcsUnknown
 	if fcsSize > 0 {
 		b, err := br.readSmall(fcsSize)
 		if err != nil {

@@ -229,9 +218,10 @@ func (d *frameDec) reset(br byteBuffer) error {
 			d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
 		}
 		if debugDecoder {
-			println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize)
+			println("Read FCS:", d.FrameContentSize)
 		}
 	}
 
 	// Move this to shared.
 	d.HasCheckSum = fhd&(1<<2) != 0
 	if d.HasCheckSum {

@@ -264,10 +254,16 @@ func (d *frameDec) reset(br byteBuffer) error {
 	}
 	d.history.windowSize = int(d.WindowSize)
 	if d.o.lowMem && d.history.windowSize < maxBlockSize {
-		d.history.maxSize = d.history.windowSize * 2
+		d.history.allocFrameBuffer = d.history.windowSize * 2
+		// TODO: Maybe use FrameContent size
 	} else {
-		d.history.maxSize = d.history.windowSize + maxBlockSize
+		d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
 	}
 
+	if debugDecoder {
+		println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum)
+	}
+
 	// history contains input - maybe we do something
 	d.rawInput = br
 	return nil

@@ -276,49 +272,18 @@ func (d *frameDec) reset(br byteBuffer) error {
 // next will start decoding the next block from stream.
 func (d *frameDec) next(block *blockDec) error {
 	if debugDecoder {
-		printf("decoding new block %p:%p", block, block.data)
+		println("decoding new block")
 	}
 	err := block.reset(d.rawInput, d.WindowSize)
 	if err != nil {
 		println("block error:", err)
 		// Signal the frame decoder we have a problem.
-		d.sendErr(block, err)
+		block.sendErr(err)
 		return err
 	}
-	block.input <- struct{}{}
-	if debugDecoder {
-		println("next block:", block)
-	}
-	d.asyncRunningMu.Lock()
-	defer d.asyncRunningMu.Unlock()
-	if !d.asyncRunning {
-		return nil
-	}
-	if block.Last {
-		// We indicate the frame is done by sending io.EOF
-		d.decoding <- block
-		return io.EOF
-	}
-	d.decoding <- block
 	return nil
 }
 
-// sendEOF will queue an error block on the frame.
-// This will cause the frame decoder to return when it encounters the block.
-// Returns true if the decoder was added.
-func (d *frameDec) sendErr(block *blockDec, err error) bool {
-	d.asyncRunningMu.Lock()
-	defer d.asyncRunningMu.Unlock()
-	if !d.asyncRunning {
-		return false
-	}
-
-	println("sending error", err.Error())
-	block.sendErr(err)
-	d.decoding <- block
-	return true
-}
-
 // checkCRC will check the checksum if the frame has one.
 // Will return ErrCRCMismatch if crc check failed, otherwise nil.
 func (d *frameDec) checkCRC() error {

@@ -340,7 +305,7 @@ func (d *frameDec) checkCRC() error {
 		return err
 	}
 
-	if !bytes.Equal(tmp[:], want) {
+	if !bytes.Equal(tmp[:], want) && !ignoreCRC {
 		if debugDecoder {
 			println("CRC Check Failed:", tmp[:], "!=", want)
 		}

@@ -352,131 +317,13 @@ func (d *frameDec) checkCRC() error {
 	return nil
 }
 
-func (d *frameDec) initAsync() {
-	if !d.o.lowMem && !d.SingleSegment {
-		// set max extra size history to 2MB.
-		d.history.maxSize = d.history.windowSize + maxBlockSize
-	}
-	// re-alloc if more than one extra block size.
-	if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize {
-		d.history.b = make([]byte, 0, d.history.maxSize)
-	}
-	if cap(d.history.b) < d.history.maxSize {
-		d.history.b = make([]byte, 0, d.history.maxSize)
-	}
-	if cap(d.decoding) < d.o.concurrent {
-		d.decoding = make(chan *blockDec, d.o.concurrent)
-	}
-	if debugDecoder {
-		h := d.history
-		printf("history init. len: %d, cap: %d", len(h.b), cap(h.b))
-	}
-	d.asyncRunningMu.Lock()
-	d.asyncRunning = true
-	d.asyncRunningMu.Unlock()
-}
-
-// startDecoder will start decoding blocks and write them to the writer.
-// The decoder will stop as soon as an error occurs or at end of frame.
-// When the frame has finished decoding the *bufio.Reader
-// containing the remaining input will be sent on frameDec.frameDone.
-func (d *frameDec) startDecoder(output chan decodeOutput) {
-	written := int64(0)
-
-	defer func() {
-		d.asyncRunningMu.Lock()
-		d.asyncRunning = false
-		d.asyncRunningMu.Unlock()
-
-		// Drain the currently decoding.
-		d.history.error = true
-	flushdone:
-		for {
-			select {
-			case b := <-d.decoding:
-				b.history <- &d.history
-				output <- <-b.result
-			default:
-				break flushdone
-			}
-		}
-		println("frame decoder done, signalling done")
-		d.frameDone.Done()
-	}()
-	// Get decoder for first block.
-	block := <-d.decoding
-	block.history <- &d.history
-	for {
-		var next *blockDec
-		// Get result
-		r := <-block.result
-		if r.err != nil {
-			println("Result contained error", r.err)
-			output <- r
-			return
-		}
-		if debugDecoder {
-			println("got result, from ", d.offset, "to", d.offset+int64(len(r.b)))
-			d.offset += int64(len(r.b))
-		}
-		if !block.Last {
-			// Send history to next block
-			select {
-			case next = <-d.decoding:
-				if debugDecoder {
-					println("Sending ", len(d.history.b), "bytes as history")
-				}
-				next.history <- &d.history
-			default:
-				// Wait until we have sent the block, so
-				// other decoders can potentially get the decoder.
-				next = nil
-			}
-		}
-
-		// Add checksum, async to decoding.
-		if d.HasCheckSum {
-			n, err := d.crc.Write(r.b)
-			if err != nil {
-				r.err = err
-				if n != len(r.b) {
-					r.err = io.ErrShortWrite
-				}
-				output <- r
-				return
-			}
-		}
-		written += int64(len(r.b))
-		if d.SingleSegment && uint64(written) > d.FrameContentSize {
-			println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize)
-			r.err = ErrFrameSizeExceeded
-			output <- r
-			return
-		}
-		if block.Last {
-			r.err = d.checkCRC()
-			output <- r
-			return
-		}
-		output <- r
-		if next == nil {
-			// There was no decoder available, we wait for one now that we have sent to the writer.
-			if debugDecoder {
-				println("Sending ", len(d.history.b), " bytes as history")
-			}
-			next = <-d.decoding
-			next.history <- &d.history
-		}
-		block = next
-	}
-}
-
 // runDecoder will create a sync decoder that will decode a block of data.
 func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 	saved := d.history.b
 
 	// We use the history for output to avoid copying it.
 	d.history.b = dst
+	d.history.ignoreBuffer = len(dst)
 	// Store input length, so we only check new data.
 	crcStart := len(dst)
 	var err error

@@ -489,22 +336,30 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 			println("next block:", dec)
 		}
 		err = dec.decodeBuf(&d.history)
-		if err != nil || dec.Last {
+		if err != nil {
 			break
 		}
 		if uint64(len(d.history.b)) > d.o.maxDecodedSize {
 			err = ErrDecoderSizeExceeded
 			break
 		}
-		if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
-			println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
+		if uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
+			println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize)
 			err = ErrFrameSizeExceeded
 			break
 		}
+		if dec.Last {
+			break
+		}
+		if debugDecoder {
+			println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
+		}
 	}
 	dst = d.history.b
 	if err == nil {
-		if d.HasCheckSum {
+		if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
+			err = ErrFrameSizeMismatch
+		} else if d.HasCheckSum {
 			var n int
 			n, err = d.crc.Write(dst[crcStart:])
 			if err == nil {
vendor/github.com/klauspost/compress/zstd/fuzz.go (new file, 11 changes, generated, vendored)

@@ -0,0 +1,11 @@
+//go:build ignorecrc
+// +build ignorecrc
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+// ignoreCRC can be used for fuzz testing to ignore CRC values...
+const ignoreCRC = true

vendor/github.com/klauspost/compress/zstd/fuzz_none.go (new file, 11 changes, generated, vendored)

@@ -0,0 +1,11 @@
+//go:build !ignorecrc
+// +build !ignorecrc
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+// Based on work by Yann Collet, released under BSD License.
+
+package zstd
+
+// ignoreCRC can be used for fuzz testing to ignore CRC values...
+const ignoreCRC = false
vendor/github.com/klauspost/compress/zstd/history.go (46 changes, generated, vendored)

@@ -10,20 +10,31 @@ import (
 
 // history contains the information transferred between blocks.
 type history struct {
-	b             []byte
+	// Literal decompression
 	huffTree *huff0.Scratch
-	recentOffsets [3]int
+
+	// Sequence decompression
 	decoders      sequenceDecs
-	windowSize    int
-	maxSize       int
-	error         bool
-	dict          *dict
+	recentOffsets [3]int
+
+	// History buffer...
+	b []byte
+
+	// ignoreBuffer is meant to ignore a number of bytes
+	// when checking for matches in history
+	ignoreBuffer int
+
+	windowSize       int
+	allocFrameBuffer int // needed?
+	error            bool
+	dict             *dict
 }
 
 // reset will reset the history to initial state of a frame.
 // The history must already have been initialized to the desired size.
 func (h *history) reset() {
 	h.b = h.b[:0]
+	h.ignoreBuffer = 0
 	h.error = false
 	h.recentOffsets = [3]int{1, 4, 8}
 	if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {

@@ -35,7 +46,7 @@ func (h *history) reset() {
 	if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
 		fseDecoderPool.Put(f)
 	}
-	h.decoders = sequenceDecs{}
+	h.decoders = sequenceDecs{br: h.decoders.br}
 	if h.huffTree != nil {
 		if h.dict == nil || h.dict.litEnc != h.huffTree {
 			huffDecoderPool.Put(h.huffTree)

@@ -54,6 +65,7 @@ func (h *history) setDict(dict *dict) {
 	h.decoders.litLengths = dict.llDec
 	h.decoders.offsets = dict.ofDec
 	h.decoders.matchLengths = dict.mlDec
+	h.decoders.dict = dict.content
 	h.recentOffsets = dict.offsets
 	h.huffTree = dict.litEnc
 }

@@ -83,6 +95,24 @@ func (h *history) append(b []byte) {
 	copy(h.b[h.windowSize-len(b):], b)
 }
 
+// ensureBlock will ensure there is space for at least one block...
+func (h *history) ensureBlock() {
+	if cap(h.b) < h.allocFrameBuffer {
+		h.b = make([]byte, 0, h.allocFrameBuffer)
+		return
+	}
+
+	avail := cap(h.b) - len(h.b)
+	if avail >= h.windowSize || avail > maxCompressedBlockSize {
+		return
+	}
+	// Move data down so we only have window size left.
+	// We know we have less than window size in b at this point.
+	discard := len(h.b) - h.windowSize
+	copy(h.b, h.b[discard:])
+	h.b = h.b[:h.windowSize]
+}
+
 // append bytes to history without ever discarding anything.
 func (h *history) appendKeep(b []byte) {
 	h.b = append(h.b, b...)
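Editorial note: ensureBlock above keeps the frame buffer from growing without bound; once less than a window of free space remains, everything older than windowSize is slid down to the front of the buffer. A standalone sketch of the same sliding-window idea follows; the function and variable names are illustrative, not the library's:

	package main

	import "fmt"

	// slideWindow keeps only the most recent window bytes of history in buf,
	// moving them to the front so the free capacity behind them can be reused.
	func slideWindow(buf []byte, window int) []byte {
		if len(buf) <= window {
			return buf // nothing to discard yet
		}
		discard := len(buf) - window
		copy(buf, buf[discard:])
		return buf[:window]
	}

	func main() {
		buf := make([]byte, 0, 16)
		for i := 0; i < 40; i++ {
			buf = append(buf, byte('a'+i%26))
			buf = slideWindow(buf, 8) // pretend the match window is 8 bytes
		}
		fmt.Printf("history tail: %q\n", buf) // "ghijklmn"
	}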
349
vendor/github.com/klauspost/compress/zstd/seqdec.go
generated
vendored
349
vendor/github.com/klauspost/compress/zstd/seqdec.go
generated
vendored
|
@ -20,6 +20,10 @@ type seq struct {
|
||||||
llCode, mlCode, ofCode uint8
|
llCode, mlCode, ofCode uint8
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type seqVals struct {
|
||||||
|
ll, ml, mo int
|
||||||
|
}
|
||||||
|
|
||||||
func (s seq) String() string {
|
func (s seq) String() string {
|
||||||
if s.offset <= 3 {
|
if s.offset <= 3 {
|
||||||
if s.offset == 0 {
|
if s.offset == 0 {
|
||||||
|
@ -61,16 +65,18 @@ type sequenceDecs struct {
|
||||||
offsets sequenceDec
|
offsets sequenceDec
|
||||||
matchLengths sequenceDec
|
matchLengths sequenceDec
|
||||||
prevOffset [3]int
|
prevOffset [3]int
|
||||||
hist []byte
|
|
||||||
dict []byte
|
dict []byte
|
||||||
literals []byte
|
literals []byte
|
||||||
out []byte
|
out []byte
|
||||||
|
nSeqs int
|
||||||
|
br *bitReader
|
||||||
|
seqSize int
|
||||||
windowSize int
|
windowSize int
|
||||||
maxBits uint8
|
maxBits uint8
|
||||||
}
|
}
|
||||||
|
|
||||||
// initialize all 3 decoders from the stream input.
|
// initialize all 3 decoders from the stream input.
|
||||||
func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error {
|
func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error {
|
||||||
if err := s.litLengths.init(br); err != nil {
|
if err := s.litLengths.init(br); err != nil {
|
||||||
return errors.New("litLengths:" + err.Error())
|
return errors.New("litLengths:" + err.Error())
|
||||||
}
|
}
|
||||||
|
@ -80,8 +86,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
|
||||||
if err := s.matchLengths.init(br); err != nil {
|
if err := s.matchLengths.init(br); err != nil {
|
||||||
return errors.New("matchLengths:" + err.Error())
|
return errors.New("matchLengths:" + err.Error())
|
||||||
}
|
}
|
||||||
s.literals = literals
|
s.br = br
|
||||||
s.hist = hist.b
|
|
||||||
s.prevOffset = hist.recentOffsets
|
s.prevOffset = hist.recentOffsets
|
||||||
s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
|
s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
|
||||||
s.windowSize = hist.windowSize
|
s.windowSize = hist.windowSize
|
||||||
|
@ -94,11 +99,261 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
|
||||||
}
|
}
|
||||||
|
|
||||||
// decode sequences from the stream with the provided history.
|
// decode sequences from the stream with the provided history.
|
||||||
func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
|
func (s *sequenceDecs) decode(seqs []seqVals) error {
|
||||||
|
br := s.br
|
||||||
|
|
||||||
|
// Grab full sizes tables, to avoid bounds checks.
|
||||||
|
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
|
||||||
|
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
|
||||||
|
s.seqSize = 0
|
||||||
|
litRemain := len(s.literals)
|
||||||
|
maxBlockSize := maxCompressedBlockSize
|
||||||
|
if s.windowSize < maxBlockSize {
|
||||||
|
maxBlockSize = s.windowSize
|
||||||
|
}
|
||||||
|
for i := range seqs {
|
||||||
|
var ll, mo, ml int
|
||||||
|
if br.off > 4+((maxOffsetBits+16+16)>>3) {
|
||||||
|
// inlined function:
|
||||||
|
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
|
||||||
|
|
||||||
|
// Final will not read from stream.
|
||||||
|
var llB, mlB, moB uint8
|
||||||
|
ll, llB = llState.final()
|
||||||
|
ml, mlB = mlState.final()
|
||||||
|
mo, moB = ofState.final()
|
||||||
|
|
||||||
|
// extra bits are stored in reverse order.
|
||||||
|
br.fillFast()
|
||||||
|
mo += br.getBits(moB)
|
||||||
|
if s.maxBits > 32 {
|
||||||
|
br.fillFast()
|
||||||
|
}
|
||||||
|
ml += br.getBits(mlB)
|
||||||
|
ll += br.getBits(llB)
|
||||||
|
|
||||||
|
if moB > 1 {
|
||||||
|
s.prevOffset[2] = s.prevOffset[1]
|
||||||
|
s.prevOffset[1] = s.prevOffset[0]
|
||||||
|
s.prevOffset[0] = mo
|
||||||
|
} else {
|
||||||
|
// mo = s.adjustOffset(mo, ll, moB)
|
||||||
|
// Inlined for rather big speedup
|
||||||
|
if ll == 0 {
|
||||||
|
// There is an exception though, when current sequence's literals_length = 0.
|
||||||
|
// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
|
||||||
|
// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
|
||||||
|
mo++
|
||||||
|
}
|
||||||
|
|
||||||
|
if mo == 0 {
|
||||||
|
mo = s.prevOffset[0]
|
||||||
|
} else {
|
||||||
|
var temp int
|
||||||
|
if mo == 3 {
|
||||||
|
temp = s.prevOffset[0] - 1
|
||||||
|
} else {
|
||||||
|
temp = s.prevOffset[mo]
|
||||||
|
}
|
||||||
|
|
||||||
|
if temp == 0 {
|
||||||
|
// 0 is not valid; input is corrupted; force offset to 1
|
||||||
|
println("WARNING: temp was 0")
|
||||||
|
temp = 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if mo != 1 {
|
||||||
|
s.prevOffset[2] = s.prevOffset[1]
|
||||||
|
}
|
||||||
|
s.prevOffset[1] = s.prevOffset[0]
|
||||||
|
s.prevOffset[0] = temp
|
||||||
|
mo = temp
|
||||||
|
}
|
||||||
|
}
|
||||||
|
br.fillFast()
|
||||||
|
} else {
|
||||||
|
if br.overread() {
|
||||||
|
if debugDecoder {
|
||||||
|
printf("reading sequence %d, exceeded available data\n", i)
|
||||||
|
}
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
ll, mo, ml = s.next(br, llState, mlState, ofState)
|
||||||
|
br.fill()
|
||||||
|
}
|
||||||
|
|
||||||
|
if debugSequences {
|
||||||
|
println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
|
||||||
|
}
|
||||||
|
// Evaluate.
|
||||||
|
// We might be doing this async, so do it early.
|
||||||
|
if mo == 0 && ml > 0 {
|
||||||
|
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
|
||||||
|
}
|
||||||
|
if ml > maxMatchLen {
|
||||||
|
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
|
||||||
|
}
|
||||||
|
s.seqSize += ll + ml
|
||||||
|
if s.seqSize > maxBlockSize {
|
||||||
|
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
|
||||||
|
}
|
||||||
|
litRemain -= ll
|
||||||
|
if litRemain < 0 {
|
||||||
|
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
|
||||||
|
}
|
||||||
|
seqs[i] = seqVals{
|
||||||
|
ll: ll,
|
||||||
|
ml: ml,
|
||||||
|
mo: mo,
|
||||||
|
}
|
||||||
|
if i == len(seqs)-1 {
|
||||||
|
// This is the last sequence, so we shouldn't update state.
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
// Manually inlined, ~ 5-20% faster
|
||||||
|
// Update all 3 states at once. Approx 20% faster.
|
||||||
|
nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
|
||||||
|
if nBits == 0 {
|
||||||
|
llState = llTable[llState.newState()&maxTableMask]
|
||||||
|
mlState = mlTable[mlState.newState()&maxTableMask]
|
||||||
|
ofState = ofTable[ofState.newState()&maxTableMask]
|
||||||
|
} else {
|
||||||
|
bits := br.get32BitsFast(nBits)
|
||||||
|
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
|
||||||
|
llState = llTable[(llState.newState()+lowBits)&maxTableMask]
|
||||||
|
|
||||||
|
lowBits = uint16(bits >> (ofState.nbBits() & 31))
|
||||||
|
lowBits &= bitMask[mlState.nbBits()&15]
|
||||||
|
mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
|
||||||
|
|
||||||
|
lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
|
||||||
|
ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.seqSize += litRemain
|
||||||
|
if s.seqSize > maxBlockSize {
|
||||||
|
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
|
||||||
|
}
|
||||||
|
err := br.close()
|
||||||
|
if err != nil {
|
||||||
|
printf("Closing sequences: %v, %+v\n", err, *br)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// execute will execute the decoded sequence with the provided history.
|
||||||
|
// The sequence must be evaluated before being sent.
|
||||||
|
func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
|
||||||
|
// Ensure we have enough output size...
|
||||||
|
if len(s.out)+s.seqSize > cap(s.out) {
|
||||||
|
addBytes := s.seqSize + len(s.out)
|
||||||
|
s.out = append(s.out, make([]byte, addBytes)...)
|
||||||
|
s.out = s.out[:len(s.out)-addBytes]
|
||||||
|
}
|
||||||
|
|
||||||
|
if debugDecoder {
|
||||||
|
printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
var t = len(s.out)
|
||||||
|
out := s.out[:t+s.seqSize]
|
||||||
|
|
||||||
|
for _, seq := range seqs {
|
||||||
|
// Add literals
|
||||||
|
copy(out[t:], s.literals[:seq.ll])
|
||||||
|
t += seq.ll
|
||||||
|
s.literals = s.literals[seq.ll:]
|
||||||
|
|
||||||
|
// Copy from dictionary...
|
||||||
|
if seq.mo > t+len(hist) || seq.mo > s.windowSize {
|
||||||
|
if len(s.dict) == 0 {
|
||||||
|
return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
|
||||||
|
}
|
||||||
|
|
||||||
|
// we may be in dictionary.
|
||||||
|
dictO := len(s.dict) - (seq.mo - (t + len(hist)))
|
||||||
|
if dictO < 0 || dictO >= len(s.dict) {
|
||||||
|
return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
|
||||||
|
}
|
||||||
|
end := dictO + seq.ml
|
||||||
|
if end > len(s.dict) {
|
||||||
|
n := len(s.dict) - dictO
|
||||||
|
copy(out[t:], s.dict[dictO:])
|
||||||
|
t += n
|
||||||
|
seq.ml -= n
|
||||||
|
} else {
|
||||||
|
copy(out[t:], s.dict[dictO:end])
|
||||||
|
t += end - dictO
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy from history.
|
||||||
|
if v := seq.mo - t; v > 0 {
|
||||||
|
// v is the start position in history from end.
|
||||||
|
start := len(hist) - v
|
||||||
|
if seq.ml > v {
|
||||||
|
// Some goes into current block.
|
||||||
|
// Copy remainder of history
|
||||||
|
copy(out[t:], hist[start:])
|
||||||
|
t += v
|
||||||
|
seq.ml -= v
|
||||||
|
} else {
|
||||||
|
copy(out[t:], hist[start:start+seq.ml])
|
||||||
|
t += seq.ml
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// We must be in current buffer now
|
||||||
|
if seq.ml > 0 {
|
||||||
|
start := t - seq.mo
|
||||||
|
if seq.ml <= t-start {
|
||||||
|
// No overlap
|
||||||
|
copy(out[t:], out[start:start+seq.ml])
|
||||||
|
t += seq.ml
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
// Overlapping copy
|
||||||
|
// Extend destination slice and copy one byte at the time.
|
||||||
|
src := out[start : start+seq.ml]
|
||||||
|
dst := out[t:]
|
||||||
|
dst = dst[:len(src)]
|
||||||
|
t += len(src)
|
||||||
|
// Destination is the space we just added.
|
||||||
|
for i := range src {
|
||||||
|
dst[i] = src[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Add final literals
|
||||||
|
copy(out[t:], s.literals)
|
||||||
|
if debugDecoder {
|
||||||
|
t += len(s.literals)
|
||||||
|
if t != len(out) {
|
||||||
|
panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
s.out = out
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// decode sequences from the stream with the provided history.
|
||||||
|
func (s *sequenceDecs) decodeSync(history *history) error {
|
||||||
|
br := s.br
|
||||||
|
seqs := s.nSeqs
|
||||||
startSize := len(s.out)
|
startSize := len(s.out)
|
||||||
// Grab full sizes tables, to avoid bounds checks.
|
// Grab full sizes tables, to avoid bounds checks.
|
||||||
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
|
llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
|
||||||
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
|
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
|
||||||
|
hist := history.b[history.ignoreBuffer:]
|
||||||
|
out := s.out
|
||||||
|
maxBlockSize := maxCompressedBlockSize
|
||||||
|
if s.windowSize < maxBlockSize {
|
||||||
|
maxBlockSize = s.windowSize
|
||||||
|
}
|
||||||
|
|
||||||
for i := seqs - 1; i >= 0; i-- {
|
for i := seqs - 1; i >= 0; i-- {
|
||||||
if br.overread() {
|
if br.overread() {
|
||||||
|
@ -151,7 +406,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
|
||||||
|
|
||||||
if temp == 0 {
|
if temp == 0 {
|
||||||
// 0 is not valid; input is corrupted; force offset to 1
|
// 0 is not valid; input is corrupted; force offset to 1
|
||||||
println("temp was 0")
|
println("WARNING: temp was 0")
|
||||||
temp = 1
|
temp = 1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -176,51 +431,49 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
|
||||||
if ll > len(s.literals) {
|
if ll > len(s.literals) {
|
||||||
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
|
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
|
||||||
}
|
}
|
||||||
size := ll + ml + len(s.out)
|
size := ll + ml + len(out)
|
||||||
if size-startSize > maxBlockSize {
|
if size-startSize > maxBlockSize {
|
||||||
return fmt.Errorf("output (%d) bigger than max block size", size)
|
return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
|
||||||
}
|
}
|
||||||
if size > cap(s.out) {
|
if size > cap(out) {
|
||||||
// Not enough size, which can happen under high volume block streaming conditions
|
// Not enough size, which can happen under high volume block streaming conditions
|
||||||
// but could be if destination slice is too small for sync operations.
|
// but could be if destination slice is too small for sync operations.
|
||||||
// over-allocating here can create a large amount of GC pressure so we try to keep
|
// over-allocating here can create a large amount of GC pressure so we try to keep
|
||||||
// it as contained as possible
|
// it as contained as possible
|
||||||
used := len(s.out) - startSize
|
used := len(out) - startSize
|
||||||
addBytes := 256 + ll + ml + used>>2
|
addBytes := 256 + ll + ml + used>>2
|
||||||
// Clamp to max block size.
|
// Clamp to max block size.
|
||||||
if used+addBytes > maxBlockSize {
|
if used+addBytes > maxBlockSize {
|
||||||
addBytes = maxBlockSize - used
|
addBytes = maxBlockSize - used
|
||||||
}
|
}
|
||||||
s.out = append(s.out, make([]byte, addBytes)...)
|
out = append(out, make([]byte, addBytes)...)
|
||||||
s.out = s.out[:len(s.out)-addBytes]
|
out = out[:len(out)-addBytes]
|
||||||
}
|
}
|
||||||
if ml > maxMatchLen {
|
if ml > maxMatchLen {
|
||||||
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
|
return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add literals
|
// Add literals
|
||||||
s.out = append(s.out, s.literals[:ll]...)
|
out = append(out, s.literals[:ll]...)
|
||||||
s.literals = s.literals[ll:]
|
s.literals = s.literals[ll:]
|
||||||
out := s.out
|
|
||||||
|
|
||||||
if mo == 0 && ml > 0 {
|
if mo == 0 && ml > 0 {
|
||||||
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
|
return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
|
||||||
}
|
}
|
||||||
|
|
||||||
if mo > len(s.out)+len(hist) || mo > s.windowSize {
|
if mo > len(out)+len(hist) || mo > s.windowSize {
|
||||||
if len(s.dict) == 0 {
|
if len(s.dict) == 0 {
|
||||||
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
|
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
|
||||||
}
|
}
|
||||||
|
|
||||||
// we may be in dictionary.
|
// we may be in dictionary.
|
||||||
dictO := len(s.dict) - (mo - (len(s.out) + len(hist)))
|
dictO := len(s.dict) - (mo - (len(out) + len(hist)))
|
||||||
if dictO < 0 || dictO >= len(s.dict) {
|
if dictO < 0 || dictO >= len(s.dict) {
|
||||||
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
|
return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
|
||||||
}
|
}
|
||||||
 				end := dictO + ml
 				if end > len(s.dict) {
 					out = append(out, s.dict[dictO:]...)
-					mo -= len(s.dict) - dictO
 					ml -= len(s.dict) - dictO
 				} else {
 					out = append(out, s.dict[dictO:end]...)

@@ -231,26 +484,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {

 		// Copy from history.
 		// TODO: Blocks without history could be made to ignore this completely.
-		if v := mo - len(s.out); v > 0 {
+		if v := mo - len(out); v > 0 {
 			// v is the start position in history from end.
-			start := len(s.hist) - v
+			start := len(hist) - v
 			if ml > v {
 				// Some goes into current block.
 				// Copy remainder of history
-				out = append(out, s.hist[start:]...)
-				mo -= v
+				out = append(out, hist[start:]...)
 				ml -= v
 			} else {
-				out = append(out, s.hist[start:start+ml]...)
+				out = append(out, hist[start:start+ml]...)
 				ml = 0
 			}
 		}
 		// We must be in current buffer now
 		if ml > 0 {
-			start := len(s.out) - mo
-			if ml <= len(s.out)-start {
+			start := len(out) - mo
+			if ml <= len(out)-start {
 				// No overlap
-				out = append(out, s.out[start:start+ml]...)
+				out = append(out, out[start:start+ml]...)
 			} else {
 				// Overlapping copy
 				// Extend destination slice and copy one byte at the time.

@@ -264,7 +516,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
 			}
 		}
 	}
-	s.out = out
 	if i == 0 {
 		// This is the last sequence, so we shouldn't update state.
 		break

@@ -291,9 +542,14 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
 		}
 	}

+	// Check if space for literals
+	if len(s.literals)+len(s.out)-startSize > maxBlockSize {
+		return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
+	}
+
 	// Add final literals
-	s.out = append(s.out, s.literals...)
-	return nil
+	s.out = append(out, s.literals...)
+	return br.close()
 }

 // update states, at least 27 bits must be available.

@@ -457,36 +713,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
 	s.prevOffset[0] = temp
 	return temp
 }
-
-// mergeHistory will merge history.
-func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) {
-	for i := uint(0); i < 3; i++ {
-		var sNew, sHist *sequenceDec
-		switch i {
-		default:
-			// same as "case 0":
-			sNew = &s.litLengths
-			sHist = &hist.litLengths
-		case 1:
-			sNew = &s.offsets
-			sHist = &hist.offsets
-		case 2:
-			sNew = &s.matchLengths
-			sHist = &hist.matchLengths
-		}
-		if sNew.repeat {
-			if sHist.fse == nil {
-				return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i)
-			}
-			continue
-		}
-		if sNew.fse == nil {
-			return nil, fmt.Errorf("sequence stream %d, no fse found", i)
-		}
-		if sHist.fse != nil && !sHist.fse.preDefined {
-			fseDecoderPool.Put(sHist.fse)
-		}
-		sHist.fse = sNew.fse
-	}
-	return hist, nil
-}
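For reference, a minimal standalone sketch (not the vendored implementation; appendMatch is a hypothetical name) of the overlapping-copy case this hunk touches: when the match offset is smaller than the match length, the source and destination ranges overlap, so the copy must proceed one byte at a time instead of a single append.

package main

import "fmt"

// appendMatch copies ml bytes starting mo bytes back from the end of out.
// When mo < ml, append(out, out[start:start+ml]...) would read bytes that
// have not been written yet, so we extend out one byte at a time.
func appendMatch(out []byte, mo, ml int) []byte {
	start := len(out) - mo
	if ml <= len(out)-start {
		// No overlap: a single append is safe.
		return append(out, out[start:start+ml]...)
	}
	// Overlapping copy: each appended byte may itself be a source byte.
	for i := 0; i < ml; i++ {
		out = append(out, out[start+i])
	}
	return out
}

func main() {
	out := []byte("ab")
	// Offset 2, length 6: the two-byte pattern repeats into the output.
	fmt.Println(string(appendMatch(out, 2, 6))) // abababab
}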
18  vendor/github.com/klauspost/compress/zstd/zip.go  generated  vendored

@@ -20,7 +20,7 @@ const ZipMethodPKWare = 20

 var zipReaderPool sync.Pool

-// newZipReader cannot be used since we would leak goroutines...
+// newZipReader creates a pooled zip decompressor.
 func newZipReader(r io.Reader) io.ReadCloser {
 	dec, ok := zipReaderPool.Get().(*Decoder)
 	if ok {

@@ -44,10 +44,14 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 	if r.dec == nil {
-		return 0, errors.New("Read after Close")
+		return 0, errors.New("read after close or EOF")
 	}
 	dec, err := r.dec.Read(p)
+	if err == io.EOF {
+		err = r.dec.Reset(nil)
+		zipReaderPool.Put(r.dec)
+		r.dec = nil
+	}
 	return dec, err
 }

@@ -112,11 +116,5 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
 // ZipDecompressor returns a decompressor that can be registered with zip libraries.
 // See ZipCompressor for example.
 func ZipDecompressor() func(r io.Reader) io.ReadCloser {
-	return func(r io.Reader) io.ReadCloser {
-		d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
-		if err != nil {
-			panic(err)
-		}
-		return d.IOReadCloser()
-	}
+	return newZipReader
 }
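A minimal sketch of the pooling pattern the hunk above adopts (all names here are illustrative, not the vendored ones): once the wrapped reader hits io.EOF, the underlying decoder is reset to drop its source reference and returned to a sync.Pool, so later readers reuse it instead of allocating a new one.

package main

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"strings"
	"sync"
)

// bufPool stands in for the vendored decoder pool.
var bufPool = sync.Pool{New: func() interface{} { return bufio.NewReader(nil) }}

// pooledReader returns its bufio.Reader to the pool on EOF, after which
// further reads report an error instead of touching the recycled reader.
type pooledReader struct {
	mu sync.Mutex
	br *bufio.Reader
}

func newPooledReader(r io.Reader) *pooledReader {
	br := bufPool.Get().(*bufio.Reader)
	br.Reset(r)
	return &pooledReader{br: br}
}

func (p *pooledReader) Read(b []byte) (int, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.br == nil {
		return 0, errors.New("read after close or EOF")
	}
	n, err := p.br.Read(b)
	if err == io.EOF {
		p.br.Reset(nil) // drop the reference to the source
		bufPool.Put(p.br)
		p.br = nil
	}
	return n, err
}

func main() {
	r := newPooledReader(strings.NewReader("hello"))
	data, err := io.ReadAll(r)
	fmt.Println(string(data), err) // hello <nil>
	_, err = r.Read(make([]byte, 1))
	fmt.Println(err) // read after close or EOF
}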
11  vendor/github.com/klauspost/compress/zstd/zstd.go  generated  vendored

@@ -39,6 +39,9 @@ const zstdMinMatch = 3
 // Reset the buffer offset when reaching this.
 const bufferReset = math.MaxInt32 - MaxWindowSize

+// fcsUnknown is used for unknown frame content size.
+const fcsUnknown = math.MaxUint64
+
 var (
 	// ErrReservedBlockType is returned when a reserved block type is found.
 	// Typically this indicates wrong or corrupted input.

@@ -52,6 +55,10 @@ var (
 	// Typically returned on invalid input.
 	ErrBlockTooSmall = errors.New("block too small")

+	// ErrUnexpectedBlockSize is returned when a block has unexpected size.
+	// Typically returned on invalid input.
+	ErrUnexpectedBlockSize = errors.New("unexpected block size")
+
 	// ErrMagicMismatch is returned when a "magic" number isn't what is expected.
 	// Typically this indicates wrong or corrupted input.
 	ErrMagicMismatch = errors.New("invalid input: magic number mismatch")

@@ -75,6 +82,10 @@ var (
 	// This is only returned if SingleSegment is specified on the frame.
 	ErrFrameSizeExceeded = errors.New("frame size exceeded")

+	// ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size.
+	// This is only returned if SingleSegment is specified on the frame.
+	ErrFrameSizeMismatch = errors.New("frame size does not match size on stream")
+
 	// ErrCRCMismatch is returned if CRC mismatches.
 	ErrCRCMismatch = errors.New("CRC check failed")
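The new sentinel errors can be matched by callers in the usual Go way. A hedged sketch (the input bytes here are deliberately invalid, and the exact sentinel surfaced depends on the input; garbage that fails the magic-number check should yield ErrMagicMismatch):

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Feed non-zstd bytes to the decoder; decoding should fail with one of
	// the package's sentinel errors.
	dec, err := zstd.NewReader(bytes.NewReader([]byte("not zstd data")))
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	_, err = io.ReadAll(dec)
	fmt.Println(errors.Is(err, zstd.ErrMagicMismatch)) // expected: true for this input
}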
2  vendor/github.com/urfave/cli/v2/.gitignore  generated  vendored

@@ -5,3 +5,5 @@ vendor
 .idea
 internal/*/built-example
 coverage.txt
+
+*.exe
2  vendor/github.com/urfave/cli/v2/README.md  generated  vendored

@@ -1,7 +1,7 @@
 cli
 ===

-[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://godoc.org/github.com/urfave/cli)
+[![GoDoc](https://godoc.org/github.com/urfave/cli?status.svg)](https://pkg.go.dev/github.com/urfave/cli/v2)
 [![codebeat](https://codebeat.co/badges/0a8f30aa-f975-404b-b878-5fab3ae1cc5f)](https://codebeat.co/projects/github-com-urfave-cli)
 [![Go Report Card](https://goreportcard.com/badge/urfave/cli)](https://goreportcard.com/report/urfave/cli)
 [![codecov](https://codecov.io/gh/urfave/cli/branch/master/graph/badge.svg)](https://codecov.io/gh/urfave/cli)
8  vendor/github.com/urfave/cli/v2/app.go  generated  vendored

@@ -140,7 +140,7 @@ func (a *App) Setup() {
 	}

 	if a.HelpName == "" {
-		a.HelpName = filepath.Base(os.Args[0])
+		a.HelpName = a.Name
 	}

 	if a.Usage == "" {

@@ -278,7 +278,7 @@ func (a *App) RunContext(ctx context.Context, arguments []string) (err error) {
 		return nil
 	}

-	cerr := checkRequiredFlags(a.Flags, context)
+	cerr := context.checkRequiredFlags(a.Flags)
 	if cerr != nil {
 		_ = ShowAppHelp(context)
 		return cerr

@@ -328,7 +328,7 @@ func (a *App) RunContext(ctx context.Context, arguments []string) (err error) {
 // RunAndExitOnError calls .Run() and exits non-zero if an error was returned
 //
 // Deprecated: instead you should return an error that fulfills cli.ExitCoder
-// to cli.App.Run. This will cause the application to exit with the given eror
+// to cli.App.Run. This will cause the application to exit with the given error
 // code in the cli.ExitCoder
 func (a *App) RunAndExitOnError() {
 	if err := a.Run(os.Args); err != nil {

@@ -397,7 +397,7 @@ func (a *App) RunAsSubcommand(ctx *Context) (err error) {
 		}
 	}

-	cerr := checkRequiredFlags(a.Flags, context)
+	cerr := context.checkRequiredFlags(a.Flags)
 	if cerr != nil {
 		_ = ShowSubcommandHelp(context)
 		return cerr
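checkRequiredFlags is now a *Context method, but the caller-visible behaviour is unchanged: a missing required flag aborts the run with a "Required flag ... not set" error and prints help. A small usage sketch against the urfave/cli/v2 public API:

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "greet",
		Flags: []cli.Flag{
			// Required: when --name is omitted, Run returns an error
			// produced by checkRequiredFlags and shows the app help.
			&cli.StringFlag{Name: "name", Required: true},
		},
		Action: func(c *cli.Context) error {
			fmt.Println("hello,", c.String("name"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}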
4  vendor/github.com/urfave/cli/v2/command.go  generated  vendored

@@ -127,7 +127,7 @@ func (c *Command) Run(ctx *Context) (err error) {
 		return nil
 	}

-	cerr := checkRequiredFlags(c.Flags, context)
+	cerr := context.checkRequiredFlags(c.Flags)
 	if cerr != nil {
 		_ = ShowCommandHelp(context, c.Name)
 		return cerr

@@ -227,6 +227,7 @@ func (c *Command) startApp(ctx *Context) error {
 	}

 	app.Usage = c.Usage
+	app.UsageText = c.UsageText
 	app.Description = c.Description
 	app.ArgsUsage = c.ArgsUsage

@@ -243,6 +244,7 @@ func (c *Command) startApp(ctx *Context) error {
 	app.Version = ctx.App.Version
 	app.HideVersion = true
 	app.Compiled = ctx.App.Compiled
+	app.Reader = ctx.App.Reader
 	app.Writer = ctx.App.Writer
 	app.ErrWriter = ctx.App.ErrWriter
 	app.ExitErrHandler = ctx.App.ExitErrHandler
136  vendor/github.com/urfave/cli/v2/context.go  generated  vendored

@@ -2,9 +2,7 @@ package cli

 import (
 	"context"
-	"errors"
 	"flag"
-	"fmt"
 	"strings"
 )

@@ -53,20 +51,18 @@ func (c *Context) Set(name, value string) error {

 // IsSet determines if the flag was actually set
 func (c *Context) IsSet(name string) bool {
-	if fs := lookupFlagSet(name, c); fs != nil {
-		if fs := lookupFlagSet(name, c); fs != nil {
-			isSet := false
-			fs.Visit(func(f *flag.Flag) {
-				if f.Name == name {
-					isSet = true
-				}
-			})
-			if isSet {
-				return true
-			}
+	if fs := c.lookupFlagSet(name); fs != nil {
+		isSet := false
+		fs.Visit(func(f *flag.Flag) {
+			if f.Name == name {
+				isSet = true
+			}
+		})
+		if isSet {
+			return true
 		}
 	}

-	f := lookupFlag(name, c)
+	f := c.lookupFlag(name)
 	if f == nil {
 		return false
 	}

@@ -108,7 +104,10 @@ func (c *Context) Lineage() []*Context {

 // Value returns the value of the flag corresponding to `name`
 func (c *Context) Value(name string) interface{} {
-	return c.flagSet.Lookup(name).Value.(flag.Getter).Get()
+	if fs := c.lookupFlagSet(name); fs != nil {
+		return fs.Lookup(name).Value.(flag.Getter).Get()
+	}
+	return nil
 }

 // Args returns the command line arguments associated with the context.

@@ -122,7 +121,7 @@ func (c *Context) NArg() int {
 	return c.Args().Len()
 }

-func lookupFlag(name string, ctx *Context) Flag {
+func (ctx *Context) lookupFlag(name string) Flag {
 	for _, c := range ctx.Lineage() {
 		if c.Command == nil {
 			continue

@@ -150,8 +149,11 @@ func lookupFlag(name string, ctx *Context) Flag {
 	return nil
 }

-func lookupFlagSet(name string, ctx *Context) *flag.FlagSet {
+func (ctx *Context) lookupFlagSet(name string) *flag.FlagSet {
 	for _, c := range ctx.Lineage() {
+		if c.flagSet == nil {
+			continue
+		}
 		if f := c.flagSet.Lookup(name); f != nil {
 			return c.flagSet
 		}

@@ -160,89 +162,7 @@ func lookupFlagSet(name string, ctx *Context) *flag.FlagSet {
 	return nil
 }

-func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) {
-	switch ff.Value.(type) {
-	case Serializer:
-		_ = set.Set(name, ff.Value.(Serializer).Serialize())
-	default:
-		_ = set.Set(name, ff.Value.String())
-	}
-}
-
-func normalizeFlags(flags []Flag, set *flag.FlagSet) error {
-	visited := make(map[string]bool)
-	set.Visit(func(f *flag.Flag) {
-		visited[f.Name] = true
-	})
-	for _, f := range flags {
-		parts := f.Names()
-		if len(parts) == 1 {
-			continue
-		}
-		var ff *flag.Flag
-		for _, name := range parts {
-			name = strings.Trim(name, " ")
-			if visited[name] {
-				if ff != nil {
-					return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name)
-				}
-				ff = set.Lookup(name)
-			}
-		}
-		if ff == nil {
-			continue
-		}
-		for _, name := range parts {
-			name = strings.Trim(name, " ")
-			if !visited[name] {
-				copyFlag(name, ff, set)
-			}
-		}
-	}
-	return nil
-}
-
-func makeFlagNameVisitor(names *[]string) func(*flag.Flag) {
-	return func(f *flag.Flag) {
-		nameParts := strings.Split(f.Name, ",")
-		name := strings.TrimSpace(nameParts[0])
-
-		for _, part := range nameParts {
-			part = strings.TrimSpace(part)
-			if len(part) > len(name) {
-				name = part
-			}
-		}
-
-		if name != "" {
-			*names = append(*names, name)
-		}
-	}
-}
-
-type requiredFlagsErr interface {
-	error
-	getMissingFlags() []string
-}
-
-type errRequiredFlags struct {
-	missingFlags []string
-}
-
-func (e *errRequiredFlags) Error() string {
-	numberOfMissingFlags := len(e.missingFlags)
-	if numberOfMissingFlags == 1 {
-		return fmt.Sprintf("Required flag %q not set", e.missingFlags[0])
-	}
-	joinedMissingFlags := strings.Join(e.missingFlags, ", ")
-	return fmt.Sprintf("Required flags %q not set", joinedMissingFlags)
-}
-
-func (e *errRequiredFlags) getMissingFlags() []string {
-	return e.missingFlags
-}
-
-func checkRequiredFlags(flags []Flag, context *Context) requiredFlagsErr {
+func (context *Context) checkRequiredFlags(flags []Flag) requiredFlagsErr {
 	var missingFlags []string
 	for _, f := range flags {
 		if rf, ok := f.(RequiredFlag); ok && rf.IsRequired() {

@@ -271,3 +191,21 @@ func checkRequiredFlags(flags []Flag, context *Context) requiredFlagsErr {

 	return nil
 }
+
+func makeFlagNameVisitor(names *[]string) func(*flag.Flag) {
+	return func(f *flag.Flag) {
+		nameParts := strings.Split(f.Name, ",")
+		name := strings.TrimSpace(nameParts[0])
+
+		for _, part := range nameParts {
+			part = strings.TrimSpace(part)
+			if len(part) > len(name) {
+				name = part
+			}
+		}
+
+		if name != "" {
+			*names = append(*names, name)
+		}
+	}
+}
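lookupFlagSet is now a method that walks Context.Lineage and skips contexts without a flag set, so lookups like Value, IsSet, and the typed getters resolve flags defined on an ancestor context. A short sketch of what that buys callers, using the public urfave/cli/v2 API:

package main

import (
	"fmt"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name:  "tool",
		Flags: []cli.Flag{&cli.StringFlag{Name: "region", Value: "us-east-1"}},
		Commands: []*cli.Command{{
			Name: "deploy",
			Action: func(c *cli.Context) error {
				// "region" is defined on the app, not the subcommand;
				// the lookup walks c.Lineage() to find it.
				fmt.Println("deploying to", c.String("region"))
				return nil
			},
		}},
	}
	_ = app.Run([]string{"tool", "--region", "eu-west-1", "deploy"})
}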
67  vendor/github.com/urfave/cli/v2/docs.go  generated  vendored

@@ -15,31 +15,39 @@ import (
 // The function errors if either parsing or writing of the string fails.
 func (a *App) ToMarkdown() (string, error) {
 	var w bytes.Buffer
-	if err := a.writeDocTemplate(&w); err != nil {
+	if err := a.writeDocTemplate(&w, 0); err != nil {
 		return "", err
 	}
 	return w.String(), nil
 }

-// ToMan creates a man page string for the `*App`
+// ToMan creates a man page string with section number for the `*App`
 // The function errors if either parsing or writing of the string fails.
-func (a *App) ToMan() (string, error) {
+func (a *App) ToManWithSection(sectionNumber int) (string, error) {
 	var w bytes.Buffer
-	if err := a.writeDocTemplate(&w); err != nil {
+	if err := a.writeDocTemplate(&w, sectionNumber); err != nil {
 		return "", err
 	}
 	man := md2man.Render(w.Bytes())
 	return string(man), nil
 }

+// ToMan creates a man page string for the `*App`
+// The function errors if either parsing or writing of the string fails.
+func (a *App) ToMan() (string, error) {
+	man, err := a.ToManWithSection(8)
+	return man, err
+}
+
 type cliTemplate struct {
 	App          *App
+	SectionNum   int
 	Commands     []string
 	GlobalArgs   []string
 	SynopsisArgs []string
 }

-func (a *App) writeDocTemplate(w io.Writer) error {
+func (a *App) writeDocTemplate(w io.Writer, sectionNum int) error {
 	const name = "cli"
 	t, err := template.New(name).Parse(MarkdownDocTemplate)
 	if err != nil {

@@ -47,6 +55,7 @@ func (a *App) writeDocTemplate(w io.Writer) error {
 	}
 	return t.ExecuteTemplate(w, name, &cliTemplate{
 		App:          a,
+		SectionNum:   sectionNum,
 		Commands:     prepareCommands(a.Commands, 0),
 		GlobalArgs:   prepareArgsWithValues(a.VisibleFlags()),
 		SynopsisArgs: prepareArgsSynopsis(a.VisibleFlags()),

@@ -59,15 +68,16 @@ func prepareCommands(commands []*Command, level int) []string {
 		if command.Hidden {
 			continue
 		}
-		usage := ""
-		if command.Usage != "" {
-			usage = command.Usage
-		}

-		prepared := fmt.Sprintf("%s %s\n\n%s\n",
+		usageText := prepareUsageText(command)
+
+		usage := prepareUsage(command, usageText)
+
+		prepared := fmt.Sprintf("%s %s\n\n%s%s",
 			strings.Repeat("#", level+2),
 			strings.Join(command.Names(), ", "),
 			usage,
+			usageText,
 		)

 		flags := prepareArgsWithValues(command.Flags)

@@ -146,3 +156,40 @@ func flagDetails(flag DocGenerationFlag) string {
 	}
 	return ": " + description
 }
+
+func prepareUsageText(command *Command) string {
+	if command.UsageText == "" {
+		return ""
+	}
+
+	// Remove leading and trailing newlines
+	preparedUsageText := strings.Trim(command.UsageText, "\n")
+
+	var usageText string
+	if strings.Contains(preparedUsageText, "\n") {
+		// Format multi-line string as a code block using the 4 space schema to allow for embedded markdown such
+		// that it will not break the continuous code block.
+		for _, ln := range strings.Split(preparedUsageText, "\n") {
+			usageText += fmt.Sprintf("    %s\n", ln)
+		}
+	} else {
+		// Style a single line as a note
+		usageText = fmt.Sprintf(">%s\n", preparedUsageText)
+	}
+
+	return usageText
+}
+
+func prepareUsage(command *Command, usageText string) string {
+	if command.Usage == "" {
+		return ""
+	}
+
+	usage := command.Usage + "\n"
+	// Add a newline to the Usage IFF there is a UsageText
+	if usageText != "" {
+		usage += "\n"
+	}
+
+	return usage
+}
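ToMan keeps its old signature and simply delegates with the conventional section 8; callers that document user commands can now pick the section explicitly via the ToManWithSection method added above. A minimal usage sketch:

package main

import (
	"fmt"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{Name: "greet", Usage: "say hello"}
	// Section 1 is the conventional place for user commands;
	// app.ToMan() would render the same page in section 8.
	man, err := app.ToManWithSection(1)
	if err != nil {
		panic(err)
	}
	fmt.Println(man)
}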
22  vendor/github.com/urfave/cli/v2/errors.go  generated  vendored

@@ -47,6 +47,28 @@ func (m *multiError) Errors() []error {
 	return errs
 }

+type requiredFlagsErr interface {
+	error
+	getMissingFlags() []string
+}
+
+type errRequiredFlags struct {
+	missingFlags []string
+}
+
+func (e *errRequiredFlags) Error() string {
+	numberOfMissingFlags := len(e.missingFlags)
+	if numberOfMissingFlags == 1 {
+		return fmt.Sprintf("Required flag %q not set", e.missingFlags[0])
+	}
+	joinedMissingFlags := strings.Join(e.missingFlags, ", ")
+	return fmt.Sprintf("Required flags %q not set", joinedMissingFlags)
+}
+
+func (e *errRequiredFlags) getMissingFlags() []string {
+	return e.missingFlags
+}
+
 // ErrorFormatter is the interface that will suitably format the error output
 type ErrorFormatter interface {
 	Format(s fmt.State, verb rune)
66  vendor/github.com/urfave/cli/v2/flag.go  generated  vendored

@@ -1,6 +1,7 @@
 package cli

 import (
+	"errors"
 	"flag"
 	"fmt"
 	"io/ioutil"

@@ -118,6 +119,14 @@ type DocGenerationFlag interface {
 	GetValue() string
 }

+// VisibleFlag is an interface that allows to check if a flag is visible
+type VisibleFlag interface {
+	Flag
+
+	// IsVisible returns true if the flag is not hidden, otherwise false
+	IsVisible() bool
+}
+
 func flagSet(name string, flags []Flag) (*flag.FlagSet, error) {
 	set := flag.NewFlagSet(name, flag.ContinueOnError)

@@ -130,11 +139,52 @@ func flagSet(name string, flags []Flag) (*flag.FlagSet, error) {
 	return set, nil
 }

+func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) {
+	switch ff.Value.(type) {
+	case Serializer:
+		_ = set.Set(name, ff.Value.(Serializer).Serialize())
+	default:
+		_ = set.Set(name, ff.Value.String())
+	}
+}
+
+func normalizeFlags(flags []Flag, set *flag.FlagSet) error {
+	visited := make(map[string]bool)
+	set.Visit(func(f *flag.Flag) {
+		visited[f.Name] = true
+	})
+	for _, f := range flags {
+		parts := f.Names()
+		if len(parts) == 1 {
+			continue
+		}
+		var ff *flag.Flag
+		for _, name := range parts {
+			name = strings.Trim(name, " ")
+			if visited[name] {
+				if ff != nil {
+					return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name)
+				}
+				ff = set.Lookup(name)
+			}
+		}
+		if ff == nil {
+			continue
+		}
+		for _, name := range parts {
+			name = strings.Trim(name, " ")
+			if !visited[name] {
+				copyFlag(name, ff, set)
+			}
+		}
+	}
+	return nil
+}
+
 func visibleFlags(fl []Flag) []Flag {
 	var visible []Flag
 	for _, f := range fl {
-		field := flagValue(f).FieldByName("Hidden")
-		if !field.IsValid() || !field.Bool() {
+		if vf, ok := f.(VisibleFlag); ok && vf.IsVisible() {
 			visible = append(visible, f)
 		}
 	}

@@ -359,7 +409,11 @@ func stringifySliceFlag(usage string, names, defaultVals []string) string {
 	}

 	usageWithDefault := strings.TrimSpace(fmt.Sprintf("%s%s", usage, defaultVal))
-	return fmt.Sprintf("%s\t%s", prefixedNames(names, placeholder), usageWithDefault)
+	multiInputString := "(accepts multiple inputs)"
+	if usageWithDefault != "" {
+		multiInputString = "\t" + multiInputString
+	}
+	return fmt.Sprintf("%s\t%s%s", prefixedNames(names, placeholder), usageWithDefault, multiInputString)
 }

 func hasFlag(flags []Flag, fl Flag) bool {

@@ -380,8 +434,10 @@ func flagFromEnvOrFile(envVars []string, filePath string) (val string, ok bool)
 		}
 	}
 	for _, fileVar := range strings.Split(filePath, ",") {
-		if data, err := ioutil.ReadFile(fileVar); err == nil {
-			return string(data), true
+		if fileVar != "" {
+			if data, err := ioutil.ReadFile(fileVar); err == nil {
+				return string(data), true
+			}
 		}
 	}
 	return "", false
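visibleFlags no longer reflects on a Hidden struct field; each flag type now opts in through the VisibleFlag interface (the per-type IsVisible methods added in the files below). A small sketch of the observable behaviour with the public API:

package main

import (
	"fmt"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Name: "tool",
		Flags: []cli.Flag{
			// Hidden: true makes IsVisible() return false, so the flag
			// is filtered out of help and generated docs.
			&cli.StringFlag{Name: "debug-addr", Hidden: true},
			&cli.StringFlag{Name: "addr", Usage: "listen address"},
		},
	}
	for _, f := range app.VisibleFlags() {
		fmt.Println(f.Names()) // prints [addr] only
	}
}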
7  vendor/github.com/urfave/cli/v2/flag_bool.go  generated  vendored

@@ -58,6 +58,11 @@ func (f *BoolFlag) GetValue() string {
 	return ""
 }

+// IsVisible returns true if the flag is not hidden, otherwise false
+func (f *BoolFlag) IsVisible() bool {
+	return !f.Hidden
+}
+
 // Apply populates the flag given the flag set and environment
 func (f *BoolFlag) Apply(set *flag.FlagSet) error {
 	if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {

@@ -87,7 +92,7 @@ func (f *BoolFlag) Apply(set *flag.FlagSet) error {
 // Bool looks up the value of a local BoolFlag, returns
 // false if not found
 func (c *Context) Bool(name string) bool {
-	if fs := lookupFlagSet(name, c); fs != nil {
+	if fs := c.lookupFlagSet(name); fs != nil {
 		return lookupBool(name, fs)
 	}
 	return false
7  vendor/github.com/urfave/cli/v2/flag_duration.go  generated  vendored

@@ -58,6 +58,11 @@ func (f *DurationFlag) GetValue() string {
 	return f.Value.String()
 }

+// IsVisible returns true if the flag is not hidden, otherwise false
+func (f *DurationFlag) IsVisible() bool {
+	return !f.Hidden
+}
+
 // Apply populates the flag given the flag set and environment
 func (f *DurationFlag) Apply(set *flag.FlagSet) error {
 	if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {

@@ -86,7 +91,7 @@ func (f *DurationFlag) Apply(set *flag.FlagSet) error {
 // Duration looks up the value of a local DurationFlag, returns
 // 0 if not found
 func (c *Context) Duration(name string) time.Duration {
-	if fs := lookupFlagSet(name, c); fs != nil {
+	if fs := c.lookupFlagSet(name); fs != nil {
 		return lookupDuration(name, fs)
 	}
 	return 0
10  vendor/github.com/urfave/cli/v2/flag_float64.go  generated  vendored

@@ -58,12 +58,16 @@ func (f *Float64Flag) GetValue() string {
 	return fmt.Sprintf("%f", f.Value)
 }

+// IsVisible returns true if the flag is not hidden, otherwise false
+func (f *Float64Flag) IsVisible() bool {
+	return !f.Hidden
+}
+
 // Apply populates the flag given the flag set and environment
 func (f *Float64Flag) Apply(set *flag.FlagSet) error {
 	if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
 		if val != "" {
-			valFloat, err := strconv.ParseFloat(val, 10)
+			valFloat, err := strconv.ParseFloat(val, 64)

 			if err != nil {
 				return fmt.Errorf("could not parse %q as float64 value for flag %s: %s", val, f.Name, err)
 			}

@@ -87,7 +91,7 @@ func (f *Float64Flag) Apply(set *flag.FlagSet) error {
 // Float64 looks up the value of a local Float64Flag, returns
 // 0 if not found
 func (c *Context) Float64(name string) float64 {
-	if fs := lookupFlagSet(name, c); fs != nil {
+	if fs := c.lookupFlagSet(name); fs != nil {
 		return lookupFloat64(name, fs)
 	}
 	return 0
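The ParseFloat fix above is worth a note: strconv.ParseFloat's second argument is a bit size (32 or 64), not a numeric base. Passing 10 happened to take the 64-bit path in practice, but 64 is the documented value for a float64 flag. A quick illustration of what the bit size actually controls:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// bitSize selects float32 vs float64 precision; it is not a base.
	f64, _ := strconv.ParseFloat("0.1234567890123", 64)
	f32, _ := strconv.ParseFloat("0.1234567890123", 32)
	fmt.Println(f64)          // full float64 precision
	fmt.Println(float32(f32)) // rounded to float32 precision
}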
29  vendor/github.com/urfave/cli/v2/flag_float64_slice.go  generated  vendored

@@ -19,6 +19,16 @@ func NewFloat64Slice(defaults ...float64) *Float64Slice {
 	return &Float64Slice{slice: append([]float64{}, defaults...)}
 }

+// clone allocate a copy of self object
+func (f *Float64Slice) clone() *Float64Slice {
+	n := &Float64Slice{
+		slice:      make([]float64, len(f.slice)),
+		hasBeenSet: f.hasBeenSet,
+	}
+	copy(n.slice, f.slice)
+	return n
+}
+
 // Set parses the value into a float64 and appends it to the list of values
 func (f *Float64Slice) Set(value string) error {
 	if !f.hasBeenSet {

@@ -117,6 +127,11 @@ func (f *Float64SliceFlag) GetValue() string {
 	return ""
 }

+// IsVisible returns true if the flag is not hidden, otherwise false
+func (f *Float64SliceFlag) IsVisible() bool {
+	return !f.Hidden
+}
+
 // Apply populates the flag given the flag set and environment
 func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error {
 	if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {

@@ -129,15 +144,19 @@ func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error {
 			}
 		}

+		// Set this to false so that we reset the slice if we then set values from
+		// flags that have already been set by the environment.
+		f.Value.hasBeenSet = false
 		f.HasBeenSet = true
 	}

+	if f.Value == nil {
+		f.Value = &Float64Slice{}
+	}
+	copyValue := f.Value.clone()
 	for _, name := range f.Names() {
-		if f.Value == nil {
-			f.Value = &Float64Slice{}
-		}
-		set.Var(f.Value, name, f.Usage)
+		set.Var(copyValue, name, f.Usage)
 	}

 	return nil

@@ -146,7 +165,7 @@ func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error {
 // Float64Slice looks up the value of a local Float64SliceFlag, returns
 // nil if not found
 func (c *Context) Float64Slice(name string) []float64 {
-	if fs := lookupFlagSet(name, c); fs != nil {
+	if fs := c.lookupFlagSet(name); fs != nil {
 		return lookupFloat64Slice(name, fs)
 	}
 	return nil
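The clone added above matters because Apply now registers a copy of the default slice with the flag set, so parsed values no longer accumulate into the flag's shared default across runs or nested commands. A self-contained sketch of the aliasing problem clone avoids (the types here mirror the vendored ones but are illustrative):

package main

import "fmt"

type float64Slice struct {
	slice      []float64
	hasBeenSet bool
}

// clone allocates an independent copy, as the vendored slice types now do.
func (f *float64Slice) clone() *float64Slice {
	n := &float64Slice{
		slice:      make([]float64, len(f.slice)),
		hasBeenSet: f.hasBeenSet,
	}
	copy(n.slice, f.slice)
	return n
}

func main() {
	def := &float64Slice{slice: []float64{1, 2}}
	// Without clone, two flag sets would append into the same backing
	// value; with clone each parse starts from the pristine default.
	a, b := def.clone(), def.clone()
	a.slice = append(a.slice, 3)
	fmt.Println(def.slice, a.slice, b.slice) // [1 2] [1 2 3] [1 2]
}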
7  vendor/github.com/urfave/cli/v2/flag_generic.go  generated  vendored

@@ -66,6 +66,11 @@ func (f *GenericFlag) GetValue() string {
 	return ""
 }

+// IsVisible returns true if the flag is not hidden, otherwise false
+func (f *GenericFlag) IsVisible() bool {
+	return !f.Hidden
+}
+
 // Apply takes the flagset and calls Set on the generic flag with the value
 // provided by the user for parsing by the flag
 func (f GenericFlag) Apply(set *flag.FlagSet) error {

@@ -89,7 +94,7 @@ func (f GenericFlag) Apply(set *flag.FlagSet) error {
 // Generic looks up the value of a local GenericFlag, returns
 // nil if not found
 func (c *Context) Generic(name string) interface{} {
-	if fs := lookupFlagSet(name, c); fs != nil {
+	if fs := c.lookupFlagSet(name); fs != nil {
 		return lookupGeneric(name, fs)
 	}
 	return nil
7  vendor/github.com/urfave/cli/v2/flag_int.go  generated  vendored

@@ -58,6 +58,11 @@ func (f *IntFlag) GetValue() string {
 	return fmt.Sprintf("%d", f.Value)
 }

+// IsVisible returns true if the flag is not hidden, otherwise false
+func (f *IntFlag) IsVisible() bool {
+	return !f.Hidden
+}
+
 // Apply populates the flag given the flag set and environment
 func (f *IntFlag) Apply(set *flag.FlagSet) error {
 	if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {

@@ -87,7 +92,7 @@ func (f *IntFlag) Apply(set *flag.FlagSet) error {
 // Int looks up the value of a local IntFlag, returns
 // 0 if not found
 func (c *Context) Int(name string) int {
-	if fs := lookupFlagSet(name, c); fs != nil {
+	if fs := c.lookupFlagSet(name); fs != nil {
 		return lookupInt(name, fs)
 	}
 	return 0
7  vendor/github.com/urfave/cli/v2/flag_int64.go  generated  vendored

@@ -58,6 +58,11 @@ func (f *Int64Flag) GetValue() string {
 	return fmt.Sprintf("%d", f.Value)
 }

+// IsVisible returns true if the flag is not hidden, otherwise false
+func (f *Int64Flag) IsVisible() bool {
+	return !f.Hidden
+}
+
 // Apply populates the flag given the flag set and environment
 func (f *Int64Flag) Apply(set *flag.FlagSet) error {
 	if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {

@@ -86,7 +91,7 @@ func (f *Int64Flag) Apply(set *flag.FlagSet) error {
 // Int64 looks up the value of a local Int64Flag, returns
 // 0 if not found
 func (c *Context) Int64(name string) int64 {
-	if fs := lookupFlagSet(name, c); fs != nil {
+	if fs := c.lookupFlagSet(name); fs != nil {
 		return lookupInt64(name, fs)
 	}
 	return 0
32  vendor/github.com/urfave/cli/v2/flag_int64_slice.go  generated  vendored

@@ -19,6 +19,16 @@ func NewInt64Slice(defaults ...int64) *Int64Slice {
 	return &Int64Slice{slice: append([]int64{}, defaults...)}
 }

+// clone allocate a copy of self object
+func (i *Int64Slice) clone() *Int64Slice {
+	n := &Int64Slice{
+		slice:      make([]int64, len(i.slice)),
+		hasBeenSet: i.hasBeenSet,
+	}
+	copy(n.slice, i.slice)
+	return n
+}
+
 // Set parses the value into an integer and appends it to the list of values
 func (i *Int64Slice) Set(value string) error {
 	if !i.hasBeenSet {

@@ -118,6 +128,11 @@ func (f *Int64SliceFlag) GetValue() string {
 	return ""
 }

+// IsVisible returns true if the flag is not hidden, otherwise false
+func (f *Int64SliceFlag) IsVisible() bool {
+	return !f.Hidden
+}
+
 // Apply populates the flag given the flag set and environment
 func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error {
 	if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {

@@ -129,14 +144,18 @@ func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error {
 			}
 		}

+		// Set this to false so that we reset the slice if we then set values from
+		// flags that have already been set by the environment.
+		f.Value.hasBeenSet = false
 		f.HasBeenSet = true
 	}

+	if f.Value == nil {
+		f.Value = &Int64Slice{}
+	}
+	copyValue := f.Value.clone()
 	for _, name := range f.Names() {
-		if f.Value == nil {
-			f.Value = &Int64Slice{}
-		}
-		set.Var(f.Value, name, f.Usage)
+		set.Var(copyValue, name, f.Usage)
 	}

 	return nil

@@ -145,7 +164,10 @@ func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error {
 // Int64Slice looks up the value of a local Int64SliceFlag, returns
 // nil if not found
 func (c *Context) Int64Slice(name string) []int64 {
-	return lookupInt64Slice(name, c.flagSet)
+	if fs := c.lookupFlagSet(name); fs != nil {
+		return lookupInt64Slice(name, fs)
+	}
+	return nil
 }

 func lookupInt64Slice(name string, set *flag.FlagSet) []int64 {
31  vendor/github.com/urfave/cli/v2/flag_int_slice.go  generated  vendored

@@ -19,6 +19,16 @@ func NewIntSlice(defaults ...int) *IntSlice {
 	return &IntSlice{slice: append([]int{}, defaults...)}
 }

+// clone allocate a copy of self object
+func (i *IntSlice) clone() *IntSlice {
+	n := &IntSlice{
+		slice:      make([]int, len(i.slice)),
+		hasBeenSet: i.hasBeenSet,
+	}
+	copy(n.slice, i.slice)
+	return n
+}
+
 // TODO: Consistently have specific Set function for Int64 and Float64 ?
 // SetInt directly adds an integer to the list of values
 func (i *IntSlice) SetInt(value int) {

@@ -129,6 +139,11 @@ func (f *IntSliceFlag) GetValue() string {
 	return ""
 }

+// IsVisible returns true if the flag is not hidden, otherwise false
+func (f *IntSliceFlag) IsVisible() bool {
+	return !f.Hidden
+}
+
 // Apply populates the flag given the flag set and environment
 func (f *IntSliceFlag) Apply(set *flag.FlagSet) error {
 	if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {

@@ -140,14 +155,18 @@ func (f *IntSliceFlag) Apply(set *flag.FlagSet) error {
 			}
 		}

+		// Set this to false so that we reset the slice if we then set values from
+		// flags that have already been set by the environment.
+		f.Value.hasBeenSet = false
 		f.HasBeenSet = true
 	}

+	if f.Value == nil {
+		f.Value = &IntSlice{}
+	}
+	copyValue := f.Value.clone()
 	for _, name := range f.Names() {
-		if f.Value == nil {
-			f.Value = &IntSlice{}
-		}
-		set.Var(f.Value, name, f.Usage)
+		set.Var(copyValue, name, f.Usage)
 	}

 	return nil

@@ -156,8 +175,8 @@ func (f *IntSliceFlag) Apply(set *flag.FlagSet) error {
 // IntSlice looks up the value of a local IntSliceFlag, returns
 // nil if not found
 func (c *Context) IntSlice(name string) []int {
-	if fs := lookupFlagSet(name, c); fs != nil {
-		return lookupIntSlice(name, c.flagSet)
+	if fs := c.lookupFlagSet(name); fs != nil {
+		return lookupIntSlice(name, fs)
 	}
 	return nil
 }
7  vendor/github.com/urfave/cli/v2/flag_path.go  generated  vendored

@@ -54,6 +54,11 @@ func (f *PathFlag) GetValue() string {
 	return f.Value
 }

+// IsVisible returns true if the flag is not hidden, otherwise false
+func (f *PathFlag) IsVisible() bool {
+	return !f.Hidden
+}
+
 // Apply populates the flag given the flag set and environment
 func (f *PathFlag) Apply(set *flag.FlagSet) error {
 	if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {

@@ -75,7 +80,7 @@ func (f *PathFlag) Apply(set *flag.FlagSet) error {
 // Path looks up the value of a local PathFlag, returns
 // "" if not found
 func (c *Context) Path(name string) string {
-	if fs := lookupFlagSet(name, c); fs != nil {
+	if fs := c.lookupFlagSet(name); fs != nil {
 		return lookupPath(name, fs)
 	}
12  vendor/github.com/urfave/cli/v2/flag_string.go  generated  vendored

@@ -55,6 +55,11 @@ func (f *StringFlag) GetValue() string {
 	return f.Value
 }

+// IsVisible returns true if the flag is not hidden, otherwise false
+func (f *StringFlag) IsVisible() bool {
+	return !f.Hidden
+}
+
 // Apply populates the flag given the flag set and environment
 func (f *StringFlag) Apply(set *flag.FlagSet) error {
 	if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {

@@ -76,7 +81,7 @@ func (f *StringFlag) Apply(set *flag.FlagSet) error {
 // String looks up the value of a local StringFlag, returns
 // "" if not found
 func (c *Context) String(name string) string {
-	if fs := lookupFlagSet(name, c); fs != nil {
+	if fs := c.lookupFlagSet(name); fs != nil {
 		return lookupString(name, fs)
 	}
 	return ""

@@ -85,10 +90,7 @@ func (c *Context) String(name string) string {
 func lookupString(name string, set *flag.FlagSet) string {
 	f := set.Lookup(name)
 	if f != nil {
-		parsed, err := f.Value.String(), error(nil)
-		if err != nil {
-			return ""
-		}
+		parsed := f.Value.String()
 		return parsed
 	}
 	return ""
35  vendor/github.com/urfave/cli/v2/flag_string_slice.go  generated  vendored

@@ -18,6 +18,16 @@ func NewStringSlice(defaults ...string) *StringSlice {
 	return &StringSlice{slice: append([]string{}, defaults...)}
 }

+// clone allocate a copy of self object
+func (s *StringSlice) clone() *StringSlice {
+	n := &StringSlice{
+		slice:      make([]string, len(s.slice)),
+		hasBeenSet: s.hasBeenSet,
+	}
+	copy(n.slice, s.slice)
+	return n
+}
+
 // Set appends the string value to the list of values
 func (s *StringSlice) Set(value string) error {
 	if !s.hasBeenSet {

@@ -114,6 +124,11 @@ func (f *StringSliceFlag) GetValue() string {
 	return ""
 }

+// IsVisible returns true if the flag is not hidden, otherwise false
+func (f *StringSliceFlag) IsVisible() bool {
+	return !f.Hidden
+}
+
 // Apply populates the flag given the flag set and environment
 func (f *StringSliceFlag) Apply(set *flag.FlagSet) error {

@@ -144,17 +159,15 @@ func (f *StringSliceFlag) Apply(set *flag.FlagSet) error {
 		f.HasBeenSet = true
 	}

+	if f.Value == nil {
+		f.Value = &StringSlice{}
+	}
+	setValue := f.Destination
+	if f.Destination == nil {
+		setValue = f.Value.clone()
+	}
 	for _, name := range f.Names() {
-		if f.Value == nil {
-			f.Value = &StringSlice{}
-		}
-
-		if f.Destination != nil {
-			set.Var(f.Destination, name, f.Usage)
-			continue
-		}
-
-		set.Var(f.Value, name, f.Usage)
+		set.Var(setValue, name, f.Usage)
 	}

 	return nil

@@ -163,7 +176,7 @@ func (f *StringSliceFlag) Apply(set *flag.FlagSet) error {
 // StringSlice looks up the value of a local StringSliceFlag, returns
 // nil if not found
 func (c *Context) StringSlice(name string) []string {
-	if fs := lookupFlagSet(name, c); fs != nil {
+	if fs := c.lookupFlagSet(name); fs != nil {
 		return lookupStringSlice(name, fs)
 	}
 	return nil
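The Apply rewrite above unifies the two registration paths: when a Destination is set, parsed values land there; otherwise a clone of Value is registered so the default stays intact. A short sketch using the Destination field with the public API:

package main

import (
	"fmt"

	"github.com/urfave/cli/v2"
)

func main() {
	dest := cli.NewStringSlice()
	app := &cli.App{
		Name: "tool",
		Flags: []cli.Flag{
			// Parsed values land in dest; without a Destination the flag
			// registers a clone of Value, keeping the default pristine.
			&cli.StringSliceFlag{Name: "tag", Destination: dest},
		},
		Action: func(c *cli.Context) error {
			fmt.Println(dest.Value()) // [a b]
			return nil
		},
	}
	_ = app.Run([]string{"tool", "--tag", "a", "--tag", "b"})
}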
17  vendor/github.com/urfave/cli/v2/flag_timestamp.go  generated  vendored

@@ -71,6 +71,7 @@ type TimestampFlag struct {
 	Value       *Timestamp
 	DefaultText string
 	HasBeenSet  bool
+	Destination *Timestamp
 }

 // IsSet returns whether or not the flag has been set through env or file

@@ -113,6 +114,11 @@ func (f *TimestampFlag) GetValue() string {
 	return ""
 }

+// IsVisible returns true if the flag is not hidden, otherwise false
+func (f *TimestampFlag) IsVisible() bool {
+	return !f.Hidden
+}
+
 // Apply populates the flag given the flag set and environment
 func (f *TimestampFlag) Apply(set *flag.FlagSet) error {
 	if f.Layout == "" {

@@ -123,6 +129,10 @@ func (f *TimestampFlag) Apply(set *flag.FlagSet) error {
 	}
 	f.Value.SetLayout(f.Layout)

+	if f.Destination != nil {
+		f.Destination.SetLayout(f.Layout)
+	}
+
 	if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
 		if err := f.Value.Set(val); err != nil {
 			return fmt.Errorf("could not parse %q as timestamp value for flag %s: %s", val, f.Name, err)

@@ -131,6 +141,11 @@ func (f *TimestampFlag) Apply(set *flag.FlagSet) error {
 	}

 	for _, name := range f.Names() {
+		if f.Destination != nil {
+			set.Var(f.Destination, name, f.Usage)
+			continue
+		}
+
 		set.Var(f.Value, name, f.Usage)
 	}
 	return nil

@@ -138,7 +153,7 @@ func (f *TimestampFlag) Apply(set *flag.FlagSet) error {

 // Timestamp gets the timestamp from a flag name
 func (c *Context) Timestamp(name string) *time.Time {
-	if fs := lookupFlagSet(name, c); fs != nil {
+	if fs := c.lookupFlagSet(name); fs != nil {
 		return lookupTimestamp(name, fs)
 	}
 	return nil
7  vendor/github.com/urfave/cli/v2/flag_uint.go  generated  vendored

@@ -52,6 +52,11 @@ func (f *UintFlag) GetUsage() string {
 	return f.Usage
 }

+// IsVisible returns true if the flag is not hidden, otherwise false
+func (f *UintFlag) IsVisible() bool {
+	return !f.Hidden
+}
+
 // Apply populates the flag given the flag set and environment
 func (f *UintFlag) Apply(set *flag.FlagSet) error {
 	if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {

@@ -86,7 +91,7 @@ func (f *UintFlag) GetValue() string {
 // Uint looks up the value of a local UintFlag, returns
 // 0 if not found
 func (c *Context) Uint(name string) uint {
-	if fs := lookupFlagSet(name, c); fs != nil {
+	if fs := c.lookupFlagSet(name); fs != nil {
 		return lookupUint(name, fs)
 	}
 	return 0
vendor/github.com/urfave/cli/v2/flag_uint64.go (generated, vendored): 7 changed lines
@@ -52,6 +52,11 @@ func (f *Uint64Flag) GetUsage() string {
 	return f.Usage
 }

+// IsVisible returns true if the flag is not hidden, otherwise false
+func (f *Uint64Flag) IsVisible() bool {
+	return !f.Hidden
+}
+
 // Apply populates the flag given the flag set and environment
 func (f *Uint64Flag) Apply(set *flag.FlagSet) error {
 	if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
@@ -86,7 +91,7 @@ func (f *Uint64Flag) GetValue() string {
 // Uint64 looks up the value of a local Uint64Flag, returns
 // 0 if not found
 func (c *Context) Uint64(name string) uint64 {
-	if fs := lookupFlagSet(name, c); fs != nil {
+	if fs := c.lookupFlagSet(name); fs != nil {
 		return lookupUint64(name, fs)
 	}
 	return 0
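Note: UintFlag and Uint64Flag gain the same IsVisible method as TimestampFlag above. A plausible use is filtering hidden flags out of help output; the sketch below declares its own one-method interface rather than assuming the library's exact name for it:

package main

import (
	"fmt"

	"github.com/urfave/cli/v2"
)

// visibleFlag is a local stand-in interface; after this diff, flag types
// such as UintFlag, Uint64Flag and TimestampFlag all satisfy it.
type visibleFlag interface{ IsVisible() bool }

// visibleOnly keeps only flags whose IsVisible reports true.
func visibleOnly(flags []cli.Flag) []cli.Flag {
	var out []cli.Flag
	for _, f := range flags {
		if vf, ok := f.(visibleFlag); ok && !vf.IsVisible() {
			continue // hidden flags are skipped
		}
		out = append(out, f)
	}
	return out
}

func main() {
	flags := []cli.Flag{
		&cli.UintFlag{Name: "level"},
		&cli.Uint64Flag{Name: "internal-budget", Hidden: true},
	}
	fmt.Println(len(visibleOnly(flags))) // 1
}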
vendor/github.com/urfave/cli/v2/funcs.go (generated, vendored): 2 changed lines
@@ -17,7 +17,7 @@ type ActionFunc func(*Context) error
 // CommandNotFoundFunc is executed if the proper command cannot be found
 type CommandNotFoundFunc func(*Context, string)

-// OnUsageErrorFunc is executed if an usage error occurs. This is useful for displaying
+// OnUsageErrorFunc is executed if a usage error occurs. This is useful for displaying
 // customized usage error messages. This function is able to replace the
 // original error messages. If this function is not set, the "Incorrect usage"
 // is displayed and the execution is interrupted.
vendor/github.com/urfave/cli/v2/template.go (generated, vendored): 20 changed lines
@@ -7,7 +7,7 @@ var AppHelpTemplate = `NAME:
    {{.Name}}{{if .Usage}} - {{.Usage}}{{end}}

 USAGE:
-   {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}
+   {{if .UsageText}}{{.UsageText | nindent 3 | trim}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Version}}{{if not .HideVersion}}

 VERSION:
    {{.Version}}{{end}}{{end}}{{if .Description}}
@@ -39,7 +39,7 @@ var CommandHelpTemplate = `NAME:
    {{.HelpName}} - {{.Usage}}

 USAGE:
-   {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}}
+   {{if .UsageText}}{{.UsageText | nindent 3 | trim}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}}

 CATEGORY:
    {{.Category}}{{end}}{{if .Description}}
@@ -59,7 +59,7 @@ var SubcommandHelpTemplate = `NAME:
    {{.HelpName}} - {{.Usage}}

 USAGE:
-   {{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Description}}
+   {{if .UsageText}}{{.UsageText | nindent 3 | trim}}{{else}}{{.HelpName}} command{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Description}}

 DESCRIPTION:
    {{.Description | nindent 3 | trim}}{{end}}
@@ -74,9 +74,9 @@ OPTIONS:
    {{end}}{{end}}
 `

-var MarkdownDocTemplate = `% {{ .App.Name }} 8
+var MarkdownDocTemplate = `{{if gt .SectionNum 0}}% {{ .App.Name }} {{ .SectionNum }}

-# NAME
+{{end}}# NAME

 {{ .App.Name }}{{ if .App.Usage }} - {{ .App.Usage }}{{ end }}
@@ -86,16 +86,18 @@ var MarkdownDocTemplate = `% {{ .App.Name }} 8
 {{ if .SynopsisArgs }}
 ` + "```" + `
 {{ range $v := .SynopsisArgs }}{{ $v }}{{ end }}` + "```" + `
-{{ end }}{{ if .App.UsageText }}
+{{ end }}{{ if .App.Description }}
 # DESCRIPTION

-{{ .App.UsageText }}
+{{ .App.Description }}
 {{ end }}
 **Usage**:

-` + "```" + `
+` + "```" + `{{ if .App.UsageText }}
+{{ .App.UsageText }}
+{{ else }}
 {{ .App.Name }} [GLOBAL OPTIONS] command [COMMAND OPTIONS] [ARGUMENTS...]
-` + "```" + `
+{{ end }}` + "```" + `
 {{ if .GlobalArgs }}
 # GLOBAL OPTIONS
 {{ range $v := .GlobalArgs }}
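Note: the help templates above now pipe a multi-line UsageText through nindent 3 | trim so continuation lines stay aligned under USAGE:. A runnable sketch of what that pipeline does; nindent and trim here are stand-in re-implementations of the helpers the library registers, not the vendored code itself:

package main

import (
	"os"
	"strings"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		// nindent: prefix every line with n spaces, preceded by a newline
		"nindent": func(n int, s string) string {
			pad := strings.Repeat(" ", n)
			return "\n" + pad + strings.ReplaceAll(s, "\n", "\n"+pad)
		},
		"trim": strings.TrimSpace,
	}
	t := template.Must(template.New("usage").Funcs(funcs).Parse(
		"USAGE:\n   {{.UsageText | nindent 3 | trim}}\n"))
	_ = t.Execute(os.Stdout, map[string]string{
		"UsageText": "app do-thing [options]\napp do-other [options]",
	})
}

Both usage lines come out indented by three spaces; trim strips the leading newline and padding that nindent put before the first line, since the template already supplies that indentation literally.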
vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go (generated, vendored): 76 changed lines
@@ -52,6 +52,13 @@ const (
 	// The AWS authorization header name for the security session token if available.
 	awsSecurityTokenHeader = "x-amz-security-token"

+	// The name of the header containing the session token for metadata endpoint calls
+	awsIMDSv2SessionTokenHeader = "X-aws-ec2-metadata-token"
+
+	awsIMDSv2SessionTtlHeader = "X-aws-ec2-metadata-token-ttl-seconds"
+
+	awsIMDSv2SessionTtl = "300"
+
 	// The AWS authorization header name for the auto-generated date.
 	awsDateHeader = "x-amz-date"
@@ -241,6 +248,7 @@ type awsCredentialSource struct {
 	RegionURL string
 	RegionalCredVerificationURL string
 	CredVerificationURL string
+	IMDSv2SessionTokenURL string
 	TargetResource string
 	requestSigner *awsRequestSigner
 	region string
@@ -268,12 +276,22 @@ func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, erro
 func (cs awsCredentialSource) subjectToken() (string, error) {
 	if cs.requestSigner == nil {
-		awsSecurityCredentials, err := cs.getSecurityCredentials()
+		awsSessionToken, err := cs.getAWSSessionToken()
 		if err != nil {
 			return "", err
 		}

-		if cs.region, err = cs.getRegion(); err != nil {
+		headers := make(map[string]string)
+		if awsSessionToken != "" {
+			headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
+		}
+
+		awsSecurityCredentials, err := cs.getSecurityCredentials(headers)
+		if err != nil {
+			return "", err
+		}
+
+		if cs.region, err = cs.getRegion(headers); err != nil {
 			return "", err
 		}
@@ -340,7 +358,37 @@ func (cs awsCredentialSource) subjectToken() (string, error) {
 	return url.QueryEscape(string(result)), nil
 }

-func (cs *awsCredentialSource) getRegion() (string, error) {
+func (cs *awsCredentialSource) getAWSSessionToken() (string, error) {
+	if cs.IMDSv2SessionTokenURL == "" {
+		return "", nil
+	}
+
+	req, err := http.NewRequest("PUT", cs.IMDSv2SessionTokenURL, nil)
+	if err != nil {
+		return "", err
+	}
+
+	req.Header.Add(awsIMDSv2SessionTtlHeader, awsIMDSv2SessionTtl)
+
+	resp, err := cs.doRequest(req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	respBody, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
+	if err != nil {
+		return "", err
+	}
+
+	if resp.StatusCode != 200 {
+		return "", fmt.Errorf("oauth2/google: unable to retrieve AWS session token - %s", string(respBody))
+	}
+
+	return string(respBody), nil
+}
+
+func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) {
 	if envAwsRegion := getenv("AWS_REGION"); envAwsRegion != "" {
 		return envAwsRegion, nil
 	}
@@ -357,6 +405,10 @@ func (cs *awsCredentialSource) getRegion() (string, error) {
 		return "", err
 	}

+	for name, value := range headers {
+		req.Header.Add(name, value)
+	}
+
 	resp, err := cs.doRequest(req)
 	if err != nil {
 		return "", err
@@ -381,7 +433,7 @@ func (cs *awsCredentialSource) getRegion() (string, error) {
 	return string(respBody[:respBodyEnd]), nil
 }

-func (cs *awsCredentialSource) getSecurityCredentials() (result awsSecurityCredentials, err error) {
+func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result awsSecurityCredentials, err error) {
 	if accessKeyID := getenv("AWS_ACCESS_KEY_ID"); accessKeyID != "" {
 		if secretAccessKey := getenv("AWS_SECRET_ACCESS_KEY"); secretAccessKey != "" {
 			return awsSecurityCredentials{
@@ -392,12 +444,12 @@ func (cs *awsCredentialSource) getSecurityCredentials() (result awsSecurityCrede
 		}
 	}

-	roleName, err := cs.getMetadataRoleName()
+	roleName, err := cs.getMetadataRoleName(headers)
 	if err != nil {
 		return
 	}

-	credentials, err := cs.getMetadataSecurityCredentials(roleName)
+	credentials, err := cs.getMetadataSecurityCredentials(roleName, headers)
 	if err != nil {
 		return
 	}
@@ -413,7 +465,7 @@ func (cs *awsCredentialSource) getSecurityCredentials() (result awsSecurityCrede
 	return credentials, nil
 }

-func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) (awsSecurityCredentials, error) {
+func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string, headers map[string]string) (awsSecurityCredentials, error) {
 	var result awsSecurityCredentials

 	req, err := http.NewRequest("GET", fmt.Sprintf("%s/%s", cs.CredVerificationURL, roleName), nil)
@@ -422,6 +474,10 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) (
 	}
 	req.Header.Add("Content-Type", "application/json")

+	for name, value := range headers {
+		req.Header.Add(name, value)
+	}
+
 	resp, err := cs.doRequest(req)
 	if err != nil {
 		return result, err
@@ -441,7 +497,7 @@ func (cs *awsCredentialSource) getMetadataSecurityCredentials(roleName string) (
 		return result, err
 	}

-func (cs *awsCredentialSource) getMetadataRoleName() (string, error) {
+func (cs *awsCredentialSource) getMetadataRoleName(headers map[string]string) (string, error) {
 	if cs.CredVerificationURL == "" {
 		return "", errors.New("oauth2/google: unable to determine the AWS metadata server security credentials endpoint")
 	}
@@ -451,6 +507,10 @@ func (cs *awsCredentialSource) getMetadataRoleName() (string, error) {
 		return "", err
 	}

+	for name, value := range headers {
+		req.Header.Add(name, value)
+	}
+
 	resp, err := cs.doRequest(req)
 	if err != nil {
 		return "", err
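Note: getAWSSessionToken above implements the IMDSv2 handshake: a PUT to the session-token URL carrying a TTL header, after which the returned token is replayed on every metadata read. A standalone sketch of the same exchange; the 169.254.169.254 URLs are the conventional EC2 metadata endpoints, while the library itself takes its URLs from the credential configuration rather than hardcoding them:

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// Step 1: PUT for a session token, with the same 300-second TTL the diff uses.
	req, _ := http.NewRequest("PUT", "http://169.254.169.254/latest/api/token", nil)
	req.Header.Add("X-aws-ec2-metadata-token-ttl-seconds", "300")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err) // only reachable on an EC2 instance
	}
	token, _ := io.ReadAll(resp.Body)
	resp.Body.Close()

	// Step 2: replay the token on a metadata GET.
	req2, _ := http.NewRequest("GET", "http://169.254.169.254/latest/meta-data/placement/region", nil)
	req2.Header.Add("X-aws-ec2-metadata-token", strings.TrimSpace(string(token)))
	resp2, err := http.DefaultClient.Do(req2)
	if err != nil {
		panic(err)
	}
	region, _ := io.ReadAll(resp2.Body)
	resp2.Body.Close()
	fmt.Println(string(region))
}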
vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go (generated, vendored): 11 changed lines
@@ -175,6 +175,7 @@ type CredentialSource struct {
 	RegionURL string `json:"region_url"`
 	RegionalCredVerificationURL string `json:"regional_cred_verification_url"`
 	CredVerificationURL string `json:"cred_verification_url"`
+	IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"`
 	Format format `json:"format"`
 }

@@ -185,14 +186,20 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) {
 		if awsVersion != 1 {
 			return nil, fmt.Errorf("oauth2/google: aws version '%d' is not supported in the current build", awsVersion)
 		}
-		return awsCredentialSource{
+
+		awsCredSource := awsCredentialSource{
 			EnvironmentID: c.CredentialSource.EnvironmentID,
 			RegionURL: c.CredentialSource.RegionURL,
 			RegionalCredVerificationURL: c.CredentialSource.RegionalCredVerificationURL,
 			CredVerificationURL: c.CredentialSource.URL,
 			TargetResource: c.Audience,
 			ctx: ctx,
-		}, nil
+		}
+		if c.CredentialSource.IMDSv2SessionTokenURL != "" {
+			awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL
+		}
+
+		return awsCredSource, nil
 	}
 } else if c.CredentialSource.File != "" {
 	return fileCredentialSource{File: c.CredentialSource.File, Format: c.CredentialSource.Format}, nil
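Note: with the new json tag, an external-account credential file can now name the token endpoint. A sketch of decoding such a credential_source block; the struct mirrors only the fields shown in the hunk above, and the URL values are illustrative:

package main

import (
	"encoding/json"
	"fmt"
)

type credentialSource struct {
	RegionURL                   string `json:"region_url"`
	RegionalCredVerificationURL string `json:"regional_cred_verification_url"`
	CredVerificationURL         string `json:"cred_verification_url"`
	IMDSv2SessionTokenURL       string `json:"imdsv2_session_token_url"`
}

func main() {
	raw := `{
	  "region_url": "http://169.254.169.254/latest/meta-data/placement/availability-zone",
	  "regional_cred_verification_url": "https://sts.{region}.amazonaws.com?Action=GetCallerIdentity&Version=2011-06-15",
	  "imdsv2_session_token_url": "http://169.254.169.254/latest/api/token"
	}`
	var cs credentialSource
	if err := json.Unmarshal([]byte(raw), &cs); err != nil {
		panic(err)
	}
	fmt.Println(cs.IMDSv2SessionTokenURL)
}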
vendor/golang.org/x/sys/unix/syscall_aix.go (generated, vendored): 4 changed lines
@@ -458,8 +458,8 @@ func Fsync(fd int) error {
 //sys Listen(s int, n int) (err error)
 //sys lstat(path string, stat *Stat_t) (err error)
 //sys Pause() (err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error) = pread64
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = pwrite64
+//sys pread(fd int, p []byte, offset int64) (n int, err error) = pread64
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = pwrite64
 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
 //sys Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error)
 //sysnb Setregid(rgid int, egid int) (err error)

vendor/golang.org/x/sys/unix/syscall_darwin.go (generated, vendored): 4 changed lines
@@ -546,8 +546,8 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
 //sys Open(path string, mode int, perm uint32) (fd int, err error)
 //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error)
 //sys Pathconf(path string, name int) (val int, err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error)
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys pread(fd int, p []byte, offset int64) (n int, err error)
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error)
 //sys read(fd int, p []byte) (n int, err error)
 //sys Readlink(path string, buf []byte) (n int, err error)
 //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error)

vendor/golang.org/x/sys/unix/syscall_dragonfly.go (generated, vendored): 4 changed lines
@@ -125,12 +125,12 @@ func Pipe2(p []int, flags int) (err error) {
 }

 //sys extpread(fd int, p []byte, flags int, offset int64) (n int, err error)
-func Pread(fd int, p []byte, offset int64) (n int, err error) {
+func pread(fd int, p []byte, offset int64) (n int, err error) {
 	return extpread(fd, p, 0, offset)
 }

 //sys extpwrite(fd int, p []byte, flags int, offset int64) (n int, err error)
-func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+func pwrite(fd int, p []byte, offset int64) (n int, err error) {
 	return extpwrite(fd, p, 0, offset)
 }

vendor/golang.org/x/sys/unix/syscall_freebsd.go (generated, vendored): 4 changed lines
@@ -638,8 +638,8 @@ func PtraceSingleStep(pid int) (err error) {
 //sys Open(path string, mode int, perm uint32) (fd int, err error)
 //sys Openat(fdat int, path string, mode int, perm uint32) (fd int, err error)
 //sys Pathconf(path string, name int) (val int, err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error)
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys pread(fd int, p []byte, offset int64) (n int, err error)
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error)
 //sys read(fd int, p []byte) (n int, err error)
 //sys Readlink(path string, buf []byte) (n int, err error)
 //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error)

vendor/golang.org/x/sys/unix/syscall_linux_386.go (generated, vendored): 4 changed lines
@@ -35,8 +35,8 @@ func setTimeval(sec, usec int64) Timeval {
 //sys Iopl(level int) (err error)
 //sys Lchown(path string, uid int, gid int) (err error) = SYS_LCHOWN32
 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
-//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
 //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64
 //sys setfsgid(gid int) (prev int, err error) = SYS_SETFSGID32

vendor/golang.org/x/sys/unix/syscall_linux_amd64.go (generated, vendored): 4 changed lines
@@ -29,8 +29,8 @@ func Lstat(path string, stat *Stat_t) (err error) {
 }

 //sys Pause() (err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK

vendor/golang.org/x/sys/unix/syscall_linux_arm.go (generated, vendored): 4 changed lines
@@ -96,8 +96,8 @@ func Utime(path string, buf *Utimbuf) error {

 //sys utimes(path string, times *[2]Timeval) (err error)

-//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
 //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64
 //sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64

vendor/golang.org/x/sys/unix/syscall_linux_arm64.go (generated, vendored): 4 changed lines
@@ -22,8 +22,8 @@ import "unsafe"
 //sysnb getrlimit(resource int, rlim *Rlimit) (err error)
 //sysnb Getuid() (uid int)
 //sys Listen(s int, n int) (err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK

vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go (generated, vendored): 4 changed lines
@@ -21,8 +21,8 @@ package unix
 //sys Lchown(path string, uid int, gid int) (err error)
 //sys Listen(s int, n int) (err error)
 //sys Pause() (err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK

vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go (generated, vendored): 4 changed lines
@@ -25,8 +25,8 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
 //sysnb Getuid() (uid int)
 //sys Lchown(path string, uid int, gid int) (err error)
 //sys Listen(s int, n int) (err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
 //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64

vendor/golang.org/x/sys/unix/syscall_linux_ppc.go (generated, vendored): 4 changed lines
@@ -27,8 +27,8 @@ import (
 //sys Listen(s int, n int) (err error)
 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64
 //sys Pause() (err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
 //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = SYS_SENDFILE64

vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go (generated, vendored): 4 changed lines
@@ -26,8 +26,8 @@ package unix
 //sys Listen(s int, n int) (err error)
 //sys Lstat(path string, stat *Stat_t) (err error)
 //sys Pause() (err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT

vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go (generated, vendored): 4 changed lines
@@ -22,8 +22,8 @@ import "unsafe"
 //sysnb Getrlimit(resource int, rlim *Rlimit) (err error)
 //sysnb Getuid() (uid int)
 //sys Listen(s int, n int) (err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK

 func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {

vendor/golang.org/x/sys/unix/syscall_linux_s390x.go (generated, vendored): 4 changed lines
@@ -26,8 +26,8 @@ import (
 //sys Lchown(path string, uid int, gid int) (err error)
 //sys Lstat(path string, stat *Stat_t) (err error)
 //sys Pause() (err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)

vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go (generated, vendored): 4 changed lines
@@ -23,8 +23,8 @@ package unix
 //sys Listen(s int, n int) (err error)
 //sys Lstat(path string, stat *Stat_t) (err error)
 //sys Pause() (err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
 //sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)

vendor/golang.org/x/sys/unix/syscall_netbsd.go (generated, vendored): 4 changed lines
@@ -313,8 +313,8 @@ func Statvfs(path string, buf *Statvfs_t) (err error) {
 //sys Open(path string, mode int, perm uint32) (fd int, err error)
 //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error)
 //sys Pathconf(path string, name int) (val int, err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error)
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys pread(fd int, p []byte, offset int64) (n int, err error)
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error)
 //sys read(fd int, p []byte) (n int, err error)
 //sys Readlink(path string, buf []byte) (n int, err error)
 //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error)

vendor/golang.org/x/sys/unix/syscall_openbsd.go (generated, vendored): 4 changed lines
@@ -274,8 +274,8 @@ func Uname(uname *Utsname) error {
 //sys Open(path string, mode int, perm uint32) (fd int, err error)
 //sys Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error)
 //sys Pathconf(path string, name int) (val int, err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error)
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys pread(fd int, p []byte, offset int64) (n int, err error)
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error)
 //sys read(fd int, p []byte) (n int, err error)
 //sys Readlink(path string, buf []byte) (n int, err error)
 //sys Readlinkat(dirfd int, path string, buf []byte) (n int, err error)

vendor/golang.org/x/sys/unix/syscall_solaris.go (generated, vendored): 4 changed lines
@@ -661,8 +661,8 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 //sys Openat(dirfd int, path string, flags int, mode uint32) (fd int, err error)
 //sys Pathconf(path string, name int) (val int, err error)
 //sys Pause() (err error)
-//sys Pread(fd int, p []byte, offset int64) (n int, err error)
-//sys Pwrite(fd int, p []byte, offset int64) (n int, err error)
+//sys pread(fd int, p []byte, offset int64) (n int, err error)
+//sys pwrite(fd int, p []byte, offset int64) (n int, err error)
 //sys read(fd int, p []byte) (n int, err error)
 //sys Readlink(path string, buf []byte) (n int, err error)
 //sys Rename(from string, to string) (err error)
vendor/golang.org/x/sys/unix/syscall_unix.go (generated, vendored): 24 changed lines
@@ -177,6 +177,30 @@ func Write(fd int, p []byte) (n int, err error) {
 	return
 }

+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+	n, err = pread(fd, p, offset)
+	if raceenabled {
+		if n > 0 {
+			raceWriteRange(unsafe.Pointer(&p[0]), n)
+		}
+		if err == nil {
+			raceAcquire(unsafe.Pointer(&ioSync))
+		}
+	}
+	return
+}
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+	if raceenabled {
+		raceReleaseMerge(unsafe.Pointer(&ioSync))
+	}
+	n, err = pwrite(fd, p, offset)
+	if raceenabled && n > 0 {
+		raceReadRange(unsafe.Pointer(&p[0]), n)
+	}
+	return
+}
+
 // For testing: clients can set this flag to force
 // creation of IPv6 sockets to return EAFNOSUPPORT.
 var SocketDisableIPv6 bool
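Note: the exported unix.Pread and unix.Pwrite signatures are unchanged by this refactor; the per-OS stubs became unexported and the race-detector annotations now live in the two shared wrappers above. A quick check against the public API (temp-file demo, Unix-only):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.CreateTemp("", "pread-demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	fd := int(f.Fd())
	if _, err := unix.Pwrite(fd, []byte("hello pread"), 0); err != nil {
		panic(err)
	}
	buf := make([]byte, 5)
	if _, err := unix.Pread(fd, buf, 6); err != nil { // bytes 6..10 hold "pread"
		panic(err)
	}
	fmt.Printf("%s\n", buf) // pread
}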
vendor/golang.org/x/sys/unix/zsyscall_aix_ppc.go (generated, vendored): 4 changed lines
@@ -975,7 +975,7 @@ func Pause() (err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func Pread(fd int, p []byte, offset int64) (n int, err error) {
+func pread(fd int, p []byte, offset int64) (n int, err error) {
 	var _p0 *byte
 	if len(p) > 0 {
 		_p0 = &p[0]
@@ -992,7 +992,7 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) {

 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT

-func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+func pwrite(fd int, p []byte, offset int64) (n int, err error) {
 	var _p0 *byte
 	if len(p) > 0 {
 		_p0 = &p[0]
Some files were not shown because too many files have changed in this diff.