Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Aliaksandr Valialkin 2023-03-14 16:26:58 -07:00
commit 827cde4c64
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
53 changed files with 2430 additions and 254 deletions


@@ -2,39 +2,70 @@ package remote_read_integration
 import (
 	"bufio"
+	"encoding/json"
+	"fmt"
+	"log"
 	"net/http"
 	"net/http/httptest"
 	"reflect"
+	"sort"
+	"strconv"
 	"testing"
+	"time"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/prometheus"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/native/stream"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/vmimport"
 )
 
+// LabelValues represents series from api/v1/series response
+type LabelValues map[string]string
+
+// Response represents response from api/v1/series
+type Response struct {
+	Status string        `json:"status"`
+	Series []LabelValues `json:"data"`
+}
+
+// RemoteWriteServer represents fake remote write server with database
 type RemoteWriteServer struct {
 	server         *httptest.Server
 	series         []vm.TimeSeries
+	expectedSeries []vm.TimeSeries
 }
 
 // NewRemoteWriteServer prepares test remote write server
 func NewRemoteWriteServer(t *testing.T) *RemoteWriteServer {
 	rws := &RemoteWriteServer{series: make([]vm.TimeSeries, 0)}
 	mux := http.NewServeMux()
 	mux.Handle("/api/v1/import", rws.getWriteHandler(t))
 	mux.Handle("/health", rws.handlePing())
+	mux.Handle("/api/v1/series", rws.seriesHandler())
+	mux.Handle("/api/v1/export/native", rws.exportNativeHandler())
+	mux.Handle("/api/v1/import/native", rws.importNativeHandler(t))
 	rws.server = httptest.NewServer(mux)
 	return rws
 }
 
-// Close closes the server.
+// Close closes the server
 func (rws *RemoteWriteServer) Close() {
 	rws.server.Close()
 }
 
-func (rws *RemoteWriteServer) ExpectedSeries(series []vm.TimeSeries) {
+// Series saves generated series for fake database
+func (rws *RemoteWriteServer) Series(series []vm.TimeSeries) {
 	rws.series = append(rws.series, series...)
 }
 
+// ExpectedSeries saves expected results to check in the handler
+func (rws *RemoteWriteServer) ExpectedSeries(series []vm.TimeSeries) {
+	rws.expectedSeries = append(rws.expectedSeries, series...)
+}
+
+// URL returns server url
 func (rws *RemoteWriteServer) URL() string {
 	return rws.server.URL
 }
@@ -68,13 +99,14 @@ func (rws *RemoteWriteServer) getWriteHandler(t *testing.T) http.Handler {
 			rows.Reset()
 		}
 
-		if !reflect.DeepEqual(tss, rws.series) {
+		if !reflect.DeepEqual(tss, rws.expectedSeries) {
 			w.WriteHeader(http.StatusInternalServerError)
-			t.Fatalf("datasets not equal, expected: %#v; \n got: %#v", rws.series, tss)
+			t.Fatalf("datasets not equal, expected: %#v; \n got: %#v", rws.expectedSeries, tss)
 			return
 		}
 
 		w.WriteHeader(http.StatusNoContent)
+		return
 	})
 }
@@ -84,3 +116,146 @@ func (rws *RemoteWriteServer) handlePing() http.Handler {
 		_, _ = w.Write([]byte("OK"))
 	})
 }
+
+func (rws *RemoteWriteServer) seriesHandler() http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		var labelValues []LabelValues
+		for _, ser := range rws.series {
+			metricNames := make(LabelValues)
+			if ser.Name != "" {
+				metricNames["__name__"] = ser.Name
+			}
+			for _, p := range ser.LabelPairs {
+				metricNames[p.Name] = p.Value
+			}
+			labelValues = append(labelValues, metricNames)
+		}
+
+		resp := Response{
+			Status: "success",
+			Series: labelValues,
+		}
+
+		err := json.NewEncoder(w).Encode(resp)
+		if err != nil {
+			log.Printf("error send series: %s", err)
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+	})
+}
+
+func (rws *RemoteWriteServer) exportNativeHandler() http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		now := time.Now()
+		err := prometheus.ExportNativeHandler(now, w, r)
+		if err != nil {
+			log.Printf("error export series via native protocol: %s", err)
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+		w.WriteHeader(http.StatusNoContent)
+		return
+	})
+}
+
+func (rws *RemoteWriteServer) importNativeHandler(t *testing.T) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		common.StartUnmarshalWorkers()
+		defer common.StopUnmarshalWorkers()
+
+		var gotTimeSeries []vm.TimeSeries
+
+		err := stream.Parse(r.Body, false, func(block *stream.Block) error {
+			mn := &block.MetricName
+			var timeseries vm.TimeSeries
+			timeseries.Name = string(mn.MetricGroup)
+			timeseries.Timestamps = append(timeseries.Timestamps, block.Timestamps...)
+			timeseries.Values = append(timeseries.Values, block.Values...)
+			for i := range mn.Tags {
+				tag := &mn.Tags[i]
+				timeseries.LabelPairs = append(timeseries.LabelPairs, vm.LabelPair{
+					Name:  string(tag.Key),
+					Value: string(tag.Value),
+				})
+			}
+			gotTimeSeries = append(gotTimeSeries, timeseries)
+			return nil
+		})
+		if err != nil {
+			log.Printf("error parse stream blocks: %s", err)
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+
+		// got timeseries should be sorted
+		// because they are processed independently
+		sort.SliceStable(gotTimeSeries, func(i, j int) bool {
+			iv, jv := gotTimeSeries[i], gotTimeSeries[j]
+			switch {
+			case iv.Values[0] != jv.Values[0]:
+				return iv.Values[0] < jv.Values[0]
+			case iv.Timestamps[0] != jv.Timestamps[0]:
+				return iv.Timestamps[0] < jv.Timestamps[0]
+			default:
+				return iv.Name < jv.Name
+			}
+		})
+
+		if !reflect.DeepEqual(gotTimeSeries, rws.expectedSeries) {
+			w.WriteHeader(http.StatusInternalServerError)
+			t.Fatalf("datasets not equal, expected: %#v;\n got: %#v", rws.expectedSeries, gotTimeSeries)
+		}
+		w.WriteHeader(http.StatusNoContent)
+		return
+	})
+}
+
+// GenerateVNSeries generates test timeseries
+func GenerateVNSeries(start, end, numOfSeries, numOfSamples int64) []vm.TimeSeries {
+	var ts []vm.TimeSeries
+	j := 0
+	for i := 0; i < int(numOfSeries); i++ {
+		if i%3 == 0 {
+			j++
+		}
+		timeSeries := vm.TimeSeries{
+			Name: fmt.Sprintf("vm_metric_%d", j),
+			LabelPairs: []vm.LabelPair{
+				{Name: "job", Value: strconv.Itoa(i)},
+			},
+		}
+		ts = append(ts, timeSeries)
+	}
+
+	for i := range ts {
+		t, v := generateTimeStampsAndValues(i, start, end, numOfSamples)
+		ts[i].Timestamps = t
+		ts[i].Values = v
+	}
+
+	return ts
+}
+
+func generateTimeStampsAndValues(idx int, startTime, endTime, numOfSamples int64) ([]int64, []float64) {
+	delta := (endTime - startTime) / numOfSamples
+
+	var timestamps []int64
+	var values []float64
+	t := startTime
+	for t != endTime {
+		v := 100 * int64(idx)
+		timestamps = append(timestamps, t*1000)
+		values = append(values, float64(v))
+		t = t + delta
+	}
+	return timestamps, values
+}
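Taken together, these handlers let a test stand up a complete fake VictoriaMetrics endpoint. A hypothetical wiring sketch, using only the exported helpers added above (the test name is illustrative):

```go
package example_test

import (
	"testing"

	remote_read_integration "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/testdata/servers_integration_test"
)

func TestFakeServerWiring(t *testing.T) {
	rws := remote_read_integration.NewRemoteWriteServer(t)
	defer rws.Close()

	// Seed the fake database (served via /api/v1/series and /api/v1/export/native)
	// and register the dataset the import handlers must receive back.
	series := remote_read_integration.GenerateVNSeries(1669368185, 1669454615, 3, 2)
	rws.Series(series)
	rws.ExpectedSeries(series)

	// Point the code under test at the fake server.
	t.Logf("fake VictoriaMetrics endpoint: %s", rws.URL())
}
```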


@@ -2,118 +2,295 @@ package main
 import (
 	"context"
+	"flag"
+	"fmt"
+	"log"
+	"os"
 	"testing"
 	"time"
 
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
+	remote_read_integration "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/testdata/servers_integration_test"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
+	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
 )
 
-// If you want to run this test:
-// 1. run two instances of victoriametrics and define -httpListenAddr for both or just for second instance
-// 2. define srcAddr and dstAddr const with your victoriametrics addresses
-// 3. define matchFilter const with your importing data
-// 4. define timeStartFilter
-// 5. run each test one by one
 const (
-	matchFilter     = `{job="avalanche"}`
-	timeStartFilter = "2020-01-01T20:07:00Z"
-	timeEndFilter   = "2020-08-01T20:07:00Z"
-	srcAddr         = "http://127.0.0.1:8428"
-	dstAddr         = "http://127.0.0.1:8528"
+	storagePath     = "TestStorage"
+	retentionPeriod = "100y"
 )
 
-// This test simulates close process if user abort it
 func Test_vmNativeProcessor_run(t *testing.T) {
-	t.Skip()
+	processFlags()
+	vmstorage.Init(promql.ResetRollupResultCacheIfNeeded)
+	defer func() {
+		vmstorage.Stop()
+		if err := os.RemoveAll(storagePath); err != nil {
+			log.Fatalf("cannot remove %q: %s", storagePath, err)
+		}
+	}()
+
 	type fields struct {
-		filter    native.Filter
-		rateLimit int64
-		dst       *native.Client
-		src       *native.Client
+		filter       native.Filter
+		dst          *native.Client
+		src          *native.Client
+		backoff      *backoff.Backoff
+		s            *stats
+		rateLimit    int64
+		interCluster bool
+		cc           int
+		matchName    string
+		matchValue   string
 	}
+	type args struct {
+		ctx    context.Context
+		silent bool
+	}
 	tests := []struct {
-		name    string
-		fields  fields
-		closer  func(cancelFunc context.CancelFunc)
-		wantErr bool
+		name           string
+		fields         fields
+		args           args
+		vmSeries       func(start, end, numOfSeries, numOfSamples int64) []vm.TimeSeries
+		expectedSeries []vm.TimeSeries
+		start          string
+		end            string
+		numOfSamples   int64
+		numOfSeries    int64
+		chunk          string
+		wantErr        bool
 	}{
 		{
-			name: "simulate syscall.SIGINT",
-			fields: fields{
-				filter: native.Filter{
-					Match:     matchFilter,
-					TimeStart: timeStartFilter,
-				},
-				rateLimit: 0,
-				dst: &native.Client{
-					Addr: dstAddr,
-				},
-				src: &native.Client{
-					Addr: srcAddr,
-				},
-			},
-			closer: func(cancelFunc context.CancelFunc) {
-				time.Sleep(time.Second * 5)
-				cancelFunc()
-			},
-			wantErr: true,
-		},
-		{
-			name: "simulate correct work",
-			fields: fields{
-				filter: native.Filter{
-					Match:     matchFilter,
-					TimeStart: timeStartFilter,
-				},
-				rateLimit: 0,
-				dst: &native.Client{
-					Addr: dstAddr,
-				},
-				src: &native.Client{
-					Addr: srcAddr,
-				},
-			},
-			closer:  func(cancelFunc context.CancelFunc) {},
-			wantErr: false,
-		},
-		{
-			name: "simulate correct work with chunking",
-			fields: fields{
-				filter: native.Filter{
-					Match:     matchFilter,
-					TimeStart: timeStartFilter,
-					TimeEnd:   timeEndFilter,
-					Chunk:     stepper.StepMonth,
-				},
-				rateLimit: 0,
-				dst: &native.Client{
-					Addr: dstAddr,
-				},
-				src: &native.Client{
-					Addr: srcAddr,
-				},
-			},
-			closer:  func(cancelFunc context.CancelFunc) {},
+			name:         "step minute on minute time range",
+			start:        "2022-11-25T11:23:05+02:00",
+			end:          "2022-11-27T11:24:05+02:00",
+			numOfSamples: 2,
+			numOfSeries:  3,
+			chunk:        stepper.StepMinute,
+			fields: fields{
+				filter:       native.Filter{},
+				backoff:      backoff.New(),
+				rateLimit:    0,
+				interCluster: false,
+				cc:           1,
+				matchName:    "__name__",
+				matchValue:   ".*",
+			},
+			args: args{
+				ctx:    context.Background(),
+				silent: true,
+			},
+			vmSeries: remote_read_integration.GenerateVNSeries,
+			expectedSeries: []vm.TimeSeries{
+				{
+					Name:       "vm_metric_1",
+					LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
+					Timestamps: []int64{1669368185000, 1669454615000},
+					Values:     []float64{0, 0},
+				},
+				{
+					Name:       "vm_metric_1",
+					LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
+					Timestamps: []int64{1669368185000, 1669454615000},
+					Values:     []float64{100, 100},
+				},
+				{
+					Name:       "vm_metric_1",
+					LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
+					Timestamps: []int64{1669368185000, 1669454615000},
+					Values:     []float64{200, 200},
+				},
+			},
+			wantErr: false,
+		},
+		{
+			name:         "step month on month time range",
+			start:        "2022-09-26T11:23:05+02:00",
+			end:          "2022-11-26T11:24:05+02:00",
+			numOfSamples: 2,
+			numOfSeries:  3,
+			chunk:        stepper.StepMonth,
+			fields: fields{
+				filter:       native.Filter{},
+				backoff:      backoff.New(),
+				rateLimit:    0,
+				interCluster: false,
+				cc:           1,
+				matchName:    "__name__",
+				matchValue:   ".*",
+			},
+			args: args{
+				ctx:    context.Background(),
+				silent: true,
+			},
+			vmSeries: remote_read_integration.GenerateVNSeries,
+			expectedSeries: []vm.TimeSeries{
+				{
+					Name:       "vm_metric_1",
+					LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
+					Timestamps: []int64{1664184185000},
+					Values:     []float64{0},
+				},
+				{
+					Name:       "vm_metric_1",
+					LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
+					Timestamps: []int64{1666819415000},
+					Values:     []float64{0},
+				},
+				{
+					Name:       "vm_metric_1",
+					LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
+					Timestamps: []int64{1664184185000},
+					Values:     []float64{100},
+				},
+				{
+					Name:       "vm_metric_1",
+					LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
+					Timestamps: []int64{1666819415000},
+					Values:     []float64{100},
+				},
+				{
+					Name:       "vm_metric_1",
+					LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
+					Timestamps: []int64{1664184185000},
+					Values:     []float64{200},
+				},
+				{
+					Name:       "vm_metric_1",
+					LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
+					Timestamps: []int64{1666819415000},
+					Values:     []float64{200},
+				},
+			},
 			wantErr: false,
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			ctx, cancelFn := context.WithCancel(context.Background())
-			p := &vmNativeProcessor{
-				filter:    tt.fields.filter,
-				rateLimit: tt.fields.rateLimit,
-				dst:       tt.fields.dst,
-				src:       tt.fields.src,
-			}
-
-			tt.closer(cancelFn)
-
-			if err := p.run(ctx, true); (err != nil) != tt.wantErr {
+			src := remote_read_integration.NewRemoteWriteServer(t)
+			dst := remote_read_integration.NewRemoteWriteServer(t)
+
+			defer func() {
+				src.Close()
+				dst.Close()
+			}()
+
+			start, err := time.Parse(time.RFC3339, tt.start)
+			if err != nil {
+				t.Fatalf("Error parse start time: %s", err)
+			}
+
+			end, err := time.Parse(time.RFC3339, tt.end)
+			if err != nil {
+				t.Fatalf("Error parse end time: %s", err)
+			}
+
+			tt.fields.filter.Match = fmt.Sprintf("%s=%q", tt.fields.matchName, tt.fields.matchValue)
+			tt.fields.filter.TimeStart = tt.start
+			tt.fields.filter.TimeEnd = tt.end
+
+			rws := tt.vmSeries(start.Unix(), end.Unix(), tt.numOfSeries, tt.numOfSamples)
+
+			src.Series(rws)
+			dst.ExpectedSeries(tt.expectedSeries)
+
+			if err := fillStorage(rws); err != nil {
+				t.Fatalf("error add series to storage: %s", err)
+			}
+
+			tt.fields.src = &native.Client{
+				AuthCfg:              nil,
+				Addr:                 src.URL(),
+				ExtraLabels:          []string{},
+				DisableHTTPKeepAlive: false,
+			}
+
+			tt.fields.dst = &native.Client{
+				AuthCfg:              nil,
+				Addr:                 dst.URL(),
+				ExtraLabels:          []string{},
+				DisableHTTPKeepAlive: false,
+			}
+
+			p := &vmNativeProcessor{
+				filter:       tt.fields.filter,
+				dst:          tt.fields.dst,
+				src:          tt.fields.src,
+				backoff:      tt.fields.backoff,
+				s:            tt.fields.s,
+				rateLimit:    tt.fields.rateLimit,
+				interCluster: tt.fields.interCluster,
+				cc:           tt.fields.cc,
+			}
+
+			if err := p.run(tt.args.ctx, tt.args.silent); (err != nil) != tt.wantErr {
 				t.Errorf("run() error = %v, wantErr %v", err, tt.wantErr)
 			}
+			deleted, err := deleteSeries(tt.fields.matchName, tt.fields.matchValue)
+			if err != nil {
+				t.Fatalf("error delete series: %s", err)
+			}
+			if int64(deleted) != tt.numOfSeries {
+				t.Fatalf("expected deleted series %d; got deleted series %d", tt.numOfSeries, deleted)
+			}
 		})
 	}
 }
+
+func processFlags() {
+	flag.Parse()
+	for _, fv := range []struct {
+		flag  string
+		value string
+	}{
+		{flag: "storageDataPath", value: storagePath},
+		{flag: "retentionPeriod", value: retentionPeriod},
+	} {
+		// panics if flag doesn't exist
+		if err := flag.Lookup(fv.flag).Value.Set(fv.value); err != nil {
+			log.Fatalf("unable to set %q with value %q, err: %v", fv.flag, fv.value, err)
+		}
+	}
+}
+
+func fillStorage(series []vm.TimeSeries) error {
+	var mrs []storage.MetricRow
+	for _, series := range series {
+		var labels []prompb.Label
+		for _, lp := range series.LabelPairs {
+			labels = append(labels, prompb.Label{Name: []byte(lp.Name), Value: []byte(lp.Value)})
+		}
+		if series.Name != "" {
+			labels = append(labels, prompb.Label{Name: []byte("__name__"), Value: []byte(series.Name)})
+		}
+		mr := storage.MetricRow{}
+		mr.MetricNameRaw = storage.MarshalMetricNameRaw(mr.MetricNameRaw[:0], labels)
+
+		timestamps := series.Timestamps
+		values := series.Values
+		for i, value := range values {
+			mr.Timestamp = timestamps[i]
+			mr.Value = value
+			mrs = append(mrs, mr)
+		}
+	}
+
+	if err := vmstorage.AddRows(mrs); err != nil {
+		return fmt.Errorf("unexpected error in AddRows: %s", err)
+	}
+	vmstorage.Storage.DebugFlush()
+	return nil
+}
+
+func deleteSeries(name, value string) (int, error) {
+	tfs := storage.NewTagFilters()
+	if err := tfs.Add([]byte(name), []byte(value), false, true); err != nil {
+		return 0, fmt.Errorf("unexpected error in TagFilters.Add: %w", err)
+	}
+	return vmstorage.DeleteSeries(nil, []*storage.TagFilters{tfs})
+}
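With the embedded vmstorage bootstrap above, the test no longer needs externally running VictoriaMetrics instances and can be invoked like any other Go test. Assuming the file lives under app/vmctl (as its package and imports suggest):

```console
go test -run Test_vmNativeProcessor_run ./app/vmctl/
```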


@@ -15,6 +15,8 @@ The following tip changes can be tested by building VictoriaMetrics components f
 ## tip
 
+* FEATURE: log metrics with truncated labels if the length of label value in the ingested metric exceeds `-maxLabelValueLen`. This should simplify debugging for this case.
+
 ## [v1.89.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.89.1)
 
 Released at 2023-03-12


@@ -13,3 +13,4 @@ sort: 26
 7. [Multi-regional setup with VictoriaMetrics: Dedicated regions for monitoring](https://docs.victoriametrics.com/guides/multi-regional-setup-dedicated-regions.html)
 8. [How to delete or replace metrics in VictoriaMetrics](https://docs.victoriametrics.com/guides/guide-delete-or-replace-metrics.html)
 9. [How to monitor kubernetes cluster using Managed VictoriaMetrics](https://docs.victoriametrics.com/managed-victoriametrics/how-to-monitor-k8s.html)
+10. [How to configure vmgateway for multi-tenant access using Grafana and OpenID Connect](https://docs.victoriametrics.com/guides/grafana-vmgateway-openid-configuration.html)


@@ -0,0 +1,194 @@
# How to configure vmgateway for multi-tenant access using Grafana and OpenID Connect

Using [Grafana](https://grafana.com/) with [vmgateway](https://docs.victoriametrics.com/vmgateway.html) is a great way to provide [multi-tenant](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#multitenancy) access to your metrics.
vmgateway provides a way to authenticate users using [JWT tokens](https://en.wikipedia.org/wiki/JSON_Web_Token) issued by an external identity provider.
Those tokens can include information about the user and the tenant they belong to, which can be used
to restrict access to metrics to only those that belong to the tenant.

## Prerequisites

* Identity service that can issue [JWT tokens](https://en.wikipedia.org/wiki/JSON_Web_Token)
* [Grafana](https://grafana.com/)
* VictoriaMetrics single-node or cluster version
* [vmgateway](https://docs.victoriametrics.com/vmgateway.html)

## Configure identity service

The identity service must be able to issue JWT tokens with the following `vm_access` claim:

```json
{
  "vm_access": {
    "tenant_id": {
      "account_id": 0,
      "project_id": 0
    }
  }
}
```

See details about all supported options in the [vmgateway documentation](https://docs.victoriametrics.com/vmgateway.html#access-control).
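For illustration, a decoded JWT payload carrying this claim might look as follows; every field other than `vm_access` is a standard OIDC claim shown here with made-up values:

```json
{
  "exp": 1678900000,
  "iss": "http://localhost:3001/realms/master",
  "preferred_username": "alice",
  "vm_access": {
    "tenant_id": {
      "account_id": 0,
      "project_id": 0
    }
  }
}
```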
### Configuration example for Keycloak

[Keycloak](https://www.keycloak.org/) is an open source identity service that can be used to issue JWT tokens.

1. Log in with admin credentials to your Keycloak instance.
2. Go to `Clients` -> `Create`.
   Use `OpenID Connect` as `Client Type`.
   Specify `grafana` as `Client ID`.
   Click `Next`.
   <img src="grafana-vmgateway-openid-configuration/create-client-1.png" width="800">
3. Enable `Client authentication`.
   Enable `Authorization`.
   <img src="grafana-vmgateway-openid-configuration/create-client-2.png" width="800">
   Click `Next`.
4. Add the Grafana URL to `Valid Redirect URIs`. For example, `http://localhost:3000/`.
   <img src="grafana-vmgateway-openid-configuration/create-client-3.png" width="800">
   Click `Save`.
5. Go to `Clients` -> `grafana` -> `Credentials`.
   <img src="grafana-vmgateway-openid-configuration/client-secret.png" width="800">
   Copy the value of `Client secret`. It will be used later in the Grafana configuration.
6. Go to `Clients` -> `grafana` -> `Client scopes`.
   Click `grafana-dedicated` -> `Add mapper`.
   <img src="grafana-vmgateway-openid-configuration/create-mapper-1.png" width="800">
   <img src="grafana-vmgateway-openid-configuration/create-mapper-2.png" width="800">
   Configure the mapper as follows:
   - `Mapper Type` as `User Attribute`.
   - `Name` as `vm_access`.
   - `Token Claim Name` as `vm_access`.
   - `User Attribute` as `vm_access`.
   - `Claim JSON Type` as `JSON`.
   Enable `Add to ID token` and `Add to access token`.
   <img src="grafana-vmgateway-openid-configuration/create-mapper-3.png" width="800">
   Click `Save`.
7. Go to `Users` -> select the user to configure claims for -> `Attributes`.
   Specify `vm_access` as `Key`.
   Specify `{"tenant_id" : {"account_id": 0, "project_id": 0 }}` as `Value`.
   <img src="grafana-vmgateway-openid-configuration/user-attributes.png" width="800">
   Click `Save`.

## Configure Grafana

To forward JWT tokens, Grafana must be configured to use OpenID Connect authentication as follows:

```ini
[auth.generic_oauth]
enabled = true
allow_sign_up = true
name = keycloak
client_id = {CLIENT_ID_FROM_IDENTITY_PROVIDER}
client_secret = {SECRET_FROM_IDENTITY_PROVIDER}
scopes = openid profile email
auth_url = http://localhost:3001/realms/{KEYCLOAK_REALM}/protocol/openid-connect/auth
token_url = http://localhost:3001/realms/{KEYCLOAK_REALM}/protocol/openid-connect/token
api_url = http://localhost:3001/realms/{KEYCLOAK_REALM}/protocol/openid-connect/userinfo
```

After restarting Grafana with the new config, you should be able to log in using your identity provider.

## Start vmgateway

### Multi-tenant access for VictoriaMetrics cluster

Starting vmgateway with authentication enabled is as simple as adding the `-enable.auth=true` flag.
To enable multi-tenant access, you must also specify the `-clusterMode=true` flag.

```console
./bin/vmgateway -eula \
    -enable.auth=true \
    -clusterMode=true \
    -write.url=http://localhost:8480 \
    -read.url=http://localhost:8481
```

With this configuration, vmgateway will use the `vm_access` claim from the JWT token to restrict access to metrics.
For example, if the JWT token contains the following `vm_access` claim:

```json
{
  "vm_access": {
    "tenant_id": {
      "account_id": 0,
      "project_id": 0
    }
  }
}
```

> Note: if `project_id` is not specified, the default value `0` is used.

then vmgateway will proxy requests to an endpoint with the following path:

```console
http://localhost:8480/select/0:0/
```

This allows restricting access to specific tenants without having to create separate datasources in Grafana
or manually managing access at another proxy layer.
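For illustration, a query sent through vmgateway with such a token (the `$TOKEN` value is a placeholder for the JWT issued by your identity provider) is rewritten to the tenant-prefixed path shown above:

```console
curl -H "Authorization: Bearer $TOKEN" \
    'http://localhost:8431/api/v1/query?query=up'
# proxied to the read backend under the tenant prefix /select/0:0/
```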
### Multi-tenant access for single-node VictoriaMetrics

To use multi-tenant access with single-node VictoriaMetrics, you can use token claims such as `extra_labels`
or `extra_filters`, filled dynamically from the identity provider's user information.
vmgateway uses those claims and the [enhanced Prometheus querying API](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#prometheus-querying-api-enhancements)
to provide additional filtering capabilities.

For example, the following claims can be used to restrict user access to specific metrics:

```json
{
  "vm_access": {
    "extra_labels": {
      "team": "dev"
    },
    "extra_filters": ["{env=~\"aws|gcp\",cluster!=\"production\"}"]
  }
}
```

This will add the following query args to the proxied request:

- `extra_labels=team=dev`
- `extra_filters={env=~"aws|gcp",cluster!="production"}`

With this configuration, VictoriaMetrics will add the filters `{team="dev", env=~"aws|gcp", cluster!="production"}` to every query,
so a query for `vm_http_requests_total` is transformed into `vm_http_requests_total{team="dev", env=~"aws|gcp", cluster!="production"}`.

### Token signature verification

It is also possible to enable [JWT token signature verification](https://docs.victoriametrics.com/vmgateway.html#jwt-signature-verification) at
vmgateway.
To do this via the OpenID Connect discovery endpoint, specify the `-auth.oidcDiscoveryEndpoints` flag. For example:

```console
./bin/vmgateway -eula \
    -enable.auth=true \
    -clusterMode=true \
    -write.url=http://localhost:8480 \
    -read.url=http://localhost:8481 \
    -auth.oidcDiscoveryEndpoints=http://localhost:3001/realms/master/.well-known/openid-configuration
```

vmgateway will then print the following message on startup:

```console
2023-03-13T14:45:31.552Z  info  VictoriaMetrics/app/vmgateway/main.go:154  using 2 keys for JWT token signature verification
```

This means that vmgateway has successfully fetched the public keys from the OpenID Connect discovery endpoint.

It is also possible to provide the public keys directly via the `-auth.publicKeys` flag. See the [vmgateway documentation](https://docs.victoriametrics.com/vmgateway.html#jwt-signature-verification) for details.
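A sketch of that direct-key variant, assuming the key has been saved locally and that the flag accepts it in the format described in the linked documentation (the `public-key.pem` file name is hypothetical):

```console
./bin/vmgateway -eula \
    -enable.auth=true \
    -clusterMode=true \
    -write.url=http://localhost:8480 \
    -read.url=http://localhost:8481 \
    -auth.publicKeys=public-key.pem
```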
## Use Grafana to query metrics

Create a new Prometheus datasource in Grafana with the URL `http://<vmgateway>:8431`;
the URL must point to the vmgateway instance.

You can also use the VictoriaMetrics [Grafana datasource](https://github.com/VictoriaMetrics/grafana-datasource) plugin.
See the installation instructions [here](https://github.com/VictoriaMetrics/grafana-datasource#installation).

Enable the `Forward OAuth identity` flag.

<img src="grafana-vmgateway-openid-configuration/grafana-ds.png" width="800">

Now you can use Grafana to query metrics: users with the `vm_access` claim will only be able to query metrics from the tenant specified in their token.

[9 binary PNG screenshots added (create-client-1..3, create-mapper-1..3, client-secret, user-attributes, grafana-ds), referenced by the guide above; binary contents not shown]

go.mod

@@ -14,19 +14,19 @@ require (
 	github.com/VictoriaMetrics/metrics v1.23.1
 	github.com/VictoriaMetrics/metricsql v0.56.1
 	github.com/aws/aws-sdk-go-v2 v1.17.6
-	github.com/aws/aws-sdk-go-v2/config v1.18.16
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.56
+	github.com/aws/aws-sdk-go-v2/config v1.18.17
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.57
 	github.com/aws/aws-sdk-go-v2/service/s3 v1.30.6
 	github.com/cespare/xxhash/v2 v2.2.0
 	github.com/cheggaaa/pb/v3 v3.1.2
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
-	github.com/fatih/color v1.14.1 // indirect
+	github.com/fatih/color v1.15.0 // indirect
 	github.com/go-kit/kit v0.12.0
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/snappy v0.0.4
 	github.com/googleapis/gax-go/v2 v2.7.1
 	github.com/influxdata/influxdb v1.11.0
-	github.com/klauspost/compress v1.16.0
+	github.com/klauspost/compress v1.16.3
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-runewidth v0.0.14 // indirect
 	github.com/oklog/ulid v1.3.1
@@ -54,10 +54,10 @@ require (
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
-	github.com/aws/aws-sdk-go v1.44.219 // indirect
+	github.com/aws/aws-sdk-go v1.44.221 // indirect
 	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
-	github.com/aws/aws-sdk-go-v2/credentials v1.13.16 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24 // indirect
+	github.com/aws/aws-sdk-go-v2/credentials v1.13.17 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.0 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/ini v1.3.31 // indirect
@@ -107,7 +107,7 @@ require (
 	go.opentelemetry.io/otel/trace v1.14.0 // indirect
 	go.uber.org/atomic v1.10.0 // indirect
 	go.uber.org/goleak v1.2.1 // indirect
-	golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 // indirect
+	golang.org/x/exp v0.0.0-20230314191032-db074128a8ec // indirect
 	golang.org/x/sync v0.1.0 // indirect
 	golang.org/x/text v0.8.0 // indirect
 	golang.org/x/time v0.3.0 // indirect
@@ -115,6 +115,6 @@ require (
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
 	google.golang.org/grpc v1.53.0 // indirect
-	google.golang.org/protobuf v1.29.0 // indirect
+	google.golang.org/protobuf v1.29.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )

go.sum

@@ -86,20 +86,20 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
 github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
 github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.44.219 h1:YOFxTUQZvdRzgwb6XqLFRwNHxoUdKBuunITC7IFhvbc=
-github.com/aws/aws-sdk-go v1.44.219/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.221 h1:yndn4uvLolKXPoXIwKHhO5XtwlTnJfXLBKXs84C5+hQ=
+github.com/aws/aws-sdk-go v1.44.221/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
 github.com/aws/aws-sdk-go-v2 v1.17.6 h1:Y773UK7OBqhzi5VDXMi1zVGsoj+CVHs2eaC2bDsLwi0=
 github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno=
-github.com/aws/aws-sdk-go-v2/config v1.18.16 h1:4r7gsCu8Ekwl5iJGE/GmspA2UifqySCCkyyyPFeWs3w=
-github.com/aws/aws-sdk-go-v2/config v1.18.16/go.mod h1:XjM6lVbq7UgELp9NjXBrb1DQY/ownlWsvDhEQksemJc=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.16 h1:GgToSxaENX/1zXIGNFfiVk4hxryYJ5Vt4Mh8XLAL7Lc=
-github.com/aws/aws-sdk-go-v2/credentials v1.13.16/go.mod h1:KP7aFJhfwPFgx9aoVYL2nYHjya5WBD98CWaadpgmnpY=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24 h1:5qyqXASrX2zy5cTnoHHa4N2c3Lc94GH7gjnBP3GwKdU=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24/go.mod h1:neYVaeKr5eT7BzwULuG2YbLhzWZ22lpjKdCybR7AXrQ=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.56 h1:kFDCPqqVvb9vYcW82L7xYfrBGpuxXQ/8A/zYVayRQK4=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.56/go.mod h1:FoSBuessadgy8Cqp9gQF8U5rzi1XVQhiEJ6su2/kBEE=
+github.com/aws/aws-sdk-go-v2/config v1.18.17 h1:jwTkhULSrbr/SQA8tfdYqZxpG8YsRycmIXxJcbrqY5E=
+github.com/aws/aws-sdk-go-v2/config v1.18.17/go.mod h1:Lj3E7XcxJnxMa+AYo89YiL68s1cFJRGduChynYU67VA=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.17 h1:IubQO/RNeIVKF5Jy77w/LfUvmmCxTnk2TP1UZZIMiF4=
+github.com/aws/aws-sdk-go-v2/credentials v1.13.17/go.mod h1:K9xeFo1g/YPMguMUD69YpwB4Nyi6W/5wn706xIInJFg=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.0 h1:/2Cb3SK3xVOQA7Xfr5nCWCo5H3UiNINtsVvVdk8sQqA=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.0/go.mod h1:neYVaeKr5eT7BzwULuG2YbLhzWZ22lpjKdCybR7AXrQ=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.57 h1:ubKS0iZH5veiqb44qeHzaoKNPvCZQeBVFw4JDhfeWjk=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.57/go.mod h1:dRBjXtcjmYglxVHpdoGGVWvZumDC27I2GLDGI0Uw4RQ=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30 h1:y+8n9AGDjikyXoMBTRaHHHSaFEB8267ykmvyPodJfys=
 github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30/go.mod h1:LUBAO3zNXQjoONBKn/kR1y0Q4cj/D02Ts0uHYjcCQLM=
 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.24 h1:r+Kv+SEJquhAZXaJ7G4u44cIwXV3f8K+N482NNAzJZA=
@@ -163,8 +163,8 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
 github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY=
-github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
-github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
+github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
+github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
 github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
 github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
@@ -315,8 +315,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
 github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
-github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
+github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -481,8 +481,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 h1:LGJsf5LRplCck6jUCH3dBL2dmycNruWNF5xugkSlfXw=
-golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/exp v0.0.0-20230314191032-db074128a8ec h1:pAv+d8BM2JNnNctsLJ6nnZ6NqXT8N4+eauvZSb3P0I0=
+golang.org/x/exp v0.0.0-20230314191032-db074128a8ec/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -761,8 +761,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.29.0 h1:44S3JjaKmLEE4YIkjzexaP+NzZsudE3Zin5Njn/pYX0=
-google.golang.org/protobuf v1.29.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM=
+google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=


@@ -486,7 +486,7 @@ func MarshalMetricNameRaw(dst []byte, labels []prompb.Label) []byte {
 			label.Name = label.Name[:maxLabelNameLen]
 		}
 		if len(label.Value) > maxLabelValueLen {
-			atomic.AddUint64(&TooLongLabelValues, 1)
+			trackTruncatedLabels(labels, label)
 			label.Value = label.Value[:maxLabelValueLen]
 		}
 		if len(label.Value) == 0 {
@@ -534,7 +534,7 @@ func trackDroppedLabels(labels, droppedLabels []prompb.Label) {
 	select {
 	case <-droppedLabelsLogTicker.C:
 		// Do not call logger.WithThrottler() here, since this will result in increased CPU usage
-		// because labelsToString() will be called with each trackDroppedLAbels call.
+		// because labelsToString() will be called with each trackDroppedLabels call.
 		logger.Warnf("dropping %d labels for %s; dropped labels: %s; either reduce the number of labels for this metric "+
 			"or increase -maxLabelsPerTimeseries=%d command-line flag value",
 			len(droppedLabels), labelsToString(labels), labelsToString(droppedLabels), maxLabelsPerTimeseries)
@@ -542,7 +542,21 @@ func trackDroppedLabels(labels, droppedLabels []prompb.Label) {
 	}
 }
 
+func trackTruncatedLabels(labels []prompb.Label, truncated *prompb.Label) {
+	atomic.AddUint64(&TooLongLabelValues, 1)
+	select {
+	case <-truncatedLabelsLogTicker.C:
+		// Do not call logger.WithThrottler() here, since this will result in increased CPU usage
+		// because labelsToString() will be called with each trackTruncatedLabels call.
+		logger.Warnf("truncated label value as it exceeds configured maximal label value length: max %d, actual %d;"+
+			" truncated label: %s; original labels: %s; either reduce the label value length or increase -maxLabelValueLen=%d;",
+			maxLabelValueLen, len(truncated.Value), truncated.Name, labelsToString(labels), maxLabelValueLen)
+	default:
+	}
+}
+
 var droppedLabelsLogTicker = time.NewTicker(5 * time.Second)
+var truncatedLabelsLogTicker = time.NewTicker(5 * time.Second)
 
 func labelsToString(labels []prompb.Label) string {
 	labelsCopy := append([]prompb.Label{}, labels...)
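The `select` on a shared `time.Ticker` with an empty `default` branch is the throttling idiom both track functions rely on: at most one message per ticker interval gets logged, and the rest are dropped without blocking. A standalone sketch of the same pattern:

```go
package main

import (
	"log"
	"time"
)

// logTicker gates noisy warnings: only the call that wins the
// ticker's 5-second tick actually logs; all others fall through
// to the non-blocking default branch and are dropped.
var logTicker = time.NewTicker(5 * time.Second)

func warnThrottled(msg string) {
	select {
	case <-logTicker.C:
		log.Println(msg)
	default:
		// Drop the message. Skipping here also avoids paying the
		// cost of formatting the full label set on every call.
	}
}

func main() {
	for i := 0; i < 10; i++ {
		warnThrottled("label value truncated")
		time.Sleep(time.Second)
	}
}
```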


@@ -1,3 +1,7 @@
+# v1.18.17 (2023-03-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.18.16 (2023-03-10)
 
 * **Dependency Update**: Updated to the latest SDK module versions


@@ -3,4 +3,4 @@
 package config
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.18.16"
+const goModuleVersion = "1.18.17"


@@ -1,3 +1,7 @@
+# v1.13.17 (2023-03-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.13.16 (2023-03-10)
 
 * **Dependency Update**: Updated to the latest SDK module versions


@@ -3,4 +3,4 @@
 package credentials
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.13.16"
+const goModuleVersion = "1.13.17"


@@ -1,3 +1,7 @@
+# v1.13.0 (2023-03-14)
+
+* **Feature**: Add flag to disable IMDSv1 fallback
+
 # v1.12.24 (2023-03-10)
 
 * **Dependency Update**: Updated to the latest SDK module versions


@@ -174,6 +174,16 @@ type Options struct {
 	// The logger writer interface to write logging messages to.
 	Logger logging.Logger
 
+	// Configure IMDSv1 fallback behavior. By default, the client will attempt
+	// to fall back to IMDSv1 as needed for backwards compatibility. When set to [aws.FalseTernary]
+	// the client will return any errors encountered from attempting to fetch a token
+	// instead of silently using the insecure data flow of IMDSv1.
+	//
+	// See [configuring IMDS] for more information.
+	//
+	// [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
+	EnableFallback aws.Ternary
+
 	// provides the caching of API tokens used for operation calls. If unset,
 	// the API token will not be retrieved for the operation.
 	tokenProvider *tokenProvider
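A minimal sketch of opting out of the fallback from client code using this new option; the `GetMetadata` call is only illustrative and would fail outside EC2:

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
)

func main() {
	// Construct an IMDS client that never falls back to IMDSv1:
	// token retrieval errors are returned to the caller instead of
	// being bypassed via the insecure IMDSv1 data flow.
	client := imds.New(imds.Options{
		EnableFallback: aws.FalseTernary,
	})

	if _, err := client.GetMetadata(context.TODO(), &imds.GetMetadataInput{
		Path: "instance-id",
	}); err != nil {
		log.Fatalf("IMDSv2 token flow failed and fallback is disabled: %v", err)
	}
}
```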


@@ -3,4 +3,4 @@
 package imds
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.12.24"
+const goModuleVersion = "1.13.0"


@@ -4,12 +4,14 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/smithy-go"
+	"github.com/aws/smithy-go/logging"
 	"net/http"
 	"sync"
 	"sync/atomic"
 	"time"
 
-	smithy "github.com/aws/smithy-go"
 	"github.com/aws/smithy-go/middleware"
 	smithyhttp "github.com/aws/smithy-go/transport/http"
 )
@@ -68,7 +70,7 @@ func (t *tokenProvider) HandleFinalize(
 ) (
 	out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
 ) {
-	if !t.enabled() {
+	if t.fallbackEnabled() && !t.enabled() {
 		// short-circuits to insecure data flow if token provider is disabled.
 		return next.HandleFinalize(ctx, input)
 	}
@@ -115,23 +117,15 @@ func (t *tokenProvider) HandleDeserialize(
 	}
 
 	if resp.StatusCode == http.StatusUnauthorized { // unauthorized
-		err = &retryableError{Err: err}
 		t.enable()
+		err = &retryableError{Err: err, isRetryable: true}
 	}
 	return out, metadata, err
 }
 
-type retryableError struct {
-	Err error
-}
-
-func (*retryableError) RetryableError() bool { return true }
-
-func (e *retryableError) Error() string { return e.Err.Error() }
-
 func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) {
-	if !t.enabled() {
+	if t.fallbackEnabled() && !t.enabled() {
 		return nil, &bypassTokenRetrievalError{
 			Err: fmt.Errorf("cannot get API token, provider disabled"),
 		}
@@ -147,7 +141,7 @@ func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error)
 
 	tok, err = t.updateToken(ctx)
 	if err != nil {
-		return nil, fmt.Errorf("cannot get API token, %w", err)
+		return nil, err
 	}
 
 	return tok, nil
@@ -167,17 +161,19 @@ func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
 		TokenTTL: t.tokenTTL,
 	})
 	if err != nil {
-		// change the disabled flag on token provider to true, when error is request timeout error.
 		var statusErr interface{ HTTPStatusCode() int }
 		if errors.As(err, &statusErr) {
 			switch statusErr.HTTPStatusCode() {
-			// Disable future get token if failed because of 403, 404, or 405
+			// Disable get token if failed because of 403, 404, or 405
 			case http.StatusForbidden,
 				http.StatusNotFound,
 				http.StatusMethodNotAllowed:
-				t.disable()
+				if t.fallbackEnabled() {
+					logger := middleware.GetLogger(ctx)
+					logger.Logf(logging.Warn, "falling back to IMDSv1: %v", err)
+					t.disable()
+				}
 
 			// 400 errors are terminal, and need to be upstreamed
 			case http.StatusBadRequest:
@@ -192,8 +188,17 @@ func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
 			atomic.StoreUint32(&t.disabled, 1)
 		}
 
-		// Token couldn't be retrieved, but bypass this, and allow the
-		// request to continue.
+		if !t.fallbackEnabled() {
+			// NOTE: getToken() is an implementation detail of some outer operation
+			// (e.g. GetMetadata). It has its own retries that have already been exhausted.
+			// Mark the underlying error as a terminal error.
+			err = &retryableError{Err: err, isRetryable: false}
+			return nil, err
+		}
+
+		// Token couldn't be retrieved, fallback to IMDSv1 insecure flow for this request
+		// and allow the request to proceed. Future requests _may_ re-attempt fetching a
+		// token if not disabled.
 		return nil, &bypassTokenRetrievalError{Err: err}
 	}
 
@@ -206,21 +211,21 @@ func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
 	return tok, nil
 }
 
-type bypassTokenRetrievalError struct {
-	Err error
-}
-
-func (e *bypassTokenRetrievalError) Error() string {
-	return fmt.Sprintf("bypass token retrieval, %v", e.Err)
-}
-
-func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err }
-
 // enabled returns if the token provider is current enabled or not.
 func (t *tokenProvider) enabled() bool {
 	return atomic.LoadUint32(&t.disabled) == 0
 }
 
+// fallbackEnabled returns false if EnableFallback is [aws.FalseTernary], true otherwise
+func (t *tokenProvider) fallbackEnabled() bool {
+	switch t.client.options.EnableFallback {
+	case aws.FalseTernary:
+		return false
+	default:
+		return true
+	}
+}
+
 // disable disables the token provider and it will no longer attempt to inject
 // the token, nor request updates.
 func (t *tokenProvider) disable() {
@@ -235,3 +240,22 @@ func (t *tokenProvider) enable() {
 	t.tokenMux.Unlock()
 	atomic.StoreUint32(&t.disabled, 0)
 }
+
+type bypassTokenRetrievalError struct {
+	Err error
+}
+
+func (e *bypassTokenRetrievalError) Error() string {
+	return fmt.Sprintf("bypass token retrieval, %v", e.Err)
+}
+
+func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err }
+
+type retryableError struct {
+	Err         error
+	isRetryable bool
+}
+
+func (e *retryableError) RetryableError() bool { return e.isRetryable }
+
+func (e *retryableError) Error() string { return e.Err.Error() }


@@ -1,3 +1,7 @@
+# v1.11.57 (2023-03-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
 # v1.11.56 (2023-03-10)
 
 * **Dependency Update**: Updated to the latest SDK module versions


@@ -3,4 +3,4 @@
 package manager
 
 // goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.11.56"
+const goModuleVersion = "1.11.57"


@ -20,16 +20,16 @@ type RequestRetryer interface{}
// A Config provides service configuration for service clients. By default, // A Config provides service configuration for service clients. By default,
// all clients will use the defaults.DefaultConfig structure. // all clients will use the defaults.DefaultConfig structure.
// //
// // Create Session with MaxRetries configuration to be shared by multiple // // Create Session with MaxRetries configuration to be shared by multiple
// // service clients. // // service clients.
// sess := session.Must(session.NewSession(&aws.Config{ // sess := session.Must(session.NewSession(&aws.Config{
// MaxRetries: aws.Int(3), // MaxRetries: aws.Int(3),
// })) // }))
// //
// // Create S3 service client with a specific Region. // // Create S3 service client with a specific Region.
// svc := s3.New(sess, &aws.Config{ // svc := s3.New(sess, &aws.Config{
// Region: aws.String("us-west-2"), // Region: aws.String("us-west-2"),
// }) // })
type Config struct { type Config struct {
// Enables verbose error printing of all credential chain errors. // Enables verbose error printing of all credential chain errors.
// Should be used when wanting to see all errors while attempting to // Should be used when wanting to see all errors while attempting to
@ -192,6 +192,23 @@ type Config struct {
// //
EC2MetadataDisableTimeoutOverride *bool EC2MetadataDisableTimeoutOverride *bool
// Set this to `false` to disable EC2Metadata client from falling back to IMDSv1.
// By default, EC2 role credentials will fall back to IMDSv1 as needed for backwards compatibility.
// You can disable this behavior by explicitly setting this flag to `false`. When false, the EC2Metadata
// client will return any errors encountered from attempting to fetch a token instead of silently
// using the insecure data flow of IMDSv1.
//
// Example:
// sess := session.Must(session.NewSession(aws.NewConfig()
// .WithEC2MetadataEnableFallback(false)))
//
// svc := s3.New(sess)
//
// See [configuring IMDS] for more information.
//
// [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
EC2MetadataEnableFallback *bool
// Instructs the endpoint to be generated for a service client to // Instructs the endpoint to be generated for a service client to
// be the dual stack endpoint. The dual stack endpoint will support // be the dual stack endpoint. The dual stack endpoint will support
// both IPv4 and IPv6 addressing. // both IPv4 and IPv6 addressing.
@ -283,16 +300,16 @@ type Config struct {
// NewConfig returns a new Config pointer that can be chained with builder // NewConfig returns a new Config pointer that can be chained with builder
// methods to set multiple configuration values inline without using pointers. // methods to set multiple configuration values inline without using pointers.
// //
// // Create Session with MaxRetries configuration to be shared by multiple // // Create Session with MaxRetries configuration to be shared by multiple
// // service clients. // // service clients.
// sess := session.Must(session.NewSession(aws.NewConfig(). // sess := session.Must(session.NewSession(aws.NewConfig().
// WithMaxRetries(3), // WithMaxRetries(3),
// )) // ))
// //
// // Create S3 service client with a specific Region. // // Create S3 service client with a specific Region.
// svc := s3.New(sess, aws.NewConfig(). // svc := s3.New(sess, aws.NewConfig().
// WithRegion("us-west-2"), // WithRegion("us-west-2"),
// ) // )
func NewConfig() *Config { func NewConfig() *Config {
return &Config{} return &Config{}
} }
@ -432,6 +449,13 @@ func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
return c return c
} }
// WithEC2MetadataEnableFallback sets a config EC2MetadataEnableFallback value
// returning a Config pointer for chaining.
func (c *Config) WithEC2MetadataEnableFallback(v bool) *Config {
c.EC2MetadataEnableFallback = &v
return c
}
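For context, a minimal sketch of how the new option might be used (assuming the aws-sdk-go session and ec2metadata packages shown elsewhere in this diff; the region lookup is just an illustration):

```Go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Disable the IMDSv1 fallback: a failed IMDSv2 token fetch now
	// surfaces as an error instead of silently using the insecure flow.
	sess := session.Must(session.NewSession(aws.NewConfig().
		WithEC2MetadataEnableFallback(false)))

	svc := ec2metadata.New(sess)
	region, err := svc.Region()
	if err != nil {
		log.Fatalf("metadata request failed (no IMDSv1 fallback): %v", err)
	}
	log.Println("instance region:", region)
}
```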
// WithSleepDelay overrides the function used to sleep while waiting for the // WithSleepDelay overrides the function used to sleep while waiting for the
// next retry. Defaults to time.Sleep. // next retry. Defaults to time.Sleep.
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
@ -576,6 +600,10 @@ func mergeInConfig(dst *Config, other *Config) {
dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
} }
if other.EC2MetadataEnableFallback != nil {
dst.EC2MetadataEnableFallback = other.EC2MetadataEnableFallback
}
if other.SleepDelay != nil { if other.SleepDelay != nil {
dst.SleepDelay = other.SleepDelay dst.SleepDelay = other.SleepDelay
} }

View file

@ -57,13 +57,13 @@ type EC2Metadata struct {
// New creates a new instance of the EC2Metadata client with a session. // New creates a new instance of the EC2Metadata client with a session.
// This client is safe to use across multiple goroutines. // This client is safe to use across multiple goroutines.
// //
//
// Example: // Example:
// // Create a EC2Metadata client from just a session.
// svc := ec2metadata.New(mySession)
// //
// // Create a EC2Metadata client with additional configuration // // Create a EC2Metadata client from just a session.
// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) // svc := ec2metadata.New(mySession)
//
// // Create a EC2Metadata client with additional configuration
// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
c := p.ClientConfig(ServiceName, cfgs...) c := p.ClientConfig(ServiceName, cfgs...)
return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)

View file

@ -1,6 +1,7 @@
package ec2metadata package ec2metadata
import ( import (
"fmt"
"net/http" "net/http"
"sync/atomic" "sync/atomic"
"time" "time"
@ -33,11 +34,15 @@ func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider {
return &tokenProvider{client: c, configuredTTL: duration} return &tokenProvider{client: c, configuredTTL: duration}
} }
// fallbackEnabled reports whether falling back to IMDSv1 is enabled; it defaults to true when the flag is unset.
func (t *tokenProvider) fallbackEnabled() bool {
return t.client.Config.EC2MetadataEnableFallback == nil || *t.client.Config.EC2MetadataEnableFallback
}
// fetchTokenHandler fetches token for EC2Metadata service client by default. // fetchTokenHandler fetches token for EC2Metadata service client by default.
func (t *tokenProvider) fetchTokenHandler(r *request.Request) { func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
// short-circuits to insecure data flow if tokenProvider is disabled. // short-circuits to insecure data flow if tokenProvider is disabled.
if v := atomic.LoadUint32(&t.disabled); v == 1 { if v := atomic.LoadUint32(&t.disabled); v == 1 && t.fallbackEnabled() {
return return
} }
@ -49,23 +54,21 @@ func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
output, err := t.client.getToken(r.Context(), t.configuredTTL) output, err := t.client.getToken(r.Context(), t.configuredTTL)
if err != nil { if err != nil {
// only attempt fallback to insecure data flow if IMDSv1 is enabled
if !t.fallbackEnabled() {
r.Error = awserr.New("EC2MetadataError", "failed to get IMDSv2 token and fallback to IMDSv1 is disabled", err)
return
}
// change the disabled flag on token provider to true, // change the disabled flag on token provider to true and fall back to IMDSv1
// when error is request timeout error.
if requestFailureError, ok := err.(awserr.RequestFailure); ok { if requestFailureError, ok := err.(awserr.RequestFailure); ok {
switch requestFailureError.StatusCode() { switch requestFailureError.StatusCode() {
case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed: case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed:
atomic.StoreUint32(&t.disabled, 1) atomic.StoreUint32(&t.disabled, 1)
t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError))
case http.StatusBadRequest: case http.StatusBadRequest:
r.Error = requestFailureError r.Error = requestFailureError
} }
// Check if request timed out while waiting for response
if e, ok := requestFailureError.OrigErr().(awserr.Error); ok {
if e.Code() == request.ErrCodeRequestError {
atomic.StoreUint32(&t.disabled, 1)
}
}
} }
return return
} }

View file

@ -9118,6 +9118,9 @@ var awsPartition = partition{
endpointKey{ endpointKey{
Region: "ap-southeast-3", Region: "ap-southeast-3",
}: endpoint{}, }: endpoint{},
endpointKey{
Region: "ap-southeast-4",
}: endpoint{},
endpointKey{ endpointKey{
Region: "ca-central-1", Region: "ca-central-1",
}: endpoint{}, }: endpoint{},
@ -21664,6 +21667,9 @@ var awsPartition = partition{
endpointKey{ endpointKey{
Region: "eu-central-1", Region: "eu-central-1",
}: endpoint{}, }: endpoint{},
endpointKey{
Region: "eu-central-2",
}: endpoint{},
endpointKey{ endpointKey{
Region: "eu-north-1", Region: "eu-north-1",
}: endpoint{}, }: endpoint{},
@ -31030,6 +31036,24 @@ var awsusgovPartition = partition{
Region: "us-gov-east-1", Region: "us-gov-east-1",
}, },
}, },
endpointKey{
Region: "us-gov-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "access-analyzer.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
},
endpointKey{
Region: "us-gov-east-1-fips",
}: endpoint{
Hostname: "access-analyzer.us-gov-east-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{ endpointKey{
Region: "us-gov-west-1", Region: "us-gov-west-1",
}: endpoint{ }: endpoint{
@ -31038,6 +31062,24 @@ var awsusgovPartition = partition{
Region: "us-gov-west-1", Region: "us-gov-west-1",
}, },
}, },
endpointKey{
Region: "us-gov-west-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "access-analyzer.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
},
endpointKey{
Region: "us-gov-west-1-fips",
}: endpoint{
Hostname: "access-analyzer.us-gov-west-1.amazonaws.com",
CredentialScope: credentialScope{
Region: "us-gov-west-1",
},
Deprecated: boxedTrue,
},
}, },
}, },
"acm": service{ "acm": service{

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go" const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK // SDKVersion is the version of this SDK
const SDKVersion = "1.44.219" const SDKVersion = "1.44.221"

vendor/github.com/fatih/color/color_windows.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
package color
import (
"os"
"golang.org/x/sys/windows"
)
func init() {
// Opt-in for ansi color support for current process.
// https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences
var outMode uint32
out := windows.Handle(os.Stdout.Fd())
if err := windows.GetConsoleMode(out, &outMode); err != nil {
return
}
outMode |= windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
_ = windows.SetConsoleMode(out, outMode)
}
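A small sketch of what this init buys on Windows (assuming the standard fatih/color API): colored output now works without any manual console-mode setup.

```Go
package main

import "github.com/fatih/color"

func main() {
	// On Windows the init above has already enabled virtual terminal
	// processing for os.Stdout, so ANSI sequences render as colors.
	color.Green("ANSI colors enabled")
}
```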

View file

@ -16,6 +16,21 @@ This package provides various compression algorithms.
# changelog # changelog
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
* s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766
* zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773
* huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774
* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)
* s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685
* s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752
* s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755
* s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
* Jan 21st, 2023 (v1.15.15) * Jan 21st, 2023 (v1.15.15)
* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728

View file

@ -260,7 +260,9 @@ func (s *Scratch) buildDtable() error {
// If the buffer is over-read an error is returned. // If the buffer is over-read an error is returned.
func (s *Scratch) decompress() error { func (s *Scratch) decompress() error {
br := &s.bits br := &s.bits
br.init(s.br.unread()) if err := br.init(s.br.unread()); err != nil {
return err
}
var s1, s2 decoder var s1, s2 decoder
// Initialize and decode first state and symbol. // Initialize and decode first state and symbol.

View file

@ -215,6 +215,67 @@ has been reached. In this case it will assume that the minimum size has been rea
If nothing has been written to the response writer, nothing will be flushed. If nothing has been written to the response writer, nothing will be flushed.
## BREACH mitigation
[BREACH](http://css.csail.mit.edu/6.858/2020/readings/breach.pdf) is a specialized attack where attacker-controlled data
is injected alongside secret data in a response body. This can lead to side-channel attacks, where observing the compressed response
size can reveal whether there are overlaps between the secret data and the injected data.
For more information see https://breachattack.com/
It can be hard to judge if you are vulnerable to BREACH.
In general, if you do not include any user-provided content in the response body, you are safe,
but if you do, or you are in doubt, you can apply mitigations.
`gzhttp` can apply [Heal the Breach](https://ieeexplore.ieee.org/document/9754554), or improved content-aware padding.
```Go
// RandomJitter adds 1->n random bytes to output based on checksum of payload.
// Specify the amount of input to buffer before applying jitter.
// This should cover the sensitive part of your response.
// This can be used to obfuscate the exact compressed size.
// Specifying 0 will use a buffer size of 64KB.
// 'paranoid' will use a slower hashing function, that MAY provide more safety.
// If a negative buffer is given, the amount of jitter will not be content dependent.
// This provides *less* security than applying content based jitter.
func RandomJitter(n, buffer int, paranoid bool) option
...
```
The jitter is added as a "Comment" field. This field has a 1 byte overhead, so actual extra size will be 2 -> n+1 (inclusive).
A good option would be to apply 32 random bytes, with default 64KB buffer: `gzhttp.RandomJitter(32, 0, false)`.
Note that flushing the data forces the padding to be applied, which means that only data before the flush is considered for content-aware padding.
The *padding* in the comment is the text `Padding-Padding-Padding-Padding-Pad....`
The *length* is `1 + crc32c(payload) MOD n` or `1 + sha256(payload) MOD n` (paranoid), or just random from `crypto/rand` if buffer < 0.
### Paranoid?
The padding size is determined by the remainder of a CRC32 of the content.
Since the payload contains elements unknown to the attacker, there is no reason to believe they can derive any information
from this remainder, or predict it.
However, anyone uncomfortable with a CRC32 being used for this can enable "paranoid" mode, which will use SHA-256 for determining the padding.
The hashing itself is about two orders of magnitude slower, but in overall terms it will likely reduce speed by only around 10%.
Paranoid mode has no effect if the buffer is < 0 (non-content-aware padding).
### Examples
Adding the option `gzhttp.RandomJitter(32, 50000, false)` will apply from 1 up to 32 bytes of random data to the output.
The number of bytes added depends on the content of the first 50000 bytes, or all of them if the output was less than that.
Adding the option `gzhttp.RandomJitter(32, -1, false)` will apply from 1 up to 32 bytes of random data to the output.
Each call will apply a random amount of jitter. This should be considered less secure than content-based jitter.
This can be used if responses are very big, deterministic, and the buffer size would be too big to cover where the mutation occurs.
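As a usage sketch (assuming the `NewWrapper` API shown later in this diff; the port and response body are placeholders):

```Go
package main

import (
	"log"
	"net/http"

	"github.com/klauspost/compress/gzhttp"
)

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("response mixing secrets and user-provided data"))
	})

	// Up to 32 jitter bytes derived from the first 64KB of the payload
	// (buffer=0 selects the default), using CRC32 (paranoid=false).
	wrapper, err := gzhttp.NewWrapper(gzhttp.RandomJitter(32, 0, false))
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", wrapper(handler)))
}
```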
## License ## License
[Apache 2.0](LICENSE) [Apache 2.0](LICENSE)

View file

@ -2,8 +2,15 @@ package gzhttp
import ( import (
"bufio" "bufio"
"crypto/rand"
"crypto/sha256"
"encoding/binary"
"errors"
"fmt" "fmt"
"hash/crc32"
"io" "io"
"math"
"math/bits"
"mime" "mime"
"net" "net"
"net/http" "net/http"
@ -67,6 +74,9 @@ type GzipResponseWriter struct {
setContentType bool // Add content type, if missing and detected. setContentType bool // Add content type, if missing and detected.
suffixETag string // Suffix to add to ETag header if response is compressed. suffixETag string // Suffix to add to ETag header if response is compressed.
dropETag bool // Drop ETag header if response is compressed (supersedes suffixETag). dropETag bool // Drop ETag header if response is compressed (supersedes suffixETag).
sha256Jitter bool // Use sha256 for jitter.
randomJitter string // Add random bytes to output as header field.
jitterBuffer int // Maximum buffer to accumulate before doing jitter.
contentTypeFilter func(ct string) bool // Only compress if the response is one of these content-types. All are accepted if empty. contentTypeFilter func(ct string) bool // Only compress if the response is one of these content-types. All are accepted if empty.
} }
@ -97,6 +107,9 @@ func (w *GzipResponseWriter) Write(b []byte) (int, error) {
if w.minSize > wantBuf { if w.minSize > wantBuf {
wantBuf = w.minSize wantBuf = w.minSize
} }
if w.jitterBuffer > 0 && w.jitterBuffer > wantBuf {
wantBuf = w.jitterBuffer
}
toAdd := len(b) toAdd := len(b)
if len(w.buf)+toAdd > wantBuf { if len(w.buf)+toAdd > wantBuf {
toAdd = wantBuf - len(w.buf) toAdd = wantBuf - len(w.buf)
@ -112,7 +125,7 @@ func (w *GzipResponseWriter) Write(b []byte) (int, error) {
ct := hdr.Get(contentType) ct := hdr.Get(contentType)
if cl == 0 || cl >= w.minSize && (ct == "" || w.contentTypeFilter(ct)) { if cl == 0 || cl >= w.minSize && (ct == "" || w.contentTypeFilter(ct)) {
// If the current buffer is less than minSize and a Content-Length isn't set, then wait until we have more data. // If the current buffer is less than minSize and a Content-Length isn't set, then wait until we have more data.
if len(w.buf) < w.minSize && cl == 0 { if len(w.buf) < w.minSize && cl == 0 || (w.jitterBuffer > 0 && len(w.buf) < w.jitterBuffer) {
return len(b), nil return len(b), nil
} }
@ -131,7 +144,7 @@ func (w *GzipResponseWriter) Write(b []byte) (int, error) {
// If the Content-Type is acceptable to GZIP, initialize the GZIP writer. // If the Content-Type is acceptable to GZIP, initialize the GZIP writer.
if w.contentTypeFilter(ct) { if w.contentTypeFilter(ct) {
if err := w.startGzip(); err != nil { if err := w.startGzip(remain); err != nil {
return 0, err return 0, err
} }
if len(remain) > 0 { if len(remain) > 0 {
@ -156,8 +169,10 @@ func (w *GzipResponseWriter) Write(b []byte) (int, error) {
return len(b), nil return len(b), nil
} }
var castagnoliTable = crc32.MakeTable(crc32.Castagnoli)
// startGzip initializes a GZIP writer and writes the buffer. // startGzip initializes a GZIP writer and writes the buffer.
func (w *GzipResponseWriter) startGzip() error { func (w *GzipResponseWriter) startGzip(remain []byte) error {
// Set the GZIP header. // Set the GZIP header.
w.Header().Set(contentEncoding, "gzip") w.Header().Set(contentEncoding, "gzip")
@ -199,6 +214,49 @@ func (w *GzipResponseWriter) startGzip() error {
if len(w.buf) > 0 { if len(w.buf) > 0 {
// Initialize the GZIP response. // Initialize the GZIP response.
w.init() w.init()
// Set random jitter based on CRC or SHA-256 of current buffer.
// Before first write.
if len(w.randomJitter) > 0 {
var jitRNG uint32
if w.jitterBuffer > 0 {
if w.sha256Jitter {
h := sha256.New()
h.Write(w.buf)
// Use only up to "w.jitterBuffer", otherwise the output depends on write sizes.
if len(remain) > 0 && len(w.buf) < w.jitterBuffer {
remain := remain
if len(remain)+len(w.buf) > w.jitterBuffer {
remain = remain[:w.jitterBuffer-len(w.buf)]
}
h.Write(remain)
}
var tmp [sha256.Size]byte
jitRNG = binary.LittleEndian.Uint32(h.Sum(tmp[:0]))
} else {
h := crc32.Update(0, castagnoliTable, w.buf)
// Use only up to "w.jitterBuffer", otherwise the output depends on write sizes.
if len(remain) > 0 && len(w.buf) < w.jitterBuffer {
remain := remain
if len(remain)+len(w.buf) > w.jitterBuffer {
remain = remain[:w.jitterBuffer-len(w.buf)]
}
h = crc32.Update(h, castagnoliTable, remain)
}
jitRNG = bits.RotateLeft32(h, 19) ^ 0xab0755de
}
} else {
// Get from rand.Reader
var tmp [4]byte
_, err := rand.Read(tmp[:])
if err != nil {
return fmt.Errorf("gzhttp: %w", err)
}
jitRNG = binary.LittleEndian.Uint32(tmp[:])
}
jit := w.randomJitter[:1+jitRNG%uint32(len(w.randomJitter)-1)]
w.gw.(writer.GzipWriterExt).SetHeader(writer.Header{Comment: jit})
}
n, err := w.gw.Write(w.buf) n, err := w.gw.Write(w.buf)
// This should never happen (per io.Writer docs), but if the write didn't // This should never happen (per io.Writer docs), but if the write didn't
@ -259,15 +317,21 @@ func (w *GzipResponseWriter) Close() error {
if w.ignore { if w.ignore {
return nil return nil
} }
if w.gw == nil { if w.gw == nil {
// GZIP not triggered yet, write out regular response. var (
err := w.startPlain() ct = w.Header().Get(contentType)
// Returns the error if any at write. ce = w.Header().Get(contentEncoding)
if err != nil { cr = w.Header().Get(contentRange)
err = fmt.Errorf("gziphandler: write to regular responseWriter at close gets error: %q", err.Error()) )
// fmt.Println(len(w.buf) == 0, len(w.buf) < w.minSize, len(w.Header()[HeaderNoCompression]) != 0, ce != "", cr != "", !w.contentTypeFilter(ct))
if len(w.buf) == 0 || len(w.buf) < w.minSize || len(w.Header()[HeaderNoCompression]) != 0 || ce != "" || cr != "" || !w.contentTypeFilter(ct) {
// GZIP not triggered, write out regular response.
return w.startPlain()
}
err := w.startGzip(nil)
if err != nil {
return err
} }
return err
} }
err := w.gw.Close() err := w.gw.Close()
@ -310,7 +374,7 @@ func (w *GzipResponseWriter) Flush() {
// See if we should compress... // See if we should compress...
if len(w.Header()[HeaderNoCompression]) == 0 && ce == "" && cr == "" && cl >= w.minSize && w.contentTypeFilter(ct) { if len(w.Header()[HeaderNoCompression]) == 0 && ce == "" && cr == "" && cl >= w.minSize && w.contentTypeFilter(ct) {
w.startGzip() w.startGzip(nil)
} else { } else {
w.startPlain() w.startPlain()
} }
@ -392,6 +456,9 @@ func NewWrapper(opts ...option) (func(http.Handler) http.HandlerFunc, error) {
suffixETag: c.suffixETag, suffixETag: c.suffixETag,
buf: gw.buf, buf: gw.buf,
setContentType: c.setContentType, setContentType: c.setContentType,
randomJitter: c.randomJitter,
jitterBuffer: c.jitterBuffer,
sha256Jitter: c.sha256Jitter,
} }
if len(gw.buf) > 0 { if len(gw.buf) > 0 {
gw.buf = gw.buf[:0] gw.buf = gw.buf[:0]
@ -408,6 +475,7 @@ func NewWrapper(opts ...option) (func(http.Handler) http.HandlerFunc, error) {
} else { } else {
h.ServeHTTP(gw, r) h.ServeHTTP(gw, r)
} }
w.Header().Del(HeaderNoCompression)
} else { } else {
h.ServeHTTP(newNoGzipResponseWriter(w), r) h.ServeHTTP(newNoGzipResponseWriter(w), r)
w.Header().Del(HeaderNoCompression) w.Header().Del(HeaderNoCompression)
@ -455,6 +523,9 @@ type config struct {
setContentType bool setContentType bool
suffixETag string suffixETag string
dropETag bool dropETag bool
jitterBuffer int
randomJitter string
sha256Jitter bool
} }
func (c *config) validate() error { func (c *config) validate() error {
@ -466,7 +537,16 @@ func (c *config) validate() error {
if c.minSize < 0 { if c.minSize < 0 {
return fmt.Errorf("minimum size must be more than zero") return fmt.Errorf("minimum size must be more than zero")
} }
if len(c.randomJitter) >= math.MaxUint16 {
return fmt.Errorf("random jitter size exceeded")
}
if len(c.randomJitter) > 0 {
gzw, ok := c.writer.New(io.Discard, c.level).(writer.GzipWriterExt)
if !ok {
return errors.New("the custom compressor does not allow setting headers for random jitter")
}
gzw.Close()
}
return nil return nil
} }
@ -496,8 +576,9 @@ func SetContentType(b bool) option {
// Implementation changes the implementation of GzipWriter // Implementation changes the implementation of GzipWriter
// //
// The default implementation is writer/stdlib/NewWriter // The default implementation is backed by github.com/klauspost/compress.
// which is backed by standard library's compress/zlib // To support RandomJitter, the GzipWriterExt must also be
// supported by the returned writers.
func Implementation(writer writer.GzipWriterFactory) option { func Implementation(writer writer.GzipWriterFactory) option {
return func(c *config) { return func(c *config) {
c.writer = writer c.writer = writer
@ -625,6 +706,31 @@ func DropETag() option {
} }
} }
// RandomJitter adds 1->n random bytes to output based on checksum of payload.
// Specify the amount of input to buffer before applying jitter.
// This should cover the sensitive part of your response.
// This can be used to obfuscate the exact compressed size.
// Specifying 0 will use a buffer size of 64KB.
// 'paranoid' will use a slower hashing function, that MAY provide more safety.
// See README.md for more information.
// If a negative buffer is given, the amount of jitter will not be content dependent.
// This provides *less* security than applying content based jitter.
func RandomJitter(n, buffer int, paranoid bool) option {
return func(c *config) {
if n > 0 {
c.sha256Jitter = paranoid
c.randomJitter = strings.Repeat("Padding-", 1+(n/8))[:n+1]
c.jitterBuffer = buffer
if c.jitterBuffer == 0 {
c.jitterBuffer = 64 << 10
}
} else {
c.randomJitter = ""
c.jitterBuffer = 0
}
}
}
// acceptsGzip returns true if the given HTTP request indicates that it will // acceptsGzip returns true if the given HTTP request indicates that it will
// accept a gzipped response. // accept a gzipped response.
func acceptsGzip(r *http.Request) bool { func acceptsGzip(r *http.Request) bool {
@ -702,10 +808,23 @@ func parseEncodings(s string) (codings, error) {
return c, nil return c, nil
} }
var errEmptyEncoding = errors.New("empty content-coding")
// parseCoding parses a single coding (content-coding with an optional qvalue), // parseCoding parses a single coding (content-coding with an optional qvalue),
// as might appear in an Accept-Encoding header. It attempts to forgive minor // as might appear in an Accept-Encoding header. It attempts to forgive minor
// formatting errors. // formatting errors.
func parseCoding(s string) (coding string, qvalue float64, err error) { func parseCoding(s string) (coding string, qvalue float64, err error) {
// Avoid splitting if we can...
if len(s) == 0 {
return "", 0, errEmptyEncoding
}
if !strings.ContainsRune(s, ';') {
coding = strings.ToLower(strings.TrimSpace(s))
if coding == "" {
err = errEmptyEncoding
}
return coding, DefaultQValue, err
}
for n, part := range strings.Split(s, ";") { for n, part := range strings.Split(s, ";") {
part = strings.TrimSpace(part) part = strings.TrimSpace(part)
qvalue = DefaultQValue qvalue = DefaultQValue
@ -724,7 +843,7 @@ func parseCoding(s string) (coding string, qvalue float64, err error) {
} }
if coding == "" { if coding == "" {
err = fmt.Errorf("empty content-coding") err = errEmptyEncoding
} }
return return
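A hypothetical same-package test sketch of the two paths (the fast path without `;`, and the q-value path); `parseCoding` is package-internal, so this would only compile inside a `gzhttp` `_test.go` file with `testing` imported:

```Go
func TestParseCodingSketch(t *testing.T) {
	// Fast path: no ';' present, coding is trimmed and lower-cased.
	if c, q, err := parseCoding("  GZIP "); err != nil || c != "gzip" || q != DefaultQValue {
		t.Fatalf("fast path: got %q, %v, %v", c, q, err)
	}
	// Slow path: an explicit qvalue is parsed.
	if c, q, err := parseCoding("br;q=0.8"); err != nil || c != "br" || q != 0.8 {
		t.Fatalf("qvalue path: got %q, %v, %v", c, q, err)
	}
	// Empty input now yields errEmptyEncoding.
	if _, _, err := parseCoding(""); err == nil {
		t.Fatal("expected error for empty coding")
	}
}
```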
@ -766,6 +885,9 @@ const intSize = 32 << (^uint(0) >> 63)
// atoi is equivalent to ParseInt(s, 10, 0), converted to type int. // atoi is equivalent to ParseInt(s, 10, 0), converted to type int.
func atoi(s string) (int, bool) { func atoi(s string) (int, bool) {
if len(s) == 0 {
return 0, false
}
sLen := len(s) sLen := len(s)
if intSize == 32 && (0 < sLen && sLen < 10) || if intSize == 32 && (0 < sLen && sLen < 10) ||
intSize == 64 && (0 < sLen && sLen < 19) { intSize == 64 && (0 < sLen && sLen < 19) {

View file

@ -61,6 +61,15 @@ func NewWriter(w io.Writer, level int) writer.GzipWriter {
} }
} }
// SetHeader will override the gzip header on pw.
func (pw *pooledWriter) SetHeader(h writer.Header) {
pw.Name = h.Name
pw.Extra = h.Extra
pw.Comment = h.Comment
pw.ModTime = h.ModTime
pw.OS = h.OS
}
func Levels() (min, max int) { func Levels() (min, max int) {
return gzip.StatelessCompression, gzip.BestCompression return gzip.StatelessCompression, gzip.BestCompression
} }

View file

@ -1,6 +1,9 @@
package writer package writer
import "io" import (
"io"
"time"
)
// GzipWriter implements the functions needed for compressing content. // GzipWriter implements the functions needed for compressing content.
type GzipWriter interface { type GzipWriter interface {
@ -9,6 +12,24 @@ type GzipWriter interface {
Flush() error Flush() error
} }
// GzipWriterExt implements the functions needed for compressing content
// and optional extensions.
type GzipWriterExt interface {
GzipWriter
// SetHeader will populate header fields with non-nil values in h.
SetHeader(h Header)
}
// Header is a gzip header.
type Header struct {
Comment string // comment
Extra []byte // "extra data"
ModTime time.Time // modification time
Name string // file name
OS byte // operating system type
}
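A minimal sketch of satisfying the new interface (assuming a gzip implementation whose Writer embeds header fields, as both the standard library's compress/gzip and github.com/klauspost/compress/gzip do):

```Go
package writer

import "github.com/klauspost/compress/gzip"

// extWriter adapts a gzip.Writer (which already provides Write, Close
// and Flush) to GzipWriterExt by forwarding the header fields.
type extWriter struct {
	*gzip.Writer
}

// SetHeader copies the header fields onto the underlying gzip stream.
func (w *extWriter) SetHeader(h Header) {
	w.Name = h.Name
	w.Extra = h.Extra
	w.Comment = h.Comment
	w.ModTime = h.ModTime
	w.OS = h.OS
}
```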
// GzipWriterFactory contains the information needed for custom gzip implementations. // GzipWriterFactory contains the information needed for custom gzip implementations.
type GzipWriterFactory struct { type GzipWriterFactory struct {
// Must return the minimum and maximum supported level. // Must return the minimum and maximum supported level.

View file

@ -60,6 +60,22 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
b.nBits += encA.nBits + encB.nBits b.nBits += encA.nBits + encB.nBits
} }
// encFourSymbols adds up to 32 bits from four symbols.
// It will not check if there is space for them,
// so the caller must ensure that b has been flushed recently.
func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) {
bitsA := encA.nBits
bitsB := bitsA + encB.nBits
bitsC := bitsB + encC.nBits
bitsD := bitsC + encD.nBits
combined := uint64(encA.val) |
(uint64(encB.val) << (bitsA & 63)) |
(uint64(encC.val) << (bitsB & 63)) |
(uint64(encD.val) << (bitsC & 63))
b.bitContainer |= combined << (b.nBits & 63)
b.nBits += bitsD
}
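To illustrate the packing above with hypothetical code lengths (3, 5, 4 and 6 bits): each value is shifted past the bits already accumulated, exactly as the running bitsA..bitsC offsets do.

```Go
package main

import "fmt"

func main() {
	// Hypothetical Huffman codes and their bit lengths.
	a, aBits := uint64(0b101), 3
	b, bBits := uint64(0b10011), 5
	c, cBits := uint64(0b1001), 4
	d, dBits := uint64(0b110101), 6

	// Low bits first: each symbol is shifted past the previous ones.
	combined := a |
		b<<uint(aBits) |
		c<<uint(aBits+bBits) |
		d<<uint(aBits+bBits+cBits)
	fmt.Printf("%018b (%d bits)\n", combined, aBits+bBits+cBits+dBits)
}
```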
// flush32 will flush out, so there are at least 32 bits available for writing. // flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() { func (b *bitWriter) flush32() {
if b.nBits < 32 { if b.nBits < 32 {

View file

@ -248,8 +248,7 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
tmp := src[n : n+4] tmp := src[n : n+4]
// tmp should be len 4 // tmp should be len 4
bw.flush32() bw.flush32()
bw.encTwoSymbols(cTable, tmp[3], tmp[2]) bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]])
bw.encTwoSymbols(cTable, tmp[1], tmp[0])
} }
} else { } else {
for ; n >= 0; n -= 4 { for ; n >= 0; n -= 4 {

View file

@ -717,3 +717,11 @@ func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) { func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
panic("cvtLZ4BlockSnappyAsm should be unreachable") panic("cvtLZ4BlockSnappyAsm should be unreachable")
} }
func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
panic("cvtLZ4sBlockAsm should be unreachable")
}
func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) {
panic("cvtLZ4sBlockSnappyAsm should be unreachable")
}

View file

@ -212,7 +212,17 @@ func matchLen(a []byte, b []byte) int
//go:noescape //go:noescape
func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) func cvtLZ4BlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
// cvtLZ4Block converts an LZ4 block to S2 // cvtLZ4sBlock converts an LZ4s block to S2
//
//go:noescape
func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
// cvtLZ4Block converts an LZ4 block to Snappy
// //
//go:noescape //go:noescape
func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
// cvtLZ4sBlock converts an LZ4s block to Snappy
//
//go:noescape
func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)

View file

@ -19271,6 +19271,491 @@ lz4_s2_dstfull:
MOVQ SI, uncompressed+48(FP) MOVQ SI, uncompressed+48(FP)
RET RET
// func cvtLZ4sBlockAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
// Requires: SSE2
TEXT ·cvtLZ4sBlockAsm(SB), NOSPLIT, $0-64
XORQ SI, SI
MOVQ dst_base+0(FP), AX
MOVQ dst_len+8(FP), CX
MOVQ src_base+24(FP), DX
MOVQ src_len+32(FP), BX
LEAQ (DX)(BX*1), BX
LEAQ -10(AX)(CX*1), CX
XORQ DI, DI
lz4s_s2_loop:
CMPQ DX, BX
JAE lz4s_s2_corrupt
CMPQ AX, CX
JAE lz4s_s2_dstfull
MOVBQZX (DX), R8
MOVQ R8, R9
MOVQ R8, R10
SHRQ $0x04, R9
ANDQ $0x0f, R10
CMPQ R8, $0xf0
JB lz4s_s2_ll_end
lz4s_s2_ll_loop:
INCQ DX
CMPQ DX, BX
JAE lz4s_s2_corrupt
MOVBQZX (DX), R8
ADDQ R8, R9
CMPQ R8, $0xff
JEQ lz4s_s2_ll_loop
lz4s_s2_ll_end:
LEAQ (DX)(R9*1), R8
ADDQ $0x03, R10
CMPQ R8, BX
JAE lz4s_s2_corrupt
INCQ DX
INCQ R8
TESTQ R9, R9
JZ lz4s_s2_lits_done
LEAQ (AX)(R9*1), R11
CMPQ R11, CX
JAE lz4s_s2_dstfull
ADDQ R9, SI
LEAL -1(R9), R11
CMPL R11, $0x3c
JLT one_byte_lz4s_s2
CMPL R11, $0x00000100
JLT two_bytes_lz4s_s2
CMPL R11, $0x00010000
JLT three_bytes_lz4s_s2
CMPL R11, $0x01000000
JLT four_bytes_lz4s_s2
MOVB $0xfc, (AX)
MOVL R11, 1(AX)
ADDQ $0x05, AX
JMP memmove_long_lz4s_s2
four_bytes_lz4s_s2:
MOVL R11, R12
SHRL $0x10, R12
MOVB $0xf8, (AX)
MOVW R11, 1(AX)
MOVB R12, 3(AX)
ADDQ $0x04, AX
JMP memmove_long_lz4s_s2
three_bytes_lz4s_s2:
MOVB $0xf4, (AX)
MOVW R11, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_lz4s_s2
two_bytes_lz4s_s2:
MOVB $0xf0, (AX)
MOVB R11, 1(AX)
ADDQ $0x02, AX
CMPL R11, $0x40
JL memmove_lz4s_s2
JMP memmove_long_lz4s_s2
one_byte_lz4s_s2:
SHLB $0x02, R11
MOVB R11, (AX)
ADDQ $0x01, AX
memmove_lz4s_s2:
LEAQ (AX)(R9*1), R11
// genMemMoveShort
CMPQ R9, $0x08
JLE emit_lit_memmove_lz4s_s2_memmove_move_8
CMPQ R9, $0x10
JBE emit_lit_memmove_lz4s_s2_memmove_move_8through16
CMPQ R9, $0x20
JBE emit_lit_memmove_lz4s_s2_memmove_move_17through32
JMP emit_lit_memmove_lz4s_s2_memmove_move_33through64
emit_lit_memmove_lz4s_s2_memmove_move_8:
MOVQ (DX), R12
MOVQ R12, (AX)
JMP memmove_end_copy_lz4s_s2
emit_lit_memmove_lz4s_s2_memmove_move_8through16:
MOVQ (DX), R12
MOVQ -8(DX)(R9*1), DX
MOVQ R12, (AX)
MOVQ DX, -8(AX)(R9*1)
JMP memmove_end_copy_lz4s_s2
emit_lit_memmove_lz4s_s2_memmove_move_17through32:
MOVOU (DX), X0
MOVOU -16(DX)(R9*1), X1
MOVOU X0, (AX)
MOVOU X1, -16(AX)(R9*1)
JMP memmove_end_copy_lz4s_s2
emit_lit_memmove_lz4s_s2_memmove_move_33through64:
MOVOU (DX), X0
MOVOU 16(DX), X1
MOVOU -32(DX)(R9*1), X2
MOVOU -16(DX)(R9*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
MOVOU X2, -32(AX)(R9*1)
MOVOU X3, -16(AX)(R9*1)
memmove_end_copy_lz4s_s2:
MOVQ R11, AX
JMP lz4s_s2_lits_emit_done
memmove_long_lz4s_s2:
LEAQ (AX)(R9*1), R11
// genMemMoveLong
MOVOU (DX), X0
MOVOU 16(DX), X1
MOVOU -32(DX)(R9*1), X2
MOVOU -16(DX)(R9*1), X3
MOVQ R9, R13
SHRQ $0x05, R13
MOVQ AX, R12
ANDL $0x0000001f, R12
MOVQ $0x00000040, R14
SUBQ R12, R14
DECQ R13
JA emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32
LEAQ -32(DX)(R14*1), R12
LEAQ -32(AX)(R14*1), R15
emit_lit_memmove_long_lz4s_s2large_big_loop_back:
MOVOU (R12), X4
MOVOU 16(R12), X5
MOVOA X4, (R15)
MOVOA X5, 16(R15)
ADDQ $0x20, R15
ADDQ $0x20, R12
ADDQ $0x20, R14
DECQ R13
JNA emit_lit_memmove_long_lz4s_s2large_big_loop_back
emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32:
MOVOU -32(DX)(R14*1), X4
MOVOU -16(DX)(R14*1), X5
MOVOA X4, -32(AX)(R14*1)
MOVOA X5, -16(AX)(R14*1)
ADDQ $0x20, R14
CMPQ R9, R14
JAE emit_lit_memmove_long_lz4s_s2large_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
MOVOU X2, -32(AX)(R9*1)
MOVOU X3, -16(AX)(R9*1)
MOVQ R11, AX
lz4s_s2_lits_emit_done:
MOVQ R8, DX
lz4s_s2_lits_done:
CMPQ DX, BX
JNE lz4s_s2_match
CMPQ R10, $0x03
JEQ lz4s_s2_done
JMP lz4s_s2_corrupt
lz4s_s2_match:
CMPQ R10, $0x03
JEQ lz4s_s2_loop
LEAQ 2(DX), R8
CMPQ R8, BX
JAE lz4s_s2_corrupt
MOVWQZX (DX), R9
MOVQ R8, DX
TESTQ R9, R9
JZ lz4s_s2_corrupt
CMPQ R9, SI
JA lz4s_s2_corrupt
CMPQ R10, $0x12
JNE lz4s_s2_ml_done
lz4s_s2_ml_loop:
MOVBQZX (DX), R8
INCQ DX
ADDQ R8, R10
CMPQ DX, BX
JAE lz4s_s2_corrupt
CMPQ R8, $0xff
JEQ lz4s_s2_ml_loop
lz4s_s2_ml_done:
ADDQ R10, SI
CMPQ R9, DI
JNE lz4s_s2_docopy
// emitRepeat
emit_repeat_again_lz4_s2:
MOVL R10, R8
LEAL -4(R10), R10
CMPL R8, $0x08
JLE repeat_two_lz4_s2
CMPL R8, $0x0c
JGE cant_repeat_two_offset_lz4_s2
CMPL R9, $0x00000800
JLT repeat_two_offset_lz4_s2
cant_repeat_two_offset_lz4_s2:
CMPL R10, $0x00000104
JLT repeat_three_lz4_s2
CMPL R10, $0x00010100
JLT repeat_four_lz4_s2
CMPL R10, $0x0100ffff
JLT repeat_five_lz4_s2
LEAL -16842747(R10), R10
MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
JMP emit_repeat_again_lz4_s2
repeat_five_lz4_s2:
LEAL -65536(R10), R10
MOVL R10, R9
MOVW $0x001d, (AX)
MOVW R10, 2(AX)
SARL $0x10, R9
MOVB R9, 4(AX)
ADDQ $0x05, AX
JMP lz4s_s2_loop
repeat_four_lz4_s2:
LEAL -256(R10), R10
MOVW $0x0019, (AX)
MOVW R10, 2(AX)
ADDQ $0x04, AX
JMP lz4s_s2_loop
repeat_three_lz4_s2:
LEAL -4(R10), R10
MOVW $0x0015, (AX)
MOVB R10, 2(AX)
ADDQ $0x03, AX
JMP lz4s_s2_loop
repeat_two_lz4_s2:
SHLL $0x02, R10
ORL $0x01, R10
MOVW R10, (AX)
ADDQ $0x02, AX
JMP lz4s_s2_loop
repeat_two_offset_lz4_s2:
XORQ R8, R8
LEAL 1(R8)(R10*4), R10
MOVB R9, 1(AX)
SARL $0x08, R9
SHLL $0x05, R9
ORL R9, R10
MOVB R10, (AX)
ADDQ $0x02, AX
JMP lz4s_s2_loop
lz4s_s2_docopy:
MOVQ R9, DI
// emitCopy
CMPL R10, $0x40
JLE two_byte_offset_short_lz4_s2
CMPL R9, $0x00000800
JAE long_offset_short_lz4_s2
MOVL $0x00000001, R8
LEAL 16(R8), R8
MOVB R9, 1(AX)
MOVL R9, R11
SHRL $0x08, R11
SHLL $0x05, R11
ORL R11, R8
MOVB R8, (AX)
ADDQ $0x02, AX
SUBL $0x08, R10
// emitRepeat
LEAL -4(R10), R10
JMP cant_repeat_two_offset_lz4_s2_emit_copy_short_2b
emit_repeat_again_lz4_s2_emit_copy_short_2b:
MOVL R10, R8
LEAL -4(R10), R10
CMPL R8, $0x08
JLE repeat_two_lz4_s2_emit_copy_short_2b
CMPL R8, $0x0c
JGE cant_repeat_two_offset_lz4_s2_emit_copy_short_2b
CMPL R9, $0x00000800
JLT repeat_two_offset_lz4_s2_emit_copy_short_2b
cant_repeat_two_offset_lz4_s2_emit_copy_short_2b:
CMPL R10, $0x00000104
JLT repeat_three_lz4_s2_emit_copy_short_2b
CMPL R10, $0x00010100
JLT repeat_four_lz4_s2_emit_copy_short_2b
CMPL R10, $0x0100ffff
JLT repeat_five_lz4_s2_emit_copy_short_2b
LEAL -16842747(R10), R10
MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
JMP emit_repeat_again_lz4_s2_emit_copy_short_2b
repeat_five_lz4_s2_emit_copy_short_2b:
LEAL -65536(R10), R10
MOVL R10, R9
MOVW $0x001d, (AX)
MOVW R10, 2(AX)
SARL $0x10, R9
MOVB R9, 4(AX)
ADDQ $0x05, AX
JMP lz4s_s2_loop
repeat_four_lz4_s2_emit_copy_short_2b:
LEAL -256(R10), R10
MOVW $0x0019, (AX)
MOVW R10, 2(AX)
ADDQ $0x04, AX
JMP lz4s_s2_loop
repeat_three_lz4_s2_emit_copy_short_2b:
LEAL -4(R10), R10
MOVW $0x0015, (AX)
MOVB R10, 2(AX)
ADDQ $0x03, AX
JMP lz4s_s2_loop
repeat_two_lz4_s2_emit_copy_short_2b:
SHLL $0x02, R10
ORL $0x01, R10
MOVW R10, (AX)
ADDQ $0x02, AX
JMP lz4s_s2_loop
repeat_two_offset_lz4_s2_emit_copy_short_2b:
XORQ R8, R8
LEAL 1(R8)(R10*4), R10
MOVB R9, 1(AX)
SARL $0x08, R9
SHLL $0x05, R9
ORL R9, R10
MOVB R10, (AX)
ADDQ $0x02, AX
JMP lz4s_s2_loop
long_offset_short_lz4_s2:
MOVB $0xee, (AX)
MOVW R9, 1(AX)
LEAL -60(R10), R10
ADDQ $0x03, AX
// emitRepeat
emit_repeat_again_lz4_s2_emit_copy_short:
MOVL R10, R8
LEAL -4(R10), R10
CMPL R8, $0x08
JLE repeat_two_lz4_s2_emit_copy_short
CMPL R8, $0x0c
JGE cant_repeat_two_offset_lz4_s2_emit_copy_short
CMPL R9, $0x00000800
JLT repeat_two_offset_lz4_s2_emit_copy_short
cant_repeat_two_offset_lz4_s2_emit_copy_short:
CMPL R10, $0x00000104
JLT repeat_three_lz4_s2_emit_copy_short
CMPL R10, $0x00010100
JLT repeat_four_lz4_s2_emit_copy_short
CMPL R10, $0x0100ffff
JLT repeat_five_lz4_s2_emit_copy_short
LEAL -16842747(R10), R10
MOVL $0xfffb001d, (AX)
MOVB $0xff, 4(AX)
ADDQ $0x05, AX
JMP emit_repeat_again_lz4_s2_emit_copy_short
repeat_five_lz4_s2_emit_copy_short:
LEAL -65536(R10), R10
MOVL R10, R9
MOVW $0x001d, (AX)
MOVW R10, 2(AX)
SARL $0x10, R9
MOVB R9, 4(AX)
ADDQ $0x05, AX
JMP lz4s_s2_loop
repeat_four_lz4_s2_emit_copy_short:
LEAL -256(R10), R10
MOVW $0x0019, (AX)
MOVW R10, 2(AX)
ADDQ $0x04, AX
JMP lz4s_s2_loop
repeat_three_lz4_s2_emit_copy_short:
LEAL -4(R10), R10
MOVW $0x0015, (AX)
MOVB R10, 2(AX)
ADDQ $0x03, AX
JMP lz4s_s2_loop
repeat_two_lz4_s2_emit_copy_short:
SHLL $0x02, R10
ORL $0x01, R10
MOVW R10, (AX)
ADDQ $0x02, AX
JMP lz4s_s2_loop
repeat_two_offset_lz4_s2_emit_copy_short:
XORQ R8, R8
LEAL 1(R8)(R10*4), R10
MOVB R9, 1(AX)
SARL $0x08, R9
SHLL $0x05, R9
ORL R9, R10
MOVB R10, (AX)
ADDQ $0x02, AX
JMP lz4s_s2_loop
two_byte_offset_short_lz4_s2:
MOVL R10, R8
SHLL $0x02, R8
CMPL R10, $0x0c
JGE emit_copy_three_lz4_s2
CMPL R9, $0x00000800
JGE emit_copy_three_lz4_s2
LEAL -15(R8), R8
MOVB R9, 1(AX)
SHRL $0x08, R9
SHLL $0x05, R9
ORL R9, R8
MOVB R8, (AX)
ADDQ $0x02, AX
JMP lz4s_s2_loop
emit_copy_three_lz4_s2:
LEAL -2(R8), R8
MOVB R8, (AX)
MOVW R9, 1(AX)
ADDQ $0x03, AX
JMP lz4s_s2_loop
lz4s_s2_done:
MOVQ dst_base+0(FP), CX
SUBQ CX, AX
MOVQ SI, uncompressed+48(FP)
MOVQ AX, dstUsed+56(FP)
RET
lz4s_s2_corrupt:
XORQ AX, AX
LEAQ -1(AX), SI
MOVQ SI, uncompressed+48(FP)
RET
lz4s_s2_dstfull:
XORQ AX, AX
LEAQ -2(AX), SI
MOVQ SI, uncompressed+48(FP)
RET
// func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int) // func cvtLZ4BlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
// Requires: SSE2 // Requires: SSE2
TEXT ·cvtLZ4BlockSnappyAsm(SB), NOSPLIT, $0-64 TEXT ·cvtLZ4BlockSnappyAsm(SB), NOSPLIT, $0-64
@ -19536,3 +20021,271 @@ lz4_snappy_dstfull:
LEAQ -2(AX), SI LEAQ -2(AX), SI
MOVQ SI, uncompressed+48(FP) MOVQ SI, uncompressed+48(FP)
RET RET
// func cvtLZ4sBlockSnappyAsm(dst []byte, src []byte) (uncompressed int, dstUsed int)
// Requires: SSE2
TEXT ·cvtLZ4sBlockSnappyAsm(SB), NOSPLIT, $0-64
XORQ SI, SI
MOVQ dst_base+0(FP), AX
MOVQ dst_len+8(FP), CX
MOVQ src_base+24(FP), DX
MOVQ src_len+32(FP), BX
LEAQ (DX)(BX*1), BX
LEAQ -10(AX)(CX*1), CX
lz4s_snappy_loop:
CMPQ DX, BX
JAE lz4s_snappy_corrupt
CMPQ AX, CX
JAE lz4s_snappy_dstfull
MOVBQZX (DX), DI
MOVQ DI, R8
MOVQ DI, R9
SHRQ $0x04, R8
ANDQ $0x0f, R9
CMPQ DI, $0xf0
JB lz4s_snappy_ll_end
lz4s_snappy_ll_loop:
INCQ DX
CMPQ DX, BX
JAE lz4s_snappy_corrupt
MOVBQZX (DX), DI
ADDQ DI, R8
CMPQ DI, $0xff
JEQ lz4s_snappy_ll_loop
lz4s_snappy_ll_end:
LEAQ (DX)(R8*1), DI
ADDQ $0x03, R9
CMPQ DI, BX
JAE lz4s_snappy_corrupt
INCQ DX
INCQ DI
TESTQ R8, R8
JZ lz4s_snappy_lits_done
LEAQ (AX)(R8*1), R10
CMPQ R10, CX
JAE lz4s_snappy_dstfull
ADDQ R8, SI
LEAL -1(R8), R10
CMPL R10, $0x3c
JLT one_byte_lz4s_snappy
CMPL R10, $0x00000100
JLT two_bytes_lz4s_snappy
CMPL R10, $0x00010000
JLT three_bytes_lz4s_snappy
CMPL R10, $0x01000000
JLT four_bytes_lz4s_snappy
MOVB $0xfc, (AX)
MOVL R10, 1(AX)
ADDQ $0x05, AX
JMP memmove_long_lz4s_snappy
four_bytes_lz4s_snappy:
MOVL R10, R11
SHRL $0x10, R11
MOVB $0xf8, (AX)
MOVW R10, 1(AX)
MOVB R11, 3(AX)
ADDQ $0x04, AX
JMP memmove_long_lz4s_snappy
three_bytes_lz4s_snappy:
MOVB $0xf4, (AX)
MOVW R10, 1(AX)
ADDQ $0x03, AX
JMP memmove_long_lz4s_snappy
two_bytes_lz4s_snappy:
MOVB $0xf0, (AX)
MOVB R10, 1(AX)
ADDQ $0x02, AX
CMPL R10, $0x40
JL memmove_lz4s_snappy
JMP memmove_long_lz4s_snappy
one_byte_lz4s_snappy:
SHLB $0x02, R10
MOVB R10, (AX)
ADDQ $0x01, AX
memmove_lz4s_snappy:
LEAQ (AX)(R8*1), R10
// genMemMoveShort
CMPQ R8, $0x08
JLE emit_lit_memmove_lz4s_snappy_memmove_move_8
CMPQ R8, $0x10
JBE emit_lit_memmove_lz4s_snappy_memmove_move_8through16
CMPQ R8, $0x20
JBE emit_lit_memmove_lz4s_snappy_memmove_move_17through32
JMP emit_lit_memmove_lz4s_snappy_memmove_move_33through64
emit_lit_memmove_lz4s_snappy_memmove_move_8:
MOVQ (DX), R11
MOVQ R11, (AX)
JMP memmove_end_copy_lz4s_snappy
emit_lit_memmove_lz4s_snappy_memmove_move_8through16:
MOVQ (DX), R11
MOVQ -8(DX)(R8*1), DX
MOVQ R11, (AX)
MOVQ DX, -8(AX)(R8*1)
JMP memmove_end_copy_lz4s_snappy
emit_lit_memmove_lz4s_snappy_memmove_move_17through32:
MOVOU (DX), X0
MOVOU -16(DX)(R8*1), X1
MOVOU X0, (AX)
MOVOU X1, -16(AX)(R8*1)
JMP memmove_end_copy_lz4s_snappy
emit_lit_memmove_lz4s_snappy_memmove_move_33through64:
MOVOU (DX), X0
MOVOU 16(DX), X1
MOVOU -32(DX)(R8*1), X2
MOVOU -16(DX)(R8*1), X3
MOVOU X0, (AX)
MOVOU X1, 16(AX)
MOVOU X2, -32(AX)(R8*1)
MOVOU X3, -16(AX)(R8*1)
memmove_end_copy_lz4s_snappy:
MOVQ R10, AX
JMP lz4s_snappy_lits_emit_done
memmove_long_lz4s_snappy:
LEAQ (AX)(R8*1), R10
// genMemMoveLong
MOVOU (DX), X0
MOVOU 16(DX), X1
MOVOU -32(DX)(R8*1), X2
MOVOU -16(DX)(R8*1), X3
MOVQ R8, R12
SHRQ $0x05, R12
MOVQ AX, R11
ANDL $0x0000001f, R11
MOVQ $0x00000040, R13
SUBQ R11, R13
DECQ R12
JA emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32
LEAQ -32(DX)(R13*1), R11
LEAQ -32(AX)(R13*1), R14
emit_lit_memmove_long_lz4s_snappylarge_big_loop_back:
MOVOU (R11), X4
MOVOU 16(R11), X5
MOVOA X4, (R14)
MOVOA X5, 16(R14)
ADDQ $0x20, R14
ADDQ $0x20, R11
ADDQ $0x20, R13
DECQ R12
JNA emit_lit_memmove_long_lz4s_snappylarge_big_loop_back
emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32:
MOVOU -32(DX)(R13*1), X4
MOVOU -16(DX)(R13*1), X5
MOVOA X4, -32(AX)(R13*1)
MOVOA X5, -16(AX)(R13*1)
ADDQ $0x20, R13
CMPQ R8, R13
JAE emit_lit_memmove_long_lz4s_snappylarge_forward_sse_loop_32
MOVOU X0, (AX)
MOVOU X1, 16(AX)
MOVOU X2, -32(AX)(R8*1)
MOVOU X3, -16(AX)(R8*1)
MOVQ R10, AX
lz4s_snappy_lits_emit_done:
MOVQ DI, DX
lz4s_snappy_lits_done:
CMPQ DX, BX
JNE lz4s_snappy_match
CMPQ R9, $0x03
JEQ lz4s_snappy_done
JMP lz4s_snappy_corrupt
lz4s_snappy_match:
CMPQ R9, $0x03
JEQ lz4s_snappy_loop
LEAQ 2(DX), DI
CMPQ DI, BX
JAE lz4s_snappy_corrupt
MOVWQZX (DX), R8
MOVQ DI, DX
TESTQ R8, R8
JZ lz4s_snappy_corrupt
CMPQ R8, SI
JA lz4s_snappy_corrupt
CMPQ R9, $0x12
JNE lz4s_snappy_ml_done
lz4s_snappy_ml_loop:
MOVBQZX (DX), DI
INCQ DX
ADDQ DI, R9
CMPQ DX, BX
JAE lz4s_snappy_corrupt
CMPQ DI, $0xff
JEQ lz4s_snappy_ml_loop
lz4s_snappy_ml_done:
ADDQ R9, SI
// emitCopy
two_byte_offset_lz4_s2:
CMPL R9, $0x40
JLE two_byte_offset_short_lz4_s2
MOVB $0xee, (AX)
MOVW R8, 1(AX)
LEAL -60(R9), R9
ADDQ $0x03, AX
CMPQ AX, CX
JAE lz4s_snappy_loop
JMP two_byte_offset_lz4_s2
two_byte_offset_short_lz4_s2:
MOVL R9, DI
SHLL $0x02, DI
CMPL R9, $0x0c
JGE emit_copy_three_lz4_s2
CMPL R8, $0x00000800
JGE emit_copy_three_lz4_s2
LEAL -15(DI), DI
MOVB R8, 1(AX)
SHRL $0x08, R8
SHLL $0x05, R8
ORL R8, DI
MOVB DI, (AX)
ADDQ $0x02, AX
JMP lz4s_snappy_loop
emit_copy_three_lz4_s2:
LEAL -2(DI), DI
MOVB DI, (AX)
MOVW R8, 1(AX)
ADDQ $0x03, AX
JMP lz4s_snappy_loop
lz4s_snappy_done:
MOVQ dst_base+0(FP), CX
SUBQ CX, AX
MOVQ SI, uncompressed+48(FP)
MOVQ AX, dstUsed+56(FP)
RET
lz4s_snappy_corrupt:
XORQ AX, AX
LEAQ -1(AX), SI
MOVQ SI, uncompressed+48(FP)
RET
lz4s_snappy_dstfull:
XORQ AX, AX
LEAQ -2(AX), SI
MOVQ SI, uncompressed+48(FP)
RET

vendor/github.com/klauspost/compress/s2/lz4sconvert.go generated vendored Normal file
View file

@ -0,0 +1,467 @@
// Copyright (c) 2022 Klaus Post. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package s2
import (
"encoding/binary"
"fmt"
)
// LZ4sConverter provides conversion from LZ4s
// (Intel-modified LZ4 blocks).
// https://cdrdv2-public.intel.com/743912/743912-qat-programmers-guide-v2.0.pdf
// LZ4s is a variant of the LZ4 block format. LZ4s should be considered an intermediate compressed block format.
// The LZ4s format is selected when the application sets the compType to CPA_DC_LZ4S in CpaDcSessionSetupData.
// The LZ4s block returned by the Intel® QAT hardware can be used by external
// software post-processing to generate other compressed data formats.
// LZ4s uses the same high-level formatting as the LZ4 block format, with the following encoding change:
// for a min match of 4 bytes, a copy length value of 1-15 means a match length of 4-18, with a length of 18 adding an extra byte.
// ONLY "min match of 4 bytes" is supported.
type LZ4sConverter struct {
}
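A usage sketch (the input block and the destination sizing are assumptions; dst only needs enough capacity for the converted block):

```Go
package main

import (
	"fmt"
	"log"

	"github.com/klauspost/compress/s2"
)

func main() {
	// lz4sBlock stands in for a block produced by Intel QAT hardware.
	var lz4sBlock []byte

	var conv s2.LZ4sConverter
	// Capacity heuristic (an assumption): S2 output can exceed the
	// LZ4s input, so leave generous headroom.
	dst := make([]byte, 0, len(lz4sBlock)*2+64)
	out, uncompressed, err := conv.ConvertBlock(dst, lz4sBlock)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("converted %d compressed bytes (%d uncompressed)\n", len(out), uncompressed)
}
```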
// ConvertBlock will convert an LZ4s block and append it as an S2
// block without block length to dst.
// The uncompressed size is returned as well.
// dst must have capacity to contain the entire compressed block.
func (l *LZ4sConverter) ConvertBlock(dst, src []byte) ([]byte, int, error) {
if len(src) == 0 {
return dst, 0, nil
}
const debug = false
const inline = true
const lz4MinMatch = 3
s, d := 0, len(dst)
dst = dst[:cap(dst)]
if !debug && hasAmd64Asm {
res, sz := cvtLZ4sBlockAsm(dst[d:], src)
if res < 0 {
const (
errCorrupt = -1
errDstTooSmall = -2
)
switch res {
case errCorrupt:
return nil, 0, ErrCorrupt
case errDstTooSmall:
return nil, 0, ErrDstTooSmall
default:
return nil, 0, fmt.Errorf("unexpected result: %d", res)
}
}
if d+sz > len(dst) {
return nil, 0, ErrDstTooSmall
}
return dst[:d+sz], res, nil
}
dLimit := len(dst) - 10
var lastOffset uint16
var uncompressed int
if debug {
fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
}
for {
if s >= len(src) {
return dst[:d], 0, ErrCorrupt
}
// Read literal info
token := src[s]
ll := int(token >> 4)
ml := int(lz4MinMatch + (token & 0xf))
// If upper nibble is 15, literal length is extended
if token >= 0xf0 {
for {
s++
if s >= len(src) {
if debug {
fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
}
return dst[:d], 0, ErrCorrupt
}
val := src[s]
ll += int(val)
if val != 255 {
break
}
}
}
// Skip past token
if s+ll >= len(src) {
if debug {
fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
}
return nil, 0, ErrCorrupt
}
s++
if ll > 0 {
if d+ll > dLimit {
return nil, 0, ErrDstTooSmall
}
if debug {
fmt.Printf("emit %d literals\n", ll)
}
d += emitLiteralGo(dst[d:], src[s:s+ll])
s += ll
uncompressed += ll
}
// Check if we are done...
if ml == lz4MinMatch {
if s == len(src) {
break
}
// 0 bytes.
continue
}
// 2 byte offset
if s >= len(src)-2 {
if debug {
fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
}
return nil, 0, ErrCorrupt
}
offset := binary.LittleEndian.Uint16(src[s:])
s += 2
if offset == 0 {
if debug {
fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
}
return nil, 0, ErrCorrupt
}
if int(offset) > uncompressed {
if debug {
fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
}
return nil, 0, ErrCorrupt
}
if ml == lz4MinMatch+15 {
for {
if s >= len(src) {
if debug {
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
}
return nil, 0, ErrCorrupt
}
val := src[s]
s++
ml += int(val)
if val != 255 {
if s >= len(src) {
if debug {
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
}
return nil, 0, ErrCorrupt
}
break
}
}
}
if offset == lastOffset {
if debug {
fmt.Printf("emit repeat, length: %d, offset: %d\n", ml, offset)
}
if !inline {
d += emitRepeat16(dst[d:], offset, ml)
} else {
length := ml
dst := dst[d:]
for len(dst) > 5 {
// Repeat offset, make length cheaper
length -= 4
if length <= 4 {
dst[0] = uint8(length)<<2 | tagCopy1
dst[1] = 0
d += 2
break
}
if length < 8 && offset < 2048 {
// Encode WITH offset
dst[1] = uint8(offset)
dst[0] = uint8(offset>>8)<<5 | uint8(length)<<2 | tagCopy1
d += 2
break
}
if length < (1<<8)+4 {
length -= 4
dst[2] = uint8(length)
dst[1] = 0
dst[0] = 5<<2 | tagCopy1
d += 3
break
}
if length < (1<<16)+(1<<8) {
length -= 1 << 8
dst[3] = uint8(length >> 8)
dst[2] = uint8(length >> 0)
dst[1] = 0
dst[0] = 6<<2 | tagCopy1
d += 4
break
}
const maxRepeat = (1 << 24) - 1
length -= 1 << 16
left := 0
if length > maxRepeat {
left = length - maxRepeat + 4
length = maxRepeat - 4
}
dst[4] = uint8(length >> 16)
dst[3] = uint8(length >> 8)
dst[2] = uint8(length >> 0)
dst[1] = 0
dst[0] = 7<<2 | tagCopy1
if left > 0 {
d += 5 + emitRepeat16(dst[5:], offset, left)
break
}
d += 5
break
}
}
} else {
if debug {
fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
}
if !inline {
d += emitCopy16(dst[d:], offset, ml)
} else {
length := ml
dst := dst[d:]
for len(dst) > 5 {
// Offset no more than 2 bytes.
if length > 64 {
off := 3
if offset < 2048 {
// emit 8 bytes as tagCopy1, rest as repeats.
dst[1] = uint8(offset)
dst[0] = uint8(offset>>8)<<5 | uint8(8-4)<<2 | tagCopy1
length -= 8
off = 2
} else {
// Emit a length 60 copy, encoded as 3 bytes.
// Emit remaining as repeat value (minimum 4 bytes).
dst[2] = uint8(offset >> 8)
dst[1] = uint8(offset)
dst[0] = 59<<2 | tagCopy2
length -= 60
}
// Emit remaining as repeats, at least 4 bytes remain.
d += off + emitRepeat16(dst[off:], offset, length)
break
}
if length >= 12 || offset >= 2048 {
// Emit the remaining copy, encoded as 3 bytes.
dst[2] = uint8(offset >> 8)
dst[1] = uint8(offset)
dst[0] = uint8(length-1)<<2 | tagCopy2
d += 3
break
}
// Emit the remaining copy, encoded as 2 bytes.
dst[1] = uint8(offset)
dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
d += 2
break
}
}
lastOffset = offset
}
uncompressed += ml
if d > dLimit {
return nil, 0, ErrDstTooSmall
}
}
return dst[:d], uncompressed, nil
}
// ConvertBlockSnappy will convert an LZ4s block and append it
// as a Snappy block without block length to dst.
// The uncompressed size is returned as well.
// dst must have capacity to contain the entire compressed block.
func (l *LZ4sConverter) ConvertBlockSnappy(dst, src []byte) ([]byte, int, error) {
if len(src) == 0 {
return dst, 0, nil
}
const debug = false
const lz4MinMatch = 3
s, d := 0, len(dst)
dst = dst[:cap(dst)]
// Use assembly when possible
if !debug && hasAmd64Asm {
res, sz := cvtLZ4sBlockSnappyAsm(dst[d:], src)
if res < 0 {
const (
errCorrupt = -1
errDstTooSmall = -2
)
switch res {
case errCorrupt:
return nil, 0, ErrCorrupt
case errDstTooSmall:
return nil, 0, ErrDstTooSmall
default:
return nil, 0, fmt.Errorf("unexpected result: %d", res)
}
}
if d+sz > len(dst) {
return nil, 0, ErrDstTooSmall
}
return dst[:d+sz], res, nil
}
dLimit := len(dst) - 10
var uncompressed int
if debug {
fmt.Printf("convert block start: len(src): %d, len(dst):%d \n", len(src), len(dst))
}
for {
if s >= len(src) {
return nil, 0, ErrCorrupt
}
// Read literal info
token := src[s]
ll := int(token >> 4)
ml := int(lz4MinMatch + (token & 0xf))
// If upper nibble is 15, literal length is extended
if token >= 0xf0 {
for {
s++
if s >= len(src) {
if debug {
fmt.Printf("error reading ll: s (%d) >= len(src) (%d)\n", s, len(src))
}
return nil, 0, ErrCorrupt
}
val := src[s]
ll += int(val)
if val != 255 {
break
}
}
}
// Skip past token
if s+ll >= len(src) {
if debug {
fmt.Printf("error literals: s+ll (%d+%d) >= len(src) (%d)\n", s, ll, len(src))
}
return nil, 0, ErrCorrupt
}
s++
if ll > 0 {
if d+ll > dLimit {
return nil, 0, ErrDstTooSmall
}
if debug {
fmt.Printf("emit %d literals\n", ll)
}
d += emitLiteralGo(dst[d:], src[s:s+ll])
s += ll
uncompressed += ll
}
// Check if we are done...
if ml == lz4MinMatch {
if s == len(src) {
break
}
// 0 bytes.
continue
}
// 2 byte offset
if s >= len(src)-2 {
if debug {
fmt.Printf("s (%d) >= len(src)-2 (%d)", s, len(src)-2)
}
return nil, 0, ErrCorrupt
}
offset := binary.LittleEndian.Uint16(src[s:])
s += 2
if offset == 0 {
if debug {
fmt.Printf("error: offset 0, ml: %d, len(src)-s: %d\n", ml, len(src)-s)
}
return nil, 0, ErrCorrupt
}
if int(offset) > uncompressed {
if debug {
fmt.Printf("error: offset (%d)> uncompressed (%d)\n", offset, uncompressed)
}
return nil, 0, ErrCorrupt
}
if ml == lz4MinMatch+15 {
for {
if s >= len(src) {
if debug {
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
}
return nil, 0, ErrCorrupt
}
val := src[s]
s++
ml += int(val)
if val != 255 {
if s >= len(src) {
if debug {
fmt.Printf("error reading ml: s (%d) >= len(src) (%d)\n", s, len(src))
}
return nil, 0, ErrCorrupt
}
break
}
}
}
if debug {
fmt.Printf("emit copy, length: %d, offset: %d\n", ml, offset)
}
length := ml
// d += emitCopyNoRepeat(dst[d:], int(offset), ml)
for length > 0 {
if d >= dLimit {
return nil, 0, ErrDstTooSmall
}
// Offset no more than 2 bytes.
if length > 64 {
// Emit a length 64 copy, encoded as 3 bytes.
dst[d+2] = uint8(offset >> 8)
dst[d+1] = uint8(offset)
dst[d+0] = 63<<2 | tagCopy2
length -= 64
d += 3
continue
}
if length >= 12 || offset >= 2048 || length < 4 {
// Emit the remaining copy, encoded as 3 bytes.
dst[d+2] = uint8(offset >> 8)
dst[d+1] = uint8(offset)
dst[d+0] = uint8(length-1)<<2 | tagCopy2
d += 3
break
}
// Emit the remaining copy, encoded as 2 bytes.
dst[d+1] = uint8(offset)
dst[d+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
d += 2
break
}
uncompressed += ml
if d > dLimit {
return nil, 0, ErrDstTooSmall
}
}
return dst[:d], uncompressed, nil
}

View file

@ -9,6 +9,7 @@ import (
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
"hash/crc32"
"io" "io"
"os" "os"
"path/filepath" "path/filepath"
@ -442,6 +443,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err
} }
} }
var err error var err error
if debugDecoder {
println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals))
}
huff, literals, err = huff0.ReadTable(literals, huff) huff, literals, err = huff0.ReadTable(literals, huff)
if err != nil { if err != nil {
println("reading huffman table:", err) println("reading huffman table:", err)

View file

@ -54,7 +54,7 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) {
func (b *byteBuf) readByte() (byte, error) { func (b *byteBuf) readByte() (byte, error) {
bb := *b bb := *b
if len(bb) < 1 { if len(bb) < 1 {
return 0, nil return 0, io.ErrUnexpectedEOF
} }
r := bb[0] r := bb[0]
*b = bb[1:] *b = bb[1:]

View file

@@ -32,7 +32,6 @@ type match struct {
 	length int32
 	rep    int32
 	est    int32
-	_      [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
 }
 
 const highScore = 25000
@@ -189,12 +188,6 @@ encodeLoop:
 			panic("offset0 was 0")
 		}
 
-		bestOf := func(a, b *match) *match {
-			if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
-				return a
-			}
-			return b
-		}
 		const goodEnough = 100
 
 		nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
@@ -202,40 +195,41 @@ encodeLoop:
 		candidateL := e.longTable[nextHashL]
 		candidateS := e.table[nextHashS]
 
-		matchAt := func(offset int32, s int32, first uint32, rep int32) match {
+		// Set m to a match at offset if it looks like that will improve compression.
+		improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
 			if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
-				return match{s: s, est: highScore}
+				return
 			}
 			if debugAsserts {
 				if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
 					panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
 				}
 			}
-			m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
-			m.estBits(bitsPerByte)
-			return m
+			cand := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
+			cand.estBits(bitsPerByte)
+			if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
+				*m = cand
+			}
 		}
 
-		m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
-		m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
-		m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
-		m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)
-		best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4))
+		best := match{s: s, est: highScore}
+		improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)
 
 		if canRepeat && best.length < goodEnough {
 			cv32 := uint32(cv >> 8)
 			spp := s + 1
-			m1 := matchAt(spp-offset1, spp, cv32, 1)
-			m2 := matchAt(spp-offset2, spp, cv32, 2)
-			m3 := matchAt(spp-offset3, spp, cv32, 3)
-			best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
+			improve(&best, spp-offset1, spp, cv32, 1)
+			improve(&best, spp-offset2, spp, cv32, 2)
+			improve(&best, spp-offset3, spp, cv32, 3)
 			if best.length > 0 {
 				cv32 = uint32(cv >> 24)
 				spp += 2
-				m1 := matchAt(spp-offset1, spp, cv32, 1)
-				m2 := matchAt(spp-offset2, spp, cv32, 2)
-				m3 := matchAt(spp-offset3, spp, cv32, 3)
-				best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
+				improve(&best, spp-offset1, spp, cv32, 1)
+				improve(&best, spp-offset2, spp, cv32, 2)
+				improve(&best, spp-offset3, spp, cv32, 3)
 			}
 		}
 		// Load next and check...
@@ -262,18 +256,16 @@ encodeLoop:
 				candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
 
 				// Short at s+1
-				m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
+				improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
 				// Long at s+1, s+2
-				m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
-				m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
-				m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
-				m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
-				best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5))
+				improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
+				improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
+				improve(&best, candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
+				improve(&best, candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
 				if false {
 					// Short at s+3.
 					// Too often worse...
-					m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
-					best = bestOf(best, &m)
+					improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
 				}
 				// See if we can find a better match by checking where the current best ends.
 				// Use that offset to see if we can find a better full match.
@@ -284,13 +276,10 @@ encodeLoop:
 					// For this compression level 2 yields the best results.
 					const skipBeginning = 2
 					if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
-						m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
-						bestEnd := bestOf(best, &m)
+						improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
 						if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
-							m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
-							bestEnd = bestOf(bestEnd, &m)
+							improve(&best, pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
 						}
-						best = bestEnd
 					}
 				}
 			}
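The refactor folds the old matchAt/bestOf pair into a single improve closure that mutates the running best in place: no temporary match values, no chained bestOf calls, and the 12-byte cache-line padding on match can go now that matches are no longer copied around. The selection rule itself is unchanged: a candidate wins if its estimated bit cost, adjusted by (cand.s-m.s)*bitsPerByte>>10 for how far the match start drifted, is lower. A reduced sketch of just that rule (the type is trimmed to two fields and bitsPerByte is assumed; the encoder derives it per block):

package main

import "fmt"

type match struct {
	s   int32 // start position of the match
	est int32 // estimated bits to encode it
}

const highScore = 25000

// improve keeps cand if best is still the sentinel, or if cand costs fewer
// estimated bits once the later start position is charged for.
func improve(best *match, cand match, bitsPerByte int32) {
	if best.est >= highScore || cand.est-best.est+(cand.s-best.s)*bitsPerByte>>10 < 0 {
		*best = cand
	}
}

func main() {
	best := match{s: 100, est: highScore} // sentinel: nothing found yet
	improve(&best, match{s: 100, est: 900}, 8)
	improve(&best, match{s: 104, est: 880}, 8) // later start, slightly cheaper
	fmt.Printf("best: %+v\n", best)
}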


@@ -314,9 +314,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 		}
 		size := ll + ml + len(out)
 		if size-startSize > maxBlockSize {
-			if size-startSize == 424242 {
-				panic("here")
-			}
 			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		}
 		if size > cap(out) {
@@ -427,8 +424,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 		}
 	}
 
-	// Check if space for literals
-	if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
+	if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
 		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}


@@ -148,7 +148,6 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 	s.seqSize += ctx.litRemain
 	if s.seqSize > maxBlockSize {
 		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
-
 	}
 	err := br.close()
 	if err != nil {


@@ -88,15 +88,15 @@ func parseNumber(input []byte) number {
 		neg = true
 		s = s[1:]
 		size++
-		if len(s) == 0 {
-			return number{}
-		}
+
 		// Consume any whitespace or comments between the
 		// negative sign and the rest of the number
 		lenBefore := len(s)
 		s = consume(s, 0)
 		sep = lenBefore - len(s)
 		size += sep
+		if len(s) == 0 {
+			return number{}
+		}
 	}
 
 	switch {
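The old order checked for an empty slice before consume stripped whitespace and comments, so input ending in a bare minus sign followed only by separators slipped past the guard and the switch below went on to index an empty slice. Moving the check after consume closes that window; this is the text-format hardening shipped in protobuf v1.29.1. A reduced illustration of the ordering hazard (signAndDigit and the TrimLeft stand-in are ours, not protobuf's):

package main

import (
	"bytes"
	"fmt"
)

// signAndDigit shows why the emptiness check must follow the
// separator-consuming step: trimming can drain the slice.
func signAndDigit(s []byte) (neg bool, digit byte, ok bool) {
	if len(s) > 0 && s[0] == '-' {
		neg = true
		s = s[1:]
		s = bytes.TrimLeft(s, " \t") // stand-in for consume(s, 0)
		if len(s) == 0 {             // checking earlier would miss "-   "
			return false, 0, false
		}
	}
	if len(s) == 0 {
		return false, 0, false
	}
	return neg, s[0], true
}

func main() {
	fmt.Println(signAndDigit([]byte("-1")))
	fmt.Println(signAndDigit([]byte("-   "))) // rejected instead of read out of range
}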


@@ -52,7 +52,7 @@ import (
 const (
 	Major      = 1
 	Minor      = 29
-	Patch      = 0
+	Patch      = 1
 	PreRelease = ""
 )

vendor/modules.txt

@@ -81,7 +81,7 @@ github.com/VividCortex/ewma
 # github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
 ## explicit; go 1.15
 github.com/alecthomas/units
-# github.com/aws/aws-sdk-go v1.44.219
+# github.com/aws/aws-sdk-go v1.44.221
 ## explicit; go 1.11
 github.com/aws/aws-sdk-go/aws
 github.com/aws/aws-sdk-go/aws/awserr
@@ -150,10 +150,10 @@ github.com/aws/aws-sdk-go-v2/internal/timeconv
 ## explicit; go 1.15
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream
 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi
-# github.com/aws/aws-sdk-go-v2/config v1.18.16
+# github.com/aws/aws-sdk-go-v2/config v1.18.17
 ## explicit; go 1.15
 github.com/aws/aws-sdk-go-v2/config
-# github.com/aws/aws-sdk-go-v2/credentials v1.13.16
+# github.com/aws/aws-sdk-go-v2/credentials v1.13.17
 ## explicit; go 1.15
 github.com/aws/aws-sdk-go-v2/credentials
 github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds
@@ -162,11 +162,11 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client
 github.com/aws/aws-sdk-go-v2/credentials/processcreds
 github.com/aws/aws-sdk-go-v2/credentials/ssocreds
 github.com/aws/aws-sdk-go-v2/credentials/stscreds
-# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.24
+# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.0
 ## explicit; go 1.15
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds
 github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config
-# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.56
+# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.57
 ## explicit; go 1.15
 github.com/aws/aws-sdk-go-v2/feature/s3/manager
 # github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.30
@@ -259,7 +259,7 @@ github.com/davecgh/go-spew/spew
 # github.com/dennwc/varint v1.0.0
 ## explicit; go 1.12
 github.com/dennwc/varint
-# github.com/fatih/color v1.14.1
+# github.com/fatih/color v1.15.0
 ## explicit; go 1.17
 github.com/fatih/color
 # github.com/felixge/httpsnoop v1.0.3
@@ -338,7 +338,7 @@ github.com/jmespath/go-jmespath
 # github.com/jpillora/backoff v1.0.0
 ## explicit; go 1.13
 github.com/jpillora/backoff
-# github.com/klauspost/compress v1.16.0
+# github.com/klauspost/compress v1.16.3
 ## explicit; go 1.18
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
@@ -528,7 +528,7 @@ go.uber.org/atomic
 ## explicit; go 1.18
 go.uber.org/goleak
 go.uber.org/goleak/internal/stack
-# golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0
+# golang.org/x/exp v0.0.0-20230314191032-db074128a8ec
 ## explicit; go 1.18
 golang.org/x/exp/constraints
 golang.org/x/exp/slices
@@ -680,7 +680,7 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.29.0
+# google.golang.org/protobuf v1.29.1
 ## explicit; go 1.11
 google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
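For context: vendor/modules.txt is maintained by go mod vendor and records, per vendored module, its version and the packages actually copied into vendor/. These hunks are the mechanical counterpart of the go.mod bumps in this merge: aws-sdk-go and the aws-sdk-go-v2 submodules, fatih/color v1.15.0, klauspost/compress v1.16.3 (carrying the zstd and s2 changes shown earlier), golang.org/x/exp, and google.golang.org/protobuf v1.29.1.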