Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Aliaksandr Valialkin 2022-05-07 02:02:31 +03:00
commit 2685992ca9
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
69 changed files with 885 additions and 573 deletions


@ -151,7 +151,7 @@ func (c *client) init(argIdx, concurrency int, sanitizedURL string) {
logger.Infof("applying %d bytes per second rate limit for -remoteWrite.url=%q", bytesPerSec, sanitizedURL)
c.rl.perSecondLimit = int64(bytesPerSec)
}
c.rl.limitReached = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remote_write_rate_limit_reached_total{url=%q}`, c.sanitizedURL))
c.rl.limitReached = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_rate_limit_reached_total{url=%q}`, c.sanitizedURL))
c.bytesSent = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_bytes_sent_total{url=%q}`, c.sanitizedURL))
c.blocksSent = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_blocks_sent_total{url=%q}`, c.sanitizedURL))


@ -274,6 +274,8 @@ func PushWithAuthToken(at *auth.Token, wr *prompbmarshal.WriteRequest) {
rctx = getRelabelCtx()
}
tss := wr.Timeseries
rowsCount := getRowsCount(tss)
globalRowsPushedBeforeRelabel.Add(rowsCount)
maxSamplesPerBlock := *maxRowsPerBlock
// Allow up to 10x of labels per each block on average.
maxLabelsPerBlock := 10 * maxSamplesPerBlock
@ -298,9 +300,10 @@ func PushWithAuthToken(at *auth.Token, wr *prompbmarshal.WriteRequest) {
tss = nil
}
if rctx != nil {
tssBlockLen := len(tssBlock)
rowsCountBeforeRelabel := getRowsCount(tssBlock)
tssBlock = rctx.applyRelabeling(tssBlock, labelsGlobal, pcsGlobal)
globalRelabelMetricsDropped.Add(tssBlockLen - len(tssBlock))
rowsCountAfterRelabel := getRowsCount(tssBlock)
rowsDroppedByGlobalRelabel.Add(rowsCountBeforeRelabel - rowsCountAfterRelabel)
}
sortLabelsIfNeeded(tssBlock)
tssBlock = limitSeriesCardinality(tssBlock)
@ -414,7 +417,10 @@ func labelsToString(labels []prompbmarshal.Label) string {
return string(b)
}
var globalRelabelMetricsDropped = metrics.NewCounter("vmagent_remotewrite_global_relabel_metrics_dropped_total")
var (
globalRowsPushedBeforeRelabel = metrics.NewCounter("vmagent_remotewrite_global_rows_pushed_before_relabel_total")
rowsDroppedByGlobalRelabel = metrics.NewCounter("vmagent_remotewrite_global_relabel_metrics_dropped_total")
)
type remoteWriteCtx struct {
idx int
@ -423,7 +429,8 @@ type remoteWriteCtx struct {
pss []*pendingSeries
pssNextIdx uint64
relabelMetricsDropped *metrics.Counter
rowsPushedAfterRelabel *metrics.Counter
rowsDroppedByRelabel *metrics.Counter
}
func newRemoteWriteCtx(argIdx int, at *auth.Token, remoteWriteURL *url.URL, maxInmemoryBlocks int, sanitizedURL string) *remoteWriteCtx {
@ -467,7 +474,8 @@ func newRemoteWriteCtx(argIdx int, at *auth.Token, remoteWriteURL *url.URL, maxI
c: c,
pss: pss,
relabelMetricsDropped: metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_relabel_metrics_dropped_total{path=%q, url=%q}`, queuePath, sanitizedURL)),
rowsPushedAfterRelabel: metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_rows_pushed_after_relabel_total{path=%q, url=%q}`, queuePath, sanitizedURL)),
rowsDroppedByRelabel: metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_relabel_metrics_dropped_total{path=%q, url=%q}`, queuePath, sanitizedURL)),
}
}
@ -483,7 +491,8 @@ func (rwctx *remoteWriteCtx) MustStop() {
rwctx.fq.MustClose()
rwctx.fq = nil
rwctx.relabelMetricsDropped = nil
rwctx.rowsPushedAfterRelabel = nil
rwctx.rowsDroppedByRelabel = nil
}
func (rwctx *remoteWriteCtx) Push(tss []prompbmarshal.TimeSeries) {
@ -499,12 +508,15 @@ func (rwctx *remoteWriteCtx) Push(tss []prompbmarshal.TimeSeries) {
// and https://github.com/VictoriaMetrics/VictoriaMetrics/issues/599
v = tssRelabelPool.Get().(*[]prompbmarshal.TimeSeries)
tss = append(*v, tss...)
tssLen := len(tss)
rowsCountBeforeRelabel := getRowsCount(tss)
tss = rctx.applyRelabeling(tss, nil, pcs)
rwctx.relabelMetricsDropped.Add(tssLen - len(tss))
rowsCountAfterRelabel := getRowsCount(tss)
rwctx.rowsDroppedByRelabel.Add(rowsCountBeforeRelabel - rowsCountAfterRelabel)
}
pss := rwctx.pss
idx := atomic.AddUint64(&rwctx.pssNextIdx, 1) % uint64(len(pss))
rowsCount := getRowsCount(tss)
rwctx.rowsPushedAfterRelabel.Add(rowsCount)
pss[idx].Push(tss)
if rctx != nil {
*v = prompbmarshal.ResetTimeSeries(tss)
@ -519,3 +531,11 @@ var tssRelabelPool = &sync.Pool{
return &a
},
}
func getRowsCount(tss []prompbmarshal.TimeSeries) int {
rowsCount := 0
for _, ts := range tss {
rowsCount += len(ts.Samples)
}
return rowsCount
}
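
The hunks above switch drop accounting from series-based (`len()` deltas around `applyRelabeling`) to row-based via `getRowsCount`, so the renamed counters track dropped samples rather than dropped series. A minimal sketch of the difference, using simplified stand-in types instead of the real `prompbmarshal.TimeSeries`:

```go
package main

import "fmt"

// Simplified stand-ins for prompbmarshal.TimeSeries and its samples, for illustration only.
type sample struct{ value float64 }
type timeSeries struct{ samples []sample }

// rowsCount mirrors getRowsCount above: it counts samples (rows), not series.
func rowsCount(tss []timeSeries) int {
	n := 0
	for _, ts := range tss {
		n += len(ts.samples)
	}
	return n
}

func main() {
	before := []timeSeries{
		{samples: make([]sample, 5)}, // kept by relabeling
		{samples: make([]sample, 3)}, // dropped by relabeling
	}
	after := before[:1]

	fmt.Println("series dropped:", len(before)-len(after))           // 1 (old accounting)
	fmt.Println("rows dropped:", rowsCount(before)-rowsCount(after)) // 3 (new accounting)
}
```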


@ -7,7 +7,7 @@ Supported storage systems for backups:
* [GCS](https://cloud.google.com/storage/). Example: `gs://<bucket>/<path/to/backup>`
* [S3](https://aws.amazon.com/s3/). Example: `s3://<bucket>/<path/to/backup>`
* Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/) or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
* Local filesystem. Example: `fs://</absolute/path/to/backup>`
* Local filesystem. Example: `fs://</absolute/path/to/backup>`. Note that `vmbackup` prevents storing the backup in the directory pointed to by the `-storageDataPath` command-line flag, since this directory should be managed solely by VictoriaMetrics or `vmstorage`.
`vmbackup` supports incremental and full backups. Incremental backups are created automatically if the destination path already contains data from the previous backup.
Full backups can be sped up with `-origin` pointing to an already existing backup on the same remote storage. In this case `vmbackup` makes server-side copy for the shared


@ -4,6 +4,7 @@ import (
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"time"
@ -155,9 +156,28 @@ func newDstFS() (common.RemoteFS, error) {
if err != nil {
return nil, fmt.Errorf("cannot parse `-dst`=%q: %w", *dst, err)
}
if hasFilepathPrefix(*dst, *storageDataPath) {
return nil, fmt.Errorf("-dst=%q can not point to the directory with VictoriaMetrics data (aka -storageDataPath=%q)", *dst, *storageDataPath)
}
return fs, nil
}
// hasFilepathPrefix reports whether path is an fs:// destination whose absolute filesystem path lies under prefix.
func hasFilepathPrefix(path, prefix string) bool {
if !strings.HasPrefix(path, "fs://") {
return false
}
path = path[len("fs://"):]
pathAbs, err := filepath.Abs(path)
if err != nil {
return false
}
prefixAbs, err := filepath.Abs(prefix)
if err != nil {
return false
}
return strings.HasPrefix(pathAbs, prefixAbs)
}
func newOriginFS() (common.OriginFS, error) {
if len(*origin) == 0 {
return &fsnil.FS{}, nil

app/vmbackup/main_test.go (new file)

@ -0,0 +1,29 @@
package main
import (
"path/filepath"
"testing"
)
func TestHasFilepathPrefix(t *testing.T) {
f := func(dst, storageDataPath string, resultExpected bool) {
t.Helper()
result := hasFilepathPrefix(dst, storageDataPath)
if result != resultExpected {
t.Errorf("unexpected hasFilepathPrefix(%q, %q); got: %v; want: %v", dst, storageDataPath, result, resultExpected)
}
}
pwd, err := filepath.Abs("")
if err != nil {
t.Fatalf("cannot determine working directory: %s", err)
}
f("s3://foo/bar", "foo", false)
f("fs://"+pwd+"/foo", "foo", true)
f("fs://"+pwd+"/foo", "foo/bar", false)
f("fs://"+pwd+"/foo/bar", "foo", true)
f("fs://"+pwd+"/foo", "bar", false)
f("fs://"+pwd+"/foo", pwd+"/foo", true)
f("fs://"+pwd+"/foo", pwd+"/foo/bar", false)
f("fs://"+pwd+"/foo/bar", pwd+"/foo", true)
f("fs://"+pwd+"/foo", pwd+"/bar", false)
}


@ -497,15 +497,18 @@ Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s
Importing tips:
1. Migrating all the metrics from one VM to another may collide with existing application metrics
1. Migrating big volumes of data may result in reaching the safety limits on `src` side.
Please verify that `-search.maxExportDuration` and `-search.maxExportSeries` were set with
proper values for `src`. If hitting the limits, follow the recommendations [here](https://docs.victoriametrics.com/#how-to-export-data-in-native-format).
2. Migrating all the metrics from one VM to another may collide with existing application metrics
(prefixed with `vm_`) at destination and lead to confusion when using
[official Grafana dashboards](https://grafana.com/orgs/victoriametrics/dashboards).
To avoid such situation try to filter out VM process metrics via `--vm-native-filter-match` flag.
2. Migration is a backfilling process, so it is recommended to read
3. Migration is a backfilling process, so it is recommended to read
[Backfilling tips](https://github.com/VictoriaMetrics/VictoriaMetrics#backfilling) section.
3. `vmctl` doesn't provide relabeling or other types of labels management in this mode.
4. `vmctl` doesn't provide relabeling or other types of labels management in this mode.
Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl/issues/4#issuecomment-683424375).
4. When importing in or from cluster version remember to use correct [URL format](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
5. When importing in or from cluster version remember to use correct [URL format](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
and specify `accountID` param.
## Verifying exported blocks from VictoriaMetrics


@ -201,6 +201,7 @@ const (
influxFilterTimeStart = "influx-filter-time-start"
influxFilterTimeEnd = "influx-filter-time-end"
influxMeasurementFieldSeparator = "influx-measurement-field-separator"
influxSkipDatabaseLabel = "influx-skip-database-label"
)
var (
@ -258,6 +259,11 @@ var (
Usage: "The {separator} symbol used to concatenate {measurement} and {field} names into series name {measurement}{separator}{field}.",
Value: "_",
},
&cli.BoolFlag{
Name: influxSkipDatabaseLabel,
Usage: "Wether to skip adding the label 'db' to timeseries.",
Value: false,
},
}
)


@ -12,21 +12,23 @@ import (
)
type influxProcessor struct {
ic *influx.Client
im *vm.Importer
cc int
separator string
ic *influx.Client
im *vm.Importer
cc int
separator string
skipDbLabel bool
}
func newInfluxProcessor(ic *influx.Client, im *vm.Importer, cc int, separator string) *influxProcessor {
func newInfluxProcessor(ic *influx.Client, im *vm.Importer, cc int, separator string, skipDbLabel bool) *influxProcessor {
if cc < 1 {
cc = 1
}
return &influxProcessor{
ic: ic,
im: im,
cc: cc,
separator: separator,
ic: ic,
im: im,
cc: cc,
separator: separator,
skipDbLabel: skipDbLabel,
}
}
@ -120,14 +122,13 @@ func (ip *influxProcessor) do(s *influx.Series) error {
for i, lp := range s.LabelPairs {
if lp.Name == dbLabel {
containsDBLabel = true
break
}
labels[i] = vm.LabelPair{
Name: lp.Name,
Value: lp.Value,
}
}
if !containsDBLabel {
if !containsDBLabel && !ip.skipDbLabel {
labels = append(labels, vm.LabelPair{
Name: dbLabel,
Value: ip.ic.Database(),


@ -104,7 +104,8 @@ func main() {
influxClient,
importer,
c.Int(influxConcurrency),
c.String(influxMeasurementFieldSeparator))
c.String(influxMeasurementFieldSeparator),
c.Bool(influxSkipDatabaseLabel))
return processor.run(c.Bool(globalSilent), c.Bool(globalVerbose))
},
},


@ -1,7 +1,7 @@
{
"files": {
"main.css": "./static/css/main.d8362c27.css",
"main.js": "./static/js/main.214cd305.js",
"main.js": "./static/js/main.f64c8675.js",
"static/js/362.1f16598a.chunk.js": "./static/js/362.1f16598a.chunk.js",
"static/js/27.939f971b.chunk.js": "./static/js/27.939f971b.chunk.js",
"static/media/README.md": "./static/media/README.40ebc3a1f4adae949154.md",
@ -9,6 +9,6 @@
},
"entrypoints": [
"static/css/main.d8362c27.css",
"static/js/main.214cd305.js"
"static/js/main.f64c8675.js"
]
}


@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script defer="defer" src="./static/js/main.214cd305.js"></script><link href="./static/css/main.d8362c27.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1"/><meta name="theme-color" content="#000000"/><meta name="description" content="VM-UI is a metric explorer for Victoria Metrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,400,500,700&display=swap"/><script defer="defer" src="./static/js/main.f64c8675.js"></script><link href="./static/css/main.d8362c27.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@ -27,7 +27,7 @@ var (
snapshotAuthKey = flag.String("snapshotAuthKey", "", "authKey, which must be passed in query string to /snapshot* pages")
forceMergeAuthKey = flag.String("forceMergeAuthKey", "", "authKey, which must be passed in query string to /internal/force_merge pages")
forceFlushAuthKey = flag.String("forceFlushAuthKey", "", "authKey, which must be passed in query string to /internal/force_flush pages")
snapshotsMaxAge = flag.Duration("snapshotsMaxAge", 0, "Automatically delete snapshots older than -snapshotsMaxAge if it is set to non-zero duration. Make sure that backup process has enough time to finish the backup before the corresponding snapshot is automatically deleted")
snapshotsMaxAge = flagutil.NewDuration("snapshotsMaxAge", "0", "Automatically delete snapshots older than -snapshotsMaxAge if it is set to non-zero duration. Make sure that backup process has enough time to finish the backup before the corresponding snapshot is automatically deleted")
precisionBits = flag.Int("precisionBits", 64, "The number of precision bits to store per each value. Lower precision bits improves data compression at the cost of precision loss")
@ -96,6 +96,9 @@ func InitWithoutMetrics(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
mergeset.SetIndexBlocksCacheSize(cacheSizeIndexDBIndexBlocks.N)
mergeset.SetDataBlocksCacheSize(cacheSizeIndexDBDataBlocks.N)
if retentionPeriod.Msecs < 24*3600*1000 {
logger.Fatalf("-retentionPeriod cannot be smaller than a day; got %s", retentionPeriod)
}
logger.Infof("opening storage at %q with -retentionPeriod=%s", *DataPath, retentionPeriod)
startTime := time.Now()
WG = syncwg.WaitGroup{}
@ -383,9 +386,10 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
func initStaleSnapshotsRemover(strg *storage.Storage) {
staleSnapshotsRemoverCh = make(chan struct{})
if *snapshotsMaxAge <= 0 {
if snapshotsMaxAge.Msecs <= 0 {
return
}
snapshotsMaxAgeDur := time.Duration(snapshotsMaxAge.Msecs) * time.Millisecond
staleSnapshotsRemoverWG.Add(1)
go func() {
defer staleSnapshotsRemoverWG.Done()
@ -397,7 +401,7 @@ func initStaleSnapshotsRemover(strg *storage.Storage) {
return
case <-t.C:
}
if err := strg.DeleteStaleSnapshots(*snapshotsMaxAge); err != nil {
if err := strg.DeleteStaleSnapshots(snapshotsMaxAgeDur); err != nil {
// Use logger.Errorf instead of logger.Fatalf in the hope the error is temporary.
logger.Errorf("cannot delete stale snapshots: %s", err)
}

File diff suppressed because it is too large


@ -19,9 +19,6 @@
"@types/marked": "^4.0.2",
"@types/node": "^17.0.21",
"@types/qs": "^6.9.7",
"@types/react": "^17.0.43",
"@types/react-dom": "^18.0.0",
"@types/react-measure": "^2.0.8",
"@types/react-router-dom": "^5.3.3",
"@types/webpack-env": "^1.16.3",
"dayjs": "^1.11.0",


@ -1,7 +1,9 @@
import React from "preact/compat";
import Box from "@mui/material/Box";
import {ReactNode} from "react";
interface TabPanelProps {
children?: ReactNode;
index: number;
value: number;
}


@ -56,19 +56,21 @@ export type Action =
| { type: "TOGGLE_AUTOCOMPLETE"}
| { type: "NO_CACHE"}
const {relativeDuration, relativeUntil, relativeTimeId} = getRelativeTime();
const duration = relativeDuration || getQueryStringValue("g0.range_input", "1h") as string;
const endInput = relativeUntil || formatDateToLocal(getQueryStringValue("g0.end_input", getDateNowUTC()) as Date);
const {duration, endInput, relativeTimeId} = getRelativeTime({
defaultDuration: getQueryStringValue("g0.range_input", "1h") as string,
defaultEndInput: new Date(formatDateToLocal(getQueryStringValue("g0.end_input", getDateNowUTC()) as Date)),
});
const query = getQueryArray();
export const initialState: AppState = {
serverUrl: getDefaultServer(),
displayType: getQueryStringValue("g0.tab", "chart") as DisplayType,
displayType: getQueryStringValue("g0.tab", "chart") as DisplayType || "chart",
query: query, // demo_memory_usage_bytes
queryHistory: query.map(q => ({index: 0, values: [q]})),
time: {
duration,
period: getTimeperiodForDuration(duration, new Date(endInput)),
period: getTimeperiodForDuration(duration, endInput),
relativeTime: relativeTimeId,
},
queryControls: {
@ -152,7 +154,7 @@ export function reducer(state: AppState, action: Action): AppState {
};
case "SET_PERIOD":
// eslint-disable-next-line no-case-declarations
const duration = getDurationFromPeriod(action.payload);
const durationPeriod = getDurationFromPeriod(action.payload);
return {
...state,
queryControls: {
@ -161,8 +163,8 @@ export function reducer(state: AppState, action: Action): AppState {
},
time: {
...state.time,
duration,
period: getTimeperiodForDuration(duration, action.payload.to),
duration: durationPeriod,
period: getTimeperiodForDuration(durationPeriod, action.payload.to),
relativeTime: ""
}
};
@ -191,11 +193,17 @@ export function reducer(state: AppState, action: Action): AppState {
}
};
case "RUN_QUERY":
// eslint-disable-next-line no-case-declarations
const {duration: durationRunQuery, endInput} = getRelativeTime({
relativeTimeId: state.time.relativeTime,
defaultDuration: state.time.duration,
defaultEndInput: dateFromSeconds(state.time.period.end),
});
return {
...state,
time: {
...state.time,
period: getTimeperiodForDuration(state.time.duration, dateFromSeconds(state.time.period.end))
period: getTimeperiodForDuration(durationRunQuery, endInput)
}
};
case "RUN_QUERY_TO_NOW":


@ -131,13 +131,13 @@ export const relativeTimeOptions: RelativeTimeOption[] = [
...o
}));
export const getRelativeTime = (relativeTimeId?: string) => {
export const getRelativeTime = ({relativeTimeId, defaultDuration, defaultEndInput}:
{ relativeTimeId?: string, defaultDuration: string, defaultEndInput: Date }) => {
const id = relativeTimeId || getQueryStringValue("g0.relative_time", "") as string;
const target = relativeTimeOptions.find(d => d.id === id);
if (!target) return {};
return {
relativeTimeId: id,
relativeDuration: target.duration,
relativeUntil: target.until()
duration: target ? target.duration : defaultDuration,
endInput: target ? target.until() : defaultEndInput
};
};


@ -34,6 +34,7 @@ services:
- "--opentsdbListenAddr=:4242"
- "--httpListenAddr=:8428"
- "--influxListenAddr=:8089"
- "--vmalert.proxyURL=http://vmalert:8880"
networks:
- vm_net
restart: always


@ -15,6 +15,23 @@ The following tip changes can be tested by building VictoriaMetrics components f
## tip
## [v1.77.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.77.1)
Released at 07-05-2022
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add ability to specify filters for Availability Zones in [ec2_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#ec2_sd_config) via `az_filters` section. This section can contain AZ-specific set of filters in the same way as the existing `filters` section, which is used for filtering EC2 instances. The list of supported AZ-specific filters is available [here](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): expose `vmagent_remotewrite_global_rows_pushed_before_relabel_total` and `vmagent_remotewrite_rows_pushed_after_relabel_total` metrics at `http://vmagent:8429/metrics`, which can be used for monitoring the rate of rows (aka samples) pushed to remote storage before and after the relabeling via `-remoteWrite.relabelConfig` and `-remoteWrite.urlRelabelConfig`. See [relabeling docs](https://docs.victoriametrics.com/vmagent.html#relabeling) for details.
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to skip `db` label during InfluxDB data import when `influx-skip-database-label` option is used. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2544). Thanks to @mback2k.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly process passwords and secrets specified in the file pointed by `-promscrape.config` command-line flag. All the passwords and secrets were mistakenly replaced with `<secret>` string in `v1.77.0`. See [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2551) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2550).
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): rename `vmagent_remote_write_rate_limit_reached_total` metric to `vmagent_remotewrite_rate_limit_reached_total`, so its name is consistent with the rest of `vmagent_remotewrite_` metrics.
* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): rename `promscrape_stale_samples_created_total` metric to `vm_promscrape_stale_samples_created_total`, so its name is consistent with the rest of `vm_promscrape_` metrics.
* BUGFIX: [vmctl](https://docs.victoriametrics.com/vmctl.html): properly import InfluxDB measurements if they contain the `db` tag. Previously this could result in incomplete import of measurement tags. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2536). Thanks to @mback2k for the bugfix.
* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): do not reset the selected relative time range when entering new query. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2402#issuecomment-1115817302).
* BUGFIX: [vmbackup](https://docs.victoriametrics.com/vmbackup.html): disallow writing backups to `-storageDataPath` directory, since this directory is managed solely by VictoriaMetrics or `vmstorage`. Other apps shouldn't write into this directory. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2503).
* BUGFIX: do not allow setting `-retentionPeriod` smaller than one day, since VictoriaMetrics doesn't properly support such small retention periods. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2496).
* BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): do not drop samples routed to readonly `vmstorage` nodes if `-dropSamplesOnOverload` command-line flag is set. Try re-routing them to healthy `vmstorage` nodes instead. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2478).
## [v1.77.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.77.0)


@ -324,11 +324,13 @@ It is recommended leaving the following amounts of spare resources:
Some capacity planning tips for VictoriaMetrics cluster:
- The [replication](#replication-and-data-safety) increases the amounts of needed resources for the cluster by up to `N` times where `N` is replication factor.
- Cluster capacity for [active time series](https://docs.victoriametrics.com/FAQ.html#what-is-an-active-time-series) can be increased by adding more `vmstorage` nodes and/or by increasing RAM and CPU resources per each `vmstorage` node.
- Query latency can be reduced by increasing the number of `vmstorage` nodes and/or by increasing RAM and CPU resources per each `vmselect` node.
- The total number of CPU cores needed for all the `vminsert` nodes can be calculated from the ingestion rate: `CPUs = ingestion_rate / 100K`.
- The `-rpc.disableCompression` command-line flag at `vminsert` nodes can increase ingestion capacity at the cost of higher network bandwidth usage between `vminsert` and `vmstorage`.
- The [replication](#replication-and-data-safety) increases the amounts of needed resources for the cluster by up to `N` times where `N` is replication factor. This is because `vminsert` stores `N` copies of every ingested sample on distinct `vmstorage` nodes. These copies are de-duplicated by `vmselect` during querying. The most cost-efficient and performant solution for data durability is to rely on replicated durable persistent disks such as [Google Compute persistent disks](https://cloud.google.com/compute/docs/disks#pdspecs) instead of using the [replication at VictoriaMetrics level](#replication-and-data-safety).
- It is recommended to run a cluster with a big number of small `vmstorage` nodes instead of a cluster with a small number of big `vmstorage` nodes. This increases the chances that the cluster remains available and stable when some of the `vmstorage` nodes are temporarily unavailable during maintenance events such as upgrades, configuration changes or migrations. For example, when a cluster contains 10 `vmstorage` nodes and a single node becomes temporarily unavailable, the workload on the remaining 9 nodes increases by `1/9=11%`. When a cluster contains 3 `vmstorage` nodes and a single node becomes temporarily unavailable, the workload on the remaining 2 nodes increases by `1/2=50%`. The remaining `vmstorage` nodes may not have enough free capacity to handle the increased workload. In this case the cluster may become overloaded, which may result in decreased availability and stability.
- Cluster capacity for [active time series](https://docs.victoriametrics.com/FAQ.html#what-is-an-active-time-series) can be increased by increasing RAM and CPU resources per each `vmstorage` node or by adding new `vmstorage` nodes.
- Query latency can be reduced by increasing CPU resources per each `vmselect` node, since each incoming query is processed by a single `vmselect` node. Performance for heavy queries scales with the number of available CPU cores at `vmselect` node, since `vmselect` processes time series referred by the query on all the available CPU cores.
- If the cluster needs to process incoming queries at a high rate, then its capacity can be increased by adding more `vmselect` nodes, so incoming queries could be spread among bigger number of `vmselect` nodes.
- By default `vminsert` compresses the data it sends to `vmstorage` in order to reduce network bandwidth usage. The compression takes additional CPU resources at `vminsert`. If `vminsert` nodes have limited CPU, then the compression can be disabled by passing `-rpc.disableCompression` command-line flag at `vminsert` nodes.
- By default `vmstorage` compresses the data it sends to `vmselect` during queries in order to reduce network bandwidth usage. The compression takes additional CPU resources at `vmstorage`. If `vmstorage` nodes have limited CPU, then the compression can be disabled by passing `-rpc.disableCompression` command-line flag at `vmstorage` nodes.
## High availability
@ -368,30 +370,17 @@ It is available in the [helm-charts](https://github.com/VictoriaMetrics/helm-cha
## Replication and data safety
By default VictoriaMetrics offloads replication to the underlying storage pointed by `-storageDataPath`.
By default VictoriaMetrics offloads replication to the underlying storage pointed by `-storageDataPath` such as [Google compute persistent disk](https://cloud.google.com/compute/docs/disks#pdspecs), which guarantees data durability. VictoriaMetrics supports application-level replication if replicated durable persistent disks cannot be used for some reason.
The replication can be enabled by passing `-replicationFactor=N` command-line flag to `vminsert`.
This guarantees that all the data remains available for querying if up to `N-1` `vmstorage` nodes are unavailable.
The cluster must contain at least `2*N-1` `vmstorage` nodes, where `N`
is replication factor, in order to maintain the given replication factor for newly ingested data when `N-1` of storage nodes are lost.
For example, when `-replicationFactor=3` is passed to `vminsert`, then it replicates all the ingested data to 3 distinct `vmstorage` nodes,
so up to 2 `vmstorage` nodes can be lost without data loss. The minimum number of `vmstorage` nodes should be equal to `2*3-1 = 5`, so when 2 `vmstorage` nodes are lost,
the remaining 3 `vmstorage` nodes could provide the `-replicationFactor=3` for newly ingested data.
The replication can be enabled by passing `-replicationFactor=N` command-line flag to `vminsert`. This instructs `vminsert` to store `N` copies for every ingested sample on `N` distinct `vmstorage` nodes. This guarantees that all the stored data remains available for querying if up to `N-1` `vmstorage` nodes are unavailable.
When the replication is enabled, `-dedup.minScrapeInterval=1ms` command-line flag must be passed to `vmselect` nodes.
Optional `-replicationFactor=N` command-line flag can be passed to `vmselect` for improving query performance when up to `N-1` vmstorage nodes respond slowly and/or are temporarily unavailable, since `vmselect` doesn't wait for responses from up to `N-1` `vmstorage` nodes. Sometimes `-replicationFactor` at `vmselect` nodes can result in partial responses. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1207) for details.
The `-dedup.minScrapeInterval=1ms` de-duplicates replicated data during queries. If duplicate data is pushed to VictoriaMetrics from identically configured [vmagent](https://docs.victoriametrics.com/vmagent.html) instances or Prometheus instances, then the `-dedup.minScrapeInterval` must be set to bigger values according to [deduplication docs](#deduplication).
The cluster must contain at least `2*N-1` `vmstorage` nodes, where `N` is the replication factor, in order to maintain the given replication factor for newly ingested data when `N-1` of the storage nodes are unavailable.
Note that [replication doesn't save from disaster](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883),
so it is recommended performing regular backups. See [these docs](#backups) for details.
When the replication is enabled, `-dedup.minScrapeInterval=1ms` command-line flag must be passed to `vmselect` nodes, so they could de-duplicate replicated samples obtained from distinct `vmstorage` nodes during querying. If duplicate data is pushed to VictoriaMetrics from identically configured [vmagent](https://docs.victoriametrics.com/vmagent.html) instances or Prometheus instances, then the `-dedup.minScrapeInterval` must be set to bigger values according to [deduplication docs](#deduplication).
Note that the replication increases resource usage - CPU, RAM, disk space, network bandwidth - by up to `-replicationFactor` times. So it may be worth
offloading the replication to underlying storage pointed by `-storageDataPath` such as [Google Compute Engine persistent disk](https://cloud.google.com/compute/docs/disks/#pdspecs),
which is protected from data loss and data corruption. It also provide consistently high performance
and [may be resized](https://cloud.google.com/compute/docs/disks/add-persistent-disk) without downtime.
HDD-based persistent disks should be enough for the majority of use cases.
Note that [replication doesn't save from disaster](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883), so it is recommended performing regular backups. See [these docs](#backups) for details.
It is recommended using durable replicated persistent volumes in Kubernetes.
Note that the replication increases resource usage - CPU, RAM, disk space, network bandwidth - by up to `-replicationFactor=N` times, because `vminsert` stores `N` copies of incoming data to distinct `vmstorage` nodes and `vmselect` needs to de-duplicate the replicated data obtained from `vmstorage` nodes during querying. So it is more cost-effective to offload the replication to underlying replicated durable storage pointed to by `-storageDataPath` such as [Google Compute Engine persistent disk](https://cloud.google.com/compute/docs/disks/#pdspecs), which is protected from data loss and data corruption. It also provides consistently high performance and [may be resized](https://cloud.google.com/compute/docs/disks/add-persistent-disk) without downtime. HDD-based persistent disks should be enough for the majority of use cases. It is recommended using durable replicated persistent volumes in Kubernetes.
## Deduplication
@ -613,7 +602,7 @@ Below is the output for `/path/to/vminsert -help`:
-replicationFactor int
Replication factor for the ingested data, i.e. how many copies to make among distinct -storageNode instances. Note that vmselect must run with -dedup.minScrapeInterval=1ms for data de-duplication when replicationFactor is greater than 1. Higher values for -dedup.minScrapeInterval at vmselect is OK (default 1)
-rpc.disableCompression
Whether to disable compression of RPC traffic. This reduces CPU usage at the cost of higher network bandwidth usage
Whether to disable compression for the data sent from vminsert to vmstorage. This reduces CPU usage at the cost of higher network bandwidth usage
-sortLabels
Whether to sort labels for incoming samples before writing them to storage. This may be needed for reducing memory usage at storage when the order of labels in incoming samples is random. For example, if m{k1="v1",k2="v2"} may be sent as m{k2="v2",k1="v1"}. Enabled sorting for labels can slow down ingestion performance a bit
-storageNode array
@ -869,7 +858,7 @@ Below is the output for `/path/to/vmstorage -help`:
Data with timestamps outside the retentionPeriod is automatically deleted
The following optional suffixes are supported: h (hour), d (day), w (week), y (year). If suffix isn't set, then the duration is counted in months (default 1)
-rpc.disableCompression
Disable compression of RPC traffic. This reduces CPU usage at the cost of higher network bandwidth usage
Whether to disable compression of the data sent from vmstorage to vmselect. This reduces CPU usage at the cost of higher network bandwidth usage
-search.maxTagKeys int
The maximum number of tag keys returned per search (default 100000)
-search.maxTagValueSuffixesPerSearch int
@ -882,8 +871,9 @@ Below is the output for `/path/to/vmstorage -help`:
The maximum number of CPU cores to use for small merges. Default value is used if set to 0
-snapshotAuthKey string
authKey, which must be passed in query string to /snapshot* pages
-snapshotsMaxAge duration
-snapshotsMaxAge value
Automatically delete snapshots older than -snapshotsMaxAge if it is set to non-zero duration. Make sure that backup process has enough time to finish the backup before the corresponding snapshot is automatically deleted
The following optional suffixes are supported: h (hour), d (day), w (week), y (year). If suffix isn't set, then the duration is counted in months (default 0)
-storage.cacheSizeIndexDBDataBlocks size
Overrides max size for indexdb/dataBlocks cache. See https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#cache-tuning
Supports the following optional suffixes for size values: KB, MB, GB, KiB, MiB, GiB (default 0)


@ -724,7 +724,7 @@ See also [implicit query conversions](#implicit-query-conversions).
#### drop_common_labels
`drop_common_labels(q1, ...., qN)` drops common `label="value"` paris among time series returned from `q1, ..., qN`.
`drop_common_labels(q1, ...., qN)` drops common `label="value"` pairs among time series returned from `q1, ..., qN`.
#### label_copy


@ -11,7 +11,7 @@ Supported storage systems for backups:
* [GCS](https://cloud.google.com/storage/). Example: `gs://<bucket>/<path/to/backup>`
* [S3](https://aws.amazon.com/s3/). Example: `s3://<bucket>/<path/to/backup>`
* Any S3-compatible storage such as [MinIO](https://github.com/minio/minio), [Ceph](https://docs.ceph.com/en/pacific/radosgw/s3/) or [Swift](https://platform.swiftstack.com/docs/admin/middleware/s3_middleware.html). See [these docs](#advanced-usage) for details.
* Local filesystem. Example: `fs://</absolute/path/to/backup>`
* Local filesystem. Example: `fs://</absolute/path/to/backup>`. Note that `vmbackup` prevents storing the backup in the directory pointed to by the `-storageDataPath` command-line flag, since this directory should be managed solely by VictoriaMetrics or `vmstorage`.
`vmbackup` supports incremental and full backups. Incremental backups are created automatically if the destination path already contains data from the previous backup.
Full backups can be sped up with `-origin` pointing to an already existing backup on the same remote storage. In this case `vmbackup` makes server-side copy for the shared


@ -501,15 +501,18 @@ Total: 336.75 KiB ↖ Speed: 454.46 KiB p/s
Importing tips:
1. Migrating all the metrics from one VM to another may collide with existing application metrics
1. Migrating big volumes of data may result in reaching the safety limits on `src` side.
Please verify that `-search.maxExportDuration` and `-search.maxExportSeries` were set with
proper values for `src`. If hitting the limits, follow the recommendations [here](https://docs.victoriametrics.com/#how-to-export-data-in-native-format).
2. Migrating all the metrics from one VM to another may collide with existing application metrics
(prefixed with `vm_`) at destination and lead to confusion when using
[official Grafana dashboards](https://grafana.com/orgs/victoriametrics/dashboards).
To avoid such situation try to filter out VM process metrics via `--vm-native-filter-match` flag.
2. Migration is a backfilling process, so it is recommended to read
3. Migration is a backfilling process, so it is recommended to read
[Backfilling tips](https://github.com/VictoriaMetrics/VictoriaMetrics#backfilling) section.
3. `vmctl` doesn't provide relabeling or other types of labels management in this mode.
4. `vmctl` doesn't provide relabeling or other types of labels management in this mode.
Instead, use [relabeling in VictoriaMetrics](https://github.com/VictoriaMetrics/vmctl/issues/4#issuecomment-683424375).
4. When importing in or from cluster version remember to use correct [URL format](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
5. When importing in or from cluster version remember to use correct [URL format](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format)
and specify `accountID` param.
## Verifying exported blocks from VictoriaMetrics

go.mod

@ -11,7 +11,7 @@ require (
github.com/VictoriaMetrics/fasthttp v1.1.0
github.com/VictoriaMetrics/metrics v1.18.1
github.com/VictoriaMetrics/metricsql v0.43.0
github.com/aws/aws-sdk-go v1.44.7
github.com/aws/aws-sdk-go v1.44.9
github.com/cespare/xxhash/v2 v2.1.2
github.com/cheggaaa/pb/v3 v3.0.9-0.20211222075416-90c02fa07ea4
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
@ -19,13 +19,13 @@ require (
github.com/go-kit/kit v0.12.0
github.com/golang/snappy v0.0.4
github.com/influxdata/influxdb v1.9.6
github.com/klauspost/compress v1.15.2
github.com/klauspost/compress v1.15.3
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/oklog/ulid v1.3.1
github.com/prometheus/common v0.34.0 // indirect
github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
github.com/urfave/cli/v2 v2.5.1
github.com/urfave/cli/v2 v2.6.0
github.com/valyala/fastjson v1.6.3
github.com/valyala/fastrand v1.1.0
github.com/valyala/fasttemplate v1.2.1
@ -69,7 +69,7 @@ require (
golang.org/x/text v0.3.7 // indirect
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220504150022-98cd25cafc72 // indirect
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3 // indirect
google.golang.org/grpc v1.46.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect

go.sum

@ -166,8 +166,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go v1.44.7 h1:LpCM8Fpw/L58vgdve6A+UqJr8dzo6Xj7HX7DIIGHg2A=
github.com/aws/aws-sdk-go v1.44.7/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go v1.44.9 h1:s3lsEFbc8i7ghQmcEpcdyvoO/WMwyCVa9pUq3Lq//Ok=
github.com/aws/aws-sdk-go v1.44.9/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=
@ -670,8 +670,8 @@ github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw=
github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/compress v1.15.3 h1:wmfu2iqj9q22SyMINp1uQ8C2/V4M1phJdmH9fG4nba0=
github.com/klauspost/compress v1.15.3/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@ -977,8 +977,8 @@ github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.5.1 h1:YKwdkyA0xTBzOaP2G0DVxBnCheHGP+Y9VbKAs4K1Ess=
github.com/urfave/cli/v2 v2.5.1/go.mod h1:oDzoM7pVwz6wHn5ogWgFUU1s4VJayeQS+aEZDqXIEJs=
github.com/urfave/cli/v2 v2.6.0 h1:yj2Drkflh8X/zUrkWlWlUjZYHyWN7WMmpVxyxXIUyv8=
github.com/urfave/cli/v2 v2.6.0/go.mod h1:oDzoM7pVwz6wHn5ogWgFUU1s4VJayeQS+aEZDqXIEJs=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus=
@ -1585,8 +1585,8 @@ google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX
google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220504150022-98cd25cafc72 h1:iif0mpUetMBqcQPUoq+JnCcmzvfpp8wRx515va8wP1c=
google.golang.org/genproto v0.0.0-20220504150022-98cd25cafc72/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3 h1:q1kiSVscqoDeqTF27eQ2NnLLDmqF0I373qQNXYMy0fo=
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=


@ -26,7 +26,7 @@ import (
// This is needed for hiding secret strings in /config page output.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1764
type Secret struct {
s string
S string
}
// NewSecret returns new secret for s.
@ -35,7 +35,7 @@ func NewSecret(s string) *Secret {
return nil
}
return &Secret{
s: s,
S: s,
}
}
@ -52,7 +52,7 @@ func (s *Secret) UnmarshalYAML(f func(interface{}) error) error {
if err := f(&secret); err != nil {
return fmt.Errorf("cannot parse secret: %w", err)
}
s.s = secret
s.S = secret
return nil
}
@ -61,7 +61,7 @@ func (s *Secret) String() string {
if s == nil {
return ""
}
return s.s
return s.S
}
// TLSConfig represents TLS config.
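
Exporting the former `s` field matters because `encoding/json`, which the JSON-based config cloning later in this commit relies on, silently skips unexported struct fields — this appears to be why several previously-unexported fields are exported across this commit. A tiny self-contained illustration (the types here are hypothetical, not the actual `promauth.Secret`):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type withUnexported struct{ s string }
type withExported struct{ S string }

func main() {
	a, _ := json.Marshal(withUnexported{s: "swordfish"}) // unexported field is dropped
	b, _ := json.Marshal(withExported{S: "swordfish"})   // exported field survives the round-trip
	fmt.Println(string(a)) // {}
	fmt.Println(string(b)) // {"S":"swordfish"}
}
```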


@ -38,7 +38,7 @@ type RelabelConfig struct {
//
// regex: "foo|bar"
type MultiLineRegex struct {
s string
S string
}
// UnmarshalYAML unmarshals mlr from YAML passed to f.
@ -51,7 +51,7 @@ func (mlr *MultiLineRegex) UnmarshalYAML(f func(interface{}) error) error {
if err != nil {
return err
}
mlr.s = s
mlr.S = s
return nil
}
@ -88,7 +88,7 @@ func stringValue(v interface{}) (string, error) {
// MarshalYAML marshals mlr to YAML.
func (mlr *MultiLineRegex) MarshalYAML() (interface{}, error) {
a := strings.Split(mlr.s, "|")
a := strings.Split(mlr.S, "|")
if len(a) == 1 {
return a[0], nil
}
@ -179,7 +179,7 @@ func parseRelabelConfig(rc *RelabelConfig) (*parsedRelabelConfig, error) {
regexCompiled := defaultRegexForRelabelConfig
regexOriginalCompiled := defaultOriginalRegexForRelabelConfig
if rc.Regex != nil {
regex := rc.Regex.s
regex := rc.Regex.S
regexOrig := regex
if rc.Action != "replace_all" && rc.Action != "labelmap_all" {
regex = "^(?:" + regex + ")$"
@ -243,7 +243,7 @@ func parseRelabelConfig(rc *RelabelConfig) (*parsedRelabelConfig, error) {
return nil, fmt.Errorf("unexpected `modulus` for `action=hashmod`: %d; must be greater than 0", modulus)
}
case "keep_metrics":
if (rc.Regex == nil || rc.Regex.s == "") && rc.If == nil {
if (rc.Regex == nil || rc.Regex.S == "") && rc.If == nil {
return nil, fmt.Errorf("`regex` must be non-empty for `action=keep_metrics`")
}
if len(sourceLabels) > 0 {
@ -252,7 +252,7 @@ func parseRelabelConfig(rc *RelabelConfig) (*parsedRelabelConfig, error) {
sourceLabels = []string{"__name__"}
action = "keep"
case "drop_metrics":
if (rc.Regex == nil || rc.Regex.s == "") && rc.If == nil {
if (rc.Regex == nil || rc.Regex.S == "") && rc.If == nil {
return nil, fmt.Errorf("`regex` must be non-empty for `action=drop_metrics`")
}
if len(sourceLabels) > 0 {


@ -128,7 +128,7 @@ func TestParseRelabelConfigsFailure(t *testing.T) {
SourceLabels: []string{"aaa"},
TargetLabel: "xxx",
Regex: &MultiLineRegex{
s: "foo[bar",
S: "foo[bar",
},
},
})
@ -248,7 +248,7 @@ func TestParseRelabelConfigsFailure(t *testing.T) {
Action: "drop_metrics",
SourceLabels: []string{"foo"},
Regex: &MultiLineRegex{
s: "bar",
S: "bar",
},
},
})
@ -266,7 +266,7 @@ func TestParseRelabelConfigsFailure(t *testing.T) {
Action: "keep_metrics",
SourceLabels: []string{"foo"},
Regex: &MultiLineRegex{
s: "bar",
S: "bar",
},
},
})


@ -1,6 +1,7 @@
package promrelabel
import (
"encoding/json"
"fmt"
"regexp"
@ -36,6 +37,20 @@ func (ie *IfExpression) Parse(s string) error {
return nil
}
// UnmarshalJSON unmarshals ie from JSON data.
func (ie *IfExpression) UnmarshalJSON(data []byte) error {
var s string
if err := json.Unmarshal(data, &s); err != nil {
return err
}
return ie.Parse(s)
}
// MarshalJSON marshals ie to JSON.
func (ie *IfExpression) MarshalJSON() ([]byte, error) {
return json.Marshal(ie.s)
}
// UnmarshalYAML unmarshals ie from YAML passed to f.
func (ie *IfExpression) UnmarshalYAML(f func(interface{}) error) error {
var s string


@ -71,7 +71,7 @@ func newClient(sw *ScrapeWork) *client {
// Send full sw.ScrapeURL in requests to a proxy host for non-TLS scrape targets
// like net/http package from Go does.
// See https://en.wikipedia.org/wiki/Proxy_server#Web_proxy_servers
pu := proxyURL.URL()
pu := proxyURL.GetURL()
host = pu.Host
requestURI = sw.ScrapeURL
isTLS = pu.Scheme == "https"
@ -110,7 +110,7 @@ func newClient(sw *ScrapeWork) *client {
}
var sc *http.Client
var proxyURLFunc func(*http.Request) (*url.URL, error)
if pu := sw.ProxyURL.URL(); pu != nil {
if pu := sw.ProxyURL.GetURL(); pu != nil {
proxyURLFunc = http.ProxyURL(pu)
}
sc = &http.Client{


@ -1,6 +1,7 @@
package promscrape
import (
"encoding/json"
"flag"
"fmt"
"net/url"
@ -161,17 +162,17 @@ func (cfg *Config) mustRestart(prevCfg *Config) {
}
func areEqualScrapeConfigs(a, b *ScrapeConfig) bool {
sa := a.marshal()
sb := b.marshal()
sa := a.marshalJSON()
sb := b.marshalJSON()
return string(sa) == string(sb)
}
func (sc *ScrapeConfig) unmarshal(data []byte) error {
return yaml.UnmarshalStrict(data, sc)
func (sc *ScrapeConfig) unmarshalJSON(data []byte) error {
return json.Unmarshal(data, sc)
}
func (sc *ScrapeConfig) marshal() []byte {
data, err := yaml.Marshal(sc)
func (sc *ScrapeConfig) marshalJSON() []byte {
data, err := json.Marshal(sc)
if err != nil {
logger.Panicf("BUG: cannot marshal ScrapeConfig: %s", err)
}
@ -430,9 +431,9 @@ func (cfg *Config) parseData(data []byte, path string) ([]byte, error) {
}
func (sc *ScrapeConfig) clone() *ScrapeConfig {
data := sc.marshal()
data := sc.marshalJSON()
var scCopy ScrapeConfig
if err := scCopy.unmarshal(data); err != nil {
if err := scCopy.unmarshalJSON(data); err != nil {
logger.Panicf("BUG: cannot unmarshal scrape config: %s", err)
}
return &scCopy
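
`clone()` and `areEqualScrapeConfigs` now round-trip the config through JSON instead of YAML, which sidesteps the YAML-marshaling path that was masking secrets as `<secret>` (see the BUGFIX entry in this commit's CHANGELOG). A generic sketch of the same round-trip pattern, with a hypothetical `config` type standing in for `ScrapeConfig`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"reflect"
)

// config is a hypothetical stand-in for ScrapeConfig.
type config struct {
	JobName  string              `json:"job_name"`
	Params   map[string][]string `json:"params,omitempty"`
	Password string              `json:"password,omitempty"`
}

// cloneViaJSON copies cfg by marshaling it to JSON and unmarshaling into a fresh value.
func cloneViaJSON(cfg *config) *config {
	data, err := json.Marshal(cfg)
	if err != nil {
		log.Panicf("BUG: cannot marshal config: %s", err)
	}
	var cp config
	if err := json.Unmarshal(data, &cp); err != nil {
		log.Panicf("BUG: cannot unmarshal config: %s", err)
	}
	return &cp
}

func main() {
	cfg := &config{JobName: "foo", Params: map[string][]string{"x": {"y"}}, Password: "secret"}
	cp := cloneViaJSON(cfg)
	// The copy is deep-equal to the original and keeps the secret value verbatim.
	fmt.Println(reflect.DeepEqual(cfg, cp), cp.Password)
}
```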


@ -11,6 +11,9 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/gce"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
)
@ -1853,3 +1856,69 @@ func equalStaticConfigForScrapeWorks(a, b []*ScrapeWork) bool {
}
return true
}
func TestScrapeConfigClone(t *testing.T) {
f := func(sc *ScrapeConfig) {
t.Helper()
scCopy := sc.clone()
if !reflect.DeepEqual(sc, scCopy) {
t.Fatalf("unexpected result after unmarshalJSON() for JSON:\n%s", sc.marshalJSON())
}
}
f(&ScrapeConfig{})
bFalse := false
var ie promrelabel.IfExpression
if err := ie.Parse(`{foo=~"bar",baz!="z"}`); err != nil {
t.Fatalf("unexpected error: %s", err)
}
f(&ScrapeConfig{
JobName: "foo",
ScrapeInterval: promutils.NewDuration(time.Second * 47),
HonorLabels: true,
HonorTimestamps: &bFalse,
Params: map[string][]string{
"foo": {"bar", "baz"},
},
HTTPClientConfig: promauth.HTTPClientConfig{
Authorization: &promauth.Authorization{
Credentials: promauth.NewSecret("foo"),
},
BasicAuth: &promauth.BasicAuthConfig{
Username: "user_x",
Password: promauth.NewSecret("pass_x"),
},
BearerToken: promauth.NewSecret("zx"),
OAuth2: &promauth.OAuth2Config{
ClientSecret: promauth.NewSecret("aa"),
Scopes: []string{"foo", "bar"},
TLSConfig: &promauth.TLSConfig{
CertFile: "foo",
},
},
TLSConfig: &promauth.TLSConfig{
KeyFile: "aaa",
},
},
ProxyURL: proxy.MustNewURL("https://foo.bar:3434/assdf/dsfd?sdf=dsf"),
RelabelConfigs: []promrelabel.RelabelConfig{{
SourceLabels: []string{"foo", "aaa"},
Regex: &promrelabel.MultiLineRegex{
S: "foo\nbar",
},
If: &ie,
}},
SampleLimit: 10,
GCESDConfigs: []gce.SDConfig{{
Project: "foo",
Zone: gce.ZoneYAML{
Zones: []string{"a", "b"},
},
}},
StreamParse: true,
ProxyClientConfig: promauth.ProxyClientConfig{
BearerTokenFile: "foo",
},
})
}


@ -8,9 +8,10 @@ import (
)
type apiConfig struct {
awsConfig *awsapi.Config
filters []awsapi.Filter
port int
awsConfig *awsapi.Config
instanceFilters []awsapi.Filter
azFilters []awsapi.Filter
port int
// A map from AZ name to AZ id.
azMap map[string]string
@ -37,9 +38,10 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
return nil, err
}
cfg := &apiConfig{
awsConfig: awsCfg,
filters: sdc.Filters,
port: port,
awsConfig: awsCfg,
instanceFilters: sdc.InstanceFilters,
azFilters: sdc.AZFilters,
port: port,
}
return cfg, nil
}


@ -30,7 +30,7 @@ func getAZMap(cfg *apiConfig) map[string]string {
func getAvailabilityZones(cfg *apiConfig) ([]AvailabilityZone, error) {
// See https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeAvailabilityZones.html
azFilters := awsapi.GetFiltersQueryString(cfg.filters, azFiltersWhitelist)
azFilters := awsapi.GetFiltersQueryString(cfg.azFilters, azFiltersWhitelist)
data, err := cfg.awsConfig.GetEC2APIResponse("DescribeAvailabilityZones", azFilters, "")
if err != nil {
return nil, fmt.Errorf("cannot obtain availability zones: %w", err)


@ -27,8 +27,9 @@ type SDConfig struct {
RoleARN string `yaml:"role_arn,omitempty"`
// RefreshInterval time.Duration `yaml:"refresh_interval"`
// refresh_interval is obtained from `-promscrape.ec2SDCheckInterval` command-line option.
Port *int `yaml:"port,omitempty"`
Filters []awsapi.Filter `yaml:"filters,omitempty"`
Port *int `yaml:"port,omitempty"`
InstanceFilters []awsapi.Filter `yaml:"filters,omitempty"`
AZFilters []awsapi.Filter `yaml:"az_filters,omitempty"`
}
// GetLabels returns ec2 labels according to sdc.
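
The `SDConfig` change above splits EC2 filtering into `filters` (instances, now stored in `InstanceFilters`) and the new `az_filters` (availability zones). A minimal sketch of how such a section could be parsed, using hypothetical mirror structs; the `name`/`values` fields of the filter are an assumption matching the usual `ec2_sd_config` filter shape, not taken from `awsapi.Filter`:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v2"
)

// filter mirrors the assumed filter shape: a name plus a list of values.
type filter struct {
	Name   string   `yaml:"name"`
	Values []string `yaml:"values"`
}

// sdConfig mirrors only the filtering part of the EC2 SDConfig shown above.
type sdConfig struct {
	InstanceFilters []filter `yaml:"filters,omitempty"`
	AZFilters       []filter `yaml:"az_filters,omitempty"`
}

func main() {
	data := []byte(`
filters:
  - name: instance-type
    values: ["t3.micro"]
az_filters:
  - name: zone-name
    values: ["us-east-1a", "us-east-1b"]
`)
	var cfg sdConfig
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		log.Fatalf("cannot parse config: %s", err)
	}
	fmt.Printf("instance filters: %+v\naz filters: %+v\n", cfg.InstanceFilters, cfg.AZFilters)
}
```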


@ -29,7 +29,7 @@ func getReservations(cfg *apiConfig) ([]Reservation, error) {
// See https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html
var rs []Reservation
pageToken := ""
instanceFilters := awsapi.GetFiltersQueryString(cfg.filters, nil)
instanceFilters := awsapi.GetFiltersQueryString(cfg.instanceFilters, nil)
for {
data, err := cfg.awsConfig.GetEC2APIResponse("DescribeInstances", instanceFilters, pageToken)
if err != nil {


@ -47,7 +47,7 @@ func newAPIConfig(sdc *SDConfig) (*apiConfig, error) {
project = proj
logger.Infof("autodetected the current GCE project: %q", project)
}
zones := sdc.Zone.zones
zones := sdc.Zone.Zones
if len(zones) == 0 {
// Autodetect the current zone.
zone, err := getCurrentZone()


@ -26,7 +26,7 @@ type SDConfig struct {
// ZoneYAML holds info about zones.
type ZoneYAML struct {
zones []string
Zones []string
}
// UnmarshalYAML implements yaml.Unmarshaler
@ -50,13 +50,13 @@ func (z *ZoneYAML) UnmarshalYAML(unmarshal func(interface{}) error) error {
default:
return fmt.Errorf("unexpected type unmarshaled for ZoneYAML: %T; contents: %#v", v, v)
}
z.zones = zones
z.Zones = zones
return nil
}
// MarshalYAML implements yaml.Marshaler
func (z ZoneYAML) MarshalYAML() (interface{}, error) {
return z.zones, nil
return z.Zones, nil
}
// GetLabels returns gce labels according to sdc.


@ -10,7 +10,7 @@ func TestMarshallingSDConfigWithZoneYAML(t *testing.T) {
sdConfig := SDConfig{
Project: "test-project",
Zone: ZoneYAML{
zones: []string{"zone-a", "zone-b"},
Zones: []string{"zone-a", "zone-b"},
},
}


@ -75,7 +75,7 @@ func newAPIWatcher(apiServer string, ac *promauth.Config, sdc *SDConfig, swcFunc
}
selectors := sdc.Selectors
attachNodeMetadata := sdc.AttachMetadata.Node
proxyURL := sdc.ProxyURL.URL()
proxyURL := sdc.ProxyURL.GetURL()
gw := getGroupWatcher(apiServer, ac, namespaces, selectors, attachNodeMetadata, proxyURL)
role := sdc.role()
return &apiWatcher{


@ -75,7 +75,7 @@ func NewClient(apiServer string, ac *promauth.Config, proxyURL *proxy.URL, proxy
// Send full urls in requests to a proxy host for non-TLS apiServer
// like net/http package from Go does.
// See https://en.wikipedia.org/wiki/Proxy_server#Web_proxy_servers
pu := proxyURL.URL()
pu := proxyURL.GetURL()
hostPort = pu.Host
isTLS = pu.Scheme == "https"
if isTLS {


@ -733,7 +733,7 @@ func (sw *scrapeWork) sendStaleSeries(lastScrape, currScrape string, timestamp i
sw.pushData(&wc.writeRequest)
}
var staleSamplesCreated = metrics.NewCounter(`promscrape_stale_samples_created_total`)
var staleSamplesCreated = metrics.NewCounter(`vm_promscrape_stale_samples_created_total`)
func (sw *scrapeWork) getLabelsHash(labels []prompbmarshal.Label) uint64 {
// It is OK if there will be hash collisions for distinct sets of labels,

View file

@ -8,19 +8,19 @@ import (
// Duration is duration, which must be used in Prometheus-compatible yaml configs.
type Duration struct {
d time.Duration
D time.Duration
}
// NewDuration returns Duration for given d.
func NewDuration(d time.Duration) *Duration {
return &Duration{
d: d,
D: d,
}
}
// MarshalYAML implements yaml.Marshaler interface.
func (pd Duration) MarshalYAML() (interface{}, error) {
return pd.d.String(), nil
return pd.D.String(), nil
}
// UnmarshalYAML implements yaml.Unmarshaler interface.
@ -33,7 +33,7 @@ func (pd *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return err
}
pd.d = time.Duration(ms) * time.Millisecond
pd.D = time.Duration(ms) * time.Millisecond
return nil
}
@ -42,7 +42,7 @@ func (pd *Duration) Duration() time.Duration {
if pd == nil {
return 0
}
return pd.d
return pd.D
}
// ParseDuration parses duration string in Prometheus format
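A hedged sketch of the exported D field in use, assuming fmt and time are imported alongside this type; the function name is illustrative only.

func durationUsage() {
	d := NewDuration(30 * time.Second)
	fmt.Println(d.Duration()) // 30s
	direct := Duration{D: time.Minute} // the exported field can now be set directly
	fmt.Println(direct.D) // 1m0s
	var missing *Duration
	fmt.Println(missing.Duration()) // 0s: the nil receiver is handled above
}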

View file

@ -19,7 +19,7 @@ import (
// URL implements YAML.Marshaler and yaml.Unmarshaler interfaces for url.URL.
type URL struct {
url *url.URL
URL *url.URL
}
// MustNewURL returns new URL for the given u.
@ -29,31 +29,31 @@ func MustNewURL(u string) *URL {
logger.Panicf("BUG: cannot parse u=%q: %s", u, err)
}
return &URL{
url: pu,
URL: pu,
}
}
// URL return the underlying url.
func (u *URL) URL() *url.URL {
if u == nil || u.url == nil {
// GetURL returns the underlying URL.
func (u *URL) GetURL() *url.URL {
if u == nil || u.URL == nil {
return nil
}
return u.url
return u.URL
}
// IsHTTPOrHTTPS returns true if u is http or https
func (u *URL) IsHTTPOrHTTPS() bool {
pu := u.URL()
pu := u.GetURL()
if pu == nil {
return false
}
scheme := u.url.Scheme
scheme := u.URL.Scheme
return scheme == "http" || scheme == "https"
}
// String returns string representation of u.
func (u *URL) String() string {
pu := u.URL()
pu := u.GetURL()
if pu == nil {
return ""
}
@ -66,10 +66,10 @@ func (u *URL) GetAuthHeader(ac *promauth.Config) string {
if ac != nil {
authHeader = ac.GetAuthHeader()
}
if u == nil || u.url == nil {
if u == nil || u.URL == nil {
return authHeader
}
pu := u.url
pu := u.URL
if pu.User != nil && len(pu.User.Username()) > 0 {
userPasswordEncoded := base64.StdEncoding.EncodeToString([]byte(pu.User.String()))
authHeader = "Basic " + userPasswordEncoded
@ -79,10 +79,10 @@ func (u *URL) GetAuthHeader(ac *promauth.Config) string {
// MarshalYAML implements yaml.Marshaler interface.
func (u *URL) MarshalYAML() (interface{}, error) {
if u.url == nil {
if u.URL == nil {
return nil, nil
}
return u.url.String(), nil
return u.URL.String(), nil
}
// UnmarshalYAML implements yaml.Unmarshaler interface.
@ -95,16 +95,16 @@ func (u *URL) UnmarshalYAML(unmarshal func(interface{}) error) error {
if err != nil {
return fmt.Errorf("cannot parse proxy_url=%q as *url.URL: %w", s, err)
}
u.url = parsedURL
u.URL = parsedURL
return nil
}
// NewDialFunc returns dial func for the given u and ac.
func (u *URL) NewDialFunc(ac *promauth.Config) (fasthttp.DialFunc, error) {
if u == nil || u.url == nil {
if u == nil || u.URL == nil {
return defaultDialFunc, nil
}
pu := u.url
pu := u.URL
switch pu.Scheme {
case "http", "https", "socks5", "tls+socks5":
default:
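A hedged in-package sketch of the renamed getter; the helper name is illustrative only. GetURL keeps the old getter's nil-safety now that the wrapped *url.URL is exported as URL.

func proxyHostOrDefault(u *URL, def string) string {
	pu := u.GetURL() // nil when u itself or u.URL is nil
	if pu == nil {
		return def
	}
	return pu.Host // the same value NewClient uses to pick the proxy host
}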

View file

@ -548,6 +548,9 @@ var awsPartition = partition{
endpointKey{
Region: "ap-southeast-2",
}: endpoint{},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{},
endpointKey{
Region: "ca-central-1",
}: endpoint{},
@ -2642,6 +2645,9 @@ var awsPartition = partition{
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "eu-west-2",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@ -3643,9 +3649,6 @@ var awsPartition = partition{
},
"cloudhsm": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "eu-west-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@ -6896,7 +6899,7 @@ var awsPartition = partition{
Region: "ap-south-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "api.ec2.ap-south-1.aws",
Hostname: "ec2.ap-south-1.api.aws",
},
endpointKey{
Region: "ap-southeast-1",
@ -6932,7 +6935,7 @@ var awsPartition = partition{
Region: "eu-west-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "api.ec2.eu-west-1.aws",
Hostname: "ec2.eu-west-1.api.aws",
},
endpointKey{
Region: "eu-west-2",
@ -6995,7 +6998,7 @@ var awsPartition = partition{
Region: "sa-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "api.ec2.sa-east-1.aws",
Hostname: "ec2.sa-east-1.api.aws",
},
endpointKey{
Region: "us-east-1",
@ -7004,7 +7007,7 @@ var awsPartition = partition{
Region: "us-east-1",
Variant: dualStackVariant,
}: endpoint{
Hostname: "api.ec2.us-east-1.aws",
Hostname: "ec2.us-east-1.api.aws",
},
endpointKey{
Region: "us-east-1",
@ -7019,7 +7022,7 @@ var awsPartition = partition{
Region: "us-east-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "api.ec2.us-east-2.aws",
Hostname: "ec2.us-east-2.api.aws",
},
endpointKey{
Region: "us-east-2",
@ -7043,7 +7046,7 @@ var awsPartition = partition{
Region: "us-west-2",
Variant: dualStackVariant,
}: endpoint{
Hostname: "api.ec2.us-west-2.aws",
Hostname: "ec2.us-west-2.api.aws",
},
endpointKey{
Region: "us-west-2",
@ -10585,6 +10588,9 @@ var awsPartition = partition{
},
"identity-chime": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@ -13755,6 +13761,9 @@ var awsPartition = partition{
},
"messaging-chime": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "eu-central-1",
}: endpoint{},
endpointKey{
Region: "us-east-1",
}: endpoint{},
@ -21936,6 +21945,23 @@ var awsPartition = partition{
Region: "ap-southeast-2",
},
},
endpointKey{
Region: "ap-southeast-3",
}: endpoint{
Hostname: "waf-regional.ap-southeast-3.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-3",
},
},
endpointKey{
Region: "ap-southeast-3",
Variant: fipsVariant,
}: endpoint{
Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-3",
},
},
endpointKey{
Region: "ca-central-1",
}: endpoint{
@ -22127,6 +22153,15 @@ var awsPartition = partition{
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-ap-southeast-3",
}: endpoint{
Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com",
CredentialScope: credentialScope{
Region: "ap-southeast-3",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "fips-ca-central-1",
}: endpoint{
@ -27675,6 +27710,13 @@ var awsusgovPartition = partition{
},
},
},
"robomaker": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "us-gov-west-1",
}: endpoint{},
},
},
"route53": service{
PartitionEndpoint: "aws-us-gov-global",
IsRegionalized: boxedFalse,
@ -30111,6 +30153,28 @@ var awsisobPartition = partition{
}: endpoint{},
},
},
"elasticfilesystem": service{
Endpoints: serviceEndpoints{
endpointKey{
Region: "fips-us-isob-east-1",
}: endpoint{
Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov",
CredentialScope: credentialScope{
Region: "us-isob-east-1",
},
Deprecated: boxedTrue,
},
endpointKey{
Region: "us-isob-east-1",
}: endpoint{},
endpointKey{
Region: "us-isob-east-1",
Variant: fipsVariant,
}: endpoint{
Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov",
},
},
},
"elasticloadbalancing": service{
Endpoints: serviceEndpoints{
endpointKey{

View file

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.44.7"
const SDKVersion = "1.44.9"

View file

@ -17,6 +17,12 @@ This package provides various compression algorithms.
# changelog
* Apr 26, 2022 (v1.15.2)
* zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537)
* zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539)
* s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555)
* Minimum version is Go 1.16, added CI test on 1.18.
* Mar 11, 2022 (v1.15.1)
* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512)
* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514)

View file

@ -439,7 +439,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
}
if len(next.b) > 0 {
if !d.o.ignoreChecksum && len(next.b) > 0 {
n, err := d.current.crc.Write(next.b)
if err == nil {
if n != len(next.b) {
@ -451,7 +451,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
got := d.current.crc.Sum64()
var tmp [4]byte
binary.LittleEndian.PutUint32(tmp[:], uint32(got))
if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC {
if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC {
if debugDecoder {
println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
}
@ -534,7 +534,7 @@ func (d *Decoder) nextBlockSync() (ok bool) {
}
// Update/Check CRC
if d.frame.HasCheckSum {
if !d.o.ignoreChecksum && d.frame.HasCheckSum {
d.frame.crc.Write(d.current.b)
if d.current.d.Last {
d.current.err = d.frame.checkCRC()

View file

@ -19,6 +19,7 @@ type decoderOptions struct {
maxDecodedSize uint64
maxWindowSize uint64
dicts []dict
ignoreChecksum bool
}
func (o *decoderOptions) setDefault() {
@ -112,3 +113,11 @@ func WithDecoderMaxWindow(size uint64) DOption {
return nil
}
}
// IgnoreChecksum allows forcibly ignoring checksum checking.
func IgnoreChecksum(b bool) DOption {
return func(o *decoderOptions) error {
o.ignoreChecksum = b
return nil
}
}

View file

@ -290,13 +290,6 @@ func (d *frameDec) checkCRC() error {
if !d.HasCheckSum {
return nil
}
var tmp [4]byte
got := d.crc.Sum64()
// Flip to match file order.
tmp[0] = byte(got >> 0)
tmp[1] = byte(got >> 8)
tmp[2] = byte(got >> 16)
tmp[3] = byte(got >> 24)
// We can overwrite upper tmp now
want, err := d.rawInput.readSmall(4)
@ -305,6 +298,18 @@ func (d *frameDec) checkCRC() error {
return err
}
if d.o.ignoreChecksum {
return nil
}
var tmp [4]byte
got := d.crc.Sum64()
// Flip to match file order.
tmp[0] = byte(got >> 0)
tmp[1] = byte(got >> 8)
tmp[2] = byte(got >> 16)
tmp[3] = byte(got >> 24)
if !bytes.Equal(tmp[:], want) && !ignoreCRC {
if debugDecoder {
println("CRC Check Failed:", tmp[:], "!=", want)
@ -317,6 +322,19 @@ func (d *frameDec) checkCRC() error {
return nil
}
// consumeCRC reads the checksum data if the frame has one.
func (d *frameDec) consumeCRC() error {
if d.HasCheckSum {
_, err := d.rawInput.readSmall(4)
if err != nil {
println("CRC missing?", err)
return err
}
}
return nil
}
// runDecoder will create a sync decoder that will decode a block of data.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
saved := d.history.b
@ -373,13 +391,17 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
err = ErrFrameSizeMismatch
} else if d.HasCheckSum {
var n int
n, err = d.crc.Write(dst[crcStart:])
if err == nil {
if n != len(dst)-crcStart {
err = io.ErrShortWrite
} else {
err = d.checkCRC()
if d.o.ignoreChecksum {
err = d.consumeCRC()
} else {
var n int
n, err = d.crc.Write(dst[crcStart:])
if err == nil {
if n != len(dst)-crcStart {
err = io.ErrShortWrite
} else {
err = d.checkCRC()
}
}
}
}
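A hedged usage sketch of the new decoder option from github.com/klauspost/compress/zstd; the helper name is illustrative only. With IgnoreChecksum(true) the CRC comparison is skipped, and consumeCRC still reads the four checksum bytes so the frame is consumed completely.

func decompressIgnoringChecksum(src []byte) ([]byte, error) {
	dec, err := zstd.NewReader(nil, zstd.IgnoreChecksum(true))
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return dec.DecodeAll(src, nil) // checksum mismatches no longer fail the decode
}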

View file

@ -102,6 +102,11 @@ func (f *BoolFlag) Apply(set *flag.FlagSet) error {
return nil
}
// Get returns the flag's value in the given Context.
func (f *BoolFlag) Get(ctx *Context) bool {
return ctx.Bool(f.Name)
}
// Bool looks up the value of a local BoolFlag, returns
// false if not found
func (cCtx *Context) Bool(name string) bool {

View file

@ -101,6 +101,11 @@ func (f *DurationFlag) Apply(set *flag.FlagSet) error {
return nil
}
// Get returns the flag's value in the given Context.
func (f *DurationFlag) Get(ctx *Context) time.Duration {
return ctx.Duration(f.Name)
}
// Duration looks up the value of a local DurationFlag, returns
// 0 if not found
func (cCtx *Context) Duration(name string) time.Duration {

View file

@ -101,6 +101,11 @@ func (f *Float64Flag) Apply(set *flag.FlagSet) error {
return nil
}
// Get returns the flag's value in the given Context.
func (f *Float64Flag) Get(ctx *Context) float64 {
return ctx.Float64(f.Name)
}
// Float64 looks up the value of a local Float64Flag, returns
// 0 if not found
func (cCtx *Context) Float64(name string) float64 {

View file

@ -177,6 +177,11 @@ func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error {
return nil
}
// Get returns the flag's value in the given Context.
func (f *Float64SliceFlag) Get(ctx *Context) []float64 {
return ctx.Float64Slice(f.Name)
}
// Float64Slice looks up the value of a local Float64SliceFlag, returns
// nil if not found
func (cCtx *Context) Float64Slice(name string) []float64 {

View file

@ -104,6 +104,11 @@ func (f GenericFlag) Apply(set *flag.FlagSet) error {
return nil
}
// Get returns the flag's value in the given Context.
func (f *GenericFlag) Get(ctx *Context) interface{} {
return ctx.Generic(f.Name)
}
// Generic looks up the value of a local GenericFlag, returns
// nil if not found
func (cCtx *Context) Generic(name string) interface{} {

View file

@ -102,6 +102,11 @@ func (f *IntFlag) Apply(set *flag.FlagSet) error {
return nil
}
// Get returns the flag's value in the given Context.
func (f *IntFlag) Get(ctx *Context) int {
return ctx.Int(f.Name)
}
// Int looks up the value of a local IntFlag, returns
// 0 if not found
func (cCtx *Context) Int(name string) int {

View file

@ -101,6 +101,11 @@ func (f *Int64Flag) Apply(set *flag.FlagSet) error {
return nil
}
// Get returns the flag's value in the given Context.
func (f *Int64Flag) Get(ctx *Context) int64 {
return ctx.Int64(f.Name)
}
// Int64 looks up the value of a local Int64Flag, returns
// 0 if not found
func (cCtx *Context) Int64(name string) int64 {

View file

@ -176,6 +176,11 @@ func (f *Int64SliceFlag) Apply(set *flag.FlagSet) error {
return nil
}
// Get returns the flag's value in the given Context.
func (f *Int64SliceFlag) Get(ctx *Context) []int64 {
return ctx.Int64Slice(f.Name)
}
// Int64Slice looks up the value of a local Int64SliceFlag, returns
// nil if not found
func (cCtx *Context) Int64Slice(name string) []int64 {

View file

@ -187,6 +187,11 @@ func (f *IntSliceFlag) Apply(set *flag.FlagSet) error {
return nil
}
// Get returns the flag's value in the given Context.
func (f *IntSliceFlag) Get(ctx *Context) []int {
return ctx.IntSlice(f.Name)
}
// IntSlice looks up the value of a local IntSliceFlag, returns
// nil if not found
func (cCtx *Context) IntSlice(name string) []int {

View file

@ -96,6 +96,11 @@ func (f *PathFlag) Apply(set *flag.FlagSet) error {
return nil
}
// Get returns the flag's value in the given Context.
func (f *PathFlag) Get(ctx *Context) string {
return ctx.Path(f.Name)
}
// Path looks up the value of a local PathFlag, returns
// "" if not found
func (cCtx *Context) Path(name string) string {

View file

@ -97,6 +97,11 @@ func (f *StringFlag) Apply(set *flag.FlagSet) error {
return nil
}
// Get returns the flag's value in the given Context.
func (f *StringFlag) Get(ctx *Context) string {
return ctx.String(f.Name)
}
// String looks up the value of a local StringFlag, returns
// "" if not found
func (cCtx *Context) String(name string) string {

View file

@ -188,6 +188,11 @@ func (f *StringSliceFlag) Apply(set *flag.FlagSet) error {
return nil
}
// Get returns the flag's value in the given Context.
func (f *StringSliceFlag) Get(ctx *Context) []string {
return ctx.StringSlice(f.Name)
}
// StringSlice looks up the value of a local StringSliceFlag, returns
// nil if not found
func (cCtx *Context) StringSlice(name string) []string {

View file

@ -164,6 +164,11 @@ func (f *TimestampFlag) Apply(set *flag.FlagSet) error {
return nil
}
// Get returns the flag's value in the given Context.
func (f *TimestampFlag) Get(ctx *Context) *time.Time {
return ctx.Timestamp(f.Name)
}
// Timestamp gets the timestamp from a flag name
func (cCtx *Context) Timestamp(name string) *time.Time {
if fs := cCtx.lookupFlagSet(name); fs != nil {

View file

@ -101,6 +101,11 @@ func (f *UintFlag) GetEnvVars() []string {
return f.EnvVars
}
// Get returns the flag's value in the given Context.
func (f *UintFlag) Get(ctx *Context) uint {
return ctx.Uint(f.Name)
}
// Uint looks up the value of a local UintFlag, returns
// 0 if not found
func (cCtx *Context) Uint(name string) uint {

View file

@ -101,6 +101,11 @@ func (f *Uint64Flag) GetEnvVars() []string {
return f.EnvVars
}
// Get returns the flag's value in the given Context.
func (f *Uint64Flag) Get(ctx *Context) uint64 {
return ctx.Uint64(f.Name)
}
// Uint64 looks up the value of a local Uint64Flag, returns
// 0 if not found
func (cCtx *Context) Uint64(name string) uint64 {

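The flag hunks above all add the same typed Get accessor. A hedged, self-contained usage sketch for BoolFlag; the program itself is illustrative only.

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	verbose := &cli.BoolFlag{Name: "verbose"}
	app := &cli.App{
		Flags: []cli.Flag{verbose},
		Action: func(ctx *cli.Context) error {
			fmt.Println(verbose.Get(ctx)) // equivalent to ctx.Bool("verbose")
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}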
vendor/modules.txt vendored
View file

@ -34,7 +34,7 @@ github.com/VictoriaMetrics/metricsql/binaryop
# github.com/VividCortex/ewma v1.2.0
## explicit; go 1.12
github.com/VividCortex/ewma
# github.com/aws/aws-sdk-go v1.44.7
# github.com/aws/aws-sdk-go v1.44.9
## explicit; go 1.11
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/arn
@ -151,7 +151,7 @@ github.com/influxdata/influxdb/pkg/escape
# github.com/jmespath/go-jmespath v0.4.0
## explicit; go 1.14
github.com/jmespath/go-jmespath
# github.com/klauspost/compress v1.15.2
# github.com/klauspost/compress v1.15.3
## explicit; go 1.16
github.com/klauspost/compress
github.com/klauspost/compress/flate
@ -221,7 +221,7 @@ github.com/rivo/uniseg
# github.com/russross/blackfriday/v2 v2.1.0
## explicit
github.com/russross/blackfriday/v2
# github.com/urfave/cli/v2 v2.5.1
# github.com/urfave/cli/v2 v2.6.0
## explicit; go 1.18
github.com/urfave/cli/v2
# github.com/valyala/bytebufferpool v1.0.0
@ -343,7 +343,7 @@ google.golang.org/appengine/internal/socket
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/socket
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20220504150022-98cd25cafc72
# google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3
## explicit; go 1.15
google.golang.org/genproto/googleapis/api/annotations
google.golang.org/genproto/googleapis/iam/v1