From 05c65bd83f4a414b043d4f330c7e3f60a5b62a62 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 20:57:19 -0800 Subject: [PATCH 01/38] lib/storage: speed up search for data block for the given tsids Use binary search instead of linear scan for looking up the needed data block inside index block. Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3425 --- lib/storage/part_search.go | 31 ++++++++++++++++++------------- 1 file changed, 18 insertions(+), 13 deletions(-) diff --git a/lib/storage/part_search.go b/lib/storage/part_search.go index 1ceacd8284..88e92c501b 100644 --- a/lib/storage/part_search.go +++ b/lib/storage/part_search.go @@ -228,24 +228,29 @@ func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) { } func (ps *partSearch) searchBHS() bool { - for i := range ps.bhs { - bh := &ps.bhs[i] - - nextTSID: - if bh.TSID.Less(&ps.BlockRef.bh.TSID) { - // Skip blocks with small tsid values. - continue + bhs := ps.bhs + for len(bhs) > 0 { + // Skip block headers with tsids smaller than the given tsid. + tsid := &ps.BlockRef.bh.TSID + n := sort.Search(len(bhs), func(i int) bool { + return !bhs[i].TSID.Less(tsid) + }) + if n == len(bhs) { + // Nothing found. + break } + bhs = bhs[n:] - // Invariant: ps.BlockRef.bh.TSID <= bh.TSID + // Invariant: tsid <= bh.TSID - if bh.TSID.MetricID != ps.BlockRef.bh.TSID.MetricID { - // ps.BlockRef.bh.TSID < bh.TSID: no more blocks with the given tsid. + bh := &bhs[0] + if bh.TSID.MetricID != tsid.MetricID { + // tsid < bh.TSID: no more blocks with the given tsid. // Proceed to the next (bigger) tsid. if !ps.nextTSID() { return false } - goto nextTSID + continue } // Found the block with the given tsid. Verify timestamp range. @@ -254,6 +259,7 @@ func (ps *partSearch) searchBHS() bool { // So use linear search instead of binary search. if bh.MaxTimestamp < ps.tr.MinTimestamp { // Skip the block with too small timestamps. + bhs = bhs[1:] continue } if bh.MinTimestamp > ps.tr.MaxTimestamp { @@ -269,10 +275,9 @@ func (ps *partSearch) searchBHS() bool { // Read it. ps.BlockRef.init(ps.p, bh) - ps.bhs = ps.bhs[i+1:] + ps.bhs = bhs[1:] return true } - ps.bhs = nil return false } From ddc3d6b5c3604b77ccad128b8334c948bf06c2f2 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 21:17:27 -0800 Subject: [PATCH 02/38] lib/mergeset: drop the crufty code responsible for direct upgrade from releases prior v1.28.0 Upgrade to v1.84.0, wait until the "finished round 2 of background conversion" message appears in the log and then upgrade to newer release. --- docs/CHANGELOG.md | 2 ++ lib/mergeset/table.go | 84 +++---------------------------------------- 2 files changed, 7 insertions(+), 79 deletions(-) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index eb4d4d9434..d1b5507a27 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -15,6 +15,8 @@ The following tip changes can be tested by building VictoriaMetrics components f ## tip +**Update note 1:** this release drops support for direct upgrade from VictoriaMetrics versions prior [v1.28.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.28.0). Please upgrade to `v1.84.0`, wait until `finished round 2 of background conversion` line is emitted to log by single-node VictoriaMetrics or by `vmstorage`, and then upgrade to newer releases. 
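The first patch in this series replaces the linear scan in searchBHS with sort.Search over block headers that are sorted by TSID. A minimal standalone sketch of the same lookup idea, using a plain uint64 in place of the real composite TSID type (the names and types below are illustrative only, not the VictoriaMetrics ones):

	package main

	import (
		"fmt"
		"sort"
	)

	// blockHeader is a simplified stand-in for the real block header:
	// the real TSID is a struct compared via TSID.Less, not a uint64.
	type blockHeader struct {
		tsid uint64
	}

	// firstBlockIndex returns the index of the first header with tsid >= want,
	// mirroring the sort.Search call from the patch.
	func firstBlockIndex(bhs []blockHeader, want uint64) int {
		return sort.Search(len(bhs), func(i int) bool {
			return bhs[i].tsid >= want
		})
	}

	func main() {
		bhs := []blockHeader{{1}, {3}, {3}, {7}, {9}}
		n := firstBlockIndex(bhs, 3)
		if n < len(bhs) && bhs[n].tsid == 3 {
			fmt.Println("first matching block at index", n) // index 1
		}
	}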
+ * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): improve [service discovery](https://docs.victoriametrics.com/sd_configs.html) performance when discovering big number of targets (10K and more). * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add `exported_` prefix to metric names exported by scrape targets if these metric names clash with [automatically generated metrics](https://docs.victoriametrics.com/vmagent.html#automatically-generated-metrics) such as `up`, `scrape_samples_scraped`, etc. This prevents from corruption of automatically generated metrics. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3406). * FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): improve error message when the requested path cannot be properly parsed, so users could identify the issue and properly fix the path. Now the error message links to [url format docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3402). diff --git a/lib/mergeset/table.go b/lib/mergeset/table.go index aa4b595cca..87e4f56437 100644 --- a/lib/mergeset/table.go +++ b/lib/mergeset/table.go @@ -112,8 +112,6 @@ type Table struct { rawItemsFlusherWG sync.WaitGroup - convertersWG sync.WaitGroup - // Use syncwg instead of sync, since Add/Wait may be called from concurrent goroutines. rawItemsPendingFlushesWG syncwg.WaitGroup } @@ -306,12 +304,6 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb logger.Infof("table %q has been opened in %.3f seconds; partsCount: %d; blocksCount: %d, itemsCount: %d; sizeBytes: %d", path, time.Since(startTime).Seconds(), m.PartsCount, m.BlocksCount, m.ItemsCount, m.SizeBytes) - tb.convertersWG.Add(1) - go func() { - tb.convertToV1280() - tb.convertersWG.Done() - }() - if flushCallback != nil { tb.flushCallbackWorkerWG.Add(1) go func() { @@ -345,11 +337,6 @@ func (tb *Table) MustClose() { tb.rawItemsFlusherWG.Wait() logger.Infof("raw items flusher stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), tb.path) - logger.Infof("waiting for converters to stop on %q...", tb.path) - startTime = time.Now() - tb.convertersWG.Wait() - logger.Infof("converters stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), tb.path) - logger.Infof("waiting for part mergers to stop on %q...", tb.path) startTime = time.Now() tb.partMergersWG.Wait() @@ -376,7 +363,7 @@ func (tb *Table) MustClose() { } tb.partsLock.Unlock() - if err := tb.mergePartsOptimal(pws, nil); err != nil { + if err := tb.mergePartsOptimal(pws); err != nil { logger.Panicf("FATAL: cannot flush inmemory parts to files in %q: %s", tb.path, err) } logger.Infof("%d inmemory parts have been flushed to files in %.3f seconds on %q", len(pws), time.Since(startTime).Seconds(), tb.path) @@ -533,63 +520,11 @@ func (tb *Table) rawItemsFlusher() { } } -const convertToV1280FileName = "converted-to-v1.28.0" - -func (tb *Table) convertToV1280() { - // Convert tag->metricID rows into tag->metricIDs rows when upgrading to v1.28.0+. - flagFilePath := tb.path + "/" + convertToV1280FileName - if fs.IsPathExist(flagFilePath) { - // The conversion has been already performed. 
- return - } - - getAllPartsForMerge := func() []*partWrapper { - var pws []*partWrapper - tb.partsLock.Lock() - for _, pw := range tb.parts { - if pw.isInMerge { - continue - } - pw.isInMerge = true - pws = append(pws, pw) - } - tb.partsLock.Unlock() - return pws - } - pws := getAllPartsForMerge() - if len(pws) > 0 { - logger.Infof("started round 1 of background conversion of %q to v1.28.0 format; merge %d parts", tb.path, len(pws)) - startTime := time.Now() - if err := tb.mergePartsOptimal(pws, tb.stopCh); err != nil { - logger.Errorf("failed round 1 of background conversion of %q to v1.28.0 format: %s", tb.path, err) - return - } - logger.Infof("finished round 1 of background conversion of %q to v1.28.0 format in %.3f seconds", tb.path, time.Since(startTime).Seconds()) - - // The second round is needed in order to merge small blocks - // with tag->metricIDs rows left after the first round. - pws = getAllPartsForMerge() - logger.Infof("started round 2 of background conversion of %q to v1.28.0 format; merge %d parts", tb.path, len(pws)) - startTime = time.Now() - if len(pws) > 0 { - if err := tb.mergePartsOptimal(pws, tb.stopCh); err != nil { - logger.Errorf("failed round 2 of background conversion of %q to v1.28.0 format: %s", tb.path, err) - return - } - } - logger.Infof("finished round 2 of background conversion of %q to v1.28.0 format in %.3f seconds", tb.path, time.Since(startTime).Seconds()) - } - - if err := fs.WriteFileAtomically(flagFilePath, []byte("ok"), false); err != nil { - logger.Panicf("FATAL: cannot create %q: %s", flagFilePath, err) - } -} - -func (tb *Table) mergePartsOptimal(pws []*partWrapper, stopCh <-chan struct{}) error { +func (tb *Table) mergePartsOptimal(pws []*partWrapper) error { for len(pws) > defaultPartsToMerge { pwsChunk := pws[:defaultPartsToMerge] pws = pws[defaultPartsToMerge:] - if err := tb.mergeParts(pwsChunk, stopCh, false); err != nil { + if err := tb.mergeParts(pwsChunk, nil, false); err != nil { tb.releasePartsToMerge(pws) return fmt.Errorf("cannot merge %d parts: %w", defaultPartsToMerge, err) } @@ -597,7 +532,7 @@ func (tb *Table) mergePartsOptimal(pws []*partWrapper, stopCh <-chan struct{}) e if len(pws) == 0 { return nil } - if err := tb.mergeParts(pws, stopCh, false); err != nil { + if err := tb.mergeParts(pws, nil, false); err != nil { return fmt.Errorf("cannot merge %d parts: %w", len(pws), err) } return nil @@ -1188,16 +1123,7 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error { for _, fi := range fis { fn := fi.Name() if !fs.IsDirOrSymlink(fi) { - switch fn { - case convertToV1280FileName: - srcPath := srcDir + "/" + fn - dstPath := dstDir + "/" + fn - if err := os.Link(srcPath, dstPath); err != nil { - return fmt.Errorf("cannot hard link from %q to %q: %w", srcPath, dstPath, err) - } - default: - // Skip other non-directories. - } + // Skip non-directories. 
continue } if isSpecialDir(fn) { From 14660d4df5914fba15f1979caed7139edd708e18 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 21:53:01 -0800 Subject: [PATCH 03/38] all: typo fix: `the the` -> `the` --- app/vmagent/remotewrite/remotewrite.go | 2 +- lib/promrelabel/graphite.go | 2 +- lib/promscrape/config.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/app/vmagent/remotewrite/remotewrite.go b/app/vmagent/remotewrite/remotewrite.go index ae5234ff4c..d64d381138 100644 --- a/app/vmagent/remotewrite/remotewrite.go +++ b/app/vmagent/remotewrite/remotewrite.go @@ -251,7 +251,7 @@ func Stop() { // Push sends wr to remote storage systems set via `-remoteWrite.url`. // // If at is nil, then the data is pushed to the configured `-remoteWrite.url`. -// If at isn't nil, the the data is pushed to the configured `-remoteWrite.multitenantURL`. +// If at isn't nil, the data is pushed to the configured `-remoteWrite.multitenantURL`. // // Note that wr may be modified by Push due to relabeling and rounding. func Push(at *auth.Token, wr *prompbmarshal.WriteRequest) { diff --git a/lib/promrelabel/graphite.go b/lib/promrelabel/graphite.go index 015f6bcdc2..de39634b2e 100644 --- a/lib/promrelabel/graphite.go +++ b/lib/promrelabel/graphite.go @@ -106,7 +106,7 @@ func (gmt *graphiteMatchTemplate) Match(dst []string, s string) ([]string, bool) dst = append(dst, s) return dst, true } - // Search for the the start of the next part. + // Search for the start of the next part. p = parts[i+1] i++ n := strings.Index(s, p) diff --git a/lib/promscrape/config.go b/lib/promscrape/config.go index aaa6f8a8a6..eba623caec 100644 --- a/lib/promscrape/config.go +++ b/lib/promscrape/config.go @@ -137,7 +137,7 @@ func (cfg *Config) mustRestart(prevCfg *Config) { // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2884 needGlobalRestart := !areEqualGlobalConfigs(&cfg.Global, &prevCfg.Global) - // Loop over the the new jobs, start new ones and restart updated ones. + // Loop over the new jobs, start new ones and restart updated ones. var started, stopped, restarted int currentJobNames := make(map[string]struct{}, len(cfg.ScrapeConfigs)) for i, sc := range cfg.ScrapeConfigs { From 7c3c08d102e0da93778950127699ecaa676cfda8 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 21:55:06 -0800 Subject: [PATCH 04/38] lib/backup: remove logging duplicate path values in a single error message --- lib/backup/fscommon/fscommon.go | 2 +- lib/backup/fslocal/fslocal.go | 2 +- lib/backup/fsremote/fsremote.go | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/backup/fscommon/fscommon.go b/lib/backup/fscommon/fscommon.go index fd9e3506b9..37d688aeb9 100644 --- a/lib/backup/fscommon/fscommon.go +++ b/lib/backup/fscommon/fscommon.go @@ -45,7 +45,7 @@ func fsync(path string) error { func AppendFiles(dst []string, dir string) ([]string, error) { d, err := os.Open(dir) if err != nil { - return nil, fmt.Errorf("cannot open %q: %w", dir, err) + return nil, fmt.Errorf("cannot open directory: %w", err) } dst, err = appendFilesInternal(dst, d) if err1 := d.Close(); err1 != nil { diff --git a/lib/backup/fslocal/fslocal.go b/lib/backup/fslocal/fslocal.go index 051182a624..055c38123c 100644 --- a/lib/backup/fslocal/fslocal.go +++ b/lib/backup/fslocal/fslocal.go @@ -159,7 +159,7 @@ func (fs *FS) DeletePath(path string) (uint64, error) { // The file could be deleted earlier via symlink. 
return 0, nil } - return 0, fmt.Errorf("cannot open %q at %q: %w", path, fullPath, err) + return 0, fmt.Errorf("cannot open %q: %w", path, err) } fi, err := f.Stat() _ = f.Close() diff --git a/lib/backup/fsremote/fsremote.go b/lib/backup/fsremote/fsremote.go index 4e4939f912..d2a7ce8512 100644 --- a/lib/backup/fsremote/fsremote.go +++ b/lib/backup/fsremote/fsremote.go @@ -107,12 +107,12 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error { // Cannot create hardlink. Just copy file contents srcFile, err := os.Open(srcPath) if err != nil { - return fmt.Errorf("cannot open file %q: %w", srcPath, err) + return fmt.Errorf("cannot open source file: %w", err) } dstFile, err := os.Create(dstPath) if err != nil { _ = srcFile.Close() - return fmt.Errorf("cannot create file %q: %w", dstPath, err) + return fmt.Errorf("cannot create destination file: %w", err) } n, err := io.Copy(dstFile, srcFile) if err1 := dstFile.Close(); err1 != nil { @@ -141,7 +141,7 @@ func (fs *FS) DownloadPart(p common.Part, w io.Writer) error { path := fs.path(p) r, err := os.Open(path) if err != nil { - return fmt.Errorf("cannot open %q: %w", path, err) + return err } n, err := io.Copy(w, r) if err1 := r.Close(); err1 != nil && err == nil { From 4f28513b1a06207ff3113c1e224fd33b1de11a84 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 22:00:20 -0800 Subject: [PATCH 05/38] lib/fs: remove logging redundant path values in a single error message --- lib/fs/fs.go | 57 +++++++++++++++++++++++++++++--------------- lib/fs/fs_solaris.go | 4 ++-- lib/fs/fs_unix.go | 4 ++-- lib/fs/reader_at.go | 2 +- 4 files changed, 43 insertions(+), 24 deletions(-) diff --git a/lib/fs/fs.go b/lib/fs/fs.go index 14ae2d6823..92ae1e67d5 100644 --- a/lib/fs/fs.go +++ b/lib/fs/fs.go @@ -25,11 +25,38 @@ func MustSyncPath(path string) { mustSyncPath(path) } +// WriteFileAndSync writes data to the file at path and then calls fsync on the created file. +// +// The fsync guarantees that the written data survives hardware reset after successful call. +// +// This function may leave the file at the path in inconsistent state on app crash +// in the middle of the write. +// Use WriteFileAtomically if the file at the path must be either written in full +// or not written at all on app crash in the middle of the write. +func WriteFileAndSync(path string, data []byte) error { + f, err := filestream.Create(path, false) + if err != nil { + return err + } + if _, err := f.Write(data); err != nil { + f.MustClose() + // Do not call MustRemoveAll(path), so the user could inpsect + // the file contents during investigation of the issue. + return fmt.Errorf("cannot write %d bytes to %q: %w", len(data), path, err) + } + // Sync and close the file. + f.MustClose() + return nil +} + // WriteFileAtomically atomically writes data to the given file path. // -// WriteFileAtomically returns only after the file is fully written and synced +// This function returns only after the file is fully written and synced // to the underlying storage. // +// This function guarantees that the file at path either fully written or not written at all on app crash +// in the middle of the write. +// // If the file at path already exists, then the file is overwritten atomically if canOverwrite is true. // Otherwise error is returned. 
func WriteFileAtomically(path string, data []byte, canOverwrite bool) error { @@ -40,26 +67,18 @@ func WriteFileAtomically(path string, data []byte, canOverwrite bool) error { return fmt.Errorf("cannot create file %q, since it already exists", path) } + // Write data to a temporary file. n := atomic.AddUint64(&tmpFileNum, 1) tmpPath := fmt.Sprintf("%s.tmp.%d", path, n) - f, err := filestream.Create(tmpPath, false) - if err != nil { - return fmt.Errorf("cannot create file %q: %w", tmpPath, err) - } - if _, err := f.Write(data); err != nil { - f.MustClose() - MustRemoveAll(tmpPath) - return fmt.Errorf("cannot write %d bytes to file %q: %w", len(data), tmpPath, err) + if err := WriteFileAndSync(tmpPath, data); err != nil { + return fmt.Errorf("cannot write data to temporary file: %w", err) } - // Sync and close the file. - f.MustClose() - - // Atomically move the file from tmpPath to path. + // Atomically move the temporary file from tmpPath to path. if err := os.Rename(tmpPath, path); err != nil { // do not call MustRemoveAll(tmpPath) here, so the user could inspect - // the file contents during investigating the issue. - return fmt.Errorf("cannot move %q to %q: %w", tmpPath, path, err) + // the file contents during investigation of the issue. + return fmt.Errorf("cannot move temporary file %q to %q: %w", tmpPath, path, err) } // Sync the containing directory, so the file is guaranteed to appear in the directory. @@ -123,7 +142,7 @@ func RemoveDirContents(dir string) { } d, err := os.Open(dir) if err != nil { - logger.Panicf("FATAL: cannot open dir %q: %s", dir, err) + logger.Panicf("FATAL: cannot open dir: %s", err) } defer MustClose(d) names, err := d.Readdirnames(-1) @@ -185,7 +204,7 @@ func IsEmptyDir(path string) bool { // See https://stackoverflow.com/a/30708914/274937 f, err := os.Open(path) if err != nil { - logger.Panicf("FATAL: unexpected error when opening directory %q: %s", path, err) + logger.Panicf("FATAL: cannot open dir: %s", err) } _, err = f.Readdirnames(1) MustClose(f) @@ -230,7 +249,7 @@ var atomicDirRemoveCounter = uint64(time.Now().UnixNano()) func MustRemoveTemporaryDirs(dir string) { d, err := os.Open(dir) if err != nil { - logger.Panicf("FATAL: cannot open dir %q: %s", dir, err) + logger.Panicf("FATAL: cannot open dir: %s", err) } defer MustClose(d) fis, err := d.Readdir(-1) @@ -259,7 +278,7 @@ func HardLinkFiles(srcDir, dstDir string) error { d, err := os.Open(srcDir) if err != nil { - return fmt.Errorf("cannot open srcDir=%q: %w", srcDir, err) + return fmt.Errorf("cannot open srcDir: %w", err) } defer func() { if err := d.Close(); err != nil { diff --git a/lib/fs/fs_solaris.go b/lib/fs/fs_solaris.go index 8cddca829d..ac94ea406c 100644 --- a/lib/fs/fs_solaris.go +++ b/lib/fs/fs_solaris.go @@ -19,7 +19,7 @@ func mUnmap(data []byte) error { func mustSyncPath(path string) { d, err := os.Open(path) if err != nil { - logger.Panicf("FATAL: cannot open %q: %s", path, err) + logger.Panicf("FATAL: cannot open file for fsync: %s", err) } if err := d.Sync(); err != nil { _ = d.Close() @@ -51,7 +51,7 @@ func createFlockFile(flockFile string) (*os.File, error) { func mustGetFreeSpace(path string) uint64 { d, err := os.Open(path) if err != nil { - logger.Panicf("FATAL: cannot determine free disk space on %q: %s", path, err) + logger.Panicf("FATAL: cannot open dir for determining free disk space: %s", err) } defer MustClose(d) diff --git a/lib/fs/fs_unix.go b/lib/fs/fs_unix.go index 20cf6f6c08..bcb789c94a 100644 --- a/lib/fs/fs_unix.go +++ b/lib/fs/fs_unix.go @@ -22,7 +22,7 @@ 
func mUnmap(data []byte) error { func mustSyncPath(path string) { d, err := os.Open(path) if err != nil { - logger.Panicf("FATAL: cannot open %q: %s", path, err) + logger.Panicf("FATAL: cannot open file for fsync: %s", err) } if err := d.Sync(); err != nil { _ = d.Close() @@ -47,7 +47,7 @@ func createFlockFile(flockFile string) (*os.File, error) { func mustGetFreeSpace(path string) uint64 { d, err := os.Open(path) if err != nil { - logger.Panicf("FATAL: cannot determine free disk space on %q: %s", path, err) + logger.Panicf("FATAL: cannot open dir for determining free disk space: %s", err) } defer MustClose(d) diff --git a/lib/fs/reader_at.go b/lib/fs/reader_at.go index 53ccb44c37..abc1c46990 100644 --- a/lib/fs/reader_at.go +++ b/lib/fs/reader_at.go @@ -89,7 +89,7 @@ func (r *ReaderAt) MustFadviseSequentialRead(prefetch bool) { func MustOpenReaderAt(path string) *ReaderAt { f, err := os.Open(path) if err != nil { - logger.Panicf("FATAL: cannot open file %q for reading: %s", path, err) + logger.Panicf("FATAL: cannot open file for reading: %s", err) } var r ReaderAt r.f = f From 93764746c27c19a05e6eabc5b7559eae48639056 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 22:01:51 -0800 Subject: [PATCH 06/38] lib/filestream: remove logging redundant path values in a single error message --- lib/filestream/filestream.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/filestream/filestream.go b/lib/filestream/filestream.go index 8af10f5cec..2f0e4e5953 100644 --- a/lib/filestream/filestream.go +++ b/lib/filestream/filestream.go @@ -79,7 +79,7 @@ func OpenReaderAt(path string, offset int64, nocache bool) (*Reader, error) { func Open(path string, nocache bool) (*Reader, error) { f, err := os.Open(path) if err != nil { - return nil, fmt.Errorf("cannot open file %q: %w", path, err) + return nil, err } r := &Reader{ f: f, @@ -179,7 +179,7 @@ type Writer struct { func OpenWriterAt(path string, offset int64, nocache bool) (*Writer, error) { f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0600) if err != nil { - return nil, fmt.Errorf("cannot open %q: %w", path, err) + return nil, err } n, err := f.Seek(offset, io.SeekStart) if err != nil { From 152ac564ab82dca8011f2df3160bc1afe8bf89ca Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 22:13:13 -0800 Subject: [PATCH 07/38] lib/storage: remove logging redundant path values in a single error message --- lib/storage/block_stream_reader.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/storage/block_stream_reader.go b/lib/storage/block_stream_reader.go index aa3149d5b2..2f9e6fc59d 100644 --- a/lib/storage/block_stream_reader.go +++ b/lib/storage/block_stream_reader.go @@ -252,7 +252,7 @@ func (bsr *blockStreamReader) readBlock() error { if err == io.EOF { return io.EOF } - return fmt.Errorf("cannot read index block from index data: %w", err) + return fmt.Errorf("cannot read index block: %w", err) } } @@ -354,11 +354,11 @@ func (bsr *blockStreamReader) readIndexBlock() error { // Read index block. 
bsr.compressedIndexData = bytesutil.ResizeNoCopyMayOverallocate(bsr.compressedIndexData, int(bsr.mr.IndexBlockSize)) if err := fs.ReadFullData(bsr.indexReader, bsr.compressedIndexData); err != nil { - return fmt.Errorf("cannot read index block from index data at offset %d: %w", bsr.indexBlockOffset, err) + return fmt.Errorf("cannot read index block at offset %d: %w", bsr.indexBlockOffset, err) } tmpData, err := encoding.DecompressZSTD(bsr.indexData[:0], bsr.compressedIndexData) if err != nil { - return fmt.Errorf("cannot decompress index block read at offset %d: %w", bsr.indexBlockOffset, err) + return fmt.Errorf("cannot decompress index block at offset %d: %w", bsr.indexBlockOffset, err) } bsr.indexData = tmpData bsr.indexCursor = bsr.indexData From 5ca58cc4fb6c313a43ab305d8a8242580a47dc5f Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 22:14:12 -0800 Subject: [PATCH 08/38] lib/storage: optimization: do not scan block for rows outside retention if it is covered by the retention --- lib/storage/merge.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/storage/merge.go b/lib/storage/merge.go index 7207fd2135..8759c09926 100644 --- a/lib/storage/merge.go +++ b/lib/storage/merge.go @@ -178,6 +178,10 @@ func mergeBlocks(ob, ib1, ib2 *Block, retentionDeadline int64, rowsDeleted *uint } func skipSamplesOutsideRetention(b *Block, retentionDeadline int64, rowsDeleted *uint64) { + if b.bh.MinTimestamp >= retentionDeadline { + // Fast path - the block contains only samples with timestamps bigger than retentionDeadline. + return + } timestamps := b.timestamps nextIdx := b.nextIdx nextIdxOrig := nextIdx From 45299efe22ed9a1a800d04c1fc6d56c1e2e4702f Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 22:17:46 -0800 Subject: [PATCH 09/38] lib/{storage,mergeset}: consistency rename: `flushRaw{Rows,Items} -> flushPending{Rows,Items} --- lib/mergeset/table.go | 10 +++++----- lib/storage/partition.go | 8 ++++---- lib/storage/partition_search_test.go | 2 +- lib/storage/storage.go | 2 +- lib/storage/table.go | 6 +++--- lib/storage/table_search_test.go | 2 +- 6 files changed, 15 insertions(+), 15 deletions(-) diff --git a/lib/mergeset/table.go b/lib/mergeset/table.go index 87e4f56437..4f1e224ae6 100644 --- a/lib/mergeset/table.go +++ b/lib/mergeset/table.go @@ -346,7 +346,7 @@ func (tb *Table) MustClose() { startTime = time.Now() // Flush raw items the last time before exit. - tb.flushRawItems(true) + tb.flushPendingItems(true) // Flush inmemory parts to disk. var pws []*partWrapper @@ -515,7 +515,7 @@ func (tb *Table) rawItemsFlusher() { case <-tb.stopCh: return case <-ticker.C: - tb.flushRawItems(false) + tb.flushPendingItems(false) } } } @@ -543,13 +543,13 @@ func (tb *Table) mergePartsOptimal(pws []*partWrapper) error { // // This function is only for debugging and testing. func (tb *Table) DebugFlush() { - tb.flushRawItems(true) + tb.flushPendingItems(true) // Wait for background flushers to finish. tb.rawItemsPendingFlushesWG.Wait() } -func (tb *Table) flushRawItems(isFinal bool) { +func (tb *Table) flushPendingItems(isFinal bool) { tb.rawItems.flush(tb, isFinal) } @@ -1099,7 +1099,7 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error { } // Flush inmemory items to disk. - tb.flushRawItems(true) + tb.flushPendingItems(true) // The snapshot must be created under the lock in order to prevent from // concurrent modifications via runTransaction. 
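Aside on the retention optimization from patch 08 above: mergeBlocks now skips the per-sample scan entirely when the block's minimum timestamp is already newer than the retention deadline. A small self-contained sketch of that fast path over a sorted timestamp slice (the helper name and signature are hypothetical, not the real skipSamplesOutsideRetention):

	package main

	import "fmt"

	// skipOldSamples drops leading samples older than retentionDeadline,
	// assuming timestamps are sorted in ascending order.
	func skipOldSamples(timestamps []int64, retentionDeadline int64) []int64 {
		if len(timestamps) == 0 || timestamps[0] >= retentionDeadline {
			// Fast path - the whole block is covered by the retention,
			// so there is nothing to scan.
			return timestamps
		}
		n := 0
		for n < len(timestamps) && timestamps[n] < retentionDeadline {
			n++
		}
		return timestamps[n:]
	}

	func main() {
		ts := []int64{10, 20, 30, 40}
		fmt.Println(skipOldSamples(ts, 25)) // [30 40]
		fmt.Println(skipOldSamples(ts, 5))  // [10 20 30 40] via the fast path
	}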
diff --git a/lib/storage/partition.go b/lib/storage/partition.go index 6f91cba1c2..296b8af68a 100644 --- a/lib/storage/partition.go +++ b/lib/storage/partition.go @@ -649,7 +649,7 @@ func (pt *partition) MustClose() { startTime = time.Now() // Flush raw rows the last time before exit. - pt.flushRawRows(true) + pt.flushPendingRows(true) // Flush inmemory parts to disk. var pws []*partWrapper @@ -710,12 +710,12 @@ func (pt *partition) rawRowsFlusher() { case <-pt.stopCh: return case <-ticker.C: - pt.flushRawRows(false) + pt.flushPendingRows(false) } } } -func (pt *partition) flushRawRows(isFinal bool) { +func (pt *partition) flushPendingRows(isFinal bool) { pt.rawRows.flush(pt, isFinal) } @@ -1639,7 +1639,7 @@ func (pt *partition) CreateSnapshotAt(smallPath, bigPath string) error { startTime := time.Now() // Flush inmemory data to disk. - pt.flushRawRows(true) + pt.flushPendingRows(true) if _, err := pt.flushInmemoryParts(nil, true); err != nil { return fmt.Errorf("cannot flush inmemory parts: %w", err) } diff --git a/lib/storage/partition_search_test.go b/lib/storage/partition_search_test.go index 9649770dfd..b0e39ef7d0 100644 --- a/lib/storage/partition_search_test.go +++ b/lib/storage/partition_search_test.go @@ -185,7 +185,7 @@ func testPartitionSearchEx(t *testing.T, ptt int64, tr TimeRange, partsCount, ma pt.AddRows(rows) // Flush just added rows to a separate partition. - pt.flushRawRows(true) + pt.flushPendingRows(true) } testPartitionSearch(t, pt, tsids, tr, rbsExpected, -1) pt.MustClose() diff --git a/lib/storage/storage.go b/lib/storage/storage.go index 1a50316f3c..9971db2b50 100644 --- a/lib/storage/storage.go +++ b/lib/storage/storage.go @@ -306,7 +306,7 @@ func (s *Storage) updateDeletedMetricIDs(metricIDs *uint64set.Set) { // DebugFlush flushes recently added storage data, so it becomes visible to search. func (s *Storage) DebugFlush() { - s.tb.flushRawRows() + s.tb.flushPendingRows() s.idb().tb.DebugFlush() } diff --git a/lib/storage/table.go b/lib/storage/table.go index 5b7ffbce71..e4b6030449 100644 --- a/lib/storage/table.go +++ b/lib/storage/table.go @@ -215,15 +215,15 @@ func (tb *table) MustClose() { } } -// flushRawRows flushes all the pending rows, so they become visible to search. +// flushPendingRows flushes all the pending rows, so they become visible to search. // // This function is for debug purposes only. -func (tb *table) flushRawRows() { +func (tb *table) flushPendingRows() { ptws := tb.GetPartitions(nil) defer tb.PutPartitions(ptws) for _, ptw := range ptws { - ptw.pt.flushRawRows(true) + ptw.pt.flushPendingRows(true) } } diff --git a/lib/storage/table_search_test.go b/lib/storage/table_search_test.go index c9b1119dcf..17e1f5b86c 100644 --- a/lib/storage/table_search_test.go +++ b/lib/storage/table_search_test.go @@ -197,7 +197,7 @@ func testTableSearchEx(t *testing.T, trData, trSearch TimeRange, partitionsCount } // Flush rows to parts. 
- tb.flushRawRows() + tb.flushPendingRows() } testTableSearch(t, tb, tsids, trSearch, rbsExpected, -1) tb.MustClose() From c4150995ad5a003f0cbce436cb42acddc7a5eeca Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 22:26:33 -0800 Subject: [PATCH 10/38] lib/mergeset: reduce the time needed for the slowest tests --- lib/mergeset/encoding.go | 4 +++- lib/mergeset/encoding_test.go | 2 +- lib/mergeset/table_test.go | 26 ++++++++++---------------- 3 files changed, 14 insertions(+), 18 deletions(-) diff --git a/lib/mergeset/encoding.go b/lib/mergeset/encoding.go index 09d1e0f762..b93383e817 100644 --- a/lib/mergeset/encoding.go +++ b/lib/mergeset/encoding.go @@ -47,7 +47,9 @@ func (it Item) String(data []byte) string { return *(*string)(unsafe.Pointer(sh)) } -func (ib *inmemoryBlock) Len() int { return len(ib.items) } +func (ib *inmemoryBlock) Len() int { + return len(ib.items) +} func (ib *inmemoryBlock) Less(i, j int) bool { items := ib.items diff --git a/lib/mergeset/encoding_test.go b/lib/mergeset/encoding_test.go index 48398ae2fc..8e6ecffd0f 100644 --- a/lib/mergeset/encoding_test.go +++ b/lib/mergeset/encoding_test.go @@ -115,7 +115,7 @@ func TestInmemoryBlockMarshalUnmarshal(t *testing.T) { var itemsLen uint32 var mt marshalType - for i := 0; i < 1000; i++ { + for i := 0; i < 1000; i += 10 { var items []string totalLen := 0 ib.Reset() diff --git a/lib/mergeset/table_test.go b/lib/mergeset/table_test.go index f99ab937c2..0756a13c79 100644 --- a/lib/mergeset/table_test.go +++ b/lib/mergeset/table_test.go @@ -31,7 +31,7 @@ func TestTableOpenClose(t *testing.T) { tb.MustClose() // Re-open created table multiple times. - for i := 0; i < 10; i++ { + for i := 0; i < 4; i++ { tb, err := OpenTable(path, nil, nil, &isReadOnly) if err != nil { t.Fatalf("cannot open created table: %s", err) @@ -53,7 +53,7 @@ func TestTableOpenMultipleTimes(t *testing.T) { } defer tb1.MustClose() - for i := 0; i < 10; i++ { + for i := 0; i < 4; i++ { tb2, err := OpenTable(path, nil, nil, &isReadOnly) if err == nil { tb2.MustClose() @@ -62,8 +62,8 @@ func TestTableOpenMultipleTimes(t *testing.T) { } } -func TestTableAddItemSerial(t *testing.T) { - const path = "TestTableAddItemSerial" +func TestTableAddItemsSerial(t *testing.T) { + const path = "TestTableAddItemsSerial" if err := os.RemoveAll(path); err != nil { t.Fatalf("cannot remove %q: %s", path, err) } @@ -81,7 +81,7 @@ func TestTableAddItemSerial(t *testing.T) { t.Fatalf("cannot open %q: %s", path, err) } - const itemsCount = 1e5 + const itemsCount = 10e3 testAddItemsSerial(tb, itemsCount) // Verify items count after pending items flush. @@ -98,7 +98,7 @@ func TestTableAddItemSerial(t *testing.T) { tb.MustClose() - // Re-open the table and make sure ItemsCount remains the same. + // Re-open the table and make sure itemsCount remains the same. testReopenTable(t, path, itemsCount) // Add more items in order to verify merge between inmemory parts and file-based parts. @@ -110,7 +110,7 @@ func TestTableAddItemSerial(t *testing.T) { testAddItemsSerial(tb, moreItemsCount) tb.MustClose() - // Re-open the table and verify ItemsCount again. + // Re-open the table and verify itemsCount again. 
testReopenTable(t, path, itemsCount+moreItemsCount) } @@ -221,9 +221,7 @@ func TestTableAddItemsConcurrent(t *testing.T) { flushCallback := func() { atomic.AddUint64(&flushes, 1) } - var itemsMerged uint64 prepareBlock := func(data []byte, items []Item) ([]byte, []Item) { - atomic.AddUint64(&itemsMerged, uint64(len(items))) return data, items } var isReadOnly uint32 @@ -232,7 +230,7 @@ func TestTableAddItemsConcurrent(t *testing.T) { t.Fatalf("cannot open %q: %s", path, err) } - const itemsCount = 1e5 + const itemsCount = 10e3 testAddItemsConcurrent(tb, itemsCount) // Verify items count after pending items flush. @@ -240,10 +238,6 @@ func TestTableAddItemsConcurrent(t *testing.T) { if atomic.LoadUint64(&flushes) == 0 { t.Fatalf("unexpected zero flushes") } - n := atomic.LoadUint64(&itemsMerged) - if n < itemsCount { - t.Fatalf("too low number of items merged; got %v; must be at least %v", n, itemsCount) - } var m TableMetrics tb.UpdateMetrics(&m) @@ -253,7 +247,7 @@ func TestTableAddItemsConcurrent(t *testing.T) { tb.MustClose() - // Re-open the table and make sure ItemsCount remains the same. + // Re-open the table and make sure itemsCount remains the same. testReopenTable(t, path, itemsCount) // Add more items in order to verify merge between inmemory parts and file-based parts. @@ -265,7 +259,7 @@ func TestTableAddItemsConcurrent(t *testing.T) { testAddItemsConcurrent(tb, moreItemsCount) tb.MustClose() - // Re-open the table and verify ItemsCount again. + // Re-open the table and verify itemsCount again. testReopenTable(t, path, itemsCount+moreItemsCount) } From f3e3a3daeb78bfc0e06e13f0fbe782d3ae17e44f Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 22:30:30 -0800 Subject: [PATCH 11/38] lib/{mergeset,storage}: take into account byte slice capacity when returning the size of in-memory part This results in more correct reporting of memory usage for in-memory parts --- lib/mergeset/inmemory_part.go | 5 ++--- lib/storage/inmemory_part.go | 9 ++++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/lib/mergeset/inmemory_part.go b/lib/mergeset/inmemory_part.go index 9b41faca82..012d6dcc20 100644 --- a/lib/mergeset/inmemory_part.go +++ b/lib/mergeset/inmemory_part.go @@ -76,9 +76,8 @@ var inmemoryPartBytePool bytesutil.ByteBufferPool // It is safe calling NewPart multiple times. // It is unsafe re-using mp while the returned part is in use. func (mp *inmemoryPart) NewPart() *part { - ph := mp.ph size := mp.size() - p, err := newPart(&ph, "", size, mp.metaindexData.NewReader(), &mp.indexData, &mp.itemsData, &mp.lensData) + p, err := newPart(&mp.ph, "", size, mp.metaindexData.NewReader(), &mp.indexData, &mp.itemsData, &mp.lensData) if err != nil { logger.Panicf("BUG: cannot create a part from inmemoryPart: %s", err) } @@ -86,5 +85,5 @@ func (mp *inmemoryPart) NewPart() *part { } func (mp *inmemoryPart) size() uint64 { - return uint64(len(mp.metaindexData.B) + len(mp.indexData.B) + len(mp.itemsData.B) + len(mp.lensData.B)) + return uint64(cap(mp.metaindexData.B) + cap(mp.indexData.B) + cap(mp.itemsData.B) + cap(mp.lensData.B)) } diff --git a/lib/storage/inmemory_part.go b/lib/storage/inmemory_part.go index 3f78e24b7b..b0681c9849 100644 --- a/lib/storage/inmemory_part.go +++ b/lib/storage/inmemory_part.go @@ -49,9 +49,12 @@ func (mp *inmemoryPart) InitFromRows(rows []rawRow) { // It is safe calling NewPart multiple times. // It is unsafe re-using mp while the returned part is in use. 
func (mp *inmemoryPart) NewPart() (*part, error) { - ph := mp.ph - size := uint64(len(mp.timestampsData.B) + len(mp.valuesData.B) + len(mp.indexData.B) + len(mp.metaindexData.B)) - return newPart(&ph, "", size, mp.metaindexData.NewReader(), &mp.timestampsData, &mp.valuesData, &mp.indexData) + size := mp.size() + return newPart(&mp.ph, "", size, mp.metaindexData.NewReader(), &mp.timestampsData, &mp.valuesData, &mp.indexData) +} + +func (mp *inmemoryPart) size() uint64 { + return uint64(cap(mp.timestampsData.B) + cap(mp.valuesData.B) + cap(mp.indexData.B) + cap(mp.metaindexData.B)) } func getInmemoryPart() *inmemoryPart { From 6d87462f4b7e703dad3f780ad86eb594e77ef86b Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 22:34:43 -0800 Subject: [PATCH 12/38] lib/mergeset: use the given compressLevel for index and metaindex compression in in-memory part Previously only data was compressed with the given compressLevel --- lib/mergeset/inmemory_part.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/mergeset/inmemory_part.go b/lib/mergeset/inmemory_part.go index 012d6dcc20..c3caca201c 100644 --- a/lib/mergeset/inmemory_part.go +++ b/lib/mergeset/inmemory_part.go @@ -60,14 +60,14 @@ func (mp *inmemoryPart) Init(ib *inmemoryBlock) { bb := inmemoryPartBytePool.Get() bb.B = mp.bh.Marshal(bb.B[:0]) - mp.indexData.B = encoding.CompressZSTDLevel(mp.indexData.B[:0], bb.B, 0) + mp.indexData.B = encoding.CompressZSTDLevel(mp.indexData.B[:0], bb.B, compressLevel) mp.mr.firstItem = append(mp.mr.firstItem[:0], mp.bh.firstItem...) mp.mr.blockHeadersCount = 1 mp.mr.indexBlockOffset = 0 mp.mr.indexBlockSize = uint32(len(mp.indexData.B)) bb.B = mp.mr.Marshal(bb.B[:0]) - mp.metaindexData.B = encoding.CompressZSTDLevel(mp.metaindexData.B[:0], bb.B, 0) + mp.metaindexData.B = encoding.CompressZSTDLevel(mp.metaindexData.B[:0], bb.B, compressLevel) inmemoryPartBytePool.Put(bb) } From 343c69fc153591a914f2636bc1076e8386fb44e9 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 22:45:53 -0800 Subject: [PATCH 13/38] lib/{mergeset,storage}: pass compressLevel to blockStreamWriter.InitFromInmemoryPart This allows packing in-memory blocks with different compression levels depending on its contents. This may save memory usage. --- lib/mergeset/block_stream_writer.go | 7 ++----- lib/mergeset/merge_test.go | 10 +++++----- lib/mergeset/part_search_test.go | 2 +- lib/mergeset/table.go | 14 +++++++++----- lib/storage/block_stream_writer.go | 7 ++----- lib/storage/block_stream_writer_timing_test.go | 2 +- lib/storage/merge_test.go | 4 ++-- lib/storage/merge_timing_test.go | 2 +- lib/storage/partition.go | 4 ++-- lib/storage/raw_row.go | 5 ++++- 10 files changed, 29 insertions(+), 28 deletions(-) diff --git a/lib/mergeset/block_stream_writer.go b/lib/mergeset/block_stream_writer.go index b25e473257..9c348fbc23 100644 --- a/lib/mergeset/block_stream_writer.go +++ b/lib/mergeset/block_stream_writer.go @@ -63,13 +63,10 @@ func (bsw *blockStreamWriter) reset() { bsw.mrFirstItemCaught = false } -func (bsw *blockStreamWriter) InitFromInmemoryPart(mp *inmemoryPart) { +func (bsw *blockStreamWriter) InitFromInmemoryPart(mp *inmemoryPart, compressLevel int) { bsw.reset() - // Use the minimum compression level for in-memory blocks, - // since they are going to be re-compressed during the merge into file-based blocks. 
- bsw.compressLevel = -5 // See https://github.com/facebook/zstd/releases/tag/v1.3.4 - + bsw.compressLevel = compressLevel bsw.metaindexWriter = &mp.metaindexData bsw.indexWriter = &mp.indexData bsw.itemsWriter = &mp.itemsData diff --git a/lib/mergeset/merge_test.go b/lib/mergeset/merge_test.go index f042bae0fc..6ba874a676 100644 --- a/lib/mergeset/merge_test.go +++ b/lib/mergeset/merge_test.go @@ -30,14 +30,14 @@ func TestMultilevelMerge(t *testing.T) { // First level merge var dstIP1 inmemoryPart var bsw1 blockStreamWriter - bsw1.InitFromInmemoryPart(&dstIP1) + bsw1.InitFromInmemoryPart(&dstIP1, -5) if err := mergeBlockStreams(&dstIP1.ph, &bsw1, bsrs[:5], nil, nil, &itemsMerged); err != nil { t.Fatalf("cannot merge first level part 1: %s", err) } var dstIP2 inmemoryPart var bsw2 blockStreamWriter - bsw2.InitFromInmemoryPart(&dstIP2) + bsw2.InitFromInmemoryPart(&dstIP2, -5) if err := mergeBlockStreams(&dstIP2.ph, &bsw2, bsrs[5:], nil, nil, &itemsMerged); err != nil { t.Fatalf("cannot merge first level part 2: %s", err) } @@ -54,7 +54,7 @@ func TestMultilevelMerge(t *testing.T) { newTestBlockStreamReader(&dstIP1), newTestBlockStreamReader(&dstIP2), } - bsw.InitFromInmemoryPart(&dstIP) + bsw.InitFromInmemoryPart(&dstIP, 1) if err := mergeBlockStreams(&dstIP.ph, &bsw, bsrsTop, nil, nil, &itemsMerged); err != nil { t.Fatalf("cannot merge second level: %s", err) } @@ -73,7 +73,7 @@ func TestMergeForciblyStop(t *testing.T) { bsrs, _ := newTestInmemoryBlockStreamReaders(20, 4000) var dstIP inmemoryPart var bsw blockStreamWriter - bsw.InitFromInmemoryPart(&dstIP) + bsw.InitFromInmemoryPart(&dstIP, 1) ch := make(chan struct{}) var itemsMerged uint64 close(ch) @@ -120,7 +120,7 @@ func testMergeBlockStreamsSerial(blocksToMerge, maxItemsPerBlock int) error { var itemsMerged uint64 var dstIP inmemoryPart var bsw blockStreamWriter - bsw.InitFromInmemoryPart(&dstIP) + bsw.InitFromInmemoryPart(&dstIP, -4) if err := mergeBlockStreams(&dstIP.ph, &bsw, bsrs, nil, nil, &itemsMerged); err != nil { return fmt.Errorf("cannot merge block streams: %w", err) } diff --git a/lib/mergeset/part_search_test.go b/lib/mergeset/part_search_test.go index fb178bcde0..c042d44acd 100644 --- a/lib/mergeset/part_search_test.go +++ b/lib/mergeset/part_search_test.go @@ -149,7 +149,7 @@ func newTestPart(blocksCount, maxItemsPerBlock int) (*part, []string, error) { var itemsMerged uint64 var ip inmemoryPart var bsw blockStreamWriter - bsw.InitFromInmemoryPart(&ip) + bsw.InitFromInmemoryPart(&ip, -3) if err := mergeBlockStreams(&ip.ph, &bsw, bsrs, nil, nil, &itemsMerged); err != nil { return nil, nil, fmt.Errorf("cannot merge blocks: %w", err) } diff --git a/lib/mergeset/table.go b/lib/mergeset/table.go index 4f1e224ae6..25fc20cc41 100644 --- a/lib/mergeset/table.go +++ b/lib/mergeset/table.go @@ -661,6 +661,11 @@ func (tb *Table) mergeInmemoryBlocks(ibs []*inmemoryBlock) *partWrapper { atomic.AddUint64(&tb.activeMerges, 1) defer atomic.AddUint64(&tb.activeMerges, ^uint64(0)) + outItemsCount := uint64(0) + for _, ib := range ibs { + outItemsCount += uint64(ib.Len()) + } + // Prepare blockStreamReaders for source blocks. bsrs := make([]*blockStreamReader, 0, len(ibs)) for _, ib := range ibs { @@ -688,9 +693,10 @@ func (tb *Table) mergeInmemoryBlocks(ibs []*inmemoryBlock) *partWrapper { } // Prepare blockStreamWriter for destination part. 
+ compressLevel := getCompressLevel(outItemsCount) bsw := getBlockStreamWriter() mpDst := &inmemoryPart{} - bsw.InitFromInmemoryPart(mpDst) + bsw.InitFromInmemoryPart(mpDst, compressLevel) // Merge parts. // The merge shouldn't be interrupted by stopCh, @@ -869,7 +875,7 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP mergeIdx := tb.nextMergeIdx() tmpPartPath := fmt.Sprintf("%s/tmp/%016X", tb.path, mergeIdx) bsw := getBlockStreamWriter() - compressLevel := getCompressLevelForPartItems(outItemsCount, outBlocksCount) + compressLevel := getCompressLevel(outItemsCount) if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil { return fmt.Errorf("cannot create destination part %q: %w", tmpPartPath, err) } @@ -958,9 +964,7 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP return nil } -func getCompressLevelForPartItems(itemsCount, blocksCount uint64) int { - // There is no need in using blocksCount here, since mergeset blocks are usually full. - +func getCompressLevel(itemsCount uint64) int { if itemsCount <= 1<<16 { // -5 is the minimum supported compression for zstd. // See https://github.com/facebook/zstd/releases/tag/v1.3.4 diff --git a/lib/storage/block_stream_writer.go b/lib/storage/block_stream_writer.go index 790c363668..ff43aa3cd2 100644 --- a/lib/storage/block_stream_writer.go +++ b/lib/storage/block_stream_writer.go @@ -80,13 +80,10 @@ func (bsw *blockStreamWriter) reset() { } // InitFromInmemoryPart initialzes bsw from inmemory part. -func (bsw *blockStreamWriter) InitFromInmemoryPart(mp *inmemoryPart) { +func (bsw *blockStreamWriter) InitFromInmemoryPart(mp *inmemoryPart, compressLevel int) { bsw.reset() - // Use the minimum compression level for in-memory blocks, - // since they are going to be re-compressed during the merge into file-based blocks. 
- bsw.compressLevel = -5 // See https://github.com/facebook/zstd/releases/tag/v1.3.4 - + bsw.compressLevel = compressLevel bsw.timestampsWriter = &mp.timestampsData bsw.valuesWriter = &mp.valuesData bsw.indexWriter = &mp.indexData diff --git a/lib/storage/block_stream_writer_timing_test.go b/lib/storage/block_stream_writer_timing_test.go index 8e51d06a20..95ecbbe358 100644 --- a/lib/storage/block_stream_writer_timing_test.go +++ b/lib/storage/block_stream_writer_timing_test.go @@ -47,7 +47,7 @@ func benchmarkBlockStreamWriter(b *testing.B, ebs []Block, rowsCount int, writeR } } - bsw.InitFromInmemoryPart(&mp) + bsw.InitFromInmemoryPart(&mp, -5) for i := range ebsCopy { bsw.WriteExternalBlock(&ebsCopy[i], &ph, &rowsMerged) } diff --git a/lib/storage/merge_test.go b/lib/storage/merge_test.go index 189935c41f..276abd3aa5 100644 --- a/lib/storage/merge_test.go +++ b/lib/storage/merge_test.go @@ -361,7 +361,7 @@ func TestMergeForciblyStop(t *testing.T) { var mp inmemoryPart var bsw blockStreamWriter - bsw.InitFromInmemoryPart(&mp) + bsw.InitFromInmemoryPart(&mp, -5) ch := make(chan struct{}) var rowsMerged, rowsDeleted uint64 close(ch) @@ -384,7 +384,7 @@ func testMergeBlockStreams(t *testing.T, bsrs []*blockStreamReader, expectedBloc var mp inmemoryPart var bsw blockStreamWriter - bsw.InitFromInmemoryPart(&mp) + bsw.InitFromInmemoryPart(&mp, -5) strg := newTestStorage() var rowsMerged, rowsDeleted uint64 diff --git a/lib/storage/merge_timing_test.go b/lib/storage/merge_timing_test.go index 5cbbe54552..cfc440c705 100644 --- a/lib/storage/merge_timing_test.go +++ b/lib/storage/merge_timing_test.go @@ -41,7 +41,7 @@ func benchmarkMergeBlockStreams(b *testing.B, mps []*inmemoryPart, rowsPerLoop i bsrs[i].InitFromInmemoryPart(mp) } mpOut.Reset() - bsw.InitFromInmemoryPart(&mpOut) + bsw.InitFromInmemoryPart(&mpOut, -5) if err := mergeBlockStreams(&mpOut.ph, &bsw, bsrs, nil, strg, 0, &rowsMerged, &rowsDeleted); err != nil { panic(fmt.Errorf("cannot merge block streams: %w", err)) } diff --git a/lib/storage/partition.go b/lib/storage/partition.go index 296b8af68a..d73a69837d 100644 --- a/lib/storage/partition.go +++ b/lib/storage/partition.go @@ -1181,7 +1181,7 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro mergeIdx := pt.nextMergeIdx() tmpPartPath := fmt.Sprintf("%s/tmp/%016X", ptPath, mergeIdx) bsw := getBlockStreamWriter() - compressLevel := getCompressLevelForRowsCount(outRowsCount, outBlocksCount) + compressLevel := getCompressLevel(outRowsCount, outBlocksCount) if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil { return fmt.Errorf("cannot create destination part %q: %w", tmpPartPath, err) } @@ -1301,7 +1301,7 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro return nil } -func getCompressLevelForRowsCount(rowsCount, blocksCount uint64) int { +func getCompressLevel(rowsCount, blocksCount uint64) int { avgRowsPerBlock := rowsCount / blocksCount // See https://github.com/facebook/zstd/releases/tag/v1.3.4 about negative compression levels. 
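	// The thresholds below map the average number of rows per block to a zstd
	// compression level: nearly empty blocks get the fast negative levels,
	// while denser blocks are compressed with progressively higher levels.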
if avgRowsPerBlock <= 10 { diff --git a/lib/storage/raw_row.go b/lib/storage/raw_row.go index b1d978d333..206805d628 100644 --- a/lib/storage/raw_row.go +++ b/lib/storage/raw_row.go @@ -86,7 +86,10 @@ func (rrm *rawRowsMarshaler) marshalToInmemoryPart(mp *inmemoryPart, rows []rawR logger.Panicf("BUG: rows count must be smaller than 2^32; got %d", len(rows)) } - rrm.bsw.InitFromInmemoryPart(mp) + // Use the minimum compression level for first-level in-memory blocks, + // since they are going to be re-compressed during subsequent merges. + const compressLevel = -5 // See https://github.com/facebook/zstd/releases/tag/v1.3.4 + rrm.bsw.InitFromInmemoryPart(mp, compressLevel) ph := &mp.ph ph.Reset() From 28e6d9e1fffbfcd9c6ed66dc8bf4a8ad74b51579 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 23:02:10 -0800 Subject: [PATCH 14/38] lib/storage: properly pass retentionMsecs to OpenStorage() at TestIndexDBRepopulateAfterRotation --- lib/storage/index_db_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/storage/index_db_test.go b/lib/storage/index_db_test.go index 9f00a579cb..b0dbb8a2c6 100644 --- a/lib/storage/index_db_test.go +++ b/lib/storage/index_db_test.go @@ -1549,11 +1549,10 @@ func TestMatchTagFilters(t *testing.T) { func TestIndexDBRepopulateAfterRotation(t *testing.T) { path := "TestIndexRepopulateAfterRotation" - s, err := OpenStorage(path, 0, 1e5, 1e5) + s, err := OpenStorage(path, msecsPerMonth, 1e5, 1e5) if err != nil { t.Fatalf("cannot open storage: %s", err) } - s.retentionMsecs = msecsPerMonth defer func() { s.MustClose() if err := os.RemoveAll(path); err != nil { From cb449767162a5b1ed1bf19ee58d3419236d75188 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 23:03:05 -0800 Subject: [PATCH 15/38] lib/{storage,mergeset}: use a single sync.WaitGroup for all background workers This simplifies the code --- lib/mergeset/table.go | 26 +++++++------------- lib/storage/partition.go | 52 +++++++++++----------------------------- 2 files changed, 22 insertions(+), 56 deletions(-) diff --git a/lib/mergeset/table.go b/lib/mergeset/table.go index 25fc20cc41..f0203eabd2 100644 --- a/lib/mergeset/table.go +++ b/lib/mergeset/table.go @@ -107,10 +107,7 @@ type Table struct { stopCh chan struct{} - // Use syncwg instead of sync, since Add/Wait may be called from concurrent goroutines. - partMergersWG syncwg.WaitGroup - - rawItemsFlusherWG sync.WaitGroup + wg sync.WaitGroup // Use syncwg instead of sync, since Add/Wait may be called from concurrent goroutines. 
rawItemsPendingFlushesWG syncwg.WaitGroup @@ -332,15 +329,10 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb func (tb *Table) MustClose() { close(tb.stopCh) - logger.Infof("waiting for raw items flusher to stop on %q...", tb.path) + logger.Infof("waiting for background workers to stop on %q...", tb.path) startTime := time.Now() - tb.rawItemsFlusherWG.Wait() - logger.Infof("raw items flusher stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), tb.path) - - logger.Infof("waiting for part mergers to stop on %q...", tb.path) - startTime = time.Now() - tb.partMergersWG.Wait() - logger.Infof("part mergers stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), tb.path) + tb.wg.Wait() + logger.Infof("background workers stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), tb.path) logger.Infof("flushing inmemory parts to files on %q...", tb.path) startTime = time.Now() @@ -500,10 +492,10 @@ func (tb *Table) putParts(pws []*partWrapper) { } func (tb *Table) startRawItemsFlusher() { - tb.rawItemsFlusherWG.Add(1) + tb.wg.Add(1) go func() { tb.rawItemsFlusher() - tb.rawItemsFlusherWG.Done() + tb.wg.Done() }() } @@ -592,8 +584,6 @@ func (tb *Table) mergeRawItemsBlocks(ibs []*inmemoryBlock, isFinal bool) { if len(ibs) == 0 { return } - tb.partMergersWG.Add(1) - defer tb.partMergersWG.Done() pws := make([]*partWrapper, 0, (len(ibs)+defaultPartsToMerge-1)/defaultPartsToMerge) var pwsLock sync.Mutex @@ -720,12 +710,12 @@ func (tb *Table) mergeInmemoryBlocks(ibs []*inmemoryBlock) *partWrapper { func (tb *Table) startPartMergers() { for i := 0; i < mergeWorkersCount; i++ { - tb.partMergersWG.Add(1) + tb.wg.Add(1) go func() { if err := tb.partMerger(); err != nil { logger.Panicf("FATAL: unrecoverable error when merging parts in %q: %s", tb.path, err) } - tb.partMergersWG.Done() + tb.wg.Done() }() } } diff --git a/lib/storage/partition.go b/lib/storage/partition.go index d73a69837d..57354802b9 100644 --- a/lib/storage/partition.go +++ b/lib/storage/partition.go @@ -145,11 +145,7 @@ type partition struct { stopCh chan struct{} - smallPartsMergerWG sync.WaitGroup - bigPartsMergerWG sync.WaitGroup - rawRowsFlusherWG sync.WaitGroup - inmemoryPartsFlusherWG sync.WaitGroup - stalePartsRemoverWG sync.WaitGroup + wg sync.WaitGroup } // partWrapper is a wrapper for the part. @@ -620,30 +616,10 @@ func (pt *partition) MustClose() { // Wait until all the pending transaction deletions are finished. 
pendingTxnDeletionsWG.Wait() - logger.Infof("waiting for stale parts remover to stop on %q...", pt.smallPartsPath) + logger.Infof("waiting for service workers to stop on %q...", pt.smallPartsPath) startTime := time.Now() - pt.stalePartsRemoverWG.Wait() - logger.Infof("stale parts remover stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath) - - logger.Infof("waiting for inmemory parts flusher to stop on %q...", pt.smallPartsPath) - startTime = time.Now() - pt.inmemoryPartsFlusherWG.Wait() - logger.Infof("inmemory parts flusher stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath) - - logger.Infof("waiting for raw rows flusher to stop on %q...", pt.smallPartsPath) - startTime = time.Now() - pt.rawRowsFlusherWG.Wait() - logger.Infof("raw rows flusher stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath) - - logger.Infof("waiting for small part mergers to stop on %q...", pt.smallPartsPath) - startTime = time.Now() - pt.smallPartsMergerWG.Wait() - logger.Infof("small part mergers stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath) - - logger.Infof("waiting for big part mergers to stop on %q...", pt.bigPartsPath) - startTime = time.Now() - pt.bigPartsMergerWG.Wait() - logger.Infof("big part mergers stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.bigPartsPath) + pt.wg.Wait() + logger.Infof("service workers stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath) logger.Infof("flushing inmemory parts to files on %q...", pt.smallPartsPath) startTime = time.Now() @@ -695,10 +671,10 @@ func (pt *partition) MustClose() { } func (pt *partition) startRawRowsFlusher() { - pt.rawRowsFlusherWG.Add(1) + pt.wg.Add(1) go func() { pt.rawRowsFlusher() - pt.rawRowsFlusherWG.Done() + pt.wg.Done() }() } @@ -748,10 +724,10 @@ func (rrs *rawRowsShard) appendRawRowsToFlush(dst []rawRow, pt *partition, isFin } func (pt *partition) startInmemoryPartsFlusher() { - pt.inmemoryPartsFlusherWG.Add(1) + pt.wg.Add(1) go func() { pt.inmemoryPartsFlusher() - pt.inmemoryPartsFlusherWG.Done() + pt.wg.Done() }() } @@ -909,17 +885,17 @@ func SetSmallMergeWorkersCount(n int) { func (pt *partition) startMergeWorkers() { for i := 0; i < smallMergeWorkersCount; i++ { - pt.smallPartsMergerWG.Add(1) + pt.wg.Add(1) go func() { pt.smallPartsMerger() - pt.smallPartsMergerWG.Done() + pt.wg.Done() }() } for i := 0; i < bigMergeWorkersCount; i++ { - pt.bigPartsMergerWG.Add(1) + pt.wg.Add(1) go func() { pt.bigPartsMerger() - pt.bigPartsMergerWG.Done() + pt.wg.Done() }() } } @@ -1346,10 +1322,10 @@ func removeParts(pws []*partWrapper, partsToRemove map[*partWrapper]bool, isBig } func (pt *partition) startStalePartsRemover() { - pt.stalePartsRemoverWG.Add(1) + pt.wg.Add(1) go func() { pt.stalePartsRemover() - pt.stalePartsRemoverWG.Done() + pt.wg.Done() }() } From 044a304adb4a2ccaa7d0773dd16bcbc7a225f6f8 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 23:10:16 -0800 Subject: [PATCH 16/38] lib/storage: pass a single arg - rowsPerBlock - to getCompressLevel() function instead of two args --- lib/storage/partition.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/lib/storage/partition.go b/lib/storage/partition.go index 57354802b9..5c28f9a97d 100644 --- a/lib/storage/partition.go +++ b/lib/storage/partition.go @@ -1157,7 +1157,8 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro 
mergeIdx := pt.nextMergeIdx() tmpPartPath := fmt.Sprintf("%s/tmp/%016X", ptPath, mergeIdx) bsw := getBlockStreamWriter() - compressLevel := getCompressLevel(outRowsCount, outBlocksCount) + rowsPerBlock := float64(outRowsCount) / float64(outBlocksCount) + compressLevel := getCompressLevel(rowsPerBlock) if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil { return fmt.Errorf("cannot create destination part %q: %w", tmpPartPath, err) } @@ -1277,28 +1278,27 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro return nil } -func getCompressLevel(rowsCount, blocksCount uint64) int { - avgRowsPerBlock := rowsCount / blocksCount +func getCompressLevel(rowsPerBlock float64) int { // See https://github.com/facebook/zstd/releases/tag/v1.3.4 about negative compression levels. - if avgRowsPerBlock <= 10 { + if rowsPerBlock <= 10 { return -5 } - if avgRowsPerBlock <= 50 { + if rowsPerBlock <= 50 { return -2 } - if avgRowsPerBlock <= 200 { + if rowsPerBlock <= 200 { return -1 } - if avgRowsPerBlock <= 500 { + if rowsPerBlock <= 500 { return 1 } - if avgRowsPerBlock <= 1000 { + if rowsPerBlock <= 1000 { return 2 } - if avgRowsPerBlock <= 2000 { + if rowsPerBlock <= 2000 { return 3 } - if avgRowsPerBlock <= 4000 { + if rowsPerBlock <= 4000 { return 4 } return 5 From 932c1f90ae76eea4805cca0910508f85a5d587ce Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 23:15:22 -0800 Subject: [PATCH 17/38] lib/storage: remove duplicate logging for filepath on errors --- lib/storage/storage.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/storage/storage.go b/lib/storage/storage.go index 9971db2b50..f7fb6169cb 100644 --- a/lib/storage/storage.go +++ b/lib/storage/storage.go @@ -378,13 +378,13 @@ func (s *Storage) ListSnapshots() ([]string, error) { snapshotsPath := s.path + "/snapshots" d, err := os.Open(snapshotsPath) if err != nil { - return nil, fmt.Errorf("cannot open %q: %w", snapshotsPath, err) + return nil, fmt.Errorf("cannot open snapshots directory: %w", err) } defer fs.MustClose(d) fnames, err := d.Readdirnames(-1) if err != nil { - return nil, fmt.Errorf("cannot read contents of %q: %w", snapshotsPath, err) + return nil, fmt.Errorf("cannot read snapshots directory at %q: %w", snapshotsPath, err) } snapshotNames := make([]string, 0, len(fnames)) for _, fname := range fnames { From 33dda2809b63f990dce03211722903146ed13c8c Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sat, 3 Dec 2022 23:30:31 -0800 Subject: [PATCH 18/38] lib/mergeset: panic when too long item is passed to Table.AddItems() --- lib/mergeset/table.go | 38 ++++++----- lib/mergeset/table_search_test.go | 4 +- lib/mergeset/table_test.go | 14 +--- lib/storage/index_db.go | 42 ++++-------- lib/storage/index_db_test.go | 103 +----------------------------- lib/storage/storage.go | 7 +- 6 files changed, 38 insertions(+), 170 deletions(-) diff --git a/lib/mergeset/table.go b/lib/mergeset/table.go index f0203eabd2..5dbbb4ad90 100644 --- a/lib/mergeset/table.go +++ b/lib/mergeset/table.go @@ -138,12 +138,11 @@ func (riss *rawItemsShards) init() { riss.shards = make([]rawItemsShard, rawItemsShardsPerTable) } -func (riss *rawItemsShards) addItems(tb *Table, items [][]byte) error { +func (riss *rawItemsShards) addItems(tb *Table, items [][]byte) { n := atomic.AddUint32(&riss.shardIdx, 1) shards := riss.shards idx := n % uint32(len(shards)) - shard := &shards[idx] - return shard.addItems(tb, items) + shards[idx].addItems(tb, items) } func (riss 
*rawItemsShards) Len() int { @@ -180,8 +179,7 @@ func (ris *rawItemsShard) Len() int { return n } -func (ris *rawItemsShard) addItems(tb *Table, items [][]byte) error { - var err error +func (ris *rawItemsShard) addItems(tb *Table, items [][]byte) { var blocksToFlush []*inmemoryBlock ris.mu.Lock() @@ -193,17 +191,18 @@ func (ris *rawItemsShard) addItems(tb *Table, items [][]byte) error { } ib := ibs[len(ibs)-1] for _, item := range items { - if !ib.Add(item) { - ib = getInmemoryBlock() - if !ib.Add(item) { - putInmemoryBlock(ib) - err = fmt.Errorf("cannot insert an item %q into an empty inmemoryBlock; it looks like the item is too large? len(item)=%d", item, len(item)) - break - } - ibs = append(ibs, ib) - ris.ibs = ibs + if ib.Add(item) { + continue } + ib = getInmemoryBlock() + if ib.Add(item) { + ibs = append(ibs, ib) + continue + } + putInmemoryBlock(ib) + logger.Panicf("BUG: cannot insert too big item into an empty inmemoryBlock len(item)=%d; the caller should be responsible for avoiding too big items", len(item)) } + ris.ibs = ibs if len(ibs) >= maxBlocksPerShard { blocksToFlush = append(blocksToFlush, ibs...) for i := range ibs { @@ -215,7 +214,6 @@ func (ris *rawItemsShard) addItems(tb *Table, items [][]byte) error { ris.mu.Unlock() tb.mergeRawItemsBlocks(blocksToFlush, false) - return err } type partWrapper struct { @@ -457,17 +455,17 @@ func (tb *Table) UpdateMetrics(m *TableMetrics) { } // AddItems adds the given items to the tb. -func (tb *Table) AddItems(items [][]byte) error { - if err := tb.rawItems.addItems(tb, items); err != nil { - return fmt.Errorf("cannot insert data into %q: %w", tb.path, err) - } +// +// The function panics when items contains an item with length exceeding maxInmemoryBlockSize. +// It is caller's responsibility to make sure there are no too long items. +func (tb *Table) AddItems(items [][]byte) { + tb.rawItems.addItems(tb, items) atomic.AddUint64(&tb.itemsAdded, uint64(len(items))) n := 0 for _, item := range items { n += len(item) } atomic.AddUint64(&tb.itemsAddedSizeBytes, uint64(n)) - return nil } // getParts appends parts snapshot to dst and returns it. 
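
The AddItems change above shifts responsibility for item-length validation to the caller: instead of returning an error, Table.AddItems now panics via logger.Panicf when an item exceeds maxInmemoryBlockSize. A minimal sketch of the resulting caller-side guard follows; the maxItemLen constant and the addItems wrapper are illustrative assumptions for this sketch and are not part of the patch:

    package main

    import (
    	"fmt"
    	"log"
    )

    // maxItemLen stands in for mergeset's internal maxInmemoryBlockSize limit;
    // the concrete value here is an assumption made for illustration only.
    const maxItemLen = 64 * 1024

    // addItems rejects oversized items up front before handing them to a
    // panic-on-oversize sink, mirroring the caller contract introduced above.
    func addItems(sink func([][]byte), items [][]byte) error {
    	for _, item := range items {
    		if len(item) > maxItemLen {
    			return fmt.Errorf("item is too long: %d bytes exceeds the %d byte limit", len(item), maxItemLen)
    		}
    	}
    	sink(items) // e.g. tb.AddItems(items) in real code
    	return nil
    }

    func main() {
    	sink := func(items [][]byte) {} // stand-in for (*mergeset.Table).AddItems
    	if err := addItems(sink, [][]byte{[]byte("foo"), []byte("bar")}); err != nil {
    		log.Fatal(err)
    	}
    }

The design choice traded an error return on the insertion hot path for an explicit contract: as the new panic message states, an oversized item indicates a bug in the caller, so it is surfaced immediately rather than propagated as an error.
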
diff --git a/lib/mergeset/table_search_test.go b/lib/mergeset/table_search_test.go index 249aa3109a..f0ec1f8882 100644 --- a/lib/mergeset/table_search_test.go +++ b/lib/mergeset/table_search_test.go @@ -161,9 +161,7 @@ func newTestTable(path string, itemsCount int) (*Table, []string, error) { items := make([]string, itemsCount) for i := 0; i < itemsCount; i++ { item := fmt.Sprintf("%d:%d", rand.Intn(1e9), i) - if err := tb.AddItems([][]byte{[]byte(item)}); err != nil { - return nil, nil, fmt.Errorf("cannot add item: %w", err) - } + tb.AddItems([][]byte{[]byte(item)}) items[i] = item } tb.DebugFlush() diff --git a/lib/mergeset/table_test.go b/lib/mergeset/table_test.go index 0756a13c79..6a79685378 100644 --- a/lib/mergeset/table_test.go +++ b/lib/mergeset/table_test.go @@ -7,8 +7,6 @@ import ( "sync" "sync/atomic" "testing" - - "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" ) func TestTableOpenClose(t *testing.T) { @@ -120,9 +118,7 @@ func testAddItemsSerial(tb *Table, itemsCount int) { if len(item) > maxInmemoryBlockSize { item = item[:maxInmemoryBlockSize] } - if err := tb.AddItems([][]byte{item}); err != nil { - logger.Panicf("BUG: cannot add item to table: %s", err) - } + tb.AddItems([][]byte{item}) } } @@ -146,9 +142,7 @@ func TestTableCreateSnapshotAt(t *testing.T) { const itemsCount = 3e5 for i := 0; i < itemsCount; i++ { item := []byte(fmt.Sprintf("item %d", i)) - if err := tb.AddItems([][]byte{item}); err != nil { - t.Fatalf("cannot add item to table: %s", err) - } + tb.AddItems([][]byte{item}) } tb.DebugFlush() @@ -276,9 +270,7 @@ func testAddItemsConcurrent(tb *Table, itemsCount int) { if len(item) > maxInmemoryBlockSize { item = item[:maxInmemoryBlockSize] } - if err := tb.AddItems([][]byte{item}); err != nil { - logger.Panicf("BUG: cannot add item to table: %s", err) - } + tb.AddItems([][]byte{item}) } }() } diff --git a/lib/storage/index_db.go b/lib/storage/index_db.go index 29658f4144..a28a8492f4 100644 --- a/lib/storage/index_db.go +++ b/lib/storage/index_db.go @@ -400,12 +400,8 @@ func (is *indexSearch) maybeCreateIndexes(tsid *TSID, metricNameRaw []byte, date return false, fmt.Errorf("cannot unmarshal metricNameRaw %q: %w", metricNameRaw, err) } mn.sortTags() - if err := is.createGlobalIndexes(tsid, mn); err != nil { - return false, fmt.Errorf("cannot create global indexes: %w", err) - } - if err := is.createPerDayIndexes(date, tsid.MetricID, mn); err != nil { - return false, fmt.Errorf("cannot create per-day indexes for date=%s: %w", dateToString(date), err) - } + is.createGlobalIndexes(tsid, mn) + is.createPerDayIndexes(date, tsid.MetricID, mn) PutMetricName(mn) atomic.AddUint64(&is.db.timeseriesRepopulated, 1) return true, nil @@ -599,12 +595,8 @@ func (is *indexSearch) createTSIDByName(dst *TSID, metricName, metricNameRaw []b if err := is.db.s.registerSeriesCardinality(dst.MetricID, metricNameRaw); err != nil { return err } - if err := is.createGlobalIndexes(dst, mn); err != nil { - return fmt.Errorf("cannot create global indexes: %w", err) - } - if err := is.createPerDayIndexes(date, dst.MetricID, mn); err != nil { - return fmt.Errorf("cannot create per-day indexes for date=%s: %w", dateToString(date), err) - } + is.createGlobalIndexes(dst, mn) + is.createPerDayIndexes(date, dst.MetricID, mn) // There is no need in invalidating tag cache, since it is invalidated // on db.tb flush via invalidateTagFiltersCache flushCallback passed to OpenTable. 
@@ -668,7 +660,7 @@ func generateTSID(dst *TSID, mn *MetricName) { dst.MetricID = generateUniqueMetricID() } -func (is *indexSearch) createGlobalIndexes(tsid *TSID, mn *MetricName) error { +func (is *indexSearch) createGlobalIndexes(tsid *TSID, mn *MetricName) { // The order of index items is important. // It guarantees index consistency. @@ -699,7 +691,7 @@ func (is *indexSearch) createGlobalIndexes(tsid *TSID, mn *MetricName) error { ii.registerTagIndexes(prefix.B, mn, tsid.MetricID) kbPool.Put(prefix) - return is.db.tb.AddItems(ii.Items) + is.db.tb.AddItems(ii.Items) } type indexItems struct { @@ -1640,9 +1632,7 @@ func (db *indexDB) searchMetricNameWithCache(dst []byte, metricID uint64) ([]byt // Mark the metricID as deleted, so it will be created again when new data point // for the given time series will arrive. - if err := db.deleteMetricIDs([]uint64{metricID}); err != nil { - return dst, fmt.Errorf("cannot delete metricID for missing metricID->metricName entry; metricID=%d; error: %w", metricID, err) - } + db.deleteMetricIDs([]uint64{metricID}) return dst, io.EOF } @@ -1669,9 +1659,7 @@ func (db *indexDB) DeleteTSIDs(qt *querytracer.Tracer, tfss []*TagFilters) (int, if err != nil { return 0, err } - if err := db.deleteMetricIDs(metricIDs); err != nil { - return 0, err - } + db.deleteMetricIDs(metricIDs) // Delete TSIDs in the extDB. deletedCount := len(metricIDs) @@ -1689,10 +1677,10 @@ func (db *indexDB) DeleteTSIDs(qt *querytracer.Tracer, tfss []*TagFilters) (int, return deletedCount, nil } -func (db *indexDB) deleteMetricIDs(metricIDs []uint64) error { +func (db *indexDB) deleteMetricIDs(metricIDs []uint64) { if len(metricIDs) == 0 { // Nothing to delete - return nil + return } // atomically add deleted metricIDs to an inmemory map. @@ -1717,9 +1705,8 @@ func (db *indexDB) deleteMetricIDs(metricIDs []uint64) error { items.B = encoding.MarshalUint64(items.B, metricID) items.Next() } - err := db.tb.AddItems(items.Items) + db.tb.AddItems(items.Items) putIndexItems(items) - return err } func (db *indexDB) loadDeletedMetricIDs() (*uint64set.Set, error) { @@ -2793,7 +2780,7 @@ const ( int64Max = int64((1 << 63) - 1) ) -func (is *indexSearch) createPerDayIndexes(date, metricID uint64, mn *MetricName) error { +func (is *indexSearch) createPerDayIndexes(date, metricID uint64, mn *MetricName) { ii := getIndexItems() defer putIndexItems(ii) @@ -2808,11 +2795,8 @@ func (is *indexSearch) createPerDayIndexes(date, metricID uint64, mn *MetricName kb.B = marshalCommonPrefix(kb.B[:0], nsPrefixDateTagToMetricIDs) kb.B = encoding.MarshalUint64(kb.B, date) ii.registerTagIndexes(kb.B, mn, metricID) - if err := is.db.tb.AddItems(ii.Items); err != nil { - return fmt.Errorf("cannot add per-day entires for metricID %d: %w", metricID, err) - } + is.db.tb.AddItems(ii.Items) is.db.s.dateMetricIDCache.Set(date, metricID) - return nil } func (ii *indexItems) registerTagIndexes(prefix []byte, mn *MetricName, metricID uint64) { diff --git a/lib/storage/index_db_test.go b/lib/storage/index_db_test.go index b0dbb8a2c6..5268a24fb5 100644 --- a/lib/storage/index_db_test.go +++ b/lib/storage/index_db_test.go @@ -523,22 +523,13 @@ func TestIndexDB(t *testing.T) { } }() - if err := testIndexDBBigMetricName(db); err != nil { - t.Fatalf("unexpected error: %s", err) - } mns, tsids, err := testIndexDBGetOrCreateTSIDByName(db, metricGroups) if err != nil { t.Fatalf("unexpected error: %s", err) } - if err := testIndexDBBigMetricName(db); err != nil { - t.Fatalf("unexpected error: %s", err) - } if err := 
testIndexDBCheckTSIDByName(db, mns, tsids, false); err != nil { t.Fatalf("unexpected error: %s", err) } - if err := testIndexDBBigMetricName(db); err != nil { - t.Fatalf("unexpected error: %s", err) - } // Re-open the db and verify it works as expected. db.MustClose() @@ -546,15 +537,9 @@ func TestIndexDB(t *testing.T) { if err != nil { t.Fatalf("cannot open indexDB: %s", err) } - if err := testIndexDBBigMetricName(db); err != nil { - t.Fatalf("unexpected error: %s", err) - } if err := testIndexDBCheckTSIDByName(db, mns, tsids, false); err != nil { t.Fatalf("unexpected error: %s", err) } - if err := testIndexDBBigMetricName(db); err != nil { - t.Fatalf("unexpected error: %s", err) - } }) t.Run("concurrent", func(t *testing.T) { @@ -577,27 +562,15 @@ func TestIndexDB(t *testing.T) { ch := make(chan error, 3) for i := 0; i < cap(ch); i++ { go func() { - if err := testIndexDBBigMetricName(db); err != nil { - ch <- err - return - } mns, tsid, err := testIndexDBGetOrCreateTSIDByName(db, metricGroups) if err != nil { ch <- err return } - if err := testIndexDBBigMetricName(db); err != nil { - ch <- err - return - } if err := testIndexDBCheckTSIDByName(db, mns, tsid, true); err != nil { ch <- err return } - if err := testIndexDBBigMetricName(db); err != nil { - ch <- err - return - } ch <- nil }() } @@ -618,74 +591,6 @@ func TestIndexDB(t *testing.T) { }) } -func testIndexDBBigMetricName(db *indexDB) error { - var bigBytes []byte - for i := 0; i < 128*1000; i++ { - bigBytes = append(bigBytes, byte(i)) - } - var mn MetricName - var tsid TSID - - is := db.getIndexSearch(noDeadline) - defer db.putIndexSearch(is) - - // Try creating too big metric group - mn.Reset() - mn.MetricGroup = append(mn.MetricGroup[:0], bigBytes...) - mn.sortTags() - metricName := mn.Marshal(nil) - metricNameRaw := mn.marshalRaw(nil) - if err := is.GetOrCreateTSIDByName(&tsid, metricName, metricNameRaw, 0); err == nil { - return fmt.Errorf("expecting non-nil error on an attempt to insert metric with too big MetricGroup") - } - - // Try creating too big tag key - mn.Reset() - mn.MetricGroup = append(mn.MetricGroup[:0], "xxx"...) - mn.Tags = []Tag{{ - Key: append([]byte(nil), bigBytes...), - Value: []byte("foobar"), - }} - mn.sortTags() - metricName = mn.Marshal(nil) - metricNameRaw = mn.marshalRaw(nil) - if err := is.GetOrCreateTSIDByName(&tsid, metricName, metricNameRaw, 0); err == nil { - return fmt.Errorf("expecting non-nil error on an attempt to insert metric with too big tag key") - } - - // Try creating too big tag value - mn.Reset() - mn.MetricGroup = append(mn.MetricGroup[:0], "xxx"...) - mn.Tags = []Tag{{ - Key: []byte("foobar"), - Value: append([]byte(nil), bigBytes...), - }} - mn.sortTags() - metricName = mn.Marshal(nil) - metricNameRaw = mn.marshalRaw(nil) - if err := is.GetOrCreateTSIDByName(&tsid, metricName, metricNameRaw, 0); err == nil { - return fmt.Errorf("expecting non-nil error on an attempt to insert metric with too big tag value") - } - - // Try creating metric name with too many tags - mn.Reset() - mn.MetricGroup = append(mn.MetricGroup[:0], "xxx"...) 
- for i := 0; i < 60000; i++ { - mn.Tags = append(mn.Tags, Tag{ - Key: []byte(fmt.Sprintf("foobar %d", i)), - Value: []byte(fmt.Sprintf("sdfjdslkfj %d", i)), - }) - } - mn.sortTags() - metricName = mn.Marshal(nil) - metricNameRaw = mn.marshalRaw(nil) - if err := is.GetOrCreateTSIDByName(&tsid, metricName, metricNameRaw, 0); err == nil { - return fmt.Errorf("expecting non-nil error on an attempt to insert metric with too many tags") - } - - return nil -} - func testIndexDBGetOrCreateTSIDByName(db *indexDB, metricGroups int) ([]MetricName, []TSID, error) { // Create tsids. var mns []MetricName @@ -727,9 +632,7 @@ func testIndexDBGetOrCreateTSIDByName(db *indexDB, metricGroups int) ([]MetricNa date := uint64(timestampFromTime(time.Now())) / msecPerDay for i := range tsids { tsid := &tsids[i] - if err := is.createPerDayIndexes(date, tsid.MetricID, &mns[i]); err != nil { - return nil, nil, fmt.Errorf("error in createPerDayIndexes(%d, %d): %w", date, tsid.MetricID, err) - } + is.createPerDayIndexes(date, tsid.MetricID, &mns[i]) } // Flush index to disk, so it becomes visible for search @@ -1720,9 +1623,7 @@ func TestSearchTSIDWithTimeRange(t *testing.T) { for i := range tsids { tsid := &tsids[i] metricIDs.Add(tsid.MetricID) - if err := is.createPerDayIndexes(date, tsid.MetricID, &mns[i]); err != nil { - t.Fatalf("error in createPerDayIndexes(%d, %d): %s", date, tsid.MetricID, err) - } + is.createPerDayIndexes(date, tsid.MetricID, &mns[i]) } allMetricIDs.Union(&metricIDs) perDayMetricIDs[date] = &metricIDs diff --git a/lib/storage/storage.go b/lib/storage/storage.go index f7fb6169cb..1b686140bb 100644 --- a/lib/storage/storage.go +++ b/lib/storage/storage.go @@ -2070,12 +2070,7 @@ func (s *Storage) updatePerDateData(rows []rawRow, mrs []*MetricRow) error { continue } mn.sortTags() - if err := is.createPerDayIndexes(date, metricID, mn); err != nil { - if firstError == nil { - firstError = fmt.Errorf("error when storing per-date inverted index for (date=%s, metricID=%d): %w", dateToString(date), metricID, err) - } - continue - } + is.createPerDayIndexes(date, metricID, mn) } dateMetricIDsForCache = append(dateMetricIDsForCache, dateMetricID{ date: date, From 544ea89f91cb5b74f2ee423d26a023bbf79e07b5 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sun, 4 Dec 2022 00:01:04 -0800 Subject: [PATCH 19/38] lib/{mergeset,storage}: add start background workers via startBackgroundWorkers() function --- lib/mergeset/table.go | 8 ++++++-- lib/storage/partition.go | 17 +++++++++-------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/lib/mergeset/table.go b/lib/mergeset/table.go index 5dbbb4ad90..d88cba7cb1 100644 --- a/lib/mergeset/table.go +++ b/lib/mergeset/table.go @@ -291,8 +291,7 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb stopCh: make(chan struct{}), } tb.rawItems.init() - tb.startPartMergers() - tb.startRawItemsFlusher() + tb.startBackgroundWorkers() var m TableMetrics tb.UpdateMetrics(&m) @@ -323,6 +322,11 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb return tb, nil } +func (tb *Table) startBackgroundWorkers() { + tb.startPartMergers() + tb.startRawItemsFlusher() +} + // MustClose closes the table. 
func (tb *Table) MustClose() { close(tb.stopCh) diff --git a/lib/storage/partition.go b/lib/storage/partition.go index 5c28f9a97d..17e8aa8baa 100644 --- a/lib/storage/partition.go +++ b/lib/storage/partition.go @@ -204,16 +204,20 @@ func createPartition(timestamp int64, smallPartitionsPath, bigPartitionsPath str pt := newPartition(name, smallPartsPath, bigPartsPath, s) pt.tr.fromPartitionTimestamp(timestamp) - pt.startMergeWorkers() - pt.startRawRowsFlusher() - pt.startInmemoryPartsFlusher() - pt.startStalePartsRemover() + pt.startBackgroundWorkers() logger.Infof("partition %q has been created", name) return pt, nil } +func (pt *partition) startBackgroundWorkers() { + pt.startMergeWorkers() + pt.startRawRowsFlusher() + pt.startInmemoryPartsFlusher() + pt.startStalePartsRemover() +} + // Drop drops all the data on the storage for the given pt. // // The pt must be detached from table before calling pt.Drop. @@ -258,10 +262,7 @@ func openPartition(smallPartsPath, bigPartsPath string, s *Storage) (*partition, if err := pt.tr.fromPartitionName(name); err != nil { return nil, fmt.Errorf("cannot obtain partition time range from smallPartsPath %q: %w", smallPartsPath, err) } - pt.startMergeWorkers() - pt.startRawRowsFlusher() - pt.startInmemoryPartsFlusher() - pt.startStalePartsRemover() + pt.startBackgroundWorkers() return pt, nil } From 91a8afa17295e456c461cc7fe2ad131490888e04 Mon Sep 17 00:00:00 2001 From: Roman Khavronenko Date: Mon, 5 Dec 2022 08:34:54 +0100 Subject: [PATCH 20/38] vmalert: reduce allocations for Prometheus resp parse (#3435) Method `metrics()` now pre-allocates slices for labels and results from query responses. This reduces the number of allocations on the hot path for instant requests. Signed-off-by: hagen1778 --- app/vmalert/datasource/datasource.go | 13 ++++++ app/vmalert/datasource/vm_prom_api.go | 8 ++-- app/vmalert/datasource/vm_prom_api_test.go | 20 ++++++++++ app/vmalert/datasource/vm_test.go | 46 ++++++++++++++++------ 4 files changed, 70 insertions(+), 17 deletions(-) create mode 100644 app/vmalert/datasource/vm_prom_api_test.go diff --git a/app/vmalert/datasource/datasource.go b/app/vmalert/datasource/datasource.go index 898061fe76..6792efb56e 100644 --- a/app/vmalert/datasource/datasource.go +++ b/app/vmalert/datasource/datasource.go @@ -54,6 +54,19 @@ func (m *Metric) SetLabel(key, value string) { m.AddLabel(key, value) } +// SetLabels sets the given map as Metric labels +func (m *Metric) SetLabels(ls map[string]string) { + var i int + m.Labels = make([]Label, len(ls)) + for k, v := range ls { + m.Labels[i] = Label{ + Name: k, + Value: v, + } + i++ + } +} + // AddLabel appends the given label to the label set func (m *Metric) AddLabel(key, value string) { m.Labels = append(m.Labels, Label{Name: key, Value: value}) diff --git a/app/vmalert/datasource/vm_prom_api.go b/app/vmalert/datasource/vm_prom_api.go index 1b5fd59155..7689626fbd 100644 --- a/app/vmalert/datasource/vm_prom_api.go +++ b/app/vmalert/datasource/vm_prom_api.go @@ -32,19 +32,17 @@ type promInstant struct { } func (r promInstant) metrics() ([]Metric, error) { - var result []Metric + result := make([]Metric, len(r.Result)) for i, res := range r.Result { f, err := strconv.ParseFloat(res.TV[1].(string), 64) if err != nil { return nil, fmt.Errorf("metric %v, unable to parse float64 from %s: %w", res, res.TV[1], err) } var m Metric - for k, v := range r.Result[i].Labels { - m.AddLabel(k, v) - } + m.SetLabels(res.Labels) m.Timestamps = append(m.Timestamps, int64(res.TV[0].(float64))) m.Values = 
append(m.Values, f) - result = append(result, m) + result[i] = m } return result, nil } diff --git a/app/vmalert/datasource/vm_prom_api_test.go b/app/vmalert/datasource/vm_prom_api_test.go new file mode 100644 index 0000000000..0a0105810a --- /dev/null +++ b/app/vmalert/datasource/vm_prom_api_test.go @@ -0,0 +1,20 @@ +package datasource + +import ( + "encoding/json" + "testing" +) + +func BenchmarkMetrics(b *testing.B) { + payload := []byte(`[{"metric":{"__name__":"vm_rows"},"value":[1583786142,"13763"]},{"metric":{"__name__":"vm_requests", "foo":"bar", "baz": "qux"},"value":[1583786140,"2000"]}]`) + + var pi promInstant + if err := json.Unmarshal(payload, &pi.Result); err != nil { + b.Fatalf(err.Error()) + } + b.Run("Instant", func(b *testing.B) { + for i := 0; i < b.N; i++ { + _, _ = pi.metrics() + } + }) +} diff --git a/app/vmalert/datasource/vm_test.go b/app/vmalert/datasource/vm_test.go index 1932b892de..e9d7287638 100644 --- a/app/vmalert/datasource/vm_test.go +++ b/app/vmalert/datasource/vm_test.go @@ -7,6 +7,7 @@ import ( "net/http/httptest" "net/url" "reflect" + "sort" "strconv" "strings" "testing" @@ -74,7 +75,7 @@ func TestVMInstantQuery(t *testing.T) { case 5: w.Write([]byte(`{"status":"success","data":{"resultType":"matrix"}}`)) case 6: - w.Write([]byte(`{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"vm_rows"},"value":[1583786142,"13763"]},{"metric":{"__name__":"vm_requests"},"value":[1583786140,"2000"]}]}}`)) + w.Write([]byte(`{"status":"success","data":{"resultType":"vector","result":[{"metric":{"__name__":"vm_rows","foo":"bar"},"value":[1583786142,"13763"]},{"metric":{"__name__":"vm_requests","foo":"baz"},"value":[1583786140,"2000"]}]}}`)) case 7: w.Write([]byte(`{"status":"success","data":{"resultType":"scalar","result":[1583786142, "1"]}}`)) } @@ -115,19 +116,17 @@ func TestVMInstantQuery(t *testing.T) { } expected := []Metric{ { - Labels: []Label{{Value: "vm_rows", Name: "__name__"}}, + Labels: []Label{{Value: "vm_rows", Name: "__name__"}, {Value: "bar", Name: "foo"}}, Timestamps: []int64{1583786142}, Values: []float64{13763}, }, { - Labels: []Label{{Value: "vm_requests", Name: "__name__"}}, + Labels: []Label{{Value: "vm_requests", Name: "__name__"}, {Value: "baz", Name: "foo"}}, Timestamps: []int64{1583786140}, Values: []float64{2000}, }, } - if !reflect.DeepEqual(m, expected) { - t.Fatalf("unexpected metric %+v want %+v", m, expected) - } + metricsEqual(t, m, expected) m, req, err := pq.Query(ctx, query, ts) // 7 - scalar if err != nil { @@ -158,13 +157,36 @@ func TestVMInstantQuery(t *testing.T) { if len(m) != 1 { t.Fatalf("expected 1 metric got %d in %+v", len(m), m) } - exp := Metric{ - Labels: []Label{{Value: "constantLine(10)", Name: "name"}}, - Timestamps: []int64{1611758403}, - Values: []float64{10}, + exp := []Metric{ + { + Labels: []Label{{Value: "constantLine(10)", Name: "name"}}, + Timestamps: []int64{1611758403}, + Values: []float64{10}, + }, } - if !reflect.DeepEqual(m[0], exp) { - t.Fatalf("unexpected metric %+v want %+v", m[0], expected) + metricsEqual(t, m, exp) +} + +func metricsEqual(t *testing.T, gotM, expectedM []Metric) { + for i, exp := range expectedM { + got := gotM[i] + gotTS, expTS := got.Timestamps, exp.Timestamps + if !reflect.DeepEqual(gotTS, expTS) { + t.Fatalf("unexpected timestamps %+v want %+v", gotTS, expTS) + } + gotV, expV := got.Values, exp.Values + if !reflect.DeepEqual(gotV, expV) { + t.Fatalf("unexpected values %+v want %+v", gotV, expV) + } + sort.Slice(got.Labels, func(i, j int) bool { + 
return got.Labels[i].Name < got.Labels[j].Name + }) + sort.Slice(exp.Labels, func(i, j int) bool { + return exp.Labels[i].Name < exp.Labels[j].Name + }) + if !reflect.DeepEqual(exp.Labels, got.Labels) { + t.Fatalf("unexpected labels %+v want %+v", got.Labels, exp.Labels) + } } } From 6801b37e538b3e55e0c7dc1afdc516adf2015ec1 Mon Sep 17 00:00:00 2001 From: Roman Khavronenko Date: Mon, 5 Dec 2022 08:35:33 +0100 Subject: [PATCH 21/38] dashboards: add `Disk space usage %` and `Disk space usage % by type` panels (#3436) The new panels have been added to the vmstorage and drilldown rows. `Disk space usage %` is supposed to show disk space usage percentage. This panel is now also referred by `DiskRunsOutOfSpace` alerting rule. This panel has Drilldown option to show absolute values. `Disk space usage % by type` shows the relation between datapoints and indexdb size. It supposed to help identify cases when indexdb starts to take too much disk space. This panel has Drilldown option to show absolute values. Signed-off-by: hagen1778 --- dashboards/victoriametrics-cluster.json | 507 ++++++++++++++++++------ deployment/docker/alerts-cluster.yml | 2 +- 2 files changed, 392 insertions(+), 117 deletions(-) diff --git a/dashboards/victoriametrics-cluster.json b/dashboards/victoriametrics-cluster.json index 75be1b925e..1274cd6f27 100644 --- a/dashboards/victoriametrics-cluster.json +++ b/dashboards/victoriametrics-cluster.json @@ -1612,8 +1612,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1629,7 +1628,7 @@ "h": 8, "w": 12, "x": 0, - "y": 14 + "y": 30 }, "id": 66, "links": [], @@ -1724,8 +1723,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1741,7 +1739,7 @@ "h": 8, "w": 12, "x": 12, - "y": 14 + "y": 30 }, "id": 138, "links": [], @@ -1835,8 +1833,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1852,7 +1849,7 @@ "h": 8, "w": 12, "x": 0, - "y": 22 + "y": 38 }, "id": 64, "links": [], @@ -1942,8 +1939,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -1972,7 +1968,7 @@ "h": 8, "w": 12, "x": 12, - "y": 22 + "y": 38 }, "id": 122, "links": [], @@ -2080,8 +2076,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2113,7 +2108,7 @@ "h": 8, "w": 12, "x": 0, - "y": 30 + "y": 46 }, "id": 117, "links": [], @@ -2201,8 +2196,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2218,7 +2212,7 @@ "h": 8, "w": 12, "x": 12, - "y": 30 + "y": 46 }, "id": 119, "options": { @@ -2306,8 +2300,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2323,7 +2316,7 @@ "h": 8, "w": 12, "x": 0, - "y": 38 + "y": 54 }, "id": 68, "links": [], @@ -2411,8 +2404,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2428,7 +2420,7 @@ "h": 8, "w": 12, "x": 12, - "y": 38 + "y": 54 }, "id": 120, "options": { @@ -2516,8 +2508,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -2533,7 +2524,7 @@ "h": 8, "w": 12, "x": 0, - "y": 46 + "y": 62 }, "id": 70, "links": [], @@ -2675,7 +2666,7 @@ "h": 8, "w": 12, "x": 0, - "y": 15 + "y": 31 }, "id": 102, "options": { @@ 
-2789,7 +2780,7 @@ "h": 8, "w": 12, "x": 12, - "y": 15 + "y": 31 }, "id": 108, "options": { @@ -2890,7 +2881,7 @@ "h": 8, "w": 12, "x": 0, - "y": 23 + "y": 39 }, "id": 142, "links": [ @@ -3001,7 +2992,7 @@ "h": 8, "w": 12, "x": 12, - "y": 23 + "y": 39 }, "id": 107, "options": { @@ -3100,7 +3091,7 @@ "h": 8, "w": 12, "x": 0, - "y": 31 + "y": 47 }, "id": 170, "links": [], @@ -3206,7 +3197,7 @@ "h": 8, "w": 12, "x": 12, - "y": 31 + "y": 47 }, "id": 116, "links": [], @@ -3308,7 +3299,7 @@ "h": 9, "w": 12, "x": 0, - "y": 39 + "y": 55 }, "id": 144, "options": { @@ -3411,7 +3402,7 @@ "h": 9, "w": 12, "x": 12, - "y": 39 + "y": 55 }, "id": 58, "links": [], @@ -3515,7 +3506,7 @@ "h": 7, "w": 24, "x": 0, - "y": 48 + "y": 64 }, "id": 183, "options": { @@ -3663,7 +3654,7 @@ "h": 9, "w": 12, "x": 0, - "y": 5 + "y": 21 }, "id": 76, "links": [], @@ -3779,7 +3770,7 @@ "h": 9, "w": 12, "x": 12, - "y": 5 + "y": 21 }, "id": 86, "links": [], @@ -3904,7 +3895,7 @@ "h": 8, "w": 12, "x": 0, - "y": 14 + "y": 30 }, "id": 80, "links": [], @@ -4009,7 +4000,7 @@ "h": 8, "w": 12, "x": 12, - "y": 14 + "y": 30 }, "id": 78, "links": [], @@ -4125,7 +4116,7 @@ "h": 8, "w": 12, "x": 0, - "y": 22 + "y": 38 }, "id": 82, "options": { @@ -4232,7 +4223,7 @@ "h": 8, "w": 12, "x": 12, - "y": 22 + "y": 38 }, "id": 74, "options": { @@ -4334,7 +4325,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4445,7 +4437,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4557,7 +4550,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4702,7 +4696,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4839,7 +4834,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -4942,7 +4938,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5084,7 +5081,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5187,7 +5185,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5246,7 +5245,7 @@ "type": "prometheus", "uid": "$ds" }, - "description": "Shows amount of on-disk space occupied by data points.", + "description": "Shows the percentage of used disk space. 
It is recommended to have at least 20% of free disk space for the best performance.", "fieldConfig": { "defaults": { "color": { @@ -5259,7 +5258,7 @@ "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", - "fillOpacity": 10, + "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, @@ -5276,20 +5275,27 @@ "spanNulls": false, "stacking": { "group": "A", - "mode": "normal" + "mode": "none" }, "thresholdsStyle": { - "mode": "off" + "mode": "line" } }, - "links": [], + "links": [ + { + "targetBlank": true, + "title": "Drilldown", + "url": "/d/oS7Bi_0Wz?viewPanel=200&var-ds=$ds&var-instance=$instance&${__url_time_range}" + } + ], "mappings": [], "min": 0, "thresholds": { "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5297,7 +5303,7 @@ } ] }, - "unit": "bytes" + "unit": "percentunit" }, "overrides": [] }, @@ -5307,7 +5313,7 @@ "x": 0, "y": 37 }, - "id": 18, + "id": 20, "links": [], "options": { "legend": { @@ -5324,7 +5330,7 @@ }, "tooltip": { "mode": "multi", - "sort": "none" + "sort": "desc" } }, "pluginVersion": "9.1.0", @@ -5335,15 +5341,43 @@ "uid": "$ds" }, "editorMode": "code", - "expr": "sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!=\"indexdb\"}) ", + "expr": "max(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) /\n (\n sum(vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) +\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n ) \n)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "disk usage", + "legendFormat": "max", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$ds" + }, + "editorMode": "code", + "expr": "min(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) /\n (\n sum(vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) +\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n ) \n)", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "min", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$ds" + }, + "editorMode": "code", + "expr": "avg(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) /\n (\n sum(vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance) +\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n ) \n)", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "avg", + "range": true, + "refId": "C" } ], - "title": "Disk space usage (datapoints) ($instance)", + "title": "Disk space usage % ($instance)", "type": "timeseries" }, { @@ -5394,7 +5428,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5457,7 +5492,7 @@ "type": "prometheus", "uid": "$ds" }, - "description": "Shows amount of on-disk space occupied by inverted index.", + "description": "Shows the percentage of used disk space by type: datapoints or indexdb. Normally, indexdb takes much less space comparing to datapoints. But with high churn rate the size of the indexdb could grow significantly.\n\nThe sum of the % can be > 100% since panel shows max % per-job and per-instance. 
It means different instance can have different ratio between datapoints and indexdb size.", "fieldConfig": { "defaults": { "color": { @@ -5470,7 +5505,7 @@ "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", - "fillOpacity": 10, + "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, @@ -5487,28 +5522,31 @@ "spanNulls": false, "stacking": { "group": "A", - "mode": "normal" + "mode": "none" }, "thresholdsStyle": { - "mode": "off" + "mode": "line" } }, - "links": [], + "links": [ + { + "targetBlank": true, + "title": "Drilldown", + "url": "/d/oS7Bi_0Wz?viewPanel=201&var-ds=$ds&var-instance=$instance&${__url_time_range}" + } + ], "mappings": [], "min": 0, "thresholds": { "mode": "absolute", "steps": [ { - "color": "green" - }, - { - "color": "red", - "value": 80 + "color": "green", + "value": null } ] }, - "unit": "bytes" + "unit": "percentunit" }, "overrides": [] }, @@ -5518,7 +5556,7 @@ "x": 0, "y": 45 }, - "id": 20, + "id": 202, "links": [], "options": { "legend": { @@ -5535,7 +5573,7 @@ }, "tooltip": { "mode": "multi", - "sort": "none" + "sort": "desc" } }, "pluginVersion": "9.1.0", @@ -5546,15 +5584,29 @@ "uid": "$ds" }, "editorMode": "code", - "expr": "sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type=\"indexdb\"})", + "expr": "max(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type=\"indexdb\"}) by(job, instance)\n / \n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n)", "format": "time_series", "intervalFactor": 1, - "legendFormat": "disk usage", + "legendFormat": "indexdb", "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$ds" + }, + "editorMode": "code", + "expr": "max(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"}) by(job, instance)\n / \n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n)", + "format": "time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "datapoints", + "range": true, + "refId": "B" } ], - "title": "Disk space usage (index) ($instance)", + "title": "Disk space usage % by type ($instance)", "type": "timeseries" }, { @@ -5605,7 +5657,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5740,7 +5793,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -5755,7 +5809,7 @@ "gridPos": { "h": 8, "w": 12, - "x": 12, + "x": 0, "y": 53 }, "id": 135, @@ -5862,8 +5916,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -5879,7 +5932,7 @@ "h": 8, "w": 12, "x": 0, - "y": 82 + "y": 98 }, "id": 92, "links": [], @@ -5969,8 +6022,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -6006,7 +6058,7 @@ "h": 8, "w": 12, "x": 12, - "y": 82 + "y": 98 }, "id": 95, "links": [], @@ -6112,8 +6164,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -6129,7 +6180,7 @@ "h": 8, "w": 12, "x": 0, - "y": 90 + "y": 106 }, "id": 163, "links": [], @@ -6257,8 +6308,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -6274,7 +6324,7 @@ "h": 8, "w": 12, "x": 12, - "y": 90 + "y": 106 }, "id": 165, "links": [], @@ -6398,8 +6448,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - 
"value": null + "color": "green" }, { "color": "red", @@ -6415,7 +6464,7 @@ "h": 8, "w": 12, "x": 0, - "y": 98 + "y": 114 }, "id": 178, "links": [], @@ -6506,8 +6555,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -6523,7 +6571,7 @@ "h": 8, "w": 12, "x": 12, - "y": 98 + "y": 114 }, "id": 180, "links": [], @@ -6630,7 +6678,7 @@ "h": 8, "w": 12, "x": 0, - "y": 106 + "y": 122 }, "id": 179, "links": [], @@ -6737,7 +6785,7 @@ "h": 8, "w": 12, "x": 12, - "y": 106 + "y": 122 }, "id": 181, "links": [], @@ -6855,7 +6903,7 @@ "h": 8, "w": 24, "x": 0, - "y": 114 + "y": 130 }, "id": 93, "links": [], @@ -6991,7 +7039,7 @@ "h": 8, "w": 12, "x": 0, - "y": 8 + "y": 24 }, "id": 97, "links": [], @@ -7117,7 +7165,7 @@ "h": 8, "w": 12, "x": 12, - "y": 8 + "y": 24 }, "id": 99, "links": [], @@ -7241,7 +7289,7 @@ "h": 8, "w": 12, "x": 0, - "y": 16 + "y": 32 }, "id": 185, "links": [], @@ -7385,7 +7433,7 @@ "h": 8, "w": 12, "x": 12, - "y": 16 + "y": 32 }, "id": 187, "links": [], @@ -7523,7 +7571,7 @@ "h": 8, "w": 12, "x": 0, - "y": 24 + "y": 40 }, "id": 90, "links": [], @@ -7631,7 +7679,7 @@ "h": 8, "w": 12, "x": 12, - "y": 24 + "y": 40 }, "id": 88, "links": [], @@ -7738,7 +7786,7 @@ "h": 8, "w": 12, "x": 0, - "y": 32 + "y": 48 }, "id": 139, "links": [], @@ -7845,7 +7893,7 @@ "h": 8, "w": 12, "x": 12, - "y": 32 + "y": 48 }, "id": 114, "links": [], @@ -7911,10 +7959,15 @@ }, "id": 198, "options": { + "code": { + "language": "plaintext", + "showLineNumbers": false, + "showMiniMap": false + }, "content": "Drilldown row is used by other panels on the dashboard to show more detailed metrics per-instance.", "mode": "markdown" }, - "pluginVersion": "9.1.0", + "pluginVersion": "9.2.6", "transparent": true, "type": "text" }, @@ -7966,7 +8019,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -8067,7 +8121,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -8168,7 +8223,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, @@ -8271,7 +8327,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -8328,6 +8385,224 @@ ], "title": "Storage full ETA ($instance)", "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$ds" + }, + "description": "Shows the percentage of used disk space. 
It is recommended to have at least 20% of free disk space for the best performance.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 26 + }, + "id": 200, + "links": [], + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "9.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$ds" + }, + "editorMode": "code", + "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "title": "Disk space usage ($instance)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$ds" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 26 + }, + "id": 201, + "links": [], + "options": { + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Last *", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "9.1.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "$ds" + }, + "editorMode": "code", + "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type=\"indexdb\"}) by(job, instance)", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "{{job}}:{{instance}} (indexdb)", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "$ds" + }, + "editorMode": "code", + "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"}) by(job, instance)", + "format": 
"time_series", + "hide": false, + "intervalFactor": 1, + "legendFormat": "{{job}}:{{instance}} (datapoints)", + "range": true, + "refId": "B" + } + ], + "title": "Disk space usage by type ($instance)", + "type": "timeseries" } ], "title": "Drilldown", diff --git a/deployment/docker/alerts-cluster.yml b/deployment/docker/alerts-cluster.yml index 1a99a08fb4..15c305452c 100644 --- a/deployment/docker/alerts-cluster.yml +++ b/deployment/docker/alerts-cluster.yml @@ -43,7 +43,7 @@ groups: labels: severity: critical annotations: - dashboard: http://localhost:3000/d/oS7Bi_0Wz?viewPanel=110&var-instance={{ $labels.instance }}" + dashboard: http://localhost:3000/d/oS7Bi_0Wz?viewPanel=200&var-instance={{ $labels.instance }}" summary: "Instance {{ $labels.instance }} will run out of disk space soon" description: "Disk utilisation on instance {{ $labels.instance }} is more than 80%.\n Having less than 20% of free disk space could cripple merges processes and overall performance. From 461158a4376ee81856c7423d9752dbf3ed686aa7 Mon Sep 17 00:00:00 2001 From: Yury Molodov Date: Mon, 5 Dec 2022 08:50:34 +0100 Subject: [PATCH 22/38] fix: add word-break for tooltip (#3437) --- .../packages/vmui/src/components/Chart/ChartTooltip/style.scss | 1 + 1 file changed, 1 insertion(+) diff --git a/app/vmui/packages/vmui/src/components/Chart/ChartTooltip/style.scss b/app/vmui/packages/vmui/src/components/Chart/ChartTooltip/style.scss index 5d1a9fe5e8..640697d83e 100644 --- a/app/vmui/packages/vmui/src/components/Chart/ChartTooltip/style.scss +++ b/app/vmui/packages/vmui/src/components/Chart/ChartTooltip/style.scss @@ -76,5 +76,6 @@ $chart-tooltip-y: -1 * ($padding-small + $chart-tooltip-half-icon); &-info { display: grid; grid-gap: 4px; + word-break: break-all; } } From e509552e92c9aa23f36d2af4ba2a7c4dc1ce3566 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 5 Dec 2022 01:01:57 -0800 Subject: [PATCH 23/38] vendor: `make vendor-update` --- go.mod | 62 +- go.sum | 122 ++-- .../go/compute/internal/version.go | 2 +- .../go/compute/metadata/CHANGES.md | 7 + .../go/compute/metadata/metadata.go | 1 + vendor/cloud.google.com/go/storage/CHANGES.md | 7 + .../go/storage/grpc_client.go | 33 +- .../internal/apiv2/stubs/storage.pb.go | 138 ++--- .../go/storage/internal/version.go | 2 +- vendor/cloud.google.com/go/storage/storage.go | 13 +- .../github.com/aws/aws-sdk-go-v2/CHANGELOG.md | 568 ++++++++++++++++++ vendor/github.com/aws/aws-sdk-go-v2/Makefile | 1 + .../aws-sdk-go-v2/aws/go_module_metadata.go | 2 +- .../aws/protocol/eventstream/CHANGELOG.md | 4 + .../eventstream/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/config/CHANGELOG.md | 4 + .../config/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/credentials/CHANGELOG.md | 4 + .../credentials/go_module_metadata.go | 2 +- .../feature/ec2/imds/CHANGELOG.md | 4 + .../feature/ec2/imds/go_module_metadata.go | 2 +- .../feature/s3/manager/CHANGELOG.md | 4 + .../feature/s3/manager/go_module_metadata.go | 2 +- .../internal/configsources/CHANGELOG.md | 4 + .../configsources/go_module_metadata.go | 2 +- .../internal/endpoints/v2/CHANGELOG.md | 4 + .../endpoints/v2/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/internal/ini/CHANGELOG.md | 4 + .../internal/ini/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/internal/v4a/CHANGELOG.md | 4 + .../internal/v4a/go_module_metadata.go | 2 +- .../github.com/aws/aws-sdk-go-v2/modman.toml | 4 +- .../internal/accept-encoding/CHANGELOG.md | 4 + .../accept-encoding/go_module_metadata.go | 2 +- .../service/internal/checksum/CHANGELOG.md | 4 
+ .../internal/checksum/go_module_metadata.go | 2 +- .../internal/presigned-url/CHANGELOG.md | 4 + .../presigned-url/go_module_metadata.go | 2 +- .../service/internal/s3shared/CHANGELOG.md | 4 + .../internal/s3shared/go_module_metadata.go | 2 +- .../aws/aws-sdk-go-v2/service/s3/CHANGELOG.md | 4 + .../service/s3/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/sso/CHANGELOG.md | 4 + .../service/sso/go_module_metadata.go | 2 +- .../service/ssooidc/CHANGELOG.md | 4 + .../service/ssooidc/go_module_metadata.go | 2 +- .../aws-sdk-go-v2/service/sts/CHANGELOG.md | 4 + .../service/sts/go_module_metadata.go | 2 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 135 ++++- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- vendor/github.com/aws/smithy-go/CHANGELOG.md | 4 + vendor/github.com/aws/smithy-go/README.md | 2 +- .../aws/smithy-go/encoding/xml/doc.go | 2 +- .../aws/smithy-go/go_module_metadata.go | 2 +- vendor/github.com/cespare/xxhash/v2/README.md | 31 +- .../github.com/cespare/xxhash/v2/testall.sh | 10 + vendor/github.com/cespare/xxhash/v2/xxhash.go | 47 +- .../cespare/xxhash/v2/xxhash_amd64.s | 308 +++++----- .../cespare/xxhash/v2/xxhash_arm64.s | 183 ++++++ .../v2/{xxhash_amd64.go => xxhash_asm.go} | 2 + .../cespare/xxhash/v2/xxhash_other.go | 22 +- .../cespare/xxhash/v2/xxhash_safe.go | 1 + .../cespare/xxhash/v2/xxhash_unsafe.go | 3 +- .../golang/protobuf/ptypes/empty/empty.pb.go | 62 -- .../prometheus/tsdb/chunkenc/histogram.go | 4 +- vendor/github.com/urfave/cli/v2/app.go | 4 + vendor/github.com/urfave/cli/v2/command.go | 2 +- vendor/github.com/urfave/cli/v2/flag.go | 9 +- .../urfave/cli/v2/godoc-current.txt | 2 + .../x/sys/windows/syscall_windows.go | 1 + .../x/sys/windows/zsyscall_windows.go | 7 + .../golang.org/x/text/unicode/bidi/trieval.go | 12 - vendor/golang.org/x/time/rate/rate.go | 20 +- .../protobuf/field_mask/field_mask.go | 23 - vendor/modules.txt | 64 +- 75 files changed, 1441 insertions(+), 587 deletions(-) create mode 100644 vendor/github.com/cespare/xxhash/v2/testall.sh create mode 100644 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s rename vendor/github.com/cespare/xxhash/v2/{xxhash_amd64.go => xxhash_asm.go} (73%) delete mode 100644 vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go delete mode 100644 vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go diff --git a/go.mod b/go.mod index 8f6bd3b901..164b550bd8 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/VictoriaMetrics/VictoriaMetrics go 1.19 require ( - cloud.google.com/go/storage v1.28.0 + cloud.google.com/go/storage v1.28.1 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.5.1 github.com/VictoriaMetrics/fastcache v1.12.0 @@ -13,19 +13,19 @@ require ( github.com/VictoriaMetrics/fasthttp v1.1.0 github.com/VictoriaMetrics/metrics v1.23.0 github.com/VictoriaMetrics/metricsql v0.49.1 - github.com/aws/aws-sdk-go-v2 v1.17.1 - github.com/aws/aws-sdk-go-v2/config v1.18.3 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.42 - github.com/aws/aws-sdk-go-v2/service/s3 v1.29.4 - github.com/cespare/xxhash/v2 v2.1.2 + github.com/aws/aws-sdk-go-v2 v1.17.2 + github.com/aws/aws-sdk-go-v2/config v1.18.4 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.43 + github.com/aws/aws-sdk-go-v2/service/s3 v1.29.5 + github.com/cespare/xxhash/v2 v2.2.0 github.com/cheggaaa/pb/v3 v3.1.0 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 github.com/googleapis/gax-go/v2 v2.7.0 github.com/influxdata/influxdb 
v1.10.0 github.com/klauspost/compress v1.15.12 - github.com/prometheus/prometheus v0.40.4 - github.com/urfave/cli/v2 v2.23.5 + github.com/prometheus/prometheus v0.40.5 + github.com/urfave/cli/v2 v2.23.6 github.com/valyala/fastjson v1.6.3 github.com/valyala/fastrand v1.1.0 github.com/valyala/fasttemplate v1.2.2 @@ -33,35 +33,35 @@ require ( github.com/valyala/quicktemplate v1.7.0 golang.org/x/net v0.2.0 golang.org/x/oauth2 v0.2.0 - golang.org/x/sys v0.2.0 + golang.org/x/sys v0.3.0 google.golang.org/api v0.103.0 gopkg.in/yaml.v2 v2.4.0 ) require ( cloud.google.com/go v0.107.0 // indirect - cloud.google.com/go/compute v1.12.1 // indirect - cloud.google.com/go/compute/metadata v0.2.1 // indirect + cloud.google.com/go/compute v1.13.0 // indirect + cloud.google.com/go/compute/metadata v0.2.2 // indirect cloud.google.com/go/iam v0.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.1.1 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect - github.com/aws/aws-sdk-go v1.44.149 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.9 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.13.3 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.16 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.10 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.20 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.19 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.11.25 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.17.5 // indirect - github.com/aws/smithy-go v1.13.4 // indirect + github.com/aws/aws-sdk-go v1.44.152 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.13.4 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.17 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.21 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.20 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 // indirect + github.com/aws/smithy-go v1.13.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -108,13 +108,13 @@ require ( go.opentelemetry.io/otel/trace v1.11.1 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/goleak v1.2.0 // indirect - golang.org/x/exp 
v0.0.0-20221126150942-6ab00d035af9 // indirect + golang.org/x/exp v0.0.0-20221204150635-6dcec336b2bb // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/text v0.4.0 // indirect - golang.org/x/time v0.2.0 // indirect + golang.org/x/text v0.5.0 // indirect + golang.org/x/time v0.3.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 // indirect + google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd // indirect google.golang.org/grpc v1.51.0 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index e3f7ed40c4..a942cb491d 100644 --- a/go.sum +++ b/go.sum @@ -21,10 +21,10 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.12.1 h1:gKVJMEyqV5c/UnpzjjQbo3Rjvvqpr9B1DFSbJC4OXr0= -cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= -cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48= -cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute v1.13.0 h1:AYrLkB8NPdDRslNp4Jxmzrhdr03fUAIDbiGFjLWowoU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute/metadata v0.2.2 h1:aWKAjYaBaOSrpKl57+jnS/3fJRQnxL7TvR/u1VVbt6k= +cloud.google.com/go/compute/metadata v0.2.2/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/iam v0.7.0 h1:k4MuwOsS7zGJJ+QfZ5vBK8SgHBAvYN/23BWsiihJ1vs= @@ -39,8 +39,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.28.0 h1:DLrIZ6xkeZX6K70fU/boWx5INJumt6f+nwwWSHXzzGY= -cloud.google.com/go/storage v1.28.0/go.mod h1:qlgZML35PXA3zoEnIkiPLY4/TOkUleufRlu6qmcf7sI= +cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.2.0 h1:sVW/AFBTGyJxDaMYlq0ct3jUXTtj12tQ6zE2GZUgVQw= @@ -89,54 +89,55 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo= github.com/aws/aws-sdk-go v1.38.35/go.mod 
h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.149 h1:zTWaUTbSjgMHvwhaQ91s/6ER8wMb3mA8M1GCZFO9QIo= -github.com/aws/aws-sdk-go v1.44.149/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= -github.com/aws/aws-sdk-go-v2 v1.17.1 h1:02c72fDJr87N8RAC2s3Qu0YuvMRZKNZJ9F+lAehCazk= -github.com/aws/aws-sdk-go-v2 v1.17.1/go.mod h1:JLnGeGONAyi2lWXI1p0PCIOIy333JMVK1U7Hf0aRFLw= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.9 h1:RKci2D7tMwpvGpDNZnGQw9wk6v7o/xSwFcUAuNPoB8k= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.9/go.mod h1:vCmV1q1VK8eoQJ5+aYE7PkK1K6v41qJ5pJdK3ggCDvg= -github.com/aws/aws-sdk-go-v2/config v1.18.3 h1:3kfBKcX3votFX84dm00U8RGA1sCCh3eRMOGzg5dCWfU= -github.com/aws/aws-sdk-go-v2/config v1.18.3/go.mod h1:BYdrbeCse3ZnOD5+2/VE/nATOK8fEUpBtmPMdKSyhMU= -github.com/aws/aws-sdk-go-v2/credentials v1.13.3 h1:ur+FHdp4NbVIv/49bUjBW+FE7e57HOo03ELodttmagk= -github.com/aws/aws-sdk-go-v2/credentials v1.13.3/go.mod h1:/rOMmqYBcFfNbRPU0iN9IgGqD5+V2yp3iWNmIlz0wI4= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19 h1:E3PXZSI3F2bzyj6XxUXdTIfvp425HHhwKsFvmzBwHgs= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19/go.mod h1:VihW95zQpeKQWVPGkwT+2+WJNQV8UXFfMTWdU6VErL8= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.42 h1:bxgBYvvBh+W1RnNYP4ROXEB8N+HSSucDszfE7Rb+kfU= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.42/go.mod h1:LHOsygMiW/14CkFxdXxvzKyMh3jbk/QfZVaDtCbLkl8= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25 h1:nBO/RFxeq/IS5G9Of+ZrgucRciie2qpLy++3UGZ+q2E= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25/go.mod h1:Zb29PYkf42vVYQY6pvSyJCJcFHlPIiY+YKdPtwnvMkY= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19 h1:oRHDrwCTVT8ZXi4sr9Ld+EXk7N/KGssOr2ygNeojEhw= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19/go.mod h1:6Q0546uHDp421okhmmGfbxzq2hBqbXFNpi4k+Q1JnQA= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26 h1:Mza+vlnZr+fPKFKRq/lKGVvM6B/8ZZmNdEopOwSQLms= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26/go.mod h1:Y2OJ+P+MC1u1VKnavT+PshiEuGPyh/7DqxoDNij4/bg= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.16 h1:2EXB7dtGwRYIN3XQ9qwIW504DVbKIw3r89xQnonGdsQ= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.16/go.mod h1:XH+3h395e3WVdd6T2Z3mPxuI+x/HVtdqVOREkTiyubs= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.10 h1:dpiPHgmFstgkLG07KaYAewvuptq5kvo52xn7tVSrtrQ= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.10/go.mod h1:9cBNUHI2aW4ho0A5T87O294iPDuuUOSIEDjnd1Lq/z0= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.20 h1:KSvtm1+fPXE0swe9GPjc6msyrdTT0LB/BP8eLugL1FI= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.20/go.mod h1:Mp4XI/CkWGD79AQxZ5lIFlgvC0A+gl+4BmyG1F+SfNc= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19 h1:GE25AWCdNUPh9AOJzI9KIJnja7IwUc1WyUqz/JTyJ/I= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19/go.mod h1:02CP6iuYP+IVnBX5HULVdSAku/85eHB2Y9EsFhrkEwU= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.19 h1:piDBAaWkaxkkVV3xJJbTehXCZRXYs49kvpi/LG6LR2o= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.19/go.mod h1:BmQWRVkLTmyNzYPFAZgon53qKLWBNSvonugD1MrSWUs= -github.com/aws/aws-sdk-go-v2/service/s3 v1.29.4 h1:QgmmWifaYZZcpaw3y1+ccRlgH6jAvLm4K/MBGUc7cNM= -github.com/aws/aws-sdk-go-v2/service/s3 v1.29.4/go.mod h1:/NHbqPRiwxSPVOB2Xr+StDEH+GWV/64WwnUjv4KYzV0= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.25 
h1:GFZitO48N/7EsFDt8fMa5iYdmWqkUDDB3Eje6z3kbG0= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.25/go.mod h1:IARHuzTXmj1C0KS35vboR0FeJ89OkEy1M9mWbK2ifCI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8 h1:jcw6kKZrtNfBPJkaHrscDOZoe5gvi9wjudnxvozYFJo= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8/go.mod h1:er2JHN+kBY6FcMfcBBKNGCT3CarImmdFzishsqBmSRI= -github.com/aws/aws-sdk-go-v2/service/sts v1.17.5 h1:60SJ4lhvn///8ygCzYy2l53bFW/Q15bVfyjyAWo6zuw= -github.com/aws/aws-sdk-go-v2/service/sts v1.17.5/go.mod h1:bXcN3koeVYiJcdDU89n3kCYILob7Y34AeLopUbZgLT4= -github.com/aws/smithy-go v1.13.4 h1:/RN2z1txIJWeXeOkzX+Hk/4Uuvv7dWtCjbmVJcrskyk= -github.com/aws/smithy-go v1.13.4/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= +github.com/aws/aws-sdk-go v1.44.152 h1:L9aaepO8wHB67gwuGD8VgIYH/cmQDxieCt7FeLa0+fI= +github.com/aws/aws-sdk-go v1.44.152/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go-v2 v1.17.2 h1:r0yRZInwiPBNpQ4aDy/Ssh3ROWsGtKDwar2JS8Lm+N8= +github.com/aws/aws-sdk-go-v2 v1.17.2/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10/go.mod h1:VeTZetY5KRJLuD/7fkQXMU6Mw7H5m/KP2J5Iy9osMno= +github.com/aws/aws-sdk-go-v2/config v1.18.4 h1:VZKhr3uAADXHStS/Gf9xSYVmmaluTUfkc0dcbPiDsKE= +github.com/aws/aws-sdk-go-v2/config v1.18.4/go.mod h1:EZxMPLSdGAZ3eAmkqXfYbRppZJTzFTkv8VyEzJhKko4= +github.com/aws/aws-sdk-go-v2/credentials v1.13.4 h1:nEbHIyJy7mCvQ/kzGG7VWHSBpRB4H6sJy3bWierWUtg= +github.com/aws/aws-sdk-go-v2/credentials v1.13.4/go.mod h1:/Cj5w9LRsNTLSwexsohwDME32OzJ6U81Zs33zr2ZWOM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 h1:tpNOglTZ8kg9T38NpcGBxudqfUAwUzyUnLQ4XSd0CHE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20/go.mod h1:d9xFpWd3qYwdIXM0fvu7deD08vvdRXyc/ueV+0SqaWE= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.43 h1:+bkAMTd5OGyHu2nwNOangjEsP65fR0uhMbZJA52sZ64= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.43/go.mod h1:sS2tu0VEspKuY5eM1vQgy7P/hpZX8F62o6qsghZExWc= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 h1:5WU31cY7m0tG+AiaXuXGoMzo2GBQ1IixtWa8Yywsgco= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26/go.mod h1:2E0LdbJW6lbeU4uxjum99GZzI0ZjDpAb0CoSCM0oeEY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 h1:WW0qSzDWoiWU2FS5DbKpxGilFVlCEJPwx4YtjdfI0Jw= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20/go.mod h1:/+6lSiby8TBFpTVXZgKiN/rCfkYXEGvhlM4zCgPpt7w= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 h1:N2eKFw2S+JWRCtTt0IhIX7uoGGQciD4p6ba+SJv4WEU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27/go.mod h1:RdwFVc7PBYWY33fa2+8T1mSqQ7ZEK4ILpM0wfioDC3w= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.17 h1:5tXbMJ7Jq0iG65oiMg6tCLsHkSaO2xLXa2EmZ29vaTA= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.17/go.mod h1:twV0fKMQuqLY4klyFH56aXNq3AFiA5LO0/frTczEOFE= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 h1:y2+VQzC6Zh2ojtV2LoC0MNwHWc6qXv/j2vrQtlftkdA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11/go.mod h1:iV4q2hsqtNECrfmlXyord9u4zyuFEJX9eLgLpSPzWA8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.21 h1:77b1GfaSuIok5yB/3HYbG+ypWvOJDQ2rVdq943D17R4= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.21/go.mod h1:sPOz31BVdqeeurKEuUpLNSve4tdCNPluE+070HNcEHI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url 
v1.9.20 h1:jlgyHbkZQAgAc7VIxJDmtouH8eNjOk2REVAQfVhdaiQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20/go.mod h1:Xs52xaLBqDEKRcAfX/hgjmD3YQ7c/W+BEyfamlO/W2E= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.20 h1:4K6dbmR0mlp3o4Bo78PnpvzHtYAqEeVMguvEenpMGsI= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.20/go.mod h1:1XpDcReIEOHsjwNToDKhIAO3qwLo1BnfbtSqWJa8j7g= +github.com/aws/aws-sdk-go-v2/service/s3 v1.29.5 h1:nRSEQj1JergKTVc8RGkhZvOEGgcvo4fWpDPwGDeg2ok= +github.com/aws/aws-sdk-go-v2/service/s3 v1.29.5/go.mod h1:wcaJTmjKFDW0s+Se55HBNIds6ghdAGoDDw+SGUdrfAk= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 h1:ActQgdTNQej/RuUJjB9uxYVLDOvRGtUreXF8L3c8wyg= +github.com/aws/aws-sdk-go-v2/service/sso v1.11.26/go.mod h1:uB9tV79ULEZUXc6Ob18A46KSQ0JDlrplPni9XW6Ot60= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 h1:wihKuqYUlA2T/Rx+yu2s6NDAns8B9DgnRooB1PVhY+Q= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9/go.mod h1:2E/3D/mB8/r2J7nK42daoKP/ooCwbf0q1PznNc+DZTU= +github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 h1:VQFOLQVL3BrKM/NLO/7FiS4vcp5bqK0mGMyk09xLoAY= +github.com/aws/aws-sdk-go-v2/service/sts v1.17.6/go.mod h1:Az3OXXYGyfNwQNsK/31L4R75qFYnO641RZGAoV3uH1c= +github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= +github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheggaaa/pb/v3 v3.1.0 h1:3uouEsl32RL7gTiQsuaXD4Bzbfl5tGztXGUvXbs4O04= github.com/cheggaaa/pb/v3 v3.1.0/go.mod h1:YjrevcBqadFDaGQKRdmZxTY42pXEqda48Ea3lt0K/BE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -407,8 +408,8 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/prometheus v0.40.4 h1:6aLtQSvnhmC/uo5Tx910AQm3Fxq1nzaJA6uiYtsA6So= -github.com/prometheus/prometheus v0.40.4/go.mod h1:bxgdmtoSNLmmIVPGmeTJ3OiP67VmuY4yalE4ZP6L/j8= +github.com/prometheus/prometheus v0.40.5 h1:wmk5yNrQlkQ2OvZucMhUB4k78AVfG34szb1UtopS8Vc= +github.com/prometheus/prometheus v0.40.5/go.mod h1:bxgdmtoSNLmmIVPGmeTJ3OiP67VmuY4yalE4ZP6L/j8= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.3 
h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw= @@ -434,8 +435,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/urfave/cli/v2 v2.23.5 h1:xbrU7tAYviSpqeR3X4nEFWUdB/uDZ6DE+HxmRU7Xtyw= -github.com/urfave/cli/v2 v2.23.5/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= +github.com/urfave/cli/v2 v2.23.6 h1:iWmtKD+prGo1nKUtLO0Wg4z9esfBM4rAV4QRLQiEmJ4= +github.com/urfave/cli/v2 v2.23.6/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= @@ -498,8 +499,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 h1:yZNXmy+j/JpX19vZkVktWqAo7Gny4PBWYYK3zskGpx4= -golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/exp v0.0.0-20221204150635-6dcec336b2bb h1:QIsP/NmClBICkqnJ4rSIhnrGiGR7Yv9ZORGGnmmLTPk= +golang.org/x/exp v0.0.0-20221204150635-6dcec336b2bb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -634,8 +635,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -647,13 +648,14 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text 
v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.2.0 h1:52I/1L54xyEQAYdtcSuxtiT84KGYTBGXwayxmIpNJhE= -golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -759,8 +761,8 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 h1:a2S6M0+660BgMNl++4JPlcAO/CjkqYItDEZwkoDQK7c= -google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd h1:OjndDrsik+Gt+e6fs45z9AxiewiKyLKYpA45W5Kpkks= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go index 5ac4a843e1..efedadbea2 100644 --- a/vendor/cloud.google.com/go/compute/internal/version.go +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "1.12.1" +const Version = "1.13.0" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 8631b6d6d2..6e3ee8d6ab 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,12 @@ # Changes +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) + + +### Bug Fixes + +* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) + ## [0.1.0] (2022-10-26) Initial release of metadata being it's own module. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 50538b1d34..d4aad9bf39 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -70,6 +70,7 @@ func newDefaultHTTPClient() *http.Client { Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, + IdleConnTimeout: 60 * time.Second, }, Timeout: 5 * time.Second, } diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md index 580202cf84..f12da250ef 100644 --- a/vendor/cloud.google.com/go/storage/CHANGES.md +++ b/vendor/cloud.google.com/go/storage/CHANGES.md @@ -1,6 +1,13 @@ # Changes +## [1.28.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.28.0...storage/v1.28.1) (2022-12-02) + + +### Bug Fixes + +* **storage:** downgrade some dependencies ([7540152](https://github.com/googleapis/google-cloud-go/commit/754015236d5af7c82a75da218b71a87b9ead6eb5)) + ## [1.28.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.27.0...storage/v1.28.0) (2022-11-03) diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go index f1e551b6fd..4a44cee8b6 100644 --- a/vendor/cloud.google.com/go/storage/grpc_client.go +++ b/vendor/cloud.google.com/go/storage/grpc_client.go @@ -792,14 +792,15 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec s := callSettings(c.settings, opts...) obj := req.dstObject.attrs.toProtoObject("") call := &storagepb.RewriteObjectRequest{ - SourceBucket: bucketResourceName(globalProjectAlias, req.srcObject.bucket), - SourceObject: req.srcObject.name, - RewriteToken: req.token, - DestinationBucket: bucketResourceName(globalProjectAlias, req.dstObject.bucket), - DestinationName: req.dstObject.name, - Destination: obj, - DestinationKmsKey: req.dstObject.keyName, - DestinationPredefinedAcl: req.predefinedACL, + SourceBucket: bucketResourceName(globalProjectAlias, req.srcObject.bucket), + SourceObject: req.srcObject.name, + RewriteToken: req.token, + DestinationBucket: bucketResourceName(globalProjectAlias, req.dstObject.bucket), + DestinationName: req.dstObject.name, + Destination: obj, + DestinationKmsKey: req.dstObject.keyName, + DestinationPredefinedAcl: req.predefinedACL, + CommonObjectRequestParams: toProtoCommonObjectRequestParams(req.dstObject.encryptionKey), } // The userProject, whether source or destination project, is decided by the code calling the interface. 
@@ -863,10 +864,10 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange } b := bucketResourceName(globalProjectAlias, params.bucket) - // TODO(noahdietz): Use encryptionKey to set relevant request fields. req := &storagepb.ReadObjectRequest{ - Bucket: b, - Object: params.object, + Bucket: b, + Object: params.object, + CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey), } // The default is a negative value, which means latest. if params.gen >= 0 { @@ -1008,8 +1009,6 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage return } - // TODO(noahdietz): Send encryption key via CommonObjectRequestParams. - // The chunk buffer is full, but there is no end in sight. This // means that a resumable upload will need to be used to send // multiple chunks, until we are done reading data. Start a @@ -1499,7 +1498,8 @@ func (w *gRPCWriter) startResumableUpload() error { } return run(w.ctx, func() error { upres, err := w.c.raw.StartResumableWrite(w.ctx, &storagepb.StartResumableWriteRequest{ - WriteObjectSpec: spec, + WriteObjectSpec: spec, + CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey), }) w.upid = upres.GetUploadId() return err @@ -1511,7 +1511,9 @@ func (w *gRPCWriter) startResumableUpload() error { func (w *gRPCWriter) queryProgress() (int64, error) { var persistedSize int64 err := run(w.ctx, func() error { - q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{UploadId: w.upid}) + q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{ + UploadId: w.upid, + }) persistedSize = q.GetPersistedSize() return err }, w.settings.retry, true, setRetryHeaderGRPC(w.ctx)) @@ -1582,6 +1584,7 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st req.FirstMessage = &storagepb.WriteObjectRequest_WriteObjectSpec{ WriteObjectSpec: spec, } + req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(w.encryptionKey) } // TODO: Currently the checksums are only sent on the first message diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go index 13bbdb4c96..c36634b1a1 100644 --- a/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go +++ b/vendor/cloud.google.com/go/storage/internal/apiv2/stubs/storage.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/storage/v2/storage.proto package storage @@ -25,17 +25,17 @@ import ( reflect "reflect" sync "sync" - empty "github.com/golang/protobuf/ptypes/empty" - timestamp "github.com/golang/protobuf/ptypes/timestamp" _ "google.golang.org/genproto/googleapis/api/annotations" v1 "google.golang.org/genproto/googleapis/iam/v1" date "google.golang.org/genproto/googleapis/type/date" - field_mask "google.golang.org/genproto/protobuf/field_mask" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -264,7 +264,7 @@ type GetBucketRequest struct { // Mask specifying which fields to read. 
// A "*" field may be used to indicate all fields. // If no mask is specified, will default to all fields. - ReadMask *field_mask.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` } func (x *GetBucketRequest) Reset() { @@ -320,7 +320,7 @@ func (x *GetBucketRequest) GetIfMetagenerationNotMatch() int64 { return 0 } -func (x *GetBucketRequest) GetReadMask() *field_mask.FieldMask { +func (x *GetBucketRequest) GetReadMask() *fieldmaskpb.FieldMask { if x != nil { return x.ReadMask } @@ -443,7 +443,7 @@ type ListBucketsRequest struct { // If no mask is specified, will default to all fields except items.owner, // items.acl, and items.default_object_acl. // * may be used to mean "all fields". - ReadMask *field_mask.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` } func (x *ListBucketsRequest) Reset() { @@ -506,7 +506,7 @@ func (x *ListBucketsRequest) GetPrefix() string { return "" } -func (x *ListBucketsRequest) GetReadMask() *field_mask.FieldMask { +func (x *ListBucketsRequest) GetReadMask() *fieldmaskpb.FieldMask { if x != nil { return x.ReadMask } @@ -664,7 +664,7 @@ type UpdateBucketRequest struct { // Not specifying any fields is an error. // Not specifying a field while setting that field to a non-default value is // an error. - UpdateMask *field_mask.FieldMask `protobuf:"bytes,6,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,6,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } func (x *UpdateBucketRequest) Reset() { @@ -734,7 +734,7 @@ func (x *UpdateBucketRequest) GetPredefinedDefaultObjectAcl() string { return "" } -func (x *UpdateBucketRequest) GetUpdateMask() *field_mask.FieldMask { +func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } @@ -1408,7 +1408,7 @@ type ReadObjectRequest struct { // If no mask is specified, will default to all fields except metadata.owner // and metadata.acl. // * may be used to mean "all fields". - ReadMask *field_mask.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` } func (x *ReadObjectRequest) Reset() { @@ -1513,7 +1513,7 @@ func (x *ReadObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestP return nil } -func (x *ReadObjectRequest) GetReadMask() *field_mask.FieldMask { +func (x *ReadObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { if x != nil { return x.ReadMask } @@ -1554,7 +1554,7 @@ type GetObjectRequest struct { // If no mask is specified, will default to all fields except metadata.acl and // metadata.owner. // * may be used to mean "all fields". 
- ReadMask *field_mask.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` } func (x *GetObjectRequest) Reset() { @@ -1645,7 +1645,7 @@ func (x *GetObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestPa return nil } -func (x *GetObjectRequest) GetReadMask() *field_mask.FieldMask { +func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask { if x != nil { return x.ReadMask } @@ -2158,7 +2158,7 @@ type ListObjectsRequest struct { // If no mask is specified, will default to all fields except items.acl and // items.owner. // * may be used to mean "all fields". - ReadMask *field_mask.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` + ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"` // Filter results to objects whose names are lexicographically equal to or // after lexicographic_start. If lexicographic_end is also set, the objects // listed have names between lexicographic_start (inclusive) and @@ -2252,7 +2252,7 @@ func (x *ListObjectsRequest) GetVersions() bool { return false } -func (x *ListObjectsRequest) GetReadMask() *field_mask.FieldMask { +func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask { if x != nil { return x.ReadMask } @@ -2952,7 +2952,7 @@ type UpdateObjectRequest struct { // Not specifying any fields is an error. // Not specifying a field while setting that field to a non-default value is // an error. - UpdateMask *field_mask.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` // A set of parameters common to Storage API requests concerning an object. CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"` } @@ -3031,7 +3031,7 @@ func (x *UpdateObjectRequest) GetPredefinedAcl() string { return "" } -func (x *UpdateObjectRequest) GetUpdateMask() *field_mask.FieldMask { +func (x *UpdateObjectRequest) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } @@ -3497,7 +3497,7 @@ type UpdateHmacKeyRequest struct { // Update mask for hmac_key. // Not specifying any fields will mean only the `state` field is updated to // the value specified in `hmac_key`. - UpdateMask *field_mask.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } func (x *UpdateHmacKeyRequest) Reset() { @@ -3539,7 +3539,7 @@ func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata { return nil } -func (x *UpdateHmacKeyRequest) GetUpdateMask() *field_mask.FieldMask { +func (x *UpdateHmacKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask { if x != nil { return x.UpdateMask } @@ -3716,14 +3716,14 @@ type Bucket struct { // Output only. The creation time of the bucket. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. 
- CreateTime *timestamp.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + CreateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // The bucket's [https://www.w3.org/TR/cors/][Cross-Origin Resource Sharing] // (CORS) config. Cors []*Bucket_Cors `protobuf:"bytes,12,rep,name=cors,proto3" json:"cors,omitempty"` // Output only. The modification time of the bucket. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - UpdateTime *timestamp.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // The default value for event-based hold on newly created objects in this // bucket. Event-based hold is a way to retain objects indefinitely until an // event occurs, signified by the @@ -3894,7 +3894,7 @@ func (x *Bucket) GetLifecycle() *Bucket_Lifecycle { return nil } -func (x *Bucket) GetCreateTime() *timestamp.Timestamp { +func (x *Bucket) GetCreateTime() *timestamppb.Timestamp { if x != nil { return x.CreateTime } @@ -3908,7 +3908,7 @@ func (x *Bucket) GetCors() []*Bucket_Cors { return nil } -func (x *Bucket) GetUpdateTime() *timestamp.Timestamp { +func (x *Bucket) GetUpdateTime() *timestamppb.Timestamp { if x != nil { return x.UpdateTime } @@ -4296,9 +4296,9 @@ type HmacKeyMetadata struct { // Writable, can be updated by UpdateHmacKey operation. State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` // Output only. The creation time of the HMAC key. - CreateTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // Output only. The last modification time of the HMAC key metadata. - UpdateTime *timestamp.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // The etag of the HMAC key. Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"` } @@ -4370,14 +4370,14 @@ func (x *HmacKeyMetadata) GetState() string { return "" } -func (x *HmacKeyMetadata) GetCreateTime() *timestamp.Timestamp { +func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp { if x != nil { return x.CreateTime } return nil } -func (x *HmacKeyMetadata) GetUpdateTime() *timestamp.Timestamp { +func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp { if x != nil { return x.UpdateTime } @@ -4624,7 +4624,7 @@ type Object struct { // version of the object has been deleted. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - DeleteTime *timestamp.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` + DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"` // Content-Type of the object data, matching // [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5]. 
// If an object is stored without a Content-Type, it is served as @@ -4633,7 +4633,7 @@ type Object struct { // Output only. The creation time of the object. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - CreateTime *timestamp.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` + CreateTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"` // Output only. Number of underlying components that make up this object. Components are // accumulated by compose operations. // Attempting to set or update this field will result in a @@ -4649,7 +4649,7 @@ type Object struct { // Object Lifecycle Configuration. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - UpdateTime *timestamp.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` + UpdateTime *timestamppb.Timestamp `protobuf:"bytes,17,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"` // Cloud KMS Key used to encrypt this object, if the object is encrypted by // such a key. KmsKey string `protobuf:"bytes,18,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"` @@ -4657,7 +4657,7 @@ type Object struct { // object is initially created, it will be set to time_created. // Attempting to set or update this field will result in a // [FieldViolation][google.rpc.BadRequest.FieldViolation]. - UpdateStorageClassTime *timestamp.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"` + UpdateStorageClassTime *timestamppb.Timestamp `protobuf:"bytes,19,opt,name=update_storage_class_time,json=updateStorageClassTime,proto3" json:"update_storage_class_time,omitempty"` // Whether an object is under temporary hold. While this flag is set to true, // the object is protected against deletion and overwrites. A common use case // of this flag is regulatory investigations where objects need to be retained @@ -4671,7 +4671,7 @@ type Object struct { // Note 2: This value can be provided even when temporary hold is set (so that // the user can reason about policy without having to first unset the // temporary hold). - RetentionExpireTime *timestamp.Timestamp `protobuf:"bytes,21,opt,name=retention_expire_time,json=retentionExpireTime,proto3" json:"retention_expire_time,omitempty"` + RetentionExpireTime *timestamppb.Timestamp `protobuf:"bytes,21,opt,name=retention_expire_time,json=retentionExpireTime,proto3" json:"retention_expire_time,omitempty"` // User-provided metadata, in key/value pairs. Metadata map[string]string `protobuf:"bytes,22,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Whether an object is under event-based hold. @@ -4694,7 +4694,7 @@ type Object struct { // such a key. CustomerEncryption *CustomerEncryption `protobuf:"bytes,25,opt,name=customer_encryption,json=customerEncryption,proto3" json:"customer_encryption,omitempty"` // A user-specified timestamp set on an object. 
- CustomTime *timestamp.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"` + CustomTime *timestamppb.Timestamp `protobuf:"bytes,26,opt,name=custom_time,json=customTime,proto3" json:"custom_time,omitempty"` } func (x *Object) Reset() { @@ -4813,7 +4813,7 @@ func (x *Object) GetContentLanguage() string { return "" } -func (x *Object) GetDeleteTime() *timestamp.Timestamp { +func (x *Object) GetDeleteTime() *timestamppb.Timestamp { if x != nil { return x.DeleteTime } @@ -4827,7 +4827,7 @@ func (x *Object) GetContentType() string { return "" } -func (x *Object) GetCreateTime() *timestamp.Timestamp { +func (x *Object) GetCreateTime() *timestamppb.Timestamp { if x != nil { return x.CreateTime } @@ -4848,7 +4848,7 @@ func (x *Object) GetChecksums() *ObjectChecksums { return nil } -func (x *Object) GetUpdateTime() *timestamp.Timestamp { +func (x *Object) GetUpdateTime() *timestamppb.Timestamp { if x != nil { return x.UpdateTime } @@ -4862,7 +4862,7 @@ func (x *Object) GetKmsKey() string { return "" } -func (x *Object) GetUpdateStorageClassTime() *timestamp.Timestamp { +func (x *Object) GetUpdateStorageClassTime() *timestamppb.Timestamp { if x != nil { return x.UpdateStorageClassTime } @@ -4876,7 +4876,7 @@ func (x *Object) GetTemporaryHold() bool { return false } -func (x *Object) GetRetentionExpireTime() *timestamp.Timestamp { +func (x *Object) GetRetentionExpireTime() *timestamppb.Timestamp { if x != nil { return x.RetentionExpireTime } @@ -4911,7 +4911,7 @@ func (x *Object) GetCustomerEncryption() *CustomerEncryption { return nil } -func (x *Object) GetCustomTime() *timestamp.Timestamp { +func (x *Object) GetCustomTime() *timestamppb.Timestamp { if x != nil { return x.CustomTime } @@ -5845,7 +5845,7 @@ type Bucket_RetentionPolicy struct { // Server-determined value that indicates the time from which policy was // enforced and effective. - EffectiveTime *timestamp.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"` + EffectiveTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=effective_time,json=effectiveTime,proto3" json:"effective_time,omitempty"` // Once locked, an object retention policy cannot be modified. IsLocked bool `protobuf:"varint,2,opt,name=is_locked,json=isLocked,proto3" json:"is_locked,omitempty"` // The duration in seconds that objects need to be retained. Retention @@ -5887,7 +5887,7 @@ func (*Bucket_RetentionPolicy) Descriptor() ([]byte, []int) { return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40, 6} } -func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamp.Timestamp { +func (x *Bucket_RetentionPolicy) GetEffectiveTime() *timestamppb.Timestamp { if x != nil { return x.EffectiveTime } @@ -6089,7 +6089,7 @@ type Bucket_Autoclass struct { // disabled/unconfigured or set to false after being enabled. If Autoclass // is enabled when the bucket is created, the toggle_time is set to the // bucket creation time. 
- ToggleTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"` + ToggleTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=toggle_time,json=toggleTime,proto3" json:"toggle_time,omitempty"` } func (x *Bucket_Autoclass) Reset() { @@ -6131,7 +6131,7 @@ func (x *Bucket_Autoclass) GetEnabled() bool { return false } -func (x *Bucket_Autoclass) GetToggleTime() *timestamp.Timestamp { +func (x *Bucket_Autoclass) GetToggleTime() *timestamppb.Timestamp { if x != nil { return x.ToggleTime } @@ -6150,7 +6150,7 @@ type Bucket_IamConfig_UniformBucketLevelAccess struct { // The deadline time for changing // `iamConfig.uniformBucketLevelAccess.enabled` from `true` to `false`. // Mutable until the specified deadline is reached, but not afterward. - LockTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"` + LockTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=lock_time,json=lockTime,proto3" json:"lock_time,omitempty"` } func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() { @@ -6192,7 +6192,7 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetEnabled() bool { return false } -func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamp.Timestamp { +func (x *Bucket_IamConfig_UniformBucketLevelAccess) GetLockTime() *timestamppb.Timestamp { if x != nil { return x.LockTime } @@ -8184,13 +8184,13 @@ var file_google_storage_v2_storage_proto_goTypes = []interface{}{ (*Bucket_Lifecycle_Rule_Condition)(nil), // 72: google.storage.v2.Bucket.Lifecycle.Rule.Condition nil, // 73: google.storage.v2.Notification.CustomAttributesEntry nil, // 74: google.storage.v2.Object.MetadataEntry - (*field_mask.FieldMask)(nil), // 75: google.protobuf.FieldMask - (*timestamp.Timestamp)(nil), // 76: google.protobuf.Timestamp + (*fieldmaskpb.FieldMask)(nil), // 75: google.protobuf.FieldMask + (*timestamppb.Timestamp)(nil), // 76: google.protobuf.Timestamp (*date.Date)(nil), // 77: google.type.Date (*v1.GetIamPolicyRequest)(nil), // 78: google.iam.v1.GetIamPolicyRequest (*v1.SetIamPolicyRequest)(nil), // 79: google.iam.v1.SetIamPolicyRequest (*v1.TestIamPermissionsRequest)(nil), // 80: google.iam.v1.TestIamPermissionsRequest - (*empty.Empty)(nil), // 81: google.protobuf.Empty + (*emptypb.Empty)(nil), // 81: google.protobuf.Empty (*v1.Policy)(nil), // 82: google.iam.v1.Policy (*v1.TestIamPermissionsResponse)(nil), // 83: google.iam.v1.TestIamPermissionsResponse } @@ -9272,7 +9272,7 @@ const _ = grpc.SupportPackageIsVersion6 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type StorageClient interface { // Permanently deletes an empty bucket. - DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Returns metadata for the specified bucket. GetBucket(ctx context.Context, in *GetBucketRequest, opts ...grpc.CallOption) (*Bucket, error) // Creates a new bucket. @@ -9291,7 +9291,7 @@ type StorageClient interface { // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) // Permanently deletes a notification subscription. 
- DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // View a notification config. GetNotification(ctx context.Context, in *GetNotificationRequest, opts ...grpc.CallOption) (*Notification, error) // Creates a notification subscription for a given bucket. @@ -9306,7 +9306,7 @@ type StorageClient interface { ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error) // Deletes an object and its metadata. Deletions are permanent if versioning // is not enabled for the bucket, or if the `generation` parameter is used. - DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Cancels an in-progress resumable upload. CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error) // Retrieves an object's metadata. @@ -9397,7 +9397,7 @@ type StorageClient interface { // Creates a new HMAC key for the given service account. CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) // Deletes a given HMAC key. Key must be in an INACTIVE state. - DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*empty.Empty, error) + DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) // Gets an existing HMAC key metadata for the given id. GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) // Lists HMAC keys under a given project with the additional filters provided. @@ -9414,8 +9414,8 @@ func NewStorageClient(cc grpc.ClientConnInterface) StorageClient { return &storageClient{cc} } -func (c *storageClient) DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *storageClient) DeleteBucket(ctx context.Context, in *DeleteBucketRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteBucket", in, out, opts...) if err != nil { return nil, err @@ -9495,8 +9495,8 @@ func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketReques return out, nil } -func (c *storageClient) DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *storageClient) DeleteNotification(ctx context.Context, in *DeleteNotificationRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotification", in, out, opts...) 
if err != nil { return nil, err @@ -9540,8 +9540,8 @@ func (c *storageClient) ComposeObject(ctx context.Context, in *ComposeObjectRequ return out, nil } -func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *storageClient) DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteObject", in, out, opts...) if err != nil { return nil, err @@ -9696,8 +9696,8 @@ func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequ return out, nil } -func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*empty.Empty, error) { - out := new(empty.Empty) +func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...) if err != nil { return nil, err @@ -9735,7 +9735,7 @@ func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequ // StorageServer is the server API for Storage service. type StorageServer interface { // Permanently deletes an empty bucket. - DeleteBucket(context.Context, *DeleteBucketRequest) (*empty.Empty, error) + DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) // Returns metadata for the specified bucket. GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) // Creates a new bucket. @@ -9754,7 +9754,7 @@ type StorageServer interface { // Updates a bucket. Equivalent to JSON API's storage.buckets.patch method. UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) // Permanently deletes a notification subscription. - DeleteNotification(context.Context, *DeleteNotificationRequest) (*empty.Empty, error) + DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) // View a notification config. GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) // Creates a notification subscription for a given bucket. @@ -9769,7 +9769,7 @@ type StorageServer interface { ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) // Deletes an object and its metadata. Deletions are permanent if versioning // is not enabled for the bucket, or if the `generation` parameter is used. - DeleteObject(context.Context, *DeleteObjectRequest) (*empty.Empty, error) + DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) // Cancels an in-progress resumable upload. CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) // Retrieves an object's metadata. @@ -9860,7 +9860,7 @@ type StorageServer interface { // Creates a new HMAC key for the given service account. CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) // Deletes a given HMAC key. Key must be in an INACTIVE state. - DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*empty.Empty, error) + DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) // Gets an existing HMAC key metadata for the given id. GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) // Lists HMAC keys under a given project with the additional filters provided. 
@@ -9873,7 +9873,7 @@ type StorageServer interface { type UnimplementedStorageServer struct { } -func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*empty.Empty, error) { +func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteBucket not implemented") } func (*UnimplementedStorageServer) GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) { @@ -9900,7 +9900,7 @@ func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *v1.TestI func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) { return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented") } -func (*UnimplementedStorageServer) DeleteNotification(context.Context, *DeleteNotificationRequest) (*empty.Empty, error) { +func (*UnimplementedStorageServer) DeleteNotification(context.Context, *DeleteNotificationRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteNotification not implemented") } func (*UnimplementedStorageServer) GetNotification(context.Context, *GetNotificationRequest) (*Notification, error) { @@ -9915,7 +9915,7 @@ func (*UnimplementedStorageServer) ListNotifications(context.Context, *ListNotif func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) { return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented") } -func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*empty.Empty, error) { +func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented") } func (*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) { @@ -9951,7 +9951,7 @@ func (*UnimplementedStorageServer) GetServiceAccount(context.Context, *GetServic func (*UnimplementedStorageServer) CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateHmacKey not implemented") } -func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*empty.Empty, error) { +func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteHmacKey not implemented") } func (*UnimplementedStorageServer) GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) { diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go index 50f34dc83c..008568b405 100644 --- a/vendor/cloud.google.com/go/storage/internal/version.go +++ b/vendor/cloud.google.com/go/storage/internal/version.go @@ -15,4 +15,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "1.28.0" +const Version = "1.28.1" diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go index 855792f474..b5c10efc8a 100644 --- a/vendor/cloud.google.com/go/storage/storage.go +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -1412,12 +1412,13 @@ func newObjectFromProto(o *storagepb.Object) *ObjectAttrs { Generation: o.Generation, Metageneration: o.Metageneration, StorageClass: o.StorageClass, - CustomerKeySHA256: string(o.GetCustomerEncryption().GetKeySha256Bytes()), - KMSKeyName: o.GetKmsKey(), - Created: convertProtoTime(o.GetCreateTime()), - Deleted: convertProtoTime(o.GetDeleteTime()), - Updated: convertProtoTime(o.GetUpdateTime()), - CustomTime: convertProtoTime(o.GetCustomTime()), + // CustomerKeySHA256 needs to be presented as base64 encoded, but the response from gRPC is not. + CustomerKeySHA256: base64.StdEncoding.EncodeToString(o.GetCustomerEncryption().GetKeySha256Bytes()), + KMSKeyName: o.GetKmsKey(), + Created: convertProtoTime(o.GetCreateTime()), + Deleted: convertProtoTime(o.GetDeleteTime()), + Updated: convertProtoTime(o.GetUpdateTime()), + CustomTime: convertProtoTime(o.GetCustomTime()), } } diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md index 56b641cf7b..fcf2947ba5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md @@ -1,3 +1,571 @@ +# Release (2022-12-02) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.17.0](service/appsync/CHANGELOG.md#v1170-2022-12-02) + * **Feature**: Fixes the URI for the evaluatecode endpoint to include the /v1 prefix (ie. "/v1/dataplane-evaluatecode"). +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.20.1](service/ecs/CHANGELOG.md#v1201-2022-12-02) + * **Documentation**: Documentation updates for Amazon ECS +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.21.0](service/fms/CHANGELOG.md#v1210-2022-12-02) + * **Feature**: AWS Firewall Manager now supports Fortigate Cloud Native Firewall as a Service as a third-party policy type. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.28.0](service/mediaconvert/CHANGELOG.md#v1280-2022-12-02) + * **Feature**: The AWS Elemental MediaConvert SDK has added support for configurable ID3 eMSG box attributes and the ability to signal them with InbandEventStream tags in DASH and CMAF outputs. +* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.25.0](service/medialive/CHANGELOG.md#v1250-2022-12-02) + * **Feature**: Updates to Event Signaling and Management (ESAM) API and documentation. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.21.0](service/polly/CHANGELOG.md#v1210-2022-12-02) + * **Feature**: Add language code for Finnish (fi-FI) +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.18.0](service/proton/CHANGELOG.md#v1180-2022-12-02) + * **Feature**: CreateEnvironmentAccountConnection RoleArn input is now optional +* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.3.0](service/redshiftserverless/CHANGELOG.md#v130-2022-12-02) + * **Feature**: Add Table Level Restore operations for Amazon Redshift Serverless. Add multi-port support for Amazon Redshift Serverless endpoints. Add Tagging support to Snapshots and Recovery Points in Amazon Redshift Serverless. 
+* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.18.7](service/sns/CHANGELOG.md#v1187-2022-12-02) + * **Documentation**: This release adds the message payload-filtering feature to the SNS Subscribe, SetSubscriptionAttributes, and GetSubscriptionAttributes API actions + +# Release (2022-12-01) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/codecatalyst`: [v1.0.0](service/codecatalyst/CHANGELOG.md#v100-2022-12-01) + * **Release**: New AWS service client module + * **Feature**: This release adds operations that support customers using the AWS Toolkits and Amazon CodeCatalyst, a unified software development service that helps developers develop, deploy, and maintain applications in the cloud. For more information, see the documentation. +* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.20.0](service/comprehend/CHANGELOG.md#v1200-2022-12-01) + * **Feature**: Comprehend now supports semi-structured documents (such as PDF files or image files) as inputs for custom analysis using the synchronous APIs (ClassifyDocument and DetectEntities). +* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.16.0](service/gamelift/CHANGELOG.md#v1160-2022-12-01) + * **Feature**: GameLift introduces a new feature, GameLift Anywhere. GameLift Anywhere allows you to integrate your own compute resources with GameLift. You can also use GameLift Anywhere to iteratively test your game servers without uploading the build to GameLift for every iteration. +* `github.com/aws/aws-sdk-go-v2/service/pipes`: [v1.0.0](service/pipes/CHANGELOG.md#v100-2022-12-01) + * **Release**: New AWS service client module + * **Feature**: AWS introduces new Amazon EventBridge Pipes which allow you to connect sources (SQS, Kinesis, DDB, Kafka, MQ) to Targets (14+ EventBridge Targets) without any code, with filtering, batching, input transformation, and an optional Enrichment stage (Lambda, StepFunctions, ApiGateway, ApiDestinations) +* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.16.0](service/sfn/CHANGELOG.md#v1160-2022-12-01) + * **Feature**: This release adds support for the AWS Step Functions Map state in Distributed mode. The changes include a new MapRun resource and several new and modified APIs. + +# Release (2022-11-30) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.18.0](service/accessanalyzer/CHANGELOG.md#v1180-2022-11-30) + * **Feature**: This release adds support for S3 cross account access points. IAM Access Analyzer will now produce public or cross account findings when it detects bucket delegation to external account access points. +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.20.0](service/athena/CHANGELOG.md#v1200-2022-11-30) + * **Feature**: This release includes support for using Apache Spark in Amazon Athena. +* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.17.0](service/dataexchange/CHANGELOG.md#v1170-2022-11-30) + * **Feature**: This release enables data providers to license direct access to data in their Amazon S3 buckets or AWS Lake Formation data lakes through AWS Data Exchange. Subscribers get read-only access to the data and can use it in downstream AWS services, like Amazon Athena, without creating or managing copies. +* `github.com/aws/aws-sdk-go-v2/service/docdbelastic`: [v1.0.0](service/docdbelastic/CHANGELOG.md#v100-2022-11-30) + * **Release**: New AWS service client module + * **Feature**: Launched Amazon DocumentDB Elastic Clusters. 
You can now use the SDK to create, list, update and delete Amazon DocumentDB Elastic Cluster resources +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.37.0](service/glue/CHANGELOG.md#v1370-2022-11-30) + * **Feature**: This release adds support for AWS Glue Data Quality, which helps you evaluate and monitor the quality of your data and includes the API for creating, deleting, or updating data quality rulesets, runs and evaluations. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.28.0](service/s3control/CHANGELOG.md#v1280-2022-11-30) + * **Feature**: Amazon S3 now supports cross-account access points. S3 bucket owners can now allow trusted AWS accounts to create access points associated with their bucket. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.56.0](service/sagemaker/CHANGELOG.md#v1560-2022-11-30) + * **Feature**: Added Models as part of the Search API. Added Model shadow deployments in realtime inference, and shadow testing in managed inference. Added support for shared spaces, geospatial APIs, Model Cards, AutoMLJobStep in pipelines, Git repositories on user profiles and domains, Model sharing in Jumpstart. +* `github.com/aws/aws-sdk-go-v2/service/sagemakergeospatial`: [v1.0.0](service/sagemakergeospatial/CHANGELOG.md#v100-2022-11-30) + * **Release**: New AWS service client module + * **Feature**: This release provides Amazon SageMaker geospatial APIs to build, train, deploy and visualize geospatial models. + +# Release (2022-11-29.2) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.74.0](service/ec2/CHANGELOG.md#v1740-2022-11-292) + * **Feature**: This release adds support for AWS Verified Access and the Hpc6id Amazon EC2 compute optimized instance type, which features 3rd generation Intel Xeon Scalable processors. +* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.15.0](service/firehose/CHANGELOG.md#v1150-2022-11-292) + * **Feature**: Allow support for the Serverless offering for Amazon OpenSearch Service as a Kinesis Data Firehose delivery destination. +* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.19.0](service/kms/CHANGELOG.md#v1190-2022-11-292) + * **Feature**: AWS KMS introduces the External Key Store (XKS), a new feature for customers who want to protect their data with encryption keys stored in an external key management system under their control. +* `github.com/aws/aws-sdk-go-v2/service/omics`: [v1.0.0](service/omics/CHANGELOG.md#v100-2022-11-292) + * **Release**: New AWS service client module + * **Feature**: Amazon Omics is a new, purpose-built service that can be used by healthcare and life science organizations to store, query, and analyze omics data. The insights from that data can be used to accelerate scientific discoveries and improve healthcare. +* `github.com/aws/aws-sdk-go-v2/service/opensearchserverless`: [v1.0.0](service/opensearchserverless/CHANGELOG.md#v100-2022-11-292) + * **Release**: New AWS service client module + * **Feature**: Publish SDK for Amazon OpenSearch Serverless +* `github.com/aws/aws-sdk-go-v2/service/securitylake`: [v1.0.0](service/securitylake/CHANGELOG.md#v100-2022-11-292) + * **Release**: New AWS service client module + * **Feature**: Amazon Security Lake automatically centralizes security data from cloud, on-premises, and custom sources into a purpose-built data lake stored in your account. 
Security Lake makes it easier to analyze security data, so you can improve the protection of your workloads, applications, and data +* `github.com/aws/aws-sdk-go-v2/service/simspaceweaver`: [v1.0.0](service/simspaceweaver/CHANGELOG.md#v100-2022-11-292) + * **Release**: New AWS service client module + * **Feature**: AWS SimSpace Weaver is a new service that helps customers build spatial simulations at new levels of scale - resulting in virtual worlds with millions of dynamic entities. See the AWS SimSpace Weaver developer guide for more details on how to get started. https://docs.aws.amazon.com/simspaceweaver + +# Release (2022-11-29) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/arczonalshift`: [v1.0.0](service/arczonalshift/CHANGELOG.md#v100-2022-11-29) + * **Release**: New AWS service client module + * **Feature**: Amazon Route 53 Application Recovery Controller Zonal Shift is a new service that makes it easy to shift traffic away from an Availability Zone in a Region. See the developer guide for more information: https://docs.aws.amazon.com/r53recovery/latest/dg/what-is-route53-recovery.html +* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.18.0](service/computeoptimizer/CHANGELOG.md#v1180-2022-11-29) + * **Feature**: Adds support for a new recommendation preference that makes it possible for customers to optimize their EC2 recommendations by utilizing an external metrics ingestion service to provide metrics. +* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.28.0](service/configservice/CHANGELOG.md#v1280-2022-11-29) + * **Feature**: With this release, you can use AWS Config to evaluate your resources for compliance with Config rules before they are created or updated. Using Config rules in proactive mode enables you to test and build compliant resource templates or check resource configurations at the time they are provisioned. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.73.0](service/ec2/CHANGELOG.md#v1730-2022-11-29) + * **Feature**: Introduces ENA Express, which uses AWS SRD and dynamic routing to increase throughput and minimize latency, adds support for trust relationships between Reachability Analyzer and AWS Organizations to enable cross-account analysis, and adds support for Infrastructure Performance metric subscriptions. +* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.24.0](service/eks/CHANGELOG.md#v1240-2022-11-29) + * **Feature**: Adds support for additional EKS add-ons metadata and filtering fields +* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.26.0](service/fsx/CHANGELOG.md#v1260-2022-11-29) + * **Feature**: This release adds support for 4GB/s / 160K PIOPS FSx for ONTAP file systems and 10GB/s / 350K PIOPS FSx for OpenZFS file systems (Single_AZ_2). For FSx for ONTAP, this also adds support for DP volumes, snapshot policy, copy tags to backups, and Multi-AZ route table updates. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.36.0](service/glue/CHANGELOG.md#v1360-2022-11-29) + * **Feature**: This release allows the creation of Custom Visual Transforms (Dynamic Transforms) to be created via AWS Glue CLI/SDK. +* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.9.0](service/inspector2/CHANGELOG.md#v190-2022-11-29) + * **Feature**: This release adds support for Inspector to scan AWS Lambda. +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.26.0](service/lambda/CHANGELOG.md#v1260-2022-11-29) + * **Feature**: Adds support for Lambda SnapStart, which helps improve the startup performance of functions. 
Customers can now manage SnapStart based functions via CreateFunction and UpdateFunctionConfiguration APIs +* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.1.0](service/licensemanagerusersubscriptions/CHANGELOG.md#v110-2022-11-29) + * **Feature**: AWS now offers fully-compliant, Amazon-provided licenses for Microsoft Office Professional Plus 2021 Amazon Machine Images (AMIs) on Amazon EC2. These AMIs are now available on the Amazon EC2 console and on AWS Marketplace to launch instances on-demand without any long-term licensing commitments. +* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.24.0](service/macie2/CHANGELOG.md#v1240-2022-11-29) + * **Feature**: Added support for configuring Macie to continually sample objects from S3 buckets and inspect them for sensitive data. Results appear in statistics, findings, and other data that Macie provides. +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.28.0](service/quicksight/CHANGELOG.md#v1280-2022-11-29) + * **Feature**: This release adds new Describe APIs and updates Create and Update APIs to support the data model for Dashboards, Analyses, and Templates. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.27.0](service/s3control/CHANGELOG.md#v1270-2022-11-29) + * **Feature**: Added two new APIs to support Amazon S3 Multi-Region Access Point failover controls: GetMultiRegionAccessPointRoutes and SubmitMultiRegionAccessPointRoutes. The failover control APIs are supported in the following Regions: us-east-1, us-west-2, eu-west-1, ap-southeast-2, and ap-northeast-1. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.25.0](service/securityhub/CHANGELOG.md#v1250-2022-11-29) + * **Feature**: Adding StandardsManagedBy field to DescribeStandards API response + +# Release (2022-11-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.18.0](service/backup/CHANGELOG.md#v1180-2022-11-28) + * **Feature**: AWS Backup introduces support for legal hold and application stack backups. AWS Backup Audit Manager introduces support for cross-Region, cross-account reports. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.22.0](service/cloudwatch/CHANGELOG.md#v1220-2022-11-28) + * **Feature**: Adds cross-account support to the GetMetricData API. Adds cross-account support to the ListMetrics API through the usage of the IncludeLinkedAccounts flag and the new OwningAccounts field. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.17.0](service/cloudwatchlogs/CHANGELOG.md#v1170-2022-11-28) + * **Feature**: Updates to support CloudWatch Logs data protection and CloudWatch cross-account observability +* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.9.0](service/drs/CHANGELOG.md#v190-2022-11-28) + * **Feature**: Non breaking changes to existing APIs, and additional APIs added to support in-AWS failing back using AWS Elastic Disaster Recovery. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.20.0](service/ecs/CHANGELOG.md#v1200-2022-11-28) + * **Feature**: This release adds support for ECS Service Connect, a new capability that simplifies writing and operating resilient distributed applications. This release updates the TaskDefinition, Cluster, Service mutation APIs with Service connect constructs and also adds a new ListServicesByNamespace API. 
+* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.18.0](service/efs/CHANGELOG.md#v1180-2022-11-28) + * **Feature**: This release adds elastic as a new ThroughputMode value for EFS file systems and adds AFTER_1_DAY as a value for TransitionToIARules. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.32.0](service/iot/CHANGELOG.md#v1320-2022-11-28) + * **Feature**: Job scheduling enables the scheduled rollout of a Job with start and end times and a customizable end behavior when end time is reached. This is available for continuous and snapshot jobs. Added support for MQTT5 properties to AWS IoT TopicRule Republish Action. +* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.13.0](service/iotdataplane/CHANGELOG.md#v1130-2022-11-28) + * **Feature**: This release adds support for MQTT5 properties to AWS IoT HTTP Publish API. +* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.23.0](service/iotwireless/CHANGELOG.md#v1230-2022-11-28) + * **Feature**: This release includes a new feature for customers to calculate the position of their devices by adding three new APIs: UpdateResourcePosition, GetResourcePosition, and GetPositionEstimate. +* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.36.0](service/kendra/CHANGELOG.md#v1360-2022-11-28) + * **Feature**: Amazon Kendra now supports preview of table information from HTML tables in the search results. The most relevant cells with their corresponding rows, columns are displayed as a preview in the search result. The most relevant table cell or cells are also highlighted in table preview. +* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.16.0](service/mgn/CHANGELOG.md#v1160-2022-11-28) + * **Feature**: This release adds support for Application and Wave management. We also now support custom post-launch actions. +* `github.com/aws/aws-sdk-go-v2/service/oam`: [v1.0.0](service/oam/CHANGELOG.md#v100-2022-11-28) + * **Release**: New AWS service client module + * **Feature**: Amazon CloudWatch Observability Access Manager is a new service that allows configuration of the CloudWatch cross-account observability feature. +* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.17.0](service/organizations/CHANGELOG.md#v1170-2022-11-28) + * **Feature**: This release introduces delegated administrator for AWS Organizations, a new feature to help you delegate the management of your Organizations policies, enabling you to govern your AWS organization in a decentralized way. You can now allow member accounts to manage Organizations policies. +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.31.0](service/rds/CHANGELOG.md#v1310-2022-11-28) + * **Feature**: This release enables new Aurora and RDS feature called Blue/Green Deployments that makes updates to databases safer, simpler and faster. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.19.0](service/textract/CHANGELOG.md#v1190-2022-11-28) + * **Feature**: This release adds support for classifying and splitting lending documents by type, and extracting information by using the Analyze Lending APIs. This release also includes support for summarized information of the processed lending document package, in addition to per document results. +* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.22.0](service/transcribe/CHANGELOG.md#v1220-2022-11-28) + * **Feature**: This release adds support for 'inputType' for post-call and real-time (streaming) Call Analytics within Amazon Transcribe. 
+* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.8.0](service/transcribestreaming/CHANGELOG.md#v180-2022-11-28) + * **Feature**: This release adds support for real-time (streaming) and post-call Call Analytics within Amazon Transcribe. + +# Release (2022-11-23) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.10.0](service/grafana/CHANGELOG.md#v1100-2022-11-23) + * **Feature**: This release includes support for configuring a Grafana workspace to connect to a datasource within a VPC as well as new APIs for configuring Grafana settings. +* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.7.0](service/rbin/CHANGELOG.md#v170-2022-11-23) + * **Feature**: This release adds support for Rule Lock for Recycle Bin, which allows you to lock retention rules so that they can no longer be modified or deleted. + +# Release (2022-11-22) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.21.0](service/appflow/CHANGELOG.md#v1210-2022-11-22) + * **Feature**: Adding support for Amazon AppFlow to transfer the data to Amazon Redshift databases through Amazon Redshift Data API service. This feature will support the Redshift destination connector on both public and private accessible Amazon Redshift Clusters and Amazon Redshift Serverless. +* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.15.0](service/kinesisanalyticsv2/CHANGELOG.md#v1150-2022-11-22) + * **Feature**: Support for Apache Flink 1.15 in Kinesis Data Analytics. + +# Release (2022-11-21) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.25.0](service/route53/CHANGELOG.md#v1250-2022-11-21) + * **Feature**: Amazon Route 53 now supports the Asia Pacific (Hyderabad) Region (ap-south-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. + +# Release (2022-11-18.2) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.0.1](service/ssmsap/CHANGELOG.md#v101-2022-11-182) + * **Bug Fix**: Removes old model file for ssm sap and uses the new model file to regenerate client + +# Release (2022-11-18) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.20.0](service/appflow/CHANGELOG.md#v1200-2022-11-18) + * **Feature**: AppFlow provides a new API called UpdateConnectorRegistration to update a custom connector that customers have previously registered. With this API, customers no longer need to unregister and then register a connector to make an update. +* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.21.0](service/auditmanager/CHANGELOG.md#v1210-2022-11-18) + * **Feature**: This release introduces a new feature for Audit Manager: Evidence finder. You can now use evidence finder to quickly query your evidence, and add the matching evidence results to an assessment report. +* `github.com/aws/aws-sdk-go-v2/service/chimesdkvoice`: [v1.0.0](service/chimesdkvoice/CHANGELOG.md#v100-2022-11-18) + * **Release**: New AWS service client module + * **Feature**: Amazon Chime Voice Connector, Voice Connector Group and PSTN Audio Service APIs are now available in the Amazon Chime SDK Voice namespace. See https://docs.aws.amazon.com/chime-sdk/latest/dg/sdk-available-regions.html for more details. 
+* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.21.0](service/cloudfront/CHANGELOG.md#v1210-2022-11-18) + * **Feature**: CloudFront API support for staging distributions and associated traffic management policies. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.38.0](service/connect/CHANGELOG.md#v1380-2022-11-18) + * **Feature**: Added AllowedAccessControlTags and TagRestrictedResource for Tag Based Access Control on Amazon Connect Webpage +* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.17.6](service/dynamodb/CHANGELOG.md#v1176-2022-11-18) + * **Documentation**: Updated minor fixes for DynamoDB documentation. +* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.13.25](service/dynamodbstreams/CHANGELOG.md#v11325-2022-11-18) + * **Documentation**: Updated minor fixes for DynamoDB documentation. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.72.0](service/ec2/CHANGELOG.md#v1720-2022-11-18) + * **Feature**: This release adds support for copying an Amazon Machine Image's tags when copying an AMI. +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.35.0](service/glue/CHANGELOG.md#v1350-2022-11-18) + * **Feature**: AWSGlue Crawler - Adding support for Table and Column level Comments with database level datatypes for JDBC based crawler. +* `github.com/aws/aws-sdk-go-v2/service/iotroborunner`: [v1.0.0](service/iotroborunner/CHANGELOG.md#v100-2022-11-18) + * **Release**: New AWS service client module + * **Feature**: AWS IoT RoboRunner is a new service that makes it easy to build applications that help multi-vendor robots work together seamlessly. See the IoT RoboRunner developer guide for more details on getting started. https://docs.aws.amazon.com/iotroborunner/latest/dev/iotroborunner-welcome.html +* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.27.0](service/quicksight/CHANGELOG.md#v1270-2022-11-18) + * **Feature**: This release adds the following: 1) Asset management for centralized assets governance 2) QuickSight Q now supports public embedding 3) New Termination protection flag to mitigate accidental deletes 4) Athena data sources now accept a custom IAM role 5) QuickSight supports connectivity to Databricks +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.55.0](service/sagemaker/CHANGELOG.md#v1550-2022-11-18) + * **Feature**: Added DisableProfiler flag as a new field in ProfilerConfig +* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.15.0](service/servicecatalog/CHANGELOG.md#v1150-2022-11-18) + * **Feature**: This release 1. adds support for Principal Name Sharing with Service Catalog portfolio sharing. 2. Introduces repo sourced products which are created and managed with existing SC APIs. These products are synced to external repos and auto create new product versions based on changes in the repo. +* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.15.0](service/sfn/CHANGELOG.md#v1150-2022-11-18) + * **Feature**: This release adds support for using Step Functions service integrations to invoke any cross-account AWS resource, even if that service doesn't support resource-based policies or cross-account calls. See https://docs.aws.amazon.com/step-functions/latest/dg/concepts-access-cross-acct-resources.html +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.25.0](service/transfer/CHANGELOG.md#v1250-2022-11-18) + * **Feature**: Adds a NONE encryption algorithm type to AS2 connectors, providing support for skipping encryption of the AS2 message body when a HTTPS URL is also specified. 
+ +# Release (2022-11-17) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.12.0](service/amplify/CHANGELOG.md#v1120-2022-11-17) + * **Feature**: Adds a new value (WEB_COMPUTE) to the Platform enum that allows customers to create Amplify Apps with Server-Side Rendering support. +* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.19.0](service/appflow/CHANGELOG.md#v1190-2022-11-17) + * **Feature**: AppFlow simplifies the preparation and cataloging of SaaS data into the AWS Glue Data Catalog where your data can be discovered and accessed by AWS analytics and ML services. AppFlow now also supports data field partitioning and file size optimization to improve query performance and reduce cost. +* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.16.0](service/appsync/CHANGELOG.md#v1160-2022-11-17) + * **Feature**: This release introduces the APPSYNC_JS runtime, and adds support for JavaScript in AppSync functions and AppSync pipeline resolvers. +* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.22.0](service/databasemigrationservice/CHANGELOG.md#v1220-2022-11-17) + * **Feature**: Adds support for Internet Protocol Version 6 (IPv6) on DMS Replication Instances +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.71.0](service/ec2/CHANGELOG.md#v1710-2022-11-17) + * **Feature**: This release adds a new optional parameter "privateIpAddress" for the CreateNatGateway API. PrivateIPAddress will allow customers to select a custom Private IPv4 address instead of having it be auto-assigned. +* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.18.25](service/elasticloadbalancingv2/CHANGELOG.md#v11825-2022-11-17) + * **Documentation**: Provides new target group attributes to turn on/off cross zone load balancing and configure target group health for Network Load Balancers and Application Load Balancers. Provides improvements to health check configuration for Network Load Balancers. +* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.4.0](service/emrserverless/CHANGELOG.md#v140-2022-11-17) + * **Feature**: Adds support for AWS Graviton2 based applications. You can now select CPU architecture when creating new applications or updating existing ones. +* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.1.0](service/ivschat/CHANGELOG.md#v110-2022-11-17) + * **Feature**: Adds LoggingConfiguration APIs for IVS Chat - a feature that allows customers to store and record sent messages in a chat room to S3 buckets, CloudWatch logs, or Kinesis firehose. +* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.25.0](service/lambda/CHANGELOG.md#v1250-2022-11-17) + * **Feature**: Add Node 18 (nodejs18.x) support to AWS Lambda. +* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.22.0](service/personalize/CHANGELOG.md#v1220-2022-11-17) + * **Feature**: This release provides support for creation and use of metric attributions in AWS Personalize +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.20.0](service/polly/CHANGELOG.md#v1200-2022-11-17) + * **Feature**: Add two new neural voices - Ola (pl-PL) and Hala (ar-AE). +* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.8.0](service/rum/CHANGELOG.md#v180-2022-11-17) + * **Feature**: CloudWatch RUM now supports custom events. To use custom events, create an app monitor or update an app monitor with CustomEvent Status as ENABLED. 
+* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.26.0](service/s3control/CHANGELOG.md#v1260-2022-11-17) + * **Feature**: Added 34 new S3 Storage Lens metrics to support additional customer use cases. +* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.16.7](service/secretsmanager/CHANGELOG.md#v1167-2022-11-17) + * **Documentation**: Documentation updates for Secrets Manager. +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.24.0](service/securityhub/CHANGELOG.md#v1240-2022-11-17) + * **Feature**: Added SourceLayerArn and SourceLayerHash field for security findings. Updated AwsLambdaFunction Resource detail +* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.15.0](service/servicecatalogappregistry/CHANGELOG.md#v1150-2022-11-17) + * **Feature**: This release adds support for tagged resource associations, which allows you to associate a group of resources with a defined resource tag key and value to the application. +* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.17.4](service/sts/CHANGELOG.md#v1174-2022-11-17) + * **Documentation**: Documentation updates for AWS Security Token Service. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.18.0](service/textract/CHANGELOG.md#v1180-2022-11-17) + * **Feature**: This release adds support for specifying and extracting information from documents using the Signatures feature within Analyze Document API +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.27.0](service/workspaces/CHANGELOG.md#v1270-2022-11-17) + * **Feature**: The release introduces CreateStandbyWorkspaces, an API that allows you to create standby WorkSpaces associated with a primary WorkSpace in another Region. DescribeWorkspaces now includes related WorkSpaces properties. DescribeWorkspaceBundles and CreateWorkspaceBundle now return more bundle details. + +# Release (2022-11-16) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.19.1](service/batch/CHANGELOG.md#v1191-2022-11-16) + * **Documentation**: Documentation updates related to Batch on EKS +* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.2.0](service/billingconductor/CHANGELOG.md#v120-2022-11-16) + * **Feature**: This release adds a new feature BillingEntity pricing rule. +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.24.0](service/cloudformation/CHANGELOG.md#v1240-2022-11-16) + * **Feature**: Added UnsupportedTarget HandlerErrorCode for use with CFN Resource Hooks +* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.14.0](service/comprehendmedical/CHANGELOG.md#v1140-2022-11-16) + * **Feature**: This release supports new set of entities and traits. It also adds new category (BEHAVIORAL_ENVIRONMENTAL_SOCIAL). +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.37.0](service/connect/CHANGELOG.md#v1370-2022-11-16) + * **Feature**: This release adds a new MonitorContact API for initiating monitoring of ongoing Voice and Chat contacts. 
+* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.23.0](service/eks/CHANGELOG.md#v1230-2022-11-16) + * **Feature**: Adds support for customer-provided placement groups for Kubernetes control plane instances when creating local EKS clusters on Outposts +* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.24.0](service/elasticache/CHANGELOG.md#v1240-2022-11-16) + * **Feature**: for Redis now supports AWS Identity and Access Management authentication access to Redis clusters starting with redis-engine version 7.0 +* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.8.0](service/iottwinmaker/CHANGELOG.md#v180-2022-11-16) + * **Feature**: This release adds the following: 1) ExecuteQuery API allows users to query their AWS IoT TwinMaker Knowledge Graph 2) Pricing plan APIs allow users to configure and manage their pricing mode 3) Support for property groups and tabular property values in existing AWS IoT TwinMaker APIs. +* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.12.0](service/personalizeevents/CHANGELOG.md#v1120-2022-11-16) + * **Feature**: This release provides support for creation and use of metric attributions in AWS Personalize +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.17.0](service/proton/CHANGELOG.md#v1170-2022-11-16) + * **Feature**: Add support for sorting and filtering in ListServiceInstances +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.30.0](service/rds/CHANGELOG.md#v1300-2022-11-16) + * **Feature**: This release adds support for container databases (CDBs) to Amazon RDS Custom for Oracle. A CDB contains one PDB at creation. You can add more PDBs using Oracle SQL. You can also customize your database installation by setting the Oracle base, Oracle home, and the OS user name and group. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.33.0](service/ssm/CHANGELOG.md#v1330-2022-11-16) + * **Feature**: This release adds support for cross account access in CreateOpsItem, UpdateOpsItem and GetOpsItem. It introduces new APIs to setup resource policies for SSM resources: PutResourcePolicy, GetResourcePolicies and DeleteResourcePolicy. +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.19.0](service/ssmincidents/CHANGELOG.md#v1190-2022-11-16) + * **Feature**: Add support for PagerDuty integrations on ResponsePlan, IncidentRecord, and RelatedItem APIs +* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.24.0](service/transfer/CHANGELOG.md#v1240-2022-11-16) + * **Feature**: Allow additional operations to throw ThrottlingException +* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.15.0](service/xray/CHANGELOG.md#v1150-2022-11-16) + * **Feature**: This release adds new APIs - PutResourcePolicy, DeleteResourcePolicy, ListResourcePolicies for supporting resource based policies for AWS X-Ray. + +# Release (2022-11-15) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.36.0](service/connect/CHANGELOG.md#v1360-2022-11-15) + * **Feature**: This release updates the APIs: UpdateInstanceAttribute, DescribeInstanceAttribute, and ListInstanceAttributes. You can use it to programmatically enable/disable enhanced contact monitoring using attribute type ENHANCED_CONTACT_MONITORING on the specified Amazon Connect instance. +* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.20.0](service/greengrassv2/CHANGELOG.md#v1200-2022-11-15) + * **Feature**: Adds new parent target ARN paramater to CreateDeployment, GetDeployment, and ListDeployments APIs for the new subdeployments feature. 
+* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.24.0](service/route53/CHANGELOG.md#v1240-2022-11-15) + * **Feature**: Amazon Route 53 now supports the Europe (Spain) Region (eu-south-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. +* `github.com/aws/aws-sdk-go-v2/service/ssmsap`: [v1.0.0](service/ssmsap/CHANGELOG.md#v100-2022-11-15) + * **Release**: New AWS service client module + * **Feature**: AWS Systems Manager for SAP provides simplified operations and management of SAP applications such as SAP HANA. With this release, SAP customers and partners can automate and simplify their SAP system administration tasks such as backup/restore of SAP HANA. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.26.0](service/workspaces/CHANGELOG.md#v1260-2022-11-15) + * **Feature**: This release introduces ModifyCertificateBasedAuthProperties, a new API that allows control of certificate-based auth properties associated with a WorkSpaces directory. The DescribeWorkspaceDirectories API will now additionally return certificate-based auth properties in its responses. + +# Release (2022-11-14) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.20.0](service/customerprofiles/CHANGELOG.md#v1200-2022-11-14) + * **Feature**: This release enhances the SearchProfiles API by providing functionality to search for profiles using multiple keys and logical operators. +* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.18.0](service/lakeformation/CHANGELOG.md#v1180-2022-11-14) + * **Feature**: This release adds a new parameter "Parameters" in the DataLakeSettings. +* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.13.3](service/managedblockchain/CHANGELOG.md#v1133-2022-11-14) + * **Documentation**: Updating the API docs data type: NetworkEthereumAttributes, and the operations DeleteNode, and CreateNode to also include the supported Goerli network. +* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.16.0](service/proton/CHANGELOG.md#v1160-2022-11-14) + * **Feature**: Add support for CodeBuild Provisioning +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.29.0](service/rds/CHANGELOG.md#v1290-2022-11-14) + * **Feature**: This release adds support for restoring an RDS Multi-AZ DB cluster snapshot to a Single-AZ deployment or a Multi-AZ DB instance deployment. +* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.12.0](service/workdocs/CHANGELOG.md#v1120-2022-11-14) + * **Feature**: Added 2 new document related operations, DeleteDocumentVersion and RestoreDocumentVersions. +* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.14.0](service/xray/CHANGELOG.md#v1140-2022-11-14) + * **Feature**: This release enhances GetServiceGraph API to support new type of edge to represent links between SQS and Lambda in event-driven applications. + +# Release (2022-11-11) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/config`: [v1.18.0](config/CHANGELOG.md#v1180-2022-11-11) + * **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. 
This has been fixed via PR #1903 and tracked in issue #1846 + * **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider +* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.13.0](credentials/CHANGELOG.md#v1130-2022-11-11) + * **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846 + * **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.34.1](service/glue/CHANGELOG.md#v1341-2022-11-11) + * **Documentation**: Added links related to enabling job bookmarks. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.31.0](service/iot/CHANGELOG.md#v1310-2022-11-11) + * **Feature**: This release add new api listRelatedResourcesForAuditFinding and new member type IssuerCertificates for Iot device device defender Audit. +* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.16.0](service/licensemanager/CHANGELOG.md#v1160-2022-11-11) + * **Feature**: AWS License Manager now supports onboarded Management Accounts or Delegated Admins to view granted licenses aggregated from all accounts in the organization. +* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.14.0](service/marketplacecatalog/CHANGELOG.md#v1140-2022-11-11) + * **Feature**: Added three new APIs to support tagging and tag-based authorization: TagResource, UntagResource, and ListTagsForResource. Added optional parameters to the StartChangeSet API to support tagging a resource while making a request to create it. +* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.21.0](service/rekognition/CHANGELOG.md#v1210-2022-11-11) + * **Feature**: Adding support for ImageProperties feature to detect dominant colors and image brightness, sharpness, and contrast, inclusion and exclusion filters for labels and label categories, new fields to the API response, "aliases" and "categories" +* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.23.8](service/securityhub/CHANGELOG.md#v1238-2022-11-11) + * **Documentation**: Documentation updates for Security Hub +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.18.0](service/ssmincidents/CHANGELOG.md#v1180-2022-11-11) + * **Feature**: RelatedItems now have an ID field which can be used for referencing them else where. Introducing event references in TimelineEvent API and increasing maximum length of "eventData" to 12K characters. + +# Release (2022-11-10) + +## General Highlights +* **Dependency Update**: Updated to the latest SDK module versions + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.24.1](service/autoscaling/CHANGELOG.md#v1241-2022-11-10) + * **Documentation**: This release adds a new price capacity optimized allocation strategy for Spot Instances to help customers optimize provisioning of Spot Instances via EC2 Auto Scaling, EC2 Fleet, and Spot Fleet. It allocates Spot Instances based on both spare capacity availability and Spot Instance price. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.70.0](service/ec2/CHANGELOG.md#v1700-2022-11-10) + * **Feature**: This release adds a new price capacity optimized allocation strategy for Spot Instances to help customers optimize provisioning of Spot Instances via EC2 Auto Scaling, EC2 Fleet, and Spot Fleet. 
It allocates Spot Instances based on both spare capacity availability and Spot Instance price. +* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.19.0](service/ecs/CHANGELOG.md#v1190-2022-11-10) + * **Feature**: This release adds support for task scale-in protection with updateTaskProtection and getTaskProtection APIs. UpdateTaskProtection API can be used to protect a service managed task from being terminated by scale-in events and getTaskProtection API to get the scale-in protection status of a task. +* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.17.0](service/elasticsearchservice/CHANGELOG.md#v1170-2022-11-10) + * **Feature**: Amazon OpenSearch Service now offers managed VPC endpoints to connect to your Amazon OpenSearch Service VPC-enabled domain in a Virtual Private Cloud (VPC). This feature allows you to privately access OpenSearch Service domain without using public IPs or requiring traffic to traverse the Internet. +* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.0.1](service/resourceexplorer2/CHANGELOG.md#v101-2022-11-10) + * **Documentation**: Text only updates to some Resource Explorer descriptions. +* `github.com/aws/aws-sdk-go-v2/service/scheduler`: [v1.0.0](service/scheduler/CHANGELOG.md#v100-2022-11-10) + * **Release**: New AWS service client module + * **Feature**: AWS introduces the new Amazon EventBridge Scheduler. EventBridge Scheduler is a serverless scheduler that allows you to create, run, and manage tasks from one central, managed service. + +# Release (2022-11-09) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.35.0](service/connect/CHANGELOG.md#v1350-2022-11-09) + * **Feature**: This release adds new fields SignInUrl, UserArn, and UserId to GetFederationToken response payload. +* `github.com/aws/aws-sdk-go-v2/service/connectcases`: [v1.1.0](service/connectcases/CHANGELOG.md#v110-2022-11-09) + * **Feature**: This release adds the ability to disable templates through the UpdateTemplate API. Disabling templates prevents customers from creating cases using the template. For more information see https://docs.aws.amazon.com/cases/latest/APIReference/Welcome.html +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.69.0](service/ec2/CHANGELOG.md#v1690-2022-11-09) + * **Feature**: Amazon EC2 Trn1 instances, powered by AWS Trainium chips, are purpose built for high-performance deep learning training. u-24tb1.112xlarge and u-18tb1.112xlarge High Memory instances are purpose-built to run large in-memory databases. +* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.14.0](service/groundstation/CHANGELOG.md#v1140-2022-11-09) + * **Feature**: This release adds the preview of customer-provided ephemeris support for AWS Ground Station, allowing space vehicle owners to provide their own position and trajectory information for a satellite. +* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.19.0](service/mediapackagevod/CHANGELOG.md#v1190-2022-11-09) + * **Feature**: This release adds "IncludeIframeOnlyStream" for Dash endpoints. +* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.7.0](service/transcribestreaming/CHANGELOG.md#v170-2022-11-09) + * **Feature**: This will release hi-IN and th-TH + +# Release (2022-11-08) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.16.0](service/acm/CHANGELOG.md#v1160-2022-11-08) + * **Feature**: Support added for requesting elliptic curve certificate key algorithm types P-256 (EC_prime256v1) and P-384 (EC_secp384r1). 
+* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.1.0](service/billingconductor/CHANGELOG.md#v110-2022-11-08) + * **Feature**: This release adds the Recurring Custom Line Item feature along with a new API ListCustomLineItemVersions. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.68.0](service/ec2/CHANGELOG.md#v1680-2022-11-08) + * **Feature**: This release enables sharing of EC2 Placement Groups across accounts and within AWS Organizations using Resource Access Manager +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.20.0](service/fms/CHANGELOG.md#v1200-2022-11-08) + * **Feature**: AWS Firewall Manager now supports importing existing AWS Network Firewall firewalls into Firewall Manager policies. +* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.24.0](service/lightsail/CHANGELOG.md#v1240-2022-11-08) + * **Feature**: This release adds support for Amazon Lightsail to automate the delegation of domains registered through Amazon Route 53 to Lightsail DNS management and to automate record creation for DNS validation of Lightsail SSL/TLS certificates. +* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.11.0](service/opensearch/CHANGELOG.md#v1110-2022-11-08) + * **Feature**: Amazon OpenSearch Service now offers managed VPC endpoints to connect to your Amazon OpenSearch Service VPC-enabled domain in a Virtual Private Cloud (VPC). This feature allows you to privately access OpenSearch Service domain without using public IPs or requiring traffic to traverse the Internet. +* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.19.0](service/polly/CHANGELOG.md#v1190-2022-11-08) + * **Feature**: Amazon Polly adds new voices: Elin (sv-SE), Ida (nb-NO), Laura (nl-NL) and Suvi (fi-FI). They are available as neural voices only. +* `github.com/aws/aws-sdk-go-v2/service/resourceexplorer2`: [v1.0.0](service/resourceexplorer2/CHANGELOG.md#v100-2022-11-08) + * **Release**: New AWS service client module + * **Feature**: This is the initial SDK release for AWS Resource Explorer. AWS Resource Explorer lets your users search for and discover your AWS resources across the AWS Regions in your account. +* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.23.0](service/route53/CHANGELOG.md#v1230-2022-11-08) + * **Feature**: Amazon Route 53 now supports the Europe (Zurich) Region (eu-central-2) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region. + +# Release (2022-11-07) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.19.0](service/athena/CHANGELOG.md#v1190-2022-11-07) + * **Feature**: Adds support for using Query Result Reuse +* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.24.0](service/autoscaling/CHANGELOG.md#v1240-2022-11-07) + * **Feature**: This release adds support for two new attributes for attribute-based instance type selection - NetworkBandwidthGbps and AllowedInstanceTypes. +* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.20.0](service/cloudtrail/CHANGELOG.md#v1200-2022-11-07) + * **Feature**: This release includes support for configuring a delegated administrator to manage an AWS Organizations organization CloudTrail trails and event data stores, and AWS Key Management Service encryption of CloudTrail Lake event data stores. +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.67.0](service/ec2/CHANGELOG.md#v1670-2022-11-07) + * **Feature**: This release adds support for two new attributes for attribute-based instance type selection - NetworkBandwidthGbps and AllowedInstanceTypes. 
+* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.23.0](service/elasticache/CHANGELOG.md#v1230-2022-11-07) + * **Feature**: Added support for IPv6 and dual stack for Memcached and Redis clusters. Customers can now launch new Redis and Memcached clusters with IPv6 and dual stack networking support. +* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.26.0](service/lexmodelsv2/CHANGELOG.md#v1260-2022-11-07) + * **Feature**: Amazon Lex now supports new APIs for viewing and editing Custom Vocabulary in bots. +* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.27.0](service/mediaconvert/CHANGELOG.md#v1270-2022-11-07) + * **Feature**: The AWS Elemental MediaConvert SDK has added support for setting the SDR reference white point for HDR conversions and conversion of HDR10 to DolbyVision without mastering metadata. +* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.32.0](service/ssm/CHANGELOG.md#v1320-2022-11-07) + * **Feature**: This release includes support for applying a CloudWatch alarm to multi account multi region Systems Manager Automation +* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.23.1](service/wafv2/CHANGELOG.md#v1231-2022-11-07) + * **Documentation**: The geo match statement now adds labels for country and region. You can match requests at the region level by combining a geo match statement with label match statements. +* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.17.0](service/wellarchitected/CHANGELOG.md#v1170-2022-11-07) + * **Feature**: This release adds support for integrations with AWS Trusted Advisor and AWS Service Catalog AppRegistry to improve workload discovery and speed up your workload reviews. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.25.0](service/workspaces/CHANGELOG.md#v1250-2022-11-07) + * **Feature**: This release adds protocols attribute to workspaces properties data type. This enables customers to migrate workspaces from PC over IP (PCoIP) to WorkSpaces Streaming Protocol (WSP) using create and modify workspaces public APIs. + +# Release (2022-11-04) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.16.1](service/cloudwatchlogs/CHANGELOG.md#v1161-2022-11-04) + * **Documentation**: Doc-only update for bug fixes and support of export to buckets encrypted with SSE-KMS +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.66.0](service/ec2/CHANGELOG.md#v1660-2022-11-04) + * **Feature**: This release adds API support for the recipient of an AMI account share to remove shared AMI launch permissions. +* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.15.0](service/emrcontainers/CHANGELOG.md#v1150-2022-11-04) + * **Feature**: Adding support for Job templates. Job templates allow you to create and store templates to configure Spark applications parameters. This helps you ensure consistent settings across applications by reusing and enforcing configuration overrides in data pipelines. +* `github.com/aws/aws-sdk-go-v2/service/internal/eventstreamtesting`: [v1.0.37](service/internal/eventstreamtesting/CHANGELOG.md#v1037-2022-11-04) + * **Dependency Update**: update golang.org/x/net dependency to 0.1.0 + +# Release (2022-11-03) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.10.0](service/memorydb/CHANGELOG.md#v1100-2022-11-03) + * **Feature**: Adding support for r6gd instances for MemoryDB Redis with data tiering. 
In a cluster with data tiering enabled, when available memory capacity is exhausted, the least recently used data is automatically tiered to solid state drives for cost-effective capacity scaling with minimal performance impact. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.54.0](service/sagemaker/CHANGELOG.md#v1540-2022-11-03) + * **Feature**: Amazon SageMaker now supports running training jobs on ml.trn1 instance types. + +# Release (2022-11-02) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.26.0](service/iotsitewise/CHANGELOG.md#v1260-2022-11-02) + * **Feature**: This release adds the ListAssetModelProperties and ListAssetProperties APIs. You can list all properties that belong to a single asset model or asset using these two new APIs. +* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.25.0](service/s3control/CHANGELOG.md#v1250-2022-11-02) + * **Feature**: S3 on Outposts launches support for Lifecycle configuration for Outposts buckets. With S3 Lifecycle configuration, you can mange objects so they are stored cost effectively. You can manage objects using size-based rules and specify how many noncurrent versions bucket will retain. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.53.0](service/sagemaker/CHANGELOG.md#v1530-2022-11-02) + * **Feature**: This release updates Framework model regex for ModelPackage to support new Framework version xgboost, sklearn. +* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.17.0](service/ssmincidents/CHANGELOG.md#v1170-2022-11-02) + * **Feature**: Adds support for tagging replication-set on creation. + +# Release (2022-11-01) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.28.0](service/rds/CHANGELOG.md#v1280-2022-11-01) + * **Feature**: Relational Database Service - This release adds support for configuring Storage Throughput on RDS database instances. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.17.0](service/textract/CHANGELOG.md#v1170-2022-11-01) + * **Feature**: Add ocr results in AnalyzeIDResponse as blocks + +# Release (2022-10-31) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.15.0](service/apprunner/CHANGELOG.md#v1150-2022-10-31) + * **Feature**: This release adds support for private App Runner services. Services may now be configured to be made private and only accessible from a VPC. The changes include a new VpcIngressConnection resource and several new and modified APIs. +* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.16.0](service/cloudwatchlogs/CHANGELOG.md#v1160-2022-10-31) + * **Feature**: SDK release to support tagging for destinations and log groups with TagResource. Also supports tag on create with PutDestination. +* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.34.0](service/connect/CHANGELOG.md#v1340-2022-10-31) + * **Feature**: Amazon connect now support a new API DismissUserContact to dismiss or remove terminated contacts in Agent CCP +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.65.0](service/ec2/CHANGELOG.md#v1650-2022-10-31) + * **Feature**: Elastic IP transfer is a new Amazon VPC feature that allows you to transfer your Elastic IP addresses from one AWS Account to another. +* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.30.0](service/iot/CHANGELOG.md#v1300-2022-10-31) + * **Feature**: This release adds the Amazon Location action to IoT Rules Engine. 
+* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.15.0](service/sesv2/CHANGELOG.md#v1150-2022-10-31) + * **Feature**: This release includes support for interacting with the Virtual Deliverability Manager, allowing you to opt in/out of the feature and to retrieve recommendations and metric data. +* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.16.0](service/textract/CHANGELOG.md#v1160-2022-10-31) + * **Feature**: This release introduces additional support for 30+ normalized fields such as vendor address and currency. It also includes OCR output in the response and accuracy improvements for the already supported fields in previous version + +# Release (2022-10-28) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.14.0](service/apprunner/CHANGELOG.md#v1140-2022-10-28) + * **Feature**: AWS App Runner adds .NET 6, Go 1, PHP 8.1 and Ruby 3.1 runtimes. +* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.18.0](service/appstream/CHANGELOG.md#v1180-2022-10-28) + * **Feature**: This release includes CertificateBasedAuthProperties in CreateDirectoryConfig and UpdateDirectoryConfig. +* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.16.20](service/cloud9/CHANGELOG.md#v11620-2022-10-28) + * **Documentation**: Update to the documentation section of the Cloud9 API Reference guide. +* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.23.0](service/cloudformation/CHANGELOG.md#v1230-2022-10-28) + * **Feature**: This release adds more fields to improves visibility of AWS CloudFormation StackSets information in following APIs: ListStackInstances, DescribeStackInstance, ListStackSetOperationResults, ListStackSetOperations, DescribeStackSetOperation. +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.19.0](service/mediatailor/CHANGELOG.md#v1190-2022-10-28) + * **Feature**: This release introduces support for SCTE-35 segmentation descriptor messages which can be sent within time signal messages. + +# Release (2022-10-27) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.64.0](service/ec2/CHANGELOG.md#v1640-2022-10-27) + * **Feature**: Feature supports the replacement of instance root volume using an updated AMI without requiring customers to stop their instance. +* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.19.0](service/fms/CHANGELOG.md#v1190-2022-10-27) + * **Feature**: Add support NetworkFirewall Managed Rule Group Override flag in GetViolationDetails API +* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.34.0](service/glue/CHANGELOG.md#v1340-2022-10-27) + * **Feature**: Added support for custom datatypes when using custom csv classifier. +* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.26.13](service/redshift/CHANGELOG.md#v12613-2022-10-27) + * **Documentation**: This release clarifies use for the ElasticIp parameter of the CreateCluster and RestoreFromClusterSnapshot APIs. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.52.0](service/sagemaker/CHANGELOG.md#v1520-2022-10-27) + * **Feature**: This change allows customers to provide a custom entrypoint script for the docker container to be run while executing training jobs, and provide custom arguments to the entrypoint script. 
+* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.23.0](service/wafv2/CHANGELOG.md#v1230-2022-10-27) + * **Feature**: This release adds the following: Challenge rule action, to silently verify client browsers; rule group rule action override to any valid rule action, not just Count; token sharing between protected applications for challenge/CAPTCHA token; targeted rules option for Bot Control managed rule group. + +# Release (2022-10-26) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.18.23](service/iam/CHANGELOG.md#v11823-2022-10-26) + * **Documentation**: Doc only update that corrects instances of CLI not using an entity. +* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.18.0](service/kafka/CHANGELOG.md#v1180-2022-10-26) + * **Feature**: This release adds support for Tiered Storage. UpdateStorage allows you to control the Storage Mode for supported storage tiers. +* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.18.0](service/neptune/CHANGELOG.md#v1180-2022-10-26) + * **Feature**: Added a new cluster-level attribute to set the capacity range for Neptune Serverless instances. +* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.51.0](service/sagemaker/CHANGELOG.md#v1510-2022-10-26) + * **Feature**: Amazon SageMaker Automatic Model Tuning now supports specifying Grid Search strategy for tuning jobs, which evaluates all hyperparameter combinations exhaustively based on the categorical hyperparameters provided. + +# Release (2022-10-25) + +## Module Highlights +* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.17.0](service/accessanalyzer/CHANGELOG.md#v1170-2022-10-25) + * **Feature**: This release adds support for six new resource types in IAM Access Analyzer to help you easily identify public and cross-account access to your AWS resources. Updated service API, documentation, and paginators. +* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.19.3](service/location/CHANGELOG.md#v1193-2022-10-25) + * **Documentation**: Added new map styles with satellite imagery for map resources using HERE as a data provider. +* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.18.0](service/mediatailor/CHANGELOG.md#v1180-2022-10-25) + * **Feature**: This release is a documentation update +* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.27.0](service/rds/CHANGELOG.md#v1270-2022-10-25) + * **Feature**: Relational Database Service - This release adds support for exporting DB cluster data to Amazon S3. +* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.24.0](service/workspaces/CHANGELOG.md#v1240-2022-10-25) + * **Feature**: This release adds new enums for supporting Workspaces Core features, including creating Manual running mode workspaces, importing regular Workspaces Core images and importing g4dn Workspaces Core images. 
+ # Release (2022-10-24) ## General Highlights diff --git a/vendor/github.com/aws/aws-sdk-go-v2/Makefile b/vendor/github.com/aws/aws-sdk-go-v2/Makefile index 4b761e771a..4bc9dfaf01 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/Makefile +++ b/vendor/github.com/aws/aws-sdk-go-v2/Makefile @@ -120,6 +120,7 @@ gen-config-asserts: gen-internal-codegen: @echo "Generating internal/codegen" cd internal/codegen \ + && go mod tidy \ && go generate gen-repo-mod-replace: diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go index 41d23512a4..6d936cd505 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go @@ -3,4 +3,4 @@ package aws // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.1" +const goModuleVersion = "1.17.2" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md index d81093cad4..c95d493ea8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.4.10 (2022-12-02) + +* No change notes available for this release. + # v1.4.9 (2022-10-24) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go index 35adfcc20c..0ca5492a3e 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/go_module_metadata.go @@ -3,4 +3,4 @@ package eventstream // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.4.9" +const goModuleVersion = "1.4.10" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md index 0386bcf7f4..e02d957c4a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.18.4 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.18.3 (2022-11-22) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go index 1a1aaed58a..44b6e16dcd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go @@ -3,4 +3,4 @@ package config // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.18.3" +const goModuleVersion = "1.18.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md index 953ce67f3c..613d814926 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.13.4 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.13.3 (2022-11-22) * **Dependency Update**: Updated to the latest SDK module versions diff --git 
a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go index 0bcacb3963..9866ca36f8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go @@ -3,4 +3,4 @@ package credentials // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.3" +const goModuleVersion = "1.13.4" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md index 0dfb44be1a..f0ab4cd76d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.12.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.12.19 (2022-10-24) * **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go index 9fc713a7cb..4da2bd2c18 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go @@ -3,4 +3,4 @@ package imds // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.12.19" +const goModuleVersion = "1.12.20" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md index 1602d22925..9f446c501c 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.11.43 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.11.42 (2022-11-22) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go index e481cd689d..475e01773b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/s3/manager/go_module_metadata.go @@ -3,4 +3,4 @@ package manager // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.42" +const goModuleVersion = "1.11.43" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md index ab6184058b..41d589b381 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.1.26 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.1.25 (2022-10-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go index b9d5ca7fae..58b3ba7ad8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go +++ 
b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go @@ -3,4 +3,4 @@ package configsources // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.1.25" +const goModuleVersion = "1.1.26" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md index 90e3d662d0..678f6634f2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md @@ -1,3 +1,7 @@ +# v2.4.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v2.4.19 (2022-10-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go index d839c6d9b6..ec010e0aae 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go @@ -3,4 +3,4 @@ package endpoints // goModuleVersion is the tagged release for this module -const goModuleVersion = "2.4.19" +const goModuleVersion = "2.4.20" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md index 2cac3297b3..fc5b9781b5 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.3.27 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.3.26 (2022-10-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go index 6d796b3100..e4c947fecc 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go @@ -3,4 +3,4 @@ package ini // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.3.26" +const goModuleVersion = "1.3.27" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md index cc8edf2eb5..bc55796348 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.0.17 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.0.16 (2022-10-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go index 2b1401a3fd..be1f79e20f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/v4a/go_module_metadata.go @@ -3,4 +3,4 @@ package v4a // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.0.16" +const goModuleVersion = "1.0.17" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml index d869782145..b6d07cdd6d 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml +++ b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml @@ -1,10 +1,10 @@ [dependencies] "github.com/aws/aws-sdk-go" = "v1.44.28" - "github.com/aws/smithy-go" = "v1.13.4" + "github.com/aws/smithy-go" = "v1.13.5" "github.com/google/go-cmp" = "v0.5.8" "github.com/jmespath/go-jmespath" = "v0.4.0" - "golang.org/x/net" = "v0.0.0-20220127200216-cd36cc0744dd" + "golang.org/x/net" = "v0.1.0" [modules] diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md index a92035e29b..b3998b28b2 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.9.11 (2022-12-02) + +* No change notes available for this release. + # v1.9.10 (2022-10-24) * No change notes available for this release. diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go index 036a0c08e5..f49fa9218d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go @@ -3,4 +3,4 @@ package acceptencoding // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.9.10" +const goModuleVersion = "1.9.11" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md index 531f193805..27d70fe1fd 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.1.21 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.1.20 (2022-10-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go index 0cf97a5652..c923037772 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/checksum/go_module_metadata.go @@ -3,4 +3,4 @@ package checksum // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.1.20" +const goModuleVersion = "1.1.21" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md index 89832ca1d0..a2dfc457c1 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.9.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.9.19 (2022-10-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go index c10027df60..3b99e9c4f6 100644 --- 
a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go @@ -3,4 +3,4 @@ package presignedurl // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.9.19" +const goModuleVersion = "1.9.20" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md index 782b3a3adc..5a91105868 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.13.20 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.13.19 (2022-10-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go index f0495ea2eb..b6e0f39a15 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/s3shared/go_module_metadata.go @@ -3,4 +3,4 @@ package s3shared // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.19" +const goModuleVersion = "1.13.20" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md index 3fd73499d4..7ba549c467 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.29.5 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.29.4 (2022-11-22) * No change notes available for this release. 
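As a reference point for the module bumps above, here is a minimal sketch (not part of this patch) of how the updated `config`, `feature/s3/manager` and `service/s3` modules are typically wired together by consuming code; the bucket and key names are placeholders.

```go
package main

import (
	"context"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
	"github.com/aws/aws-sdk-go-v2/service/s3"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("cannot load AWS config: %s", err)
	}
	client := s3.NewFromConfig(cfg)

	// The upload manager handles multipart uploads transparently for large bodies.
	uploader := manager.NewUploader(client)
	_, err = uploader.Upload(ctx, &s3.PutObjectInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String("backups/example.txt"),
		Body:   strings.NewReader("hello"),
	})
	if err != nil {
		log.Fatalf("cannot upload object: %s", err)
	}
	log.Printf("upload complete")
}
```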
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go index d2204ecd02..0dbd3f1be8 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/s3/go_module_metadata.go @@ -3,4 +3,4 @@ package s3 // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.29.4" +const goModuleVersion = "1.29.5" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md index 2f8860d2f2..49b4e31d6b 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.11.26 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.11.25 (2022-10-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go index e2de3ea315..cbfe45ee1a 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go @@ -3,4 +3,4 @@ package sso // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.11.25" +const goModuleVersion = "1.11.26" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md index 4245e8d9fd..b3b019177d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.13.9 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.13.8 (2022-10-24) * **Dependency Update**: Updated to the latest SDK module versions diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go index 9c79d16f41..a5a50c97fa 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go @@ -3,4 +3,4 @@ package ssooidc // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.8" +const goModuleVersion = "1.13.9" diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md index 6255c0bc5d..106016915f 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md @@ -1,3 +1,7 @@ +# v1.17.6 (2022-12-02) + +* **Dependency Update**: Updated to the latest SDK module versions + # v1.17.5 (2022-11-22) * No change notes available for this release. 
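Similarly, a short sketch (not part of this patch) of a credentials sanity check against the bumped `credentials` and `service/sts` modules using `GetCallerIdentity`; no non-standard identifiers are assumed.

```go
package main

import (
	"context"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/config"
	"github.com/aws/aws-sdk-go-v2/service/sts"
)

func main() {
	ctx := context.Background()

	cfg, err := config.LoadDefaultConfig(ctx)
	if err != nil {
		log.Fatalf("cannot load AWS config: %s", err)
	}
	client := sts.NewFromConfig(cfg)

	// Resolve the identity behind the currently configured credential chain.
	out, err := client.GetCallerIdentity(ctx, &sts.GetCallerIdentityInput{})
	if err != nil {
		log.Fatalf("cannot resolve caller identity: %s", err)
	}
	log.Printf("authenticated as %s (account %s)", aws.ToString(out.Arn), aws.ToString(out.Account))
}
```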
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go index 9e6b85cc41..ae6f9e766d 100644 --- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go +++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go @@ -3,4 +3,4 @@ package sts // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.17.5" +const goModuleVersion = "1.17.6" diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 0e53126722..76cba0656e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -1456,6 +1456,26 @@ var awsPartition = partition{ }, }, }, + "api.ecr-public": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "api.ecr-public.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "api.ecr-public.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, "api.elastic-inference": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -4940,6 +4960,17 @@ var awsPartition = partition{ }, }, }, + "codecatalyst": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "codecatalyst.global.api.aws", + }, + }, + }, "codecommit": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -12088,22 +12119,6 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, - endpointKey{ - Region: "dataplane-ap-south-1", - }: endpoint{ - Hostname: "greengrass-ats.iot.ap-south-1.amazonaws.com", - CredentialScope: credentialScope{ - Region: "ap-south-1", - }, - }, - endpointKey{ - Region: "dataplane-us-east-2", - }: endpoint{ - Hostname: "greengrass-ats.iot.us-east-2.amazonaws.com", - CredentialScope: credentialScope{ - Region: "us-east-2", - }, - }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18164,6 +18179,79 @@ var awsPartition = partition{ }, }, }, + "pipes": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-east-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-northeast-3", + }: endpoint{}, + endpointKey{ + Region: "ap-south-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, + endpointKey{ + Region: "ca-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-central-1", + }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-1", + }: endpoint{}, + endpointKey{ + Region: "eu-west-2", + }: endpoint{}, + endpointKey{ + Region: "eu-west-3", + }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, + endpointKey{ + Region: "me-south-1", + }: endpoint{}, + endpointKey{ + Region: "sa-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-east-1", + }: endpoint{}, + endpointKey{ + Region: 
"us-east-2", + }: endpoint{}, + endpointKey{ + Region: "us-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "polly": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -21690,6 +21778,13 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "sagemaker-geospatial": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-west-2", + }: endpoint{}, + }, + }, "savingsplans": service{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, @@ -28147,14 +28242,6 @@ var awscnPartition = partition{ endpointKey{ Region: "cn-north-1", }: endpoint{}, - endpointKey{ - Region: "dataplane-cn-north-1", - }: endpoint{ - Hostname: "greengrass.ats.iot.cn-north-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-north-1", - }, - }, }, }, "guardduty": service{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 41fe3cf0e3..898da8d0bf 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.44.149" +const SDKVersion = "1.44.152" diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md index 41bbcfac3a..1e23bf95b3 100644 --- a/vendor/github.com/aws/smithy-go/CHANGELOG.md +++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md @@ -1,3 +1,7 @@ +# Release (2022-12-02) + +* No change notes available for this release. + # Release (2022-10-24) ## Module Highlights diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md index 789b378896..a4bb43fbe9 100644 --- a/vendor/github.com/aws/smithy-go/README.md +++ b/vendor/github.com/aws/smithy-go/README.md @@ -2,7 +2,7 @@ [![Go Build Status](https://github.com/aws/smithy-go/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/go.yml)[![Codegen Build Status](https://github.com/aws/smithy-go/actions/workflows/codegen.yml/badge.svg?branch=main)](https://github.com/aws/smithy-go/actions/workflows/codegen.yml) -Smithy code generators for Go. +[Smithy](https://smithy.io/) code generators for Go. **WARNING: All interfaces are subject to change.** diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go index d6e1e41e16..f9200093e8 100644 --- a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go +++ b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go @@ -2,7 +2,7 @@ Package xml holds the XMl encoder utility. This utility is written in accordance to our design to delegate to shape serializer function in which a xml.Value will be passed around. 
-Resources followed: https://awslabs.github.io/smithy/1.0/spec/core/xml-traits.html# +Resources followed: https://smithy.io/2.0/spec/protocol-traits.html#xml-bindings Member Element diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go index 4ed5881885..8eaac41e7a 100644 --- a/vendor/github.com/aws/smithy-go/go_module_metadata.go +++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go @@ -3,4 +3,4 @@ package smithy // goModuleVersion is the tagged release for this module -const goModuleVersion = "1.13.4" +const goModuleVersion = "1.13.5" diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 792b4a60b3..8bf0e5b781 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -3,8 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) [![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ ## Compatibility @@ -45,19 +47,20 @@ I recommend using the latest release of Go. Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 0000000000..94b9c44398 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. 
+ +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index 15c835d541..a9e0d45c9d 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -16,19 +16,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -50,10 +42,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. 
- copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index be8db5bf79..3e8b132579 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -1,215 +1,209 @@ +//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. - MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. 
- SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) + blockLoop() - CMPQ SI, BX - JLE blockLoop + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX + ADDQ n, h - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX + ADDQ $24, end + CMPQ p, end + JG try4 - CMPQ SI, BX - JG fourByte +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h -wordLoop: - // Calculate k1. - MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 + CMPQ p, end + JLE loop8 - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX +try4: + ADDQ $4, end + CMPQ p, end + JG try1 - CMPQ SI, BX - JLE wordLoop + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h - MOVQ AX, ret+24(FP) + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. 
- MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 0000000000..7e3145a221 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. +#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. 
+ ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go similarity index 73% rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go rename to vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index ad14b807f4..9216e0a40c 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -1,3 +1,5 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 // +build !appengine // +build gc // +build !purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 4a5a821603..26df13bba4 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego package xxhash @@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index fc9bea7a31..e86f1b5fd8 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine // This file contains the safe implementations 
of otherwise unsafe-using code. diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 376e0ca2e4..1c1638fd88 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -1,3 +1,4 @@ +//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. @@ -11,7 +12,7 @@ import ( // In the future it's possible that compiler optimizations will make these // XxxString functions unnecessary by realizing that calls such as -// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. // If that happens, even if we keep these functions they can be replaced with // the trivial safe code. diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go deleted file mode 100644 index 16686a6552..0000000000 --- a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go +++ /dev/null @@ -1,62 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/empty/empty.proto - -package empty - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/empty.proto. - -type Empty = emptypb.Empty - -var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{ - 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d, - 0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() } -func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() { - if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - 
NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File - file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil -} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go index 6e326888af..e57b96db90 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/histogram.go @@ -624,9 +624,9 @@ func (it *histogramIterator) Err() error { } func (it *histogramIterator) Reset(b []byte) { - // The first 2 bytes contain chunk headers. + // The first 3 bytes contain chunk headers. // We skip that for actual samples. - it.br = newBReader(b[2:]) + it.br = newBReader(b[3:]) it.numTotal = binary.BigEndian.Uint16(b) it.numRead = 0 diff --git a/vendor/github.com/urfave/cli/v2/app.go b/vendor/github.com/urfave/cli/v2/app.go index e7f79c5130..10198f4332 100644 --- a/vendor/github.com/urfave/cli/v2/app.go +++ b/vendor/github.com/urfave/cli/v2/app.go @@ -107,6 +107,8 @@ type App struct { CustomAppHelpTemplate string // SliceFlagSeparator is used to customize the separator for SliceFlag, the default is "," SliceFlagSeparator string + // DisableSliceFlagSeparator is used to disable SliceFlagSeparator, the default is false + DisableSliceFlagSeparator bool // Boolean to enable short-option handling so user can combine several // single-character bool arguments into one // i.e. 
foobar -o -v -> foobar -ov @@ -264,6 +266,8 @@ func (a *App) Setup() { if len(a.SliceFlagSeparator) != 0 { defaultSliceFlagSeparator = a.SliceFlagSeparator } + + disableSliceFlagSeparator = a.DisableSliceFlagSeparator } func (a *App) newRootCommand() *Command { diff --git a/vendor/github.com/urfave/cli/v2/command.go b/vendor/github.com/urfave/cli/v2/command.go index c5939d4ec8..b8a944d641 100644 --- a/vendor/github.com/urfave/cli/v2/command.go +++ b/vendor/github.com/urfave/cli/v2/command.go @@ -203,7 +203,7 @@ func (c *Command) Run(cCtx *Context, arguments ...string) (err error) { cerr := cCtx.checkRequiredFlags(c.Flags) if cerr != nil { - _ = ShowSubcommandHelp(cCtx) + _ = helpCommand.Action(cCtx) return cerr } diff --git a/vendor/github.com/urfave/cli/v2/flag.go b/vendor/github.com/urfave/cli/v2/flag.go index b66a75da5e..5c0a8b7328 100644 --- a/vendor/github.com/urfave/cli/v2/flag.go +++ b/vendor/github.com/urfave/cli/v2/flag.go @@ -15,7 +15,10 @@ import ( const defaultPlaceholder = "value" -var defaultSliceFlagSeparator = "," +var ( + defaultSliceFlagSeparator = "," + disableSliceFlagSeparator = false +) var ( slPfx = fmt.Sprintf("sl:::%d:::", time.Now().UTC().UnixNano()) @@ -380,5 +383,9 @@ func flagFromEnvOrFile(envVars []string, filePath string) (value string, fromWhe } func flagSplitMultiValues(val string) []string { + if disableSliceFlagSeparator { + return []string{val} + } + return strings.Split(val, defaultSliceFlagSeparator) } diff --git a/vendor/github.com/urfave/cli/v2/godoc-current.txt b/vendor/github.com/urfave/cli/v2/godoc-current.txt index b8dbf6ad0a..6afd244f25 100644 --- a/vendor/github.com/urfave/cli/v2/godoc-current.txt +++ b/vendor/github.com/urfave/cli/v2/godoc-current.txt @@ -318,6 +318,8 @@ type App struct { CustomAppHelpTemplate string // SliceFlagSeparator is used to customize the separator for SliceFlag, the default is "," SliceFlagSeparator string + // DisableSliceFlagSeparator is used to disable SliceFlagSeparator, the default is false + DisableSliceFlagSeparator bool // Boolean to enable short-option handling so user can combine several // single-character bool arguments into one // i.e. 
foobar -o -v -> foobar -ov diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 7a6ba43a7e..a49853e9d3 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -367,6 +367,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys IsWindowUnicode(hwnd HWND) (isUnicode bool) = user32.IsWindowUnicode //sys IsWindowVisible(hwnd HWND) (isVisible bool) = user32.IsWindowVisible //sys GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) = user32.GetGUIThreadInfo +//sys GetLargePageMinimum() (size uintptr) // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 96ba8559c3..ac60052e44 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -252,6 +252,7 @@ var ( procGetFileType = modkernel32.NewProc("GetFileType") procGetFinalPathNameByHandleW = modkernel32.NewProc("GetFinalPathNameByHandleW") procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLargePageMinimum = modkernel32.NewProc("GetLargePageMinimum") procGetLastError = modkernel32.NewProc("GetLastError") procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") @@ -2180,6 +2181,12 @@ func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) ( return } +func GetLargePageMinimum() (size uintptr) { + r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0) + size = uintptr(r0) + return +} + func GetLastError() (lasterr error) { r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) if r0 != 0 { diff --git a/vendor/golang.org/x/text/unicode/bidi/trieval.go b/vendor/golang.org/x/text/unicode/bidi/trieval.go index 4c459c4b72..6a796e2214 100644 --- a/vendor/golang.org/x/text/unicode/bidi/trieval.go +++ b/vendor/golang.org/x/text/unicode/bidi/trieval.go @@ -37,18 +37,6 @@ const ( unknownClass = ^Class(0) ) -var controlToClass = map[rune]Class{ - 0x202D: LRO, // LeftToRightOverride, - 0x202E: RLO, // RightToLeftOverride, - 0x202A: LRE, // LeftToRightEmbedding, - 0x202B: RLE, // RightToLeftEmbedding, - 0x202C: PDF, // PopDirectionalFormat, - 0x2066: LRI, // LeftToRightIsolate, - 0x2067: RLI, // RightToLeftIsolate, - 0x2068: FSI, // FirstStrongIsolate, - 0x2069: PDI, // PopDirectionalIsolate, -} - // A trie entry has the following bits: // 7..5 XOR mask for brackets // 4 1: Bracket open, 0: Bracket close diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 8f7c29f156..f0e0cf3cb1 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -83,7 +83,7 @@ func (lim *Limiter) Burst() int { // TokensAt returns the number of tokens available at time t. 
func (lim *Limiter) TokensAt(t time.Time) float64 { lim.mu.Lock() - _, _, tokens := lim.advance(t) // does not mutute lim + _, tokens := lim.advance(t) // does not mutate lim lim.mu.Unlock() return tokens } @@ -183,7 +183,7 @@ func (r *Reservation) CancelAt(t time.Time) { return } // advance time to now - t, _, tokens := r.lim.advance(t) + t, tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { @@ -304,7 +304,7 @@ func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - t, _, tokens := lim.advance(t) + t, tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -321,7 +321,7 @@ func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - t, _, tokens := lim.advance(t) + t, tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -356,7 +356,7 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) } } - t, last, tokens := lim.advance(t) + t, tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. tokens -= float64(n) @@ -379,15 +379,11 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) if ok { r.tokens = n r.timeToAct = t.Add(waitDuration) - } - // Update state - if ok { + // Update state lim.last = t lim.tokens = tokens lim.lastEvent = r.timeToAct - } else { - lim.last = last } return r @@ -396,7 +392,7 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(t time.Time) (newT time.Time, newLast time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { last := lim.last if t.Before(last) { last = t @@ -409,7 +405,7 @@ func (lim *Limiter) advance(t time.Time) (newT time.Time, newLast time.Time, new if burst := float64(lim.burst); tokens > burst { tokens = burst } - return t, last, tokens + return t, tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go deleted file mode 100644 index d10ad66533..0000000000 --- a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package field_mask aliases all exported identifiers in -// package "google.golang.org/protobuf/types/known/fieldmaskpb". 
-package field_mask - -import "google.golang.org/protobuf/types/known/fieldmaskpb" - -type FieldMask = fieldmaskpb.FieldMask - -var File_google_protobuf_field_mask_proto = fieldmaskpb.File_google_protobuf_field_mask_proto diff --git a/vendor/modules.txt b/vendor/modules.txt index e7245e2428..dfb5e08c68 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,16 +4,16 @@ cloud.google.com/go/internal cloud.google.com/go/internal/optional cloud.google.com/go/internal/trace cloud.google.com/go/internal/version -# cloud.google.com/go/compute v1.12.1 +# cloud.google.com/go/compute v1.13.0 ## explicit; go 1.19 cloud.google.com/go/compute/internal -# cloud.google.com/go/compute/metadata v0.2.1 +# cloud.google.com/go/compute/metadata v0.2.2 ## explicit; go 1.19 cloud.google.com/go/compute/metadata # cloud.google.com/go/iam v0.7.0 ## explicit; go 1.19 cloud.google.com/go/iam -# cloud.google.com/go/storage v1.28.0 +# cloud.google.com/go/storage v1.28.1 ## explicit; go 1.19 cloud.google.com/go/storage cloud.google.com/go/storage/internal @@ -79,7 +79,7 @@ github.com/VividCortex/ewma # github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 ## explicit; go 1.15 github.com/alecthomas/units -# github.com/aws/aws-sdk-go v1.44.149 +# github.com/aws/aws-sdk-go v1.44.152 ## explicit; go 1.11 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/awserr @@ -121,7 +121,7 @@ github.com/aws/aws-sdk-go/service/sso github.com/aws/aws-sdk-go/service/sso/ssoiface github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface -# github.com/aws/aws-sdk-go-v2 v1.17.1 +# github.com/aws/aws-sdk-go-v2 v1.17.2 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2 github.com/aws/aws-sdk-go-v2/aws @@ -143,14 +143,14 @@ github.com/aws/aws-sdk-go-v2/internal/sdkio github.com/aws/aws-sdk-go-v2/internal/strings github.com/aws/aws-sdk-go-v2/internal/sync/singleflight github.com/aws/aws-sdk-go-v2/internal/timeconv -# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.9 +# github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream/eventstreamapi -# github.com/aws/aws-sdk-go-v2/config v1.18.3 +# github.com/aws/aws-sdk-go-v2/config v1.18.4 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/config -# github.com/aws/aws-sdk-go-v2/credentials v1.13.3 +# github.com/aws/aws-sdk-go-v2/credentials v1.13.4 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/credentials github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds @@ -159,64 +159,64 @@ github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client github.com/aws/aws-sdk-go-v2/credentials/processcreds github.com/aws/aws-sdk-go-v2/credentials/ssocreds github.com/aws/aws-sdk-go-v2/credentials/stscreds -# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.19 +# github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.20 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/feature/ec2/imds github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config -# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.42 +# github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.43 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/feature/s3/manager -# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.25 +# github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.26 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/internal/configsources -# github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.19 +# 
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.20 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 -# github.com/aws/aws-sdk-go-v2/internal/ini v1.3.26 +# github.com/aws/aws-sdk-go-v2/internal/ini v1.3.27 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/internal/ini -# github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.16 +# github.com/aws/aws-sdk-go-v2/internal/v4a v1.0.17 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/internal/v4a github.com/aws/aws-sdk-go-v2/internal/v4a/internal/crypto github.com/aws/aws-sdk-go-v2/internal/v4a/internal/v4 -# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.10 +# github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.11 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding -# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.20 +# github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.21 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/internal/checksum -# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.19 +# github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.20 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/internal/presigned-url -# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.19 +# github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.13.20 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/internal/s3shared github.com/aws/aws-sdk-go-v2/service/internal/s3shared/arn github.com/aws/aws-sdk-go-v2/service/internal/s3shared/config -# github.com/aws/aws-sdk-go-v2/service/s3 v1.29.4 +# github.com/aws/aws-sdk-go-v2/service/s3 v1.29.5 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/s3 github.com/aws/aws-sdk-go-v2/service/s3/internal/arn github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations github.com/aws/aws-sdk-go-v2/service/s3/internal/endpoints github.com/aws/aws-sdk-go-v2/service/s3/types -# github.com/aws/aws-sdk-go-v2/service/sso v1.11.25 +# github.com/aws/aws-sdk-go-v2/service/sso v1.11.26 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/sso github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sso/types -# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.8 +# github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.9 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/ssooidc github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints github.com/aws/aws-sdk-go-v2/service/ssooidc/types -# github.com/aws/aws-sdk-go-v2/service/sts v1.17.5 +# github.com/aws/aws-sdk-go-v2/service/sts v1.17.6 ## explicit; go 1.15 github.com/aws/aws-sdk-go-v2/service/sts github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints github.com/aws/aws-sdk-go-v2/service/sts/types -# github.com/aws/smithy-go v1.13.4 +# github.com/aws/smithy-go v1.13.5 ## explicit; go 1.15 github.com/aws/smithy-go github.com/aws/smithy-go/auth/bearer @@ -240,7 +240,7 @@ github.com/aws/smithy-go/waiter # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile -# github.com/cespare/xxhash/v2 v2.1.2 +# github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 # github.com/cheggaaa/pb/v3 v3.1.0 @@ -293,7 +293,6 @@ github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration -github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/timestamp # github.com/golang/snappy v0.0.4 ## explicit 
@@ -399,7 +398,7 @@ github.com/prometheus/common/sigv4 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v0.40.4 +# github.com/prometheus/prometheus v0.40.5 ## explicit; go 1.18 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -444,7 +443,7 @@ github.com/russross/blackfriday/v2 ## explicit; go 1.13 github.com/stretchr/testify/assert github.com/stretchr/testify/require -# github.com/urfave/cli/v2 v2.23.5 +# github.com/urfave/cli/v2 v2.23.6 ## explicit; go 1.18 github.com/urfave/cli/v2 # github.com/valyala/bytebufferpool v1.0.0 @@ -528,7 +527,7 @@ go.uber.org/atomic ## explicit; go 1.18 go.uber.org/goleak go.uber.org/goleak/internal/stack -# golang.org/x/exp v0.0.0-20221126150942-6ab00d035af9 +# golang.org/x/exp v0.0.0-20221204150635-6dcec336b2bb ## explicit; go 1.18 golang.org/x/exp/constraints golang.org/x/exp/slices @@ -557,18 +556,18 @@ golang.org/x/oauth2/jwt # golang.org/x/sync v0.1.0 ## explicit golang.org/x/sync/errgroup -# golang.org/x/sys v0.2.0 +# golang.org/x/sys v0.3.0 ## explicit; go 1.17 golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.4.0 +# golang.org/x/text v0.5.0 ## explicit; go 1.17 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.2.0 +# golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate # golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 @@ -608,7 +607,7 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6 +# google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations @@ -618,7 +617,6 @@ google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr -google.golang.org/genproto/protobuf/field_mask # google.golang.org/grpc v1.51.0 ## explicit; go 1.17 google.golang.org/grpc From 8189770c50165b62867327ad388f2c2ef237ab6f Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 5 Dec 2022 15:15:00 -0800 Subject: [PATCH 24/38] all: add `-inmemoryDataFlushInterval` command-line flag for controlling the frequency of saving in-memory data to disk The main purpose of this command-line flag is to increase the lifetime of low-end flash storage with the limited number of write operations it can perform. Such flash storage is usually installed on Raspberry PI or similar appliances. For example, `-inmemoryDataFlushInterval=1h` reduces the frequency of disk write operations to up to once per hour if the ingested one-hour worth of data fits the limit for in-memory data. The in-memory data is searchable in the same way as the data stored on disk. VictoriaMetrics automatically flushes the in-memory data to disk on graceful shutdown via SIGINT signal. The in-memory data is lost on unclean shutdown (hardware power loss, OOM crash, SIGKILL). 
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3337 --- README.md | 77 +- app/victoria-metrics/main.go | 5 + app/vmstorage/main.go | 139 ++- docs/CHANGELOG.md | 28 + docs/README.md | 77 +- docs/Single-server-VictoriaMetrics.md | 77 +- lib/mergeset/inmemory_part.go | 34 + lib/mergeset/table.go | 925 +++++++++++----- lib/mergeset/table_test.go | 12 +- lib/storage/index_db_test.go | 4 +- lib/storage/inmemory_part.go | 34 + lib/storage/partition.go | 1290 ++++++++++++++--------- lib/storage/partition_search_test.go | 11 +- lib/storage/storage_test.go | 71 +- lib/storage/table.go | 7 +- lib/storage/table_search_test.go | 10 +- lib/storage/table_search_timing_test.go | 5 +- lib/storage/table_timing_test.go | 3 +- 18 files changed, 1833 insertions(+), 976 deletions(-) diff --git a/README.md b/README.md index 85c15a2e7e..c19d003e3c 100644 --- a/README.md +++ b/README.md @@ -1363,18 +1363,50 @@ It is recommended passing different `-promscrape.cluster.name` values to HA pair ## Storage -VictoriaMetrics stores time series data in [MergeTree](https://en.wikipedia.org/wiki/Log-structured_merge-tree)-like -data structures. On insert, VictoriaMetrics accumulates up to 1s of data and dumps it on disk to -`<-storageDataPath>/data/small/YYYY_MM/` subdirectory forming a `part` with the following -name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`. Each part consists of two "columns": -values and timestamps. These are sorted and compressed raw time series values. Additionally, part contains -index files for searching for specific series in the values and timestamps files. +VictoriaMetrics buffers the ingested data in memory for up to a second. Then the buffered data is written to in-memory `parts`, +which can be searched during queries. The in-memory `parts` are periodically persisted to disk, so they could survive unclean shutdown +such as out of memory crash, hardware power loss or `SIGKILL` signal. The interval for flushing the in-memory data to disk +can be configured with the `-inmemoryDataFlushInterval` command-line flag (note that too short flush interval may significantly increase disk IO). -`Parts` are periodically merged into the bigger parts. The resulting `part` is constructed -under `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` subdirectory. -When the resulting `part` is complete, it is atomically moved from the `tmp` -to its own subdirectory, while the source parts are atomically removed. The end result is that the source -parts are substituted by a single resulting bigger `part` in the `<-storageDataPath>/data/{small,big}/YYYY_MM/` directory. +In-memory parts are persisted to disk into `part` directories under the `<-storageDataPath>/data/small/YYYY_MM/` folder, +where `YYYY_MM` is the month partition for the stored data. For example, `2022_11` is the partition for `parts` +with [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) from `November 2022`. + +The `part` directory has the following name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`, where: + +- `rowsCount` - the number of [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) stored in the part +- `blocksCount` - the number of blocks stored in the part (see details about blocks below) +- `minTimestamp` and `maxTimestamp` - minimum and maximum timestamps across raw samples stored in the part + +Each `part` consists of `blocks` sorted by internal time series id (aka `TSID`). 
+Each `block` contains up to 8K [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples), +which belong to a single [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series). +Raw samples in each block are sorted by `timestamp`. Blocks for the same time series are sorted +by the `timestamp` of the first sample. Timestamps and values for all the blocks +are stored in [compressed form](https://faun.pub/victoriametrics-achieving-better-compression-for-time-series-data-than-gorilla-317bc1f95932) +in separate files under `part` directory - `timestamps.bin` and `values.bin`. + +The `part` directory also contains `index.bin` and `metaindex.bin` files - these files contain index +for fast block lookups, which belong to the given `TSID` and cover the given time range. + +`Parts` are periodically merged into bigger parts in background. The background merge provides the following benefits: + +* keeping the number of data files under control, so they don't exceed limits on open files +* improved data compression, since bigger parts are usually compressed better than smaller parts +* improved query speed, since queries over smaller number of parts are executed faster +* various background maintenance tasks such as [de-duplication](#deduplication), [downsampling](#downsampling) + and [freeing up disk space for the deleted time series](#how-to-delete-time-series) are performed during the merge + +Newly added `parts` either successfully appear in the storage or fail to appear. +The newly added `parts` are being created in a temporary directory under `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` folder. +When the newly added `part` is fully written and [fsynced](https://man7.org/linux/man-pages/man2/fsync.2.html) +to a temporary directory, then it is atomically moved to the storage directory. +Thanks to this alogrithm, storage never contains partially created parts, even if hardware power off +occurrs in the middle of writing the `part` to disk - such incompletely written `parts` +are automatically deleted on the next VictoriaMetrics start. + +The same applies to merge process — `parts` are either fully merged into a new `part` or fail to merge, +leaving the source `parts` untouched. VictoriaMetrics doesn't merge parts if their summary size exceeds free disk space. This prevents from potential out of disk space errors during merge. @@ -1383,24 +1415,10 @@ This increases overhead during data querying, since VictoriaMetrics needs to rea bigger number of parts per each request. That's why it is recommended to have at least 20% of free disk space under directory pointed by `-storageDataPath` command-line flag. -Information about merging process is available in [single-node VictoriaMetrics](https://grafana.com/dashboards/10229) -and [clustered VictoriaMetrics](https://grafana.com/grafana/dashboards/11176) Grafana dashboards. +Information about merging process is available in [the dashboard for single-node VictoriaMetrics](https://grafana.com/dashboards/10229) +and [the dashboard for VictoriaMetrics cluster](https://grafana.com/grafana/dashboards/11176). See more details in [monitoring docs](#monitoring). -The `merge` process improves compression rate and keeps number of `parts` on disk relatively low. 
-Benefits of doing the merge process are the following: - -* it improves query performance, since lower number of `parts` are inspected with each query -* it reduces the number of data files, since each `part` contains fixed number of files -* various background maintenance tasks such as [de-duplication](#deduplication), [downsampling](#downsampling) - and [freeing up disk space for the deleted time series](#how-to-delete-time-series) are performed during the merge. - -Newly added `parts` either appear in the storage or fail to appear. -Storage never contains partially created parts. The same applies to merge process — `parts` are either fully -merged into a new `part` or fail to merge. MergeTree doesn't contain partially merged `parts`. -`Part` contents in MergeTree never change. Parts are immutable. They may be only deleted after the merge -to a bigger `part` or when the `part` contents goes outside the configured `-retentionPeriod`. - See [this article](https://valyala.medium.com/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) for more details. See also [how to work with snapshots](#how-to-work-with-snapshots). @@ -1723,9 +1741,10 @@ and [cardinality explorer docs](#cardinality-explorer). * VictoriaMetrics buffers incoming data in memory for up to a few seconds before flushing it to persistent storage. This may lead to the following "issues": - * Data becomes available for querying in a few seconds after inserting. It is possible to flush in-memory buffers to persistent storage + * Data becomes available for querying in a few seconds after inserting. It is possible to flush in-memory buffers to searchable parts by requesting `/internal/force_flush` http handler. This handler is mostly needed for testing and debugging purposes. * The last few seconds of inserted data may be lost on unclean shutdown (i.e. OOM, `kill -9` or hardware reset). + The `-inmemoryDataFlushInterval` command-line flag allows controlling the frequency of in-memory data flush to persistent storage. See [this article for technical details](https://valyala.medium.com/wal-usage-looks-broken-in-modern-time-series-databases-b62a627ab704). * If VictoriaMetrics works slowly and eats more than a CPU core per 100K ingested data points per second, @@ -2133,6 +2152,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metic name if InfluxDB line contains only a single field -influxTrimTimestamp duration Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms) + -inmemoryDataFlushInterval duration + The interval for guaranteed saving of in-memory data to disk. The saved data survives unclean shutdown such as OOM crash, hardware reset, SIGKILL, etc. Bigger intervals may help increasing lifetime of flash storage with limited write cycles (e.g. Raspberry PI). Smaller intervals increase disk IO load. Minimum supported value is 1s (default 5s) -insert.maxQueueDuration duration The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s) -logNewSeries diff --git a/app/victoria-metrics/main.go b/app/victoria-metrics/main.go index 16fc874050..770259cc8f 100644 --- a/app/victoria-metrics/main.go +++ b/app/victoria-metrics/main.go @@ -29,6 +29,10 @@ var ( "equal to -dedup.minScrapeInterval > 0. 
See https://docs.victoriametrics.com/#deduplication and https://docs.victoriametrics.com/#downsampling") dryRun = flag.Bool("dryRun", false, "Whether to check only -promscrape.config and then exit. "+ "Unknown config entries aren't allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse=false command-line flag") + inmemoryDataFlushInterval = flag.Duration("inmemoryDataFlushInterval", 5*time.Second, "The interval for guaranteed saving of in-memory data to disk. "+ + "The saved data survives unclean shutdown such as OOM crash, hardware reset, SIGKILL, etc. "+ + "Bigger intervals may help increasing lifetime of flash storage with limited write cycles (e.g. Raspberry PI). "+ + "Smaller intervals increase disk IO load. Minimum supported value is 1s") ) func main() { @@ -54,6 +58,7 @@ func main() { logger.Infof("starting VictoriaMetrics at %q...", *httpListenAddr) startTime := time.Now() storage.SetDedupInterval(*minScrapeInterval) + storage.SetDataFlushInterval(*inmemoryDataFlushInterval) vmstorage.Init(promql.ResetRollupResultCacheIfNeeded) vmselect.Init() vminsert.Init() diff --git a/app/vmstorage/main.go b/app/vmstorage/main.go index 1ec41a60ae..2033386adc 100644 --- a/app/vmstorage/main.go +++ b/app/vmstorage/main.go @@ -100,7 +100,7 @@ func InitWithoutMetrics(resetCacheIfNeeded func(mrs []storage.MetricRow)) { storage.SetLogNewSeries(*logNewSeries) storage.SetFinalMergeDelay(*finalMergeDelay) storage.SetBigMergeWorkersCount(*bigMergeConcurrency) - storage.SetSmallMergeWorkersCount(*smallMergeConcurrency) + storage.SetMergeWorkersCount(*smallMergeConcurrency) storage.SetRetentionTimezoneOffset(*retentionTimezoneOffset) storage.SetFreeDiskSpaceLimit(minFreeDiskSpaceBytes.N) storage.SetTSIDCacheSize(cacheSizeStorageTSID.N) @@ -453,56 +453,80 @@ func registerStorageMetrics(strg *storage.Storage) { return 0 }) - metrics.NewGauge(`vm_active_merges{type="storage/big"}`, func() float64 { - return float64(tm().ActiveBigMerges) + metrics.NewGauge(`vm_active_merges{type="storage/inmemory"}`, func() float64 { + return float64(tm().ActiveInmemoryMerges) }) metrics.NewGauge(`vm_active_merges{type="storage/small"}`, func() float64 { return float64(tm().ActiveSmallMerges) }) - metrics.NewGauge(`vm_active_merges{type="indexdb"}`, func() float64 { - return float64(idbm().ActiveMerges) + metrics.NewGauge(`vm_active_merges{type="storage/big"}`, func() float64 { + return float64(tm().ActiveBigMerges) + }) + metrics.NewGauge(`vm_active_merges{type="indexdb/inmemory"}`, func() float64 { + return float64(idbm().ActiveInmemoryMerges) + }) + metrics.NewGauge(`vm_active_merges{type="indexdb/file"}`, func() float64 { + return float64(idbm().ActiveFileMerges) }) - metrics.NewGauge(`vm_merges_total{type="storage/big"}`, func() float64 { - return float64(tm().BigMergesCount) + metrics.NewGauge(`vm_merges_total{type="storage/inmemory"}`, func() float64 { + return float64(tm().InmemoryMergesCount) }) metrics.NewGauge(`vm_merges_total{type="storage/small"}`, func() float64 { return float64(tm().SmallMergesCount) }) - metrics.NewGauge(`vm_merges_total{type="indexdb"}`, func() float64 { - return float64(idbm().MergesCount) + metrics.NewGauge(`vm_merges_total{type="storage/big"}`, func() float64 { + return float64(tm().BigMergesCount) + }) + metrics.NewGauge(`vm_merges_total{type="indexdb/inmemory"}`, func() float64 { + return float64(idbm().InmemoryMergesCount) + }) + metrics.NewGauge(`vm_merges_total{type="indexdb/file"}`, func() float64 { + return float64(idbm().FileMergesCount) }) 
- metrics.NewGauge(`vm_rows_merged_total{type="storage/big"}`, func() float64 { - return float64(tm().BigRowsMerged) + metrics.NewGauge(`vm_rows_merged_total{type="storage/inmemory"}`, func() float64 { + return float64(tm().InmemoryRowsMerged) }) metrics.NewGauge(`vm_rows_merged_total{type="storage/small"}`, func() float64 { return float64(tm().SmallRowsMerged) }) - metrics.NewGauge(`vm_rows_merged_total{type="indexdb"}`, func() float64 { - return float64(idbm().ItemsMerged) + metrics.NewGauge(`vm_rows_merged_total{type="storage/big"}`, func() float64 { + return float64(tm().BigRowsMerged) + }) + metrics.NewGauge(`vm_rows_merged_total{type="indexdb/inmemory"}`, func() float64 { + return float64(idbm().InmemoryItemsMerged) + }) + metrics.NewGauge(`vm_rows_merged_total{type="indexdb/file"}`, func() float64 { + return float64(idbm().FileItemsMerged) }) - metrics.NewGauge(`vm_rows_deleted_total{type="storage/big"}`, func() float64 { - return float64(tm().BigRowsDeleted) + metrics.NewGauge(`vm_rows_deleted_total{type="storage/inmemory"}`, func() float64 { + return float64(tm().InmemoryRowsDeleted) }) metrics.NewGauge(`vm_rows_deleted_total{type="storage/small"}`, func() float64 { return float64(tm().SmallRowsDeleted) }) - - metrics.NewGauge(`vm_references{type="storage/big", name="parts"}`, func() float64 { - return float64(tm().BigPartsRefCount) + metrics.NewGauge(`vm_rows_deleted_total{type="storage/big"}`, func() float64 { + return float64(tm().BigRowsDeleted) }) - metrics.NewGauge(`vm_references{type="storage/small", name="parts"}`, func() float64 { + + metrics.NewGauge(`vm_part_references{type="storage/inmemory"}`, func() float64 { + return float64(tm().InmemoryPartsRefCount) + }) + metrics.NewGauge(`vm_part_references{type="storage/small"}`, func() float64 { return float64(tm().SmallPartsRefCount) }) - metrics.NewGauge(`vm_references{type="storage", name="partitions"}`, func() float64 { + metrics.NewGauge(`vm_part_references{type="storage/big"}`, func() float64 { + return float64(tm().BigPartsRefCount) + }) + metrics.NewGauge(`vm_partition_references{type="storage"}`, func() float64 { return float64(tm().PartitionsRefCount) }) - metrics.NewGauge(`vm_references{type="indexdb", name="objects"}`, func() float64 { + metrics.NewGauge(`vm_object_references{type="indexdb"}`, func() float64 { return float64(idbm().IndexDBRefCount) }) - metrics.NewGauge(`vm_references{type="indexdb", name="parts"}`, func() float64 { + metrics.NewGauge(`vm_part_references{type="indexdb"}`, func() float64 { return float64(idbm().PartsRefCount) }) @@ -531,11 +555,11 @@ func registerStorageMetrics(strg *storage.Storage) { return float64(idbm().CompositeFilterMissingConversions) }) - metrics.NewGauge(`vm_assisted_merges_total{type="storage/small"}`, func() float64 { - return float64(tm().SmallAssistedMerges) + metrics.NewGauge(`vm_assisted_merges_total{type="storage/inmemory"}`, func() float64 { + return float64(tm().InmemoryAssistedMerges) }) - metrics.NewGauge(`vm_assisted_merges_total{type="indexdb"}`, func() float64 { - return float64(idbm().AssistedMerges) + metrics.NewGauge(`vm_assisted_merges_total{type="indexdb/inmemory"}`, func() float64 { + return float64(idbm().AssistedInmemoryMerges) }) metrics.NewGauge(`vm_indexdb_items_added_total`, func() float64 { @@ -546,11 +570,8 @@ func registerStorageMetrics(strg *storage.Storage) { }) // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/686 - metrics.NewGauge(`vm_merge_need_free_disk_space{type="storage/small"}`, func() float64 { - return 
float64(tm().SmallMergeNeedFreeDiskSpace) - }) - metrics.NewGauge(`vm_merge_need_free_disk_space{type="storage/big"}`, func() float64 { - return float64(tm().BigMergeNeedFreeDiskSpace) + metrics.NewGauge(`vm_merge_need_free_disk_space`, func() float64 { + return float64(tm().MergeNeedFreeDiskSpace) }) metrics.NewGauge(`vm_pending_rows{type="storage"}`, func() float64 { @@ -560,34 +581,52 @@ func registerStorageMetrics(strg *storage.Storage) { return float64(idbm().PendingItems) }) - metrics.NewGauge(`vm_parts{type="storage/big"}`, func() float64 { - return float64(tm().BigPartsCount) + metrics.NewGauge(`vm_parts{type="storage/inmemory"}`, func() float64 { + return float64(tm().InmemoryPartsCount) }) metrics.NewGauge(`vm_parts{type="storage/small"}`, func() float64 { return float64(tm().SmallPartsCount) }) - metrics.NewGauge(`vm_parts{type="indexdb"}`, func() float64 { - return float64(idbm().PartsCount) + metrics.NewGauge(`vm_parts{type="storage/big"}`, func() float64 { + return float64(tm().BigPartsCount) + }) + metrics.NewGauge(`vm_parts{type="indexdb/inmemory"}`, func() float64 { + return float64(idbm().InmemoryPartsCount) + }) + metrics.NewGauge(`vm_parts{type="indexdb/file"}`, func() float64 { + return float64(idbm().FilePartsCount) }) - metrics.NewGauge(`vm_blocks{type="storage/big"}`, func() float64 { - return float64(tm().BigBlocksCount) + metrics.NewGauge(`vm_blocks{type="storage/inmemory"}`, func() float64 { + return float64(tm().InmemoryBlocksCount) }) metrics.NewGauge(`vm_blocks{type="storage/small"}`, func() float64 { return float64(tm().SmallBlocksCount) }) - metrics.NewGauge(`vm_blocks{type="indexdb"}`, func() float64 { - return float64(idbm().BlocksCount) + metrics.NewGauge(`vm_blocks{type="storage/big"}`, func() float64 { + return float64(tm().BigBlocksCount) + }) + metrics.NewGauge(`vm_blocks{type="indexdb/inmemory"}`, func() float64 { + return float64(idbm().InmemoryBlocksCount) + }) + metrics.NewGauge(`vm_blocks{type="indexdb/file"}`, func() float64 { + return float64(idbm().FileBlocksCount) }) - metrics.NewGauge(`vm_data_size_bytes{type="storage/big"}`, func() float64 { - return float64(tm().BigSizeBytes) + metrics.NewGauge(`vm_data_size_bytes{type="storage/inmemory"}`, func() float64 { + return float64(tm().InmemorySizeBytes) }) metrics.NewGauge(`vm_data_size_bytes{type="storage/small"}`, func() float64 { return float64(tm().SmallSizeBytes) }) - metrics.NewGauge(`vm_data_size_bytes{type="indexdb"}`, func() float64 { - return float64(idbm().SizeBytes) + metrics.NewGauge(`vm_data_size_bytes{type="storage/big"}`, func() float64 { + return float64(tm().BigSizeBytes) + }) + metrics.NewGauge(`vm_data_size_bytes{type="indexdb/inmemory"}`, func() float64 { + return float64(idbm().InmemorySizeBytes) + }) + metrics.NewGauge(`vm_data_size_bytes{type="indexdb/file"}`, func() float64 { + return float64(idbm().FileSizeBytes) }) metrics.NewGauge(`vm_rows_added_to_storage_total`, func() float64 { @@ -665,14 +704,20 @@ func registerStorageMetrics(strg *storage.Storage) { return float64(m().TimestampsBytesSaved) }) - metrics.NewGauge(`vm_rows{type="storage/big"}`, func() float64 { - return float64(tm().BigRowsCount) + metrics.NewGauge(`vm_rows{type="storage/inmemory"}`, func() float64 { + return float64(tm().InmemoryRowsCount) }) metrics.NewGauge(`vm_rows{type="storage/small"}`, func() float64 { return float64(tm().SmallRowsCount) }) - metrics.NewGauge(`vm_rows{type="indexdb"}`, func() float64 { - return float64(idbm().ItemsCount) + metrics.NewGauge(`vm_rows{type="storage/big"}`, 
func() float64 { + return float64(tm().BigRowsCount) + }) + metrics.NewGauge(`vm_rows{type="indexdb/inmemory"}`, func() float64 { + return float64(idbm().InmemoryItemsCount) + }) + metrics.NewGauge(`vm_rows{type="indexdb/file"}`, func() float64 { + return float64(idbm().FileItemsCount) }) metrics.NewGauge(`vm_date_range_search_calls_total`, func() float64 { diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index d1b5507a27..96694cc09d 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -17,6 +17,34 @@ The following tip changes can be tested by building VictoriaMetrics components f **Update note 1:** this release drops support for direct upgrade from VictoriaMetrics versions prior [v1.28.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.28.0). Please upgrade to `v1.84.0`, wait until `finished round 2 of background conversion` line is emitted to log by single-node VictoriaMetrics or by `vmstorage`, and then upgrade to newer releases. +**Update note 2:** this release splits `type="indexdb"` metrics into `type="indexdb/inmemory"` and `type="indexdb/file"` metrics. This may break old dashboards and alerting rules, which contain label filters on `{type="indexdb"}`. It is recommended upgrading to the latest available dashboards and alerting rules mentioned in [these docs](https://docs.victoriametrics.com/#monitoring). + +* FEATURE: add `-inmemoryDataFlushInterval` command-line flag, which can be used for controlling the frequency of in-memory data flush to disk. The data flush frequency can be reduced when VictoriaMetrics stores data to low-end flash device with limited number of write cycles (for example, on Raspberry PI). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3337). +* FEATURE: expose additional metrics for `indexdb` and `storage` parts stored in memory and for `indexdb` parts stored in files (see [storage docs](https://docs.victoriametrics.com/#storage) for technical details): + * `vm_active_merges{type="storage/inmemory"}` - active merges for in-memory `storage` parts + * `vm_active_merges{type="indexdb/inmemory"}` - active merges for in-memory `indexdb` parts + * `vm_active_merges{type="indexdb/file"}` - active merges for file-based `indexdb` parts + * `vm_merges_total{type="storage/inmemory"}` - the total merges for in-memory `storage` parts + * `vm_merges_total{type="indexdb/inmemory"}` - the total merges for in-memory `indexdb` parts + * `vm_merges_total{type="indexdb/file"}` - the total merges for file-based `indexdb` parts + * `vm_rows_merged_total{type="storage/inmemory"}` - the total rows merged for in-memory `storage` parts + * `vm_rows_merged_total{type="indexdb/inmemory"}` - the total rows merged for in-memory `indexdb` parts + * `vm_rows_merged_total{type="indexdb/file"}` - the total rows merged for file-based `indexdb` parts + * `vm_rows_deleted_total{type="storage/inmemory"}` - the total rows deleted for in-memory `storage` parts + * `vm_assisted_merges_total{type="storage/inmemory"}` - the total number of assisted merges for in-memory `storage` parts + * `vm_assisted_merges_total{type="indexdb/inmemory"}` - the total number of assisted merges for in-memory `indexdb` parts + * `vm_parts{type="storage/inmemory"}` - the total number of in-memory `storage` parts + * `vm_parts{type="indexdb/inmemory"}` - the total number of in-memory `indexdb` parts + * `vm_parts{type="indexdb/file"}` - the total number of file-based `indexdb` parts + * `vm_blocks{type="storage/inmemory"}` - the total number of in-memory 
`storage` blocks + * `vm_blocks{type="indexdb/inmemory"}` - the total number of in-memory `indexdb` blocks + * `vm_blocks{type="indexdb/file"}` - the total number of file-based `indexdb` blocks + * `vm_data_size_bytes{type="storage/inmemory"}` - the total size of in-memory `storage` blocks + * `vm_data_size_bytes{type="indexdb/inmemory"}` - the total size of in-memory `indexdb` blocks + * `vm_data_size_bytes{type="indexdb/file"}` - the total size of file-based `indexdb` blocks + * `vm_rows{type="storage/inmemory"}` - the total number of in-memory `storage` rows + * `vm_rows{type="indexdb/inmemory"}` - the total number of in-memory `indexdb` rows + * `vm_rows{type="indexdb/file"}` - the total number of file-based `indexdb` rows * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): improve [service discovery](https://docs.victoriametrics.com/sd_configs.html) performance when discovering big number of targets (10K and more). * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add `exported_` prefix to metric names exported by scrape targets if these metric names clash with [automatically generated metrics](https://docs.victoriametrics.com/vmagent.html#automatically-generated-metrics) such as `up`, `scrape_samples_scraped`, etc. This prevents from corruption of automatically generated metrics. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3406). * FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): improve error message when the requested path cannot be properly parsed, so users could identify the issue and properly fix the path. Now the error message links to [url format docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3402). diff --git a/docs/README.md b/docs/README.md index dce732e898..1fe27c686d 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1364,18 +1364,50 @@ It is recommended passing different `-promscrape.cluster.name` values to HA pair ## Storage -VictoriaMetrics stores time series data in [MergeTree](https://en.wikipedia.org/wiki/Log-structured_merge-tree)-like -data structures. On insert, VictoriaMetrics accumulates up to 1s of data and dumps it on disk to -`<-storageDataPath>/data/small/YYYY_MM/` subdirectory forming a `part` with the following -name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`. Each part consists of two "columns": -values and timestamps. These are sorted and compressed raw time series values. Additionally, part contains -index files for searching for specific series in the values and timestamps files. +VictoriaMetrics buffers the ingested data in memory for up to a second. Then the buffered data is written to in-memory `parts`, +which can be searched during queries. The in-memory `parts` are periodically persisted to disk, so they could survive unclean shutdown +such as out of memory crash, hardware power loss or `SIGKILL` signal. The interval for flushing the in-memory data to disk +can be configured with the `-inmemoryDataFlushInterval` command-line flag (note that too short flush interval may significantly increase disk IO). -`Parts` are periodically merged into the bigger parts. The resulting `part` is constructed -under `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` subdirectory. 
-When the resulting `part` is complete, it is atomically moved from the `tmp` -to its own subdirectory, while the source parts are atomically removed. The end result is that the source -parts are substituted by a single resulting bigger `part` in the `<-storageDataPath>/data/{small,big}/YYYY_MM/` directory. +In-memory parts are persisted to disk into `part` directories under the `<-storageDataPath>/data/small/YYYY_MM/` folder, +where `YYYY_MM` is the month partition for the stored data. For example, `2022_11` is the partition for `parts` +with [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) from `November 2022`. + +The `part` directory has the following name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`, where: + +- `rowsCount` - the number of [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) stored in the part +- `blocksCount` - the number of blocks stored in the part (see details about blocks below) +- `minTimestamp` and `maxTimestamp` - minimum and maximum timestamps across raw samples stored in the part + +Each `part` consists of `blocks` sorted by internal time series id (aka `TSID`). +Each `block` contains up to 8K [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples), +which belong to a single [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series). +Raw samples in each block are sorted by `timestamp`. Blocks for the same time series are sorted +by the `timestamp` of the first sample. Timestamps and values for all the blocks +are stored in [compressed form](https://faun.pub/victoriametrics-achieving-better-compression-for-time-series-data-than-gorilla-317bc1f95932) +in separate files under `part` directory - `timestamps.bin` and `values.bin`. + +The `part` directory also contains `index.bin` and `metaindex.bin` files - these files contain index +for fast block lookups, which belong to the given `TSID` and cover the given time range. + +`Parts` are periodically merged into bigger parts in background. The background merge provides the following benefits: + +* keeping the number of data files under control, so they don't exceed limits on open files +* improved data compression, since bigger parts are usually compressed better than smaller parts +* improved query speed, since queries over smaller number of parts are executed faster +* various background maintenance tasks such as [de-duplication](#deduplication), [downsampling](#downsampling) + and [freeing up disk space for the deleted time series](#how-to-delete-time-series) are performed during the merge + +Newly added `parts` either successfully appear in the storage or fail to appear. +The newly added `parts` are being created in a temporary directory under `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` folder. +When the newly added `part` is fully written and [fsynced](https://man7.org/linux/man-pages/man2/fsync.2.html) +to a temporary directory, then it is atomically moved to the storage directory. +Thanks to this alogrithm, storage never contains partially created parts, even if hardware power off +occurrs in the middle of writing the `part` to disk - such incompletely written `parts` +are automatically deleted on the next VictoriaMetrics start. + +The same applies to merge process — `parts` are either fully merged into a new `part` or fail to merge, +leaving the source `parts` untouched. VictoriaMetrics doesn't merge parts if their summary size exceeds free disk space. 
This prevents from potential out of disk space errors during merge. @@ -1384,24 +1416,10 @@ This increases overhead during data querying, since VictoriaMetrics needs to rea bigger number of parts per each request. That's why it is recommended to have at least 20% of free disk space under directory pointed by `-storageDataPath` command-line flag. -Information about merging process is available in [single-node VictoriaMetrics](https://grafana.com/dashboards/10229) -and [clustered VictoriaMetrics](https://grafana.com/grafana/dashboards/11176) Grafana dashboards. +Information about merging process is available in [the dashboard for single-node VictoriaMetrics](https://grafana.com/dashboards/10229) +and [the dashboard for VictoriaMetrics cluster](https://grafana.com/grafana/dashboards/11176). See more details in [monitoring docs](#monitoring). -The `merge` process improves compression rate and keeps number of `parts` on disk relatively low. -Benefits of doing the merge process are the following: - -* it improves query performance, since lower number of `parts` are inspected with each query -* it reduces the number of data files, since each `part` contains fixed number of files -* various background maintenance tasks such as [de-duplication](#deduplication), [downsampling](#downsampling) - and [freeing up disk space for the deleted time series](#how-to-delete-time-series) are performed during the merge. - -Newly added `parts` either appear in the storage or fail to appear. -Storage never contains partially created parts. The same applies to merge process — `parts` are either fully -merged into a new `part` or fail to merge. MergeTree doesn't contain partially merged `parts`. -`Part` contents in MergeTree never change. Parts are immutable. They may be only deleted after the merge -to a bigger `part` or when the `part` contents goes outside the configured `-retentionPeriod`. - See [this article](https://valyala.medium.com/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) for more details. See also [how to work with snapshots](#how-to-work-with-snapshots). @@ -1724,9 +1742,10 @@ and [cardinality explorer docs](#cardinality-explorer). * VictoriaMetrics buffers incoming data in memory for up to a few seconds before flushing it to persistent storage. This may lead to the following "issues": - * Data becomes available for querying in a few seconds after inserting. It is possible to flush in-memory buffers to persistent storage + * Data becomes available for querying in a few seconds after inserting. It is possible to flush in-memory buffers to searchable parts by requesting `/internal/force_flush` http handler. This handler is mostly needed for testing and debugging purposes. * The last few seconds of inserted data may be lost on unclean shutdown (i.e. OOM, `kill -9` or hardware reset). + The `-inmemoryDataFlushInterval` command-line flag allows controlling the frequency of in-memory data flush to persistent storage. See [this article for technical details](https://valyala.medium.com/wal-usage-looks-broken-in-modern-time-series-databases-b62a627ab704). 
* If VictoriaMetrics works slowly and eats more than a CPU core per 100K ingested data points per second, @@ -2134,6 +2153,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metic name if InfluxDB line contains only a single field -influxTrimTimestamp duration Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms) + -inmemoryDataFlushInterval duration + The interval for guaranteed saving of in-memory data to disk. The saved data survives unclean shutdown such as OOM crash, hardware reset, SIGKILL, etc. Bigger intervals may help increasing lifetime of flash storage with limited write cycles (e.g. Raspberry PI). Smaller intervals increase disk IO load. Minimum supported value is 1s (default 5s) -insert.maxQueueDuration duration The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s) -logNewSeries diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md index 7a9908aabd..ed764040bc 100644 --- a/docs/Single-server-VictoriaMetrics.md +++ b/docs/Single-server-VictoriaMetrics.md @@ -1367,18 +1367,50 @@ It is recommended passing different `-promscrape.cluster.name` values to HA pair ## Storage -VictoriaMetrics stores time series data in [MergeTree](https://en.wikipedia.org/wiki/Log-structured_merge-tree)-like -data structures. On insert, VictoriaMetrics accumulates up to 1s of data and dumps it on disk to -`<-storageDataPath>/data/small/YYYY_MM/` subdirectory forming a `part` with the following -name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`. Each part consists of two "columns": -values and timestamps. These are sorted and compressed raw time series values. Additionally, part contains -index files for searching for specific series in the values and timestamps files. +VictoriaMetrics buffers the ingested data in memory for up to a second. Then the buffered data is written to in-memory `parts`, +which can be searched during queries. The in-memory `parts` are periodically persisted to disk, so they could survive unclean shutdown +such as out of memory crash, hardware power loss or `SIGKILL` signal. The interval for flushing the in-memory data to disk +can be configured with the `-inmemoryDataFlushInterval` command-line flag (note that too short flush interval may significantly increase disk IO). -`Parts` are periodically merged into the bigger parts. The resulting `part` is constructed -under `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` subdirectory. -When the resulting `part` is complete, it is atomically moved from the `tmp` -to its own subdirectory, while the source parts are atomically removed. The end result is that the source -parts are substituted by a single resulting bigger `part` in the `<-storageDataPath>/data/{small,big}/YYYY_MM/` directory. +In-memory parts are persisted to disk into `part` directories under the `<-storageDataPath>/data/small/YYYY_MM/` folder, +where `YYYY_MM` is the month partition for the stored data. For example, `2022_11` is the partition for `parts` +with [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) from `November 2022`. 
+ +The `part` directory has the following name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`, where: + +- `rowsCount` - the number of [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) stored in the part +- `blocksCount` - the number of blocks stored in the part (see details about blocks below) +- `minTimestamp` and `maxTimestamp` - minimum and maximum timestamps across raw samples stored in the part + +Each `part` consists of `blocks` sorted by internal time series id (aka `TSID`). +Each `block` contains up to 8K [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples), +which belong to a single [time series](https://docs.victoriametrics.com/keyConcepts.html#time-series). +Raw samples in each block are sorted by `timestamp`. Blocks for the same time series are sorted +by the `timestamp` of the first sample. Timestamps and values for all the blocks +are stored in [compressed form](https://faun.pub/victoriametrics-achieving-better-compression-for-time-series-data-than-gorilla-317bc1f95932) +in separate files under `part` directory - `timestamps.bin` and `values.bin`. + +The `part` directory also contains `index.bin` and `metaindex.bin` files - these files contain index +for fast block lookups, which belong to the given `TSID` and cover the given time range. + +`Parts` are periodically merged into bigger parts in background. The background merge provides the following benefits: + +* keeping the number of data files under control, so they don't exceed limits on open files +* improved data compression, since bigger parts are usually compressed better than smaller parts +* improved query speed, since queries over smaller number of parts are executed faster +* various background maintenance tasks such as [de-duplication](#deduplication), [downsampling](#downsampling) + and [freeing up disk space for the deleted time series](#how-to-delete-time-series) are performed during the merge + +Newly added `parts` either successfully appear in the storage or fail to appear. +The newly added `parts` are being created in a temporary directory under `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` folder. +When the newly added `part` is fully written and [fsynced](https://man7.org/linux/man-pages/man2/fsync.2.html) +to a temporary directory, then it is atomically moved to the storage directory. +Thanks to this alogrithm, storage never contains partially created parts, even if hardware power off +occurrs in the middle of writing the `part` to disk - such incompletely written `parts` +are automatically deleted on the next VictoriaMetrics start. + +The same applies to merge process — `parts` are either fully merged into a new `part` or fail to merge, +leaving the source `parts` untouched. VictoriaMetrics doesn't merge parts if their summary size exceeds free disk space. This prevents from potential out of disk space errors during merge. @@ -1387,24 +1419,10 @@ This increases overhead during data querying, since VictoriaMetrics needs to rea bigger number of parts per each request. That's why it is recommended to have at least 20% of free disk space under directory pointed by `-storageDataPath` command-line flag. -Information about merging process is available in [single-node VictoriaMetrics](https://grafana.com/dashboards/10229) -and [clustered VictoriaMetrics](https://grafana.com/grafana/dashboards/11176) Grafana dashboards. 
+Information about merging process is available in [the dashboard for single-node VictoriaMetrics](https://grafana.com/dashboards/10229) +and [the dashboard for VictoriaMetrics cluster](https://grafana.com/grafana/dashboards/11176). See more details in [monitoring docs](#monitoring). -The `merge` process improves compression rate and keeps number of `parts` on disk relatively low. -Benefits of doing the merge process are the following: - -* it improves query performance, since lower number of `parts` are inspected with each query -* it reduces the number of data files, since each `part` contains fixed number of files -* various background maintenance tasks such as [de-duplication](#deduplication), [downsampling](#downsampling) - and [freeing up disk space for the deleted time series](#how-to-delete-time-series) are performed during the merge. - -Newly added `parts` either appear in the storage or fail to appear. -Storage never contains partially created parts. The same applies to merge process — `parts` are either fully -merged into a new `part` or fail to merge. MergeTree doesn't contain partially merged `parts`. -`Part` contents in MergeTree never change. Parts are immutable. They may be only deleted after the merge -to a bigger `part` or when the `part` contents goes outside the configured `-retentionPeriod`. - See [this article](https://valyala.medium.com/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282) for more details. See also [how to work with snapshots](#how-to-work-with-snapshots). @@ -1727,9 +1745,10 @@ and [cardinality explorer docs](#cardinality-explorer). * VictoriaMetrics buffers incoming data in memory for up to a few seconds before flushing it to persistent storage. This may lead to the following "issues": - * Data becomes available for querying in a few seconds after inserting. It is possible to flush in-memory buffers to persistent storage + * Data becomes available for querying in a few seconds after inserting. It is possible to flush in-memory buffers to searchable parts by requesting `/internal/force_flush` http handler. This handler is mostly needed for testing and debugging purposes. * The last few seconds of inserted data may be lost on unclean shutdown (i.e. OOM, `kill -9` or hardware reset). + The `-inmemoryDataFlushInterval` command-line flag allows controlling the frequency of in-memory data flush to persistent storage. See [this article for technical details](https://valyala.medium.com/wal-usage-looks-broken-in-modern-time-series-databases-b62a627ab704). * If VictoriaMetrics works slowly and eats more than a CPU core per 100K ingested data points per second, @@ -2137,6 +2156,8 @@ Pass `-help` to VictoriaMetrics in order to see the list of supported command-li Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metic name if InfluxDB line contains only a single field -influxTrimTimestamp duration Trim timestamps for InfluxDB line protocol data to this duration. Minimum practical duration is 1ms. Higher duration (i.e. 1s) may be used for reducing disk space usage for timestamp data (default 1ms) + -inmemoryDataFlushInterval duration + The interval for guaranteed saving of in-memory data to disk. The saved data survives unclean shutdown such as OOM crash, hardware reset, SIGKILL, etc. Bigger intervals may help increasing lifetime of flash storage with limited write cycles (e.g. Raspberry PI). Smaller intervals increase disk IO load. 
Minimum supported value is 1s (default 5s) -insert.maxQueueDuration duration The maximum duration for waiting in the queue for insert requests due to -maxConcurrentInserts (default 1m0s) -logNewSeries diff --git a/lib/mergeset/inmemory_part.go b/lib/mergeset/inmemory_part.go index c3caca201c..d8da08c973 100644 --- a/lib/mergeset/inmemory_part.go +++ b/lib/mergeset/inmemory_part.go @@ -1,8 +1,12 @@ package mergeset import ( + "fmt" + "path/filepath" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" ) @@ -28,6 +32,36 @@ func (mp *inmemoryPart) Reset() { mp.lensData.Reset() } +// StoreToDisk stores mp to the given path on disk. +func (mp *inmemoryPart) StoreToDisk(path string) error { + if err := fs.MkdirAllIfNotExist(path); err != nil { + return fmt.Errorf("cannot create directory %q: %w", path, err) + } + metaindexPath := path + "/metaindex.bin" + if err := fs.WriteFileAndSync(metaindexPath, mp.metaindexData.B); err != nil { + return fmt.Errorf("cannot store metaindex: %w", err) + } + indexPath := path + "/index.bin" + if err := fs.WriteFileAndSync(indexPath, mp.indexData.B); err != nil { + return fmt.Errorf("cannot store index: %w", err) + } + itemsPath := path + "/items.bin" + if err := fs.WriteFileAndSync(itemsPath, mp.itemsData.B); err != nil { + return fmt.Errorf("cannot store items: %w", err) + } + lensPath := path + "/lens.bin" + if err := fs.WriteFileAndSync(lensPath, mp.lensData.B); err != nil { + return fmt.Errorf("cannot store lens: %w", err) + } + if err := mp.ph.WriteMetadata(path); err != nil { + return fmt.Errorf("cannot store metadata: %w", err) + } + // Sync parent directory in order to make sure the written files remain visible after hardware reset + parentDirPath := filepath.Dir(path) + fs.MustSyncPath(parentDirPath) + return nil +} + // Init initializes mp from ib. func (mp *inmemoryPart) Init(ib *inmemoryBlock) { mp.Reset() diff --git a/lib/mergeset/table.go b/lib/mergeset/table.go index d88cba7cb1..ea18b78add 100644 --- a/lib/mergeset/table.go +++ b/lib/mergeset/table.go @@ -22,10 +22,10 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/syncwg" ) -// maxParts is the maximum number of parts in the table. +// maxInmemoryParts is the maximum number of inmemory parts in the table. // // This number may be reached when the insertion pace outreaches merger pace. -const maxParts = 512 +const maxInmemoryParts = 64 // Default number of parts to merge at once. // @@ -46,6 +46,24 @@ const finalPartsToMerge = 2 // The required time shouldn't exceed a day. const maxPartSize = 400e9 +// The interval for flushing buffered data to parts, so it becomes visible to search. +const pendingItemsFlushInterval = time.Second + +// The interval for guaranteed flush of recently ingested data from memory to on-disk parts, +// so they survive process crash. +var dataFlushInterval = 5 * time.Second + +// SetDataFlushInterval sets the interval for guaranteed flush of recently ingested data from memory to disk. +// +// The data can be flushed from memory to disk more frequently if it doesn't fit the memory limit. +// +// This function must be called before initializing the indexdb. 
+func SetDataFlushInterval(d time.Duration) { + if d > pendingItemsFlushInterval { + dataFlushInterval = d + } +} + // maxItemsPerCachedPart is the maximum items per created part by the merge, // which must be cached in the OS page cache. // @@ -65,20 +83,23 @@ func maxItemsPerCachedPart() uint64 { return maxItems } -// The interval for flushing (converting) recent raw items into parts, -// so they become visible to search. -const rawItemsFlushInterval = time.Second - // Table represents mergeset table. type Table struct { // Atomically updated counters must go first in the struct, so they are properly // aligned to 8 bytes on 32-bit architectures. // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/212 - activeMerges uint64 - mergesCount uint64 - itemsMerged uint64 - assistedMerges uint64 + activeInmemoryMerges uint64 + activeFileMerges uint64 + + inmemoryMergesCount uint64 + fileMergesCount uint64 + + inmemoryItemsMerged uint64 + fileItemsMerged uint64 + + assistedInmemoryMerges uint64 + itemsAdded uint64 itemsAddedSizeBytes uint64 @@ -93,14 +114,20 @@ type Table struct { prepareBlock PrepareBlockCallback isReadOnly *uint32 - partsLock sync.Mutex - parts []*partWrapper - // rawItems contains recently added items that haven't been converted to parts yet. // // rawItems aren't used in search for performance reasons rawItems rawItemsShards + // partsLock protects inmemoryParts and fileParts. + partsLock sync.Mutex + + // inmemoryParts contains inmemory parts. + inmemoryParts []*partWrapper + + // fileParts contains file-backed parts. + fileParts []*partWrapper + snapshotLock sync.RWMutex flockF *os.File @@ -139,10 +166,13 @@ func (riss *rawItemsShards) init() { } func (riss *rawItemsShards) addItems(tb *Table, items [][]byte) { - n := atomic.AddUint32(&riss.shardIdx, 1) shards := riss.shards - idx := n % uint32(len(shards)) - shards[idx].addItems(tb, items) + shardsLen := uint32(len(shards)) + for len(items) > 0 { + n := atomic.AddUint32(&riss.shardIdx, 1) + idx := n % shardsLen + items = shards[idx].addItems(tb, items) + } } func (riss *rawItemsShards) Len() int { @@ -179,8 +209,9 @@ func (ris *rawItemsShard) Len() int { return n } -func (ris *rawItemsShard) addItems(tb *Table, items [][]byte) { - var blocksToFlush []*inmemoryBlock +func (ris *rawItemsShard) addItems(tb *Table, items [][]byte) [][]byte { + var ibsToFlush []*inmemoryBlock + var tailItems [][]byte ris.mu.Lock() ibs := ris.ibs @@ -190,10 +221,17 @@ func (ris *rawItemsShard) addItems(tb *Table, items [][]byte) { ris.ibs = ibs } ib := ibs[len(ibs)-1] - for _, item := range items { + for i, item := range items { if ib.Add(item) { continue } + if len(ibs) >= maxBlocksPerShard { + ibsToFlush = ibs + ibs = make([]*inmemoryBlock, 0, maxBlocksPerShard) + tailItems = items[i:] + atomic.StoreUint64(&ris.lastFlushTime, fasttime.UnixTimestamp()) + break + } ib = getInmemoryBlock() if ib.Add(item) { ibs = append(ibs, ib) @@ -203,17 +241,11 @@ func (ris *rawItemsShard) addItems(tb *Table, items [][]byte) { logger.Panicf("BUG: cannot insert too big item into an empty inmemoryBlock len(item)=%d; the caller should be responsible for avoiding too big items", len(item)) } ris.ibs = ibs - if len(ibs) >= maxBlocksPerShard { - blocksToFlush = append(blocksToFlush, ibs...) 
- for i := range ibs { - ibs[i] = nil - } - ris.ibs = ibs[:0] - atomic.StoreUint64(&ris.lastFlushTime, fasttime.UnixTimestamp()) - } ris.mu.Unlock() - tb.mergeRawItemsBlocks(blocksToFlush, false) + tb.flushBlocksToParts(ibsToFlush, false) + + return tailItems } type partWrapper struct { @@ -224,6 +256,9 @@ type partWrapper struct { refCount uint64 isInMerge bool + + // The deadline when the in-memory part must be flushed to disk. + flushToDiskDeadline time.Time } func (pw *partWrapper) incRef() { @@ -285,7 +320,7 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb flushCallback: flushCallback, prepareBlock: prepareBlock, isReadOnly: isReadOnly, - parts: pws, + fileParts: pws, mergeIdx: uint64(time.Now().UnixNano()), flockF: flockF, stopCh: make(chan struct{}), @@ -296,7 +331,7 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb var m TableMetrics tb.UpdateMetrics(&m) logger.Infof("table %q has been opened in %.3f seconds; partsCount: %d; blocksCount: %d, itemsCount: %d; sizeBytes: %d", - path, time.Since(startTime).Seconds(), m.PartsCount, m.BlocksCount, m.ItemsCount, m.SizeBytes) + path, time.Since(startTime).Seconds(), m.FilePartsCount, m.FileBlocksCount, m.FileItemsCount, m.FileSizeBytes) if flushCallback != nil { tb.flushCallbackWorkerWG.Add(1) @@ -323,8 +358,9 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb } func (tb *Table) startBackgroundWorkers() { - tb.startPartMergers() - tb.startRawItemsFlusher() + tb.startMergeWorkers() + tb.startInmemoryPartsFlusher() + tb.startPendingItemsFlusher() } // MustClose closes the table. @@ -339,42 +375,26 @@ func (tb *Table) MustClose() { logger.Infof("flushing inmemory parts to files on %q...", tb.path) startTime = time.Now() - // Flush raw items the last time before exit. - tb.flushPendingItems(true) - - // Flush inmemory parts to disk. - var pws []*partWrapper - tb.partsLock.Lock() - for _, pw := range tb.parts { - if pw.mp == nil { - continue - } - if pw.isInMerge { - logger.Panicf("BUG: the inmemory part %s mustn't be in merge after stopping parts merger in %q", &pw.mp.ph, tb.path) - } - pw.isInMerge = true - pws = append(pws, pw) - } - tb.partsLock.Unlock() - - if err := tb.mergePartsOptimal(pws); err != nil { - logger.Panicf("FATAL: cannot flush inmemory parts to files in %q: %s", tb.path, err) - } - logger.Infof("%d inmemory parts have been flushed to files in %.3f seconds on %q", len(pws), time.Since(startTime).Seconds(), tb.path) + // Flush inmemory items the last time before exit. + tb.flushInmemoryItems() logger.Infof("waiting for flush callback worker to stop on %q...", tb.path) startTime = time.Now() tb.flushCallbackWorkerWG.Wait() logger.Infof("flush callback worker stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), tb.path) - // Remove references to parts from the tb, so they may be eventually closed - // after all the searches are done. + // Remove references to parts from the tb, so they may be eventually closed after all the searches are done. tb.partsLock.Lock() - parts := tb.parts - tb.parts = nil + inmemoryParts := tb.inmemoryParts + fileParts := tb.fileParts + tb.inmemoryParts = nil + tb.fileParts = nil tb.partsLock.Unlock() - for _, pw := range parts { + for _, pw := range inmemoryParts { + pw.decRef() + } + for _, pw := range fileParts { pw.decRef() } @@ -391,20 +411,33 @@ func (tb *Table) Path() string { // TableMetrics contains essential metrics for the Table. 
type TableMetrics struct { - ActiveMerges uint64 - MergesCount uint64 - ItemsMerged uint64 - AssistedMerges uint64 + ActiveInmemoryMerges uint64 + ActiveFileMerges uint64 + + InmemoryMergesCount uint64 + FileMergesCount uint64 + + InmemoryItemsMerged uint64 + FileItemsMerged uint64 + + AssistedInmemoryMerges uint64 + ItemsAdded uint64 ItemsAddedSizeBytes uint64 PendingItems uint64 - PartsCount uint64 + InmemoryPartsCount uint64 + FilePartsCount uint64 - BlocksCount uint64 - ItemsCount uint64 - SizeBytes uint64 + InmemoryBlocksCount uint64 + FileBlocksCount uint64 + + InmemoryItemsCount uint64 + FileItemsCount uint64 + + InmemorySizeBytes uint64 + FileSizeBytes uint64 DataBlocksCacheSize uint64 DataBlocksCacheSizeBytes uint64 @@ -421,26 +454,46 @@ type TableMetrics struct { PartsRefCount uint64 } +// TotalItemsCount returns the total number of items in the table. +func (tm *TableMetrics) TotalItemsCount() uint64 { + return tm.InmemoryItemsCount + tm.FileItemsCount +} + // UpdateMetrics updates m with metrics from tb. func (tb *Table) UpdateMetrics(m *TableMetrics) { - m.ActiveMerges += atomic.LoadUint64(&tb.activeMerges) - m.MergesCount += atomic.LoadUint64(&tb.mergesCount) - m.ItemsMerged += atomic.LoadUint64(&tb.itemsMerged) - m.AssistedMerges += atomic.LoadUint64(&tb.assistedMerges) + m.ActiveInmemoryMerges += atomic.LoadUint64(&tb.activeInmemoryMerges) + m.ActiveFileMerges += atomic.LoadUint64(&tb.activeFileMerges) + + m.InmemoryMergesCount += atomic.LoadUint64(&tb.inmemoryMergesCount) + m.FileMergesCount += atomic.LoadUint64(&tb.fileMergesCount) + + m.InmemoryItemsMerged += atomic.LoadUint64(&tb.inmemoryItemsMerged) + m.FileItemsMerged += atomic.LoadUint64(&tb.fileItemsMerged) + + m.AssistedInmemoryMerges += atomic.LoadUint64(&tb.assistedInmemoryMerges) + m.ItemsAdded += atomic.LoadUint64(&tb.itemsAdded) m.ItemsAddedSizeBytes += atomic.LoadUint64(&tb.itemsAddedSizeBytes) m.PendingItems += uint64(tb.rawItems.Len()) tb.partsLock.Lock() - m.PartsCount += uint64(len(tb.parts)) - for _, pw := range tb.parts { + + m.InmemoryPartsCount += uint64(len(tb.inmemoryParts)) + for _, pw := range tb.inmemoryParts { p := pw.p + m.InmemoryBlocksCount += p.ph.blocksCount + m.InmemoryItemsCount += p.ph.itemsCount + m.InmemorySizeBytes += p.size + m.PartsRefCount += atomic.LoadUint64(&pw.refCount) + } - m.BlocksCount += p.ph.blocksCount - m.ItemsCount += p.ph.itemsCount - m.SizeBytes += p.size - + m.FilePartsCount += uint64(len(tb.fileParts)) + for _, pw := range tb.fileParts { + p := pw.p + m.FileBlocksCount += p.ph.blocksCount + m.FileItemsCount += p.ph.itemsCount + m.FileSizeBytes += p.size m.PartsRefCount += atomic.LoadUint64(&pw.refCount) } tb.partsLock.Unlock() @@ -477,10 +530,14 @@ func (tb *Table) AddItems(items [][]byte) { // The appended parts must be released with putParts. func (tb *Table) getParts(dst []*partWrapper) []*partWrapper { tb.partsLock.Lock() - for _, pw := range tb.parts { + for _, pw := range tb.inmemoryParts { pw.incRef() } - dst = append(dst, tb.parts...) + for _, pw := range tb.fileParts { + pw.incRef() + } + dst = append(dst, tb.inmemoryParts...) + dst = append(dst, tb.fileParts...) 
tb.partsLock.Unlock() return dst @@ -493,79 +550,142 @@ func (tb *Table) putParts(pws []*partWrapper) { } } -func (tb *Table) startRawItemsFlusher() { +func (tb *Table) mergePartsOptimal(pws []*partWrapper) error { + sortPartsForOptimalMerge(pws) + for len(pws) > 0 { + n := defaultPartsToMerge + if n > len(pws) { + n = len(pws) + } + pwsChunk := pws[:n] + pws = pws[n:] + err := tb.mergeParts(pwsChunk, nil, true) + if err == nil { + continue + } + tb.releasePartsToMerge(pws) + return fmt.Errorf("cannot optimally merge %d parts: %w", n, err) + } + return nil +} + +// DebugFlush flushes all the added items to the storage, so they become visible to search. +// +// This function is only for debugging and testing. +func (tb *Table) DebugFlush() { + tb.flushPendingItems(nil, true) + + // Wait for background flushers to finish. + tb.rawItemsPendingFlushesWG.Wait() +} + +func (tb *Table) startInmemoryPartsFlusher() { tb.wg.Add(1) go func() { - tb.rawItemsFlusher() + tb.inmemoryPartsFlusher() tb.wg.Done() }() } -func (tb *Table) rawItemsFlusher() { - ticker := time.NewTicker(rawItemsFlushInterval) +func (tb *Table) startPendingItemsFlusher() { + tb.wg.Add(1) + go func() { + tb.pendingItemsFlusher() + tb.wg.Done() + }() +} + +func (tb *Table) inmemoryPartsFlusher() { + ticker := time.NewTicker(dataFlushInterval) defer ticker.Stop() for { select { case <-tb.stopCh: return case <-ticker.C: - tb.flushPendingItems(false) + tb.flushInmemoryParts(false) } } } -func (tb *Table) mergePartsOptimal(pws []*partWrapper) error { - for len(pws) > defaultPartsToMerge { - pwsChunk := pws[:defaultPartsToMerge] - pws = pws[defaultPartsToMerge:] - if err := tb.mergeParts(pwsChunk, nil, false); err != nil { - tb.releasePartsToMerge(pws) - return fmt.Errorf("cannot merge %d parts: %w", defaultPartsToMerge, err) +func (tb *Table) pendingItemsFlusher() { + ticker := time.NewTicker(pendingItemsFlushInterval) + defer ticker.Stop() + var ibs []*inmemoryBlock + for { + select { + case <-tb.stopCh: + return + case <-ticker.C: + ibs = tb.flushPendingItems(ibs[:0], false) + for i := range ibs { + ibs[i] = nil + } } } - if len(pws) == 0 { - return nil +} + +func (tb *Table) flushPendingItems(dst []*inmemoryBlock, isFinal bool) []*inmemoryBlock { + return tb.rawItems.flush(tb, dst, isFinal) +} + +func (tb *Table) flushInmemoryItems() { + tb.rawItems.flush(tb, nil, true) + tb.flushInmemoryParts(true) +} + +func (tb *Table) flushInmemoryParts(isFinal bool) { + for { + currentTime := time.Now() + var pws []*partWrapper + + tb.partsLock.Lock() + for _, pw := range tb.inmemoryParts { + if !pw.isInMerge && (isFinal || pw.flushToDiskDeadline.Before(currentTime)) { + pw.isInMerge = true + pws = append(pws, pw) + } + } + tb.partsLock.Unlock() + + if err := tb.mergePartsOptimal(pws); err != nil { + logger.Panicf("FATAL: cannot merge in-memory parts: %s", err) + } + if !isFinal { + return + } + tb.partsLock.Lock() + n := len(tb.inmemoryParts) + tb.partsLock.Unlock() + if n == 0 { + // All the in-memory parts were flushed to disk. + return + } + // Some parts weren't flushed to disk because they were being merged. + // Sleep for a while and try flushing them again. + time.Sleep(10 * time.Millisecond) } - if err := tb.mergeParts(pws, nil, false); err != nil { - return fmt.Errorf("cannot merge %d parts: %w", len(pws), err) - } - return nil } -// DebugFlush flushes all the added items to the storage, -// so they become visible to search. -// -// This function is only for debugging and testing. 
-func (tb *Table) DebugFlush() { - tb.flushPendingItems(true) - - // Wait for background flushers to finish. - tb.rawItemsPendingFlushesWG.Wait() -} - -func (tb *Table) flushPendingItems(isFinal bool) { - tb.rawItems.flush(tb, isFinal) -} - -func (riss *rawItemsShards) flush(tb *Table, isFinal bool) { +func (riss *rawItemsShards) flush(tb *Table, dst []*inmemoryBlock, isFinal bool) []*inmemoryBlock { tb.rawItemsPendingFlushesWG.Add(1) defer tb.rawItemsPendingFlushesWG.Done() - var blocksToFlush []*inmemoryBlock for i := range riss.shards { - blocksToFlush = riss.shards[i].appendBlocksToFlush(blocksToFlush, tb, isFinal) + dst = riss.shards[i].appendBlocksToFlush(dst, tb, isFinal) } - tb.mergeRawItemsBlocks(blocksToFlush, isFinal) + tb.flushBlocksToParts(dst, isFinal) + return dst } func (ris *rawItemsShard) appendBlocksToFlush(dst []*inmemoryBlock, tb *Table, isFinal bool) []*inmemoryBlock { currentTime := fasttime.UnixTimestamp() - flushSeconds := int64(rawItemsFlushInterval.Seconds()) + flushSeconds := int64(pendingItemsFlushInterval.Seconds()) if flushSeconds <= 0 { flushSeconds = 1 } lastFlushTime := atomic.LoadUint64(&ris.lastFlushTime) - if !isFinal && currentTime <= lastFlushTime+uint64(flushSeconds) { + if !isFinal && currentTime < lastFlushTime+uint64(flushSeconds) { // Fast path - nothing to flush return dst } @@ -582,27 +702,29 @@ func (ris *rawItemsShard) appendBlocksToFlush(dst []*inmemoryBlock, tb *Table, i return dst } -func (tb *Table) mergeRawItemsBlocks(ibs []*inmemoryBlock, isFinal bool) { +func (tb *Table) flushBlocksToParts(ibs []*inmemoryBlock, isFinal bool) { if len(ibs) == 0 { return } - - pws := make([]*partWrapper, 0, (len(ibs)+defaultPartsToMerge-1)/defaultPartsToMerge) var pwsLock sync.Mutex - var wg sync.WaitGroup + pws := make([]*partWrapper, 0, (len(ibs)+defaultPartsToMerge-1)/defaultPartsToMerge) + wg := getWaitGroup() for len(ibs) > 0 { n := defaultPartsToMerge if n > len(ibs) { n = len(ibs) } wg.Add(1) - go func(ibsPart []*inmemoryBlock) { - defer wg.Done() - pw := tb.mergeInmemoryBlocks(ibsPart) + flushConcurrencyCh <- struct{}{} + go func(ibsChunk []*inmemoryBlock) { + defer func() { + <-flushConcurrencyCh + wg.Done() + }() + pw := tb.createInmemoryPart(ibsChunk) if pw == nil { return } - pw.isInMerge = true pwsLock.Lock() pws = append(pws, pw) pwsLock.Unlock() @@ -610,49 +732,78 @@ func (tb *Table) mergeRawItemsBlocks(ibs []*inmemoryBlock, isFinal bool) { ibs = ibs[n:] } wg.Wait() - if len(pws) > 0 { - if err := tb.mergeParts(pws, nil, true); err != nil { - logger.Panicf("FATAL: cannot merge raw parts: %s", err) - } - if tb.flushCallback != nil { - if isFinal { - tb.flushCallback() - } else { - atomic.CompareAndSwapUint32(&tb.needFlushCallbackCall, 0, 1) - } + putWaitGroup(wg) + + tb.partsLock.Lock() + tb.inmemoryParts = append(tb.inmemoryParts, pws...) + tb.partsLock.Unlock() + + flushConcurrencyCh <- struct{}{} + tb.assistedMergeForInmemoryParts() + <-flushConcurrencyCh + // There is no need in assited merge for file parts, + // since the bottleneck is possible only at inmemory parts. 
+ + if tb.flushCallback != nil { + if isFinal { + tb.flushCallback() + } else { + atomic.CompareAndSwapUint32(&tb.needFlushCallbackCall, 0, 1) } } +} +var flushConcurrencyCh = make(chan struct{}, cgroup.AvailableCPUs()) + +func (tb *Table) assistedMergeForInmemoryParts() { for { tb.partsLock.Lock() - ok := len(tb.parts) <= maxParts + ok := getNotInMergePartsCount(tb.inmemoryParts) < maxInmemoryParts tb.partsLock.Unlock() if ok { return } - // The added part exceeds maxParts count. Assist with merging other parts. - // // Prioritize assisted merges over searches. storagepacelimiter.Search.Inc() - err := tb.mergeExistingParts(false) + err := tb.mergeInmemoryParts() storagepacelimiter.Search.Dec() if err == nil { - atomic.AddUint64(&tb.assistedMerges, 1) + atomic.AddUint64(&tb.assistedInmemoryMerges, 1) continue } - if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) || errors.Is(err, errReadOnlyMode) { + if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) { return } - logger.Panicf("FATAL: cannot merge small parts: %s", err) + logger.Panicf("FATAL: cannot assist with merging inmemory parts: %s", err) } } -func (tb *Table) mergeInmemoryBlocks(ibs []*inmemoryBlock) *partWrapper { - atomic.AddUint64(&tb.mergesCount, 1) - atomic.AddUint64(&tb.activeMerges, 1) - defer atomic.AddUint64(&tb.activeMerges, ^uint64(0)) +func getNotInMergePartsCount(pws []*partWrapper) int { + n := 0 + for _, pw := range pws { + if !pw.isInMerge { + n++ + } + } + return n +} +func getWaitGroup() *sync.WaitGroup { + v := wgPool.Get() + if v == nil { + return &sync.WaitGroup{} + } + return v.(*sync.WaitGroup) +} + +func putWaitGroup(wg *sync.WaitGroup) { + wgPool.Put(wg) +} + +var wgPool sync.Pool + +func (tb *Table) createInmemoryPart(ibs []*inmemoryBlock) *partWrapper { outItemsCount := uint64(0) for _, ib := range ibs { outItemsCount += uint64(ib.Len()) @@ -672,16 +823,14 @@ func (tb *Table) mergeInmemoryBlocks(ibs []*inmemoryBlock) *partWrapper { if len(bsrs) == 0 { return nil } + flushToDiskDeadline := time.Now().Add(dataFlushInterval) if len(bsrs) == 1 { // Nothing to merge. Just return a single inmemory part. + bsr := bsrs[0] mp := &inmemoryPart{} - mp.Init(&bsrs[0].Block) - p := mp.NewPart() - return &partWrapper{ - p: p, - mp: mp, - refCount: 1, - } + mp.Init(&bsr.Block) + putBlockStreamReader(bsr) + return newPartWrapperFromInmemoryPart(mp, flushToDiskDeadline) } // Prepare blockStreamWriter for destination part. @@ -693,7 +842,10 @@ func (tb *Table) mergeInmemoryBlocks(ibs []*inmemoryBlock) *partWrapper { // Merge parts. // The merge shouldn't be interrupted by stopCh, // since it may be final after stopCh is closed. 
- err := mergeBlockStreams(&mpDst.ph, bsw, bsrs, tb.prepareBlock, nil, &tb.itemsMerged) + atomic.AddUint64(&tb.activeInmemoryMerges, 1) + err := mergeBlockStreams(&mpDst.ph, bsw, bsrs, tb.prepareBlock, nil, &tb.inmemoryItemsMerged) + atomic.AddUint64(&tb.activeInmemoryMerges, ^uint64(0)) + atomic.AddUint64(&tb.inmemoryMergesCount, 1) if err != nil { logger.Panicf("FATAL: cannot merge inmemoryBlocks: %s", err) } @@ -701,33 +853,64 @@ func (tb *Table) mergeInmemoryBlocks(ibs []*inmemoryBlock) *partWrapper { for _, bsr := range bsrs { putBlockStreamReader(bsr) } + return newPartWrapperFromInmemoryPart(mpDst, flushToDiskDeadline) +} - p := mpDst.NewPart() +func newPartWrapperFromInmemoryPart(mp *inmemoryPart, flushToDiskDeadline time.Time) *partWrapper { + p := mp.NewPart() return &partWrapper{ - p: p, - mp: mpDst, - refCount: 1, + p: p, + mp: mp, + refCount: 1, + flushToDiskDeadline: flushToDiskDeadline, } } -func (tb *Table) startPartMergers() { - for i := 0; i < mergeWorkersCount; i++ { +func (tb *Table) startMergeWorkers() { + for i := 0; i < cap(mergeWorkersLimitCh); i++ { tb.wg.Add(1) go func() { - if err := tb.partMerger(); err != nil { - logger.Panicf("FATAL: unrecoverable error when merging parts in %q: %s", tb.path, err) - } + tb.mergeWorker() tb.wg.Done() }() } } +func getMaxInmemoryPartSize() uint64 { + // Allow up to 5% of memory for in-memory parts. + n := uint64(0.05 * float64(memory.Allowed()) / maxInmemoryParts) + if n < 1e6 { + n = 1e6 + } + return n +} + +func (tb *Table) getMaxFilePartSize() uint64 { + n := fs.MustGetFreeSpace(tb.path) + // Divide free space by the max number of concurrent merges. + maxOutBytes := n / uint64(cap(mergeWorkersLimitCh)) + if maxOutBytes > maxPartSize { + maxOutBytes = maxPartSize + } + return maxOutBytes +} + func (tb *Table) canBackgroundMerge() bool { return atomic.LoadUint32(tb.isReadOnly) == 0 } var errReadOnlyMode = fmt.Errorf("storage is in readonly mode") +func (tb *Table) mergeInmemoryParts() error { + maxOutBytes := tb.getMaxFilePartSize() + + tb.partsLock.Lock() + pws := getPartsToMerge(tb.inmemoryParts, maxOutBytes, false) + tb.partsLock.Unlock() + + return tb.mergeParts(pws, tb.stopCh, false) +} + func (tb *Table) mergeExistingParts(isFinal bool) error { if !tb.canBackgroundMerge() { // Do not perform background merge in read-only mode @@ -735,32 +918,32 @@ func (tb *Table) mergeExistingParts(isFinal bool) error { // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2603 return errReadOnlyMode } - n := fs.MustGetFreeSpace(tb.path) - // Divide free space by the max number of concurrent merges. - maxOutBytes := n / uint64(mergeWorkersCount) - if maxOutBytes > maxPartSize { - maxOutBytes = maxPartSize - } + maxOutBytes := tb.getMaxFilePartSize() tb.partsLock.Lock() - pws := getPartsToMerge(tb.parts, maxOutBytes, isFinal) + dst := make([]*partWrapper, 0, len(tb.inmemoryParts)+len(tb.fileParts)) + dst = append(dst, tb.inmemoryParts...) + dst = append(dst, tb.fileParts...) 
+ pws := getPartsToMerge(dst, maxOutBytes, isFinal) tb.partsLock.Unlock() - return tb.mergeParts(pws, tb.stopCh, false) + return tb.mergeParts(pws, tb.stopCh, isFinal) } const ( - minMergeSleepTime = time.Millisecond - maxMergeSleepTime = time.Second + minMergeSleepTime = 10 * time.Millisecond + maxMergeSleepTime = 10 * time.Second ) -func (tb *Table) partMerger() error { +func (tb *Table) mergeWorker() { sleepTime := minMergeSleepTime var lastMergeTime uint64 isFinal := false t := time.NewTimer(sleepTime) for { + mergeWorkersLimitCh <- struct{}{} err := tb.mergeExistingParts(isFinal) + <-mergeWorkersLimitCh if err == nil { // Try merging additional parts. sleepTime = minMergeSleepTime @@ -770,12 +953,13 @@ func (tb *Table) partMerger() error { } if errors.Is(err, errForciblyStopped) { // The merger has been stopped. - return nil + return } if !errors.Is(err, errNothingToMerge) && !errors.Is(err, errReadOnlyMode) { - return err + // Unexpected error. + logger.Panicf("FATAL: unrecoverable error when merging inmemory parts in %q: %s", tb.path, err) } - if fasttime.UnixTimestamp()-lastMergeTime > 30 { + if finalMergeDelaySeconds > 0 && fasttime.UnixTimestamp()-lastMergeTime > finalMergeDelaySeconds { // We have free time for merging into bigger parts. // This should improve select performance. lastMergeTime = fasttime.UnixTimestamp() @@ -790,13 +974,27 @@ func (tb *Table) partMerger() error { } select { case <-tb.stopCh: - return nil + return case <-t.C: t.Reset(sleepTime) } } } +// Disable final merge by default, since it may lead to high disk IO and CPU usage +// after some inactivity time. +var finalMergeDelaySeconds = uint64(0) + +// SetFinalMergeDelay sets the delay before doing final merge for Table without newly ingested data. +// +// This function may be called only before Table initialization. +func SetFinalMergeDelay(delay time.Duration) { + if delay <= 0 { + return + } + finalMergeDelaySeconds = uint64(delay.Seconds() + 1) +} + var errNothingToMerge = fmt.Errorf("nothing to merge") func (tb *Table) releasePartsToMerge(pws []*partWrapper) { @@ -810,150 +1008,315 @@ func (tb *Table) releasePartsToMerge(pws []*partWrapper) { tb.partsLock.Unlock() } -// mergeParts merges pws. +// mergeParts merges pws to a single resulting part. // // Merging is immediately stopped if stopCh is closed. // +// If isFinal is set, then the resulting part will be stored to disk. +// // All the parts inside pws must have isInMerge field set to true. -func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterParts bool) error { +func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isFinal bool) error { if len(pws) == 0 { // Nothing to merge. return errNothingToMerge } defer tb.releasePartsToMerge(pws) - atomic.AddUint64(&tb.mergesCount, 1) - atomic.AddUint64(&tb.activeMerges, 1) - defer atomic.AddUint64(&tb.activeMerges, ^uint64(0)) - startTime := time.Now() - // Prepare blockStreamReaders for source parts. - bsrs := make([]*blockStreamReader, 0, len(pws)) - defer func() { + // Initialize destination paths. + dstPartType := getDstPartType(pws, isFinal) + tmpPartPath, mergeIdx := tb.getDstPartPaths(dstPartType) + + if isFinal && len(pws) == 1 && pws[0].mp != nil { + // Fast path: flush a single in-memory part to disk. 
+ mp := pws[0].mp + if tmpPartPath == "" { + logger.Panicf("BUG: tmpPartPath must be non-empty") + } + if err := mp.StoreToDisk(tmpPartPath); err != nil { + return fmt.Errorf("cannot store in-memory part to %q: %w", tmpPartPath, err) + } + pwNew, err := tb.openCreatedPart(&mp.ph, pws, nil, tmpPartPath, mergeIdx) + if err != nil { + return fmt.Errorf("cannot atomically register the created part: %w", err) + } + tb.swapSrcWithDstParts(pws, pwNew, dstPartType) + return nil + } + + // Prepare BlockStreamReaders for source parts. + bsrs, err := openBlockStreamReaders(pws) + if err != nil { + return err + } + closeBlockStreamReaders := func() { for _, bsr := range bsrs { putBlockStreamReader(bsr) } - }() + bsrs = nil + } + + // Prepare BlockStreamWriter for destination part. + srcSize := uint64(0) + srcItemsCount := uint64(0) + srcBlocksCount := uint64(0) + for _, pw := range pws { + srcSize += pw.p.size + srcItemsCount += pw.p.ph.itemsCount + srcBlocksCount += pw.p.ph.blocksCount + } + compressLevel := getCompressLevel(srcItemsCount) + bsw := getBlockStreamWriter() + var mpNew *inmemoryPart + if dstPartType == partInmemory { + mpNew = &inmemoryPart{} + bsw.InitFromInmemoryPart(mpNew, compressLevel) + } else { + if tmpPartPath == "" { + logger.Panicf("BUG: tmpPartPath must be non-empty") + } + nocache := srcItemsCount > maxItemsPerCachedPart() + if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil { + closeBlockStreamReaders() + return fmt.Errorf("cannot create destination part at %q: %w", tmpPartPath, err) + } + } + + // Merge source parts to destination part. + ph, err := tb.mergePartsInternal(tmpPartPath, bsw, bsrs, dstPartType, stopCh) + putBlockStreamWriter(bsw) + closeBlockStreamReaders() + if err != nil { + return fmt.Errorf("cannot merge %d parts: %w", len(pws), err) + } + if mpNew != nil { + // Update partHeader for destination inmemory part after the merge. + mpNew.ph = *ph + } + + // Atomically move the created part from tmpPartPath to its destination + // and swap the source parts with the newly created part. + pwNew, err := tb.openCreatedPart(ph, pws, mpNew, tmpPartPath, mergeIdx) + if err != nil { + return fmt.Errorf("cannot atomically register the created part: %w", err) + } + tb.swapSrcWithDstParts(pws, pwNew, dstPartType) + + d := time.Since(startTime) + if d <= 30*time.Second { + return nil + } + + // Log stats for long merges. 
+ dstItemsCount := uint64(0) + dstBlocksCount := uint64(0) + dstSize := uint64(0) + dstPartPath := "" + if pwNew != nil { + pDst := pwNew.p + dstItemsCount = pDst.ph.itemsCount + dstBlocksCount = pDst.ph.blocksCount + dstSize = pDst.size + dstPartPath = pDst.path + } + durationSecs := d.Seconds() + itemsPerSec := int(float64(srcItemsCount) / durationSecs) + logger.Infof("merged (%d parts, %d items, %d blocks, %d bytes) into (1 part, %d items, %d blocks, %d bytes) in %.3f seconds at %d items/sec to %q", + len(pws), srcItemsCount, srcBlocksCount, srcSize, dstItemsCount, dstBlocksCount, dstSize, durationSecs, itemsPerSec, dstPartPath) + + return nil +} + +func getFlushToDiskDeadline(pws []*partWrapper) time.Time { + d := pws[0].flushToDiskDeadline + for _, pw := range pws[1:] { + if pw.flushToDiskDeadline.Before(d) { + d = pw.flushToDiskDeadline + } + } + return d +} + +type partType int + +var ( + partInmemory = partType(0) + partFile = partType(1) +) + +func getDstPartType(pws []*partWrapper, isFinal bool) partType { + dstPartSize := getPartsSize(pws) + if isFinal || dstPartSize > getMaxInmemoryPartSize() { + return partFile + } + if !areAllInmemoryParts(pws) { + // If at least a single source part is located in file, + // then the destination part must be in file for durability reasons. + return partFile + } + return partInmemory +} + +func (tb *Table) getDstPartPaths(dstPartType partType) (string, uint64) { + tmpPartPath := "" + mergeIdx := tb.nextMergeIdx() + switch dstPartType { + case partInmemory: + case partFile: + tmpPartPath = fmt.Sprintf("%s/tmp/%016X", tb.path, mergeIdx) + default: + logger.Panicf("BUG: unknown partType=%d", dstPartType) + } + return tmpPartPath, mergeIdx +} + +func openBlockStreamReaders(pws []*partWrapper) ([]*blockStreamReader, error) { + bsrs := make([]*blockStreamReader, 0, len(pws)) for _, pw := range pws { bsr := getBlockStreamReader() if pw.mp != nil { - if !isOuterParts { - logger.Panicf("BUG: inmemory part must be always outer") - } bsr.InitFromInmemoryPart(pw.mp) } else { if err := bsr.InitFromFilePart(pw.p.path); err != nil { - return fmt.Errorf("cannot open source part for merging: %w", err) + for _, bsr := range bsrs { + putBlockStreamReader(bsr) + } + return nil, fmt.Errorf("cannot open source part for merging: %w", err) } } bsrs = append(bsrs, bsr) } + return bsrs, nil +} - outItemsCount := uint64(0) - outBlocksCount := uint64(0) - for _, pw := range pws { - outItemsCount += pw.p.ph.itemsCount - outBlocksCount += pw.p.ph.blocksCount - } - nocache := true - if outItemsCount < maxItemsPerCachedPart() { - // Cache small (i.e. recent) output parts in OS file cache, - // since there is high chance they will be read soon. - nocache = false - } - - // Prepare blockStreamWriter for destination part. - mergeIdx := tb.nextMergeIdx() - tmpPartPath := fmt.Sprintf("%s/tmp/%016X", tb.path, mergeIdx) - bsw := getBlockStreamWriter() - compressLevel := getCompressLevel(outItemsCount) - if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil { - return fmt.Errorf("cannot create destination part %q: %w", tmpPartPath, err) - } - - // Merge parts into a temporary location. 
+func (tb *Table) mergePartsInternal(tmpPartPath string, bsw *blockStreamWriter, bsrs []*blockStreamReader, dstPartType partType, stopCh <-chan struct{}) (*partHeader, error) { var ph partHeader - err := mergeBlockStreams(&ph, bsw, bsrs, tb.prepareBlock, stopCh, &tb.itemsMerged) - putBlockStreamWriter(bsw) + var itemsMerged *uint64 + var mergesCount *uint64 + var activeMerges *uint64 + switch dstPartType { + case partInmemory: + itemsMerged = &tb.inmemoryItemsMerged + mergesCount = &tb.inmemoryMergesCount + activeMerges = &tb.activeInmemoryMerges + case partFile: + itemsMerged = &tb.fileItemsMerged + mergesCount = &tb.fileMergesCount + activeMerges = &tb.activeFileMerges + default: + logger.Panicf("BUG: unknown partType=%d", dstPartType) + } + atomic.AddUint64(activeMerges, 1) + err := mergeBlockStreams(&ph, bsw, bsrs, tb.prepareBlock, stopCh, itemsMerged) + atomic.AddUint64(activeMerges, ^uint64(0)) + atomic.AddUint64(mergesCount, 1) if err != nil { - return fmt.Errorf("error when merging parts to %q: %w", tmpPartPath, err) + return nil, fmt.Errorf("cannot merge parts to %q: %w", tmpPartPath, err) } - if err := ph.WriteMetadata(tmpPartPath); err != nil { - return fmt.Errorf("cannot write metadata to destination part %q: %w", tmpPartPath, err) - } - - // Close bsrs (aka source parts). - for _, bsr := range bsrs { - putBlockStreamReader(bsr) - } - bsrs = nil - - // Create a transaction for atomic deleting old parts and moving - // new part to its destination place. - var bb bytesutil.ByteBuffer - for _, pw := range pws { - if pw.mp == nil { - fmt.Fprintf(&bb, "%s\n", pw.p.path) + if tmpPartPath != "" { + if err := ph.WriteMetadata(tmpPartPath); err != nil { + return nil, fmt.Errorf("cannot write metadata to destination part %q: %w", tmpPartPath, err) } } - dstPartPath := ph.Path(tb.path, mergeIdx) - fmt.Fprintf(&bb, "%s -> %s\n", tmpPartPath, dstPartPath) - txnPath := fmt.Sprintf("%s/txn/%016X", tb.path, mergeIdx) - if err := fs.WriteFileAtomically(txnPath, bb.B, false); err != nil { - return fmt.Errorf("cannot create transaction file %q: %w", txnPath, err) - } + return &ph, nil +} - // Run the created transaction. - if err := runTransaction(&tb.snapshotLock, tb.path, txnPath); err != nil { - return fmt.Errorf("cannot execute transaction %q: %w", txnPath, err) - } +func (tb *Table) openCreatedPart(ph *partHeader, pws []*partWrapper, mpNew *inmemoryPart, tmpPartPath string, mergeIdx uint64) (*partWrapper, error) { + dstPartPath := "" + if mpNew == nil || !areAllInmemoryParts(pws) { + // Either source or destination parts are located on disk. + // Create a transaction for atomic deleting of old parts and moving new part to its destination on disk. + var bb bytesutil.ByteBuffer + for _, pw := range pws { + if pw.mp == nil { + fmt.Fprintf(&bb, "%s\n", pw.p.path) + } + } + dstPartPath = ph.Path(tb.path, mergeIdx) + fmt.Fprintf(&bb, "%s -> %s\n", tmpPartPath, dstPartPath) + txnPath := fmt.Sprintf("%s/txn/%016X", tb.path, mergeIdx) + if err := fs.WriteFileAtomically(txnPath, bb.B, false); err != nil { + return nil, fmt.Errorf("cannot create transaction file %q: %w", txnPath, err) + } - // Open the merged part. - newP, err := openFilePart(dstPartPath) + // Run the created transaction. + if err := runTransaction(&tb.snapshotLock, tb.path, txnPath); err != nil { + return nil, fmt.Errorf("cannot execute transaction %q: %w", txnPath, err) + } + } + // Open the created part. + if mpNew != nil { + // Open the created part from memory. 
+ flushToDiskDeadline := getFlushToDiskDeadline(pws) + pwNew := newPartWrapperFromInmemoryPart(mpNew, flushToDiskDeadline) + return pwNew, nil + } + // Open the created part from disk. + pNew, err := openFilePart(dstPartPath) if err != nil { - return fmt.Errorf("cannot open merged part %q: %w", dstPartPath, err) + return nil, fmt.Errorf("cannot open merged part %q: %w", dstPartPath, err) } - newPSize := newP.size - newPW := &partWrapper{ - p: newP, + pwNew := &partWrapper{ + p: pNew, refCount: 1, } + return pwNew, nil +} - // Atomically remove old parts and add new part. +func areAllInmemoryParts(pws []*partWrapper) bool { + for _, pw := range pws { + if pw.mp == nil { + return false + } + } + return true +} + +func (tb *Table) swapSrcWithDstParts(pws []*partWrapper, pwNew *partWrapper, dstPartType partType) { + // Atomically unregister old parts and add new part to tb. m := make(map[*partWrapper]bool, len(pws)) for _, pw := range pws { m[pw] = true } if len(m) != len(pws) { - logger.Panicf("BUG: %d duplicate parts found in the merge of %d parts", len(pws)-len(m), len(pws)) + logger.Panicf("BUG: %d duplicate parts found when merging %d parts", len(pws)-len(m), len(pws)) } - removedParts := 0 + removedInmemoryParts := 0 + removedFileParts := 0 + tb.partsLock.Lock() - tb.parts, removedParts = removeParts(tb.parts, m) - tb.parts = append(tb.parts, newPW) + tb.inmemoryParts, removedInmemoryParts = removeParts(tb.inmemoryParts, m) + tb.fileParts, removedFileParts = removeParts(tb.fileParts, m) + if pwNew != nil { + switch dstPartType { + case partInmemory: + tb.inmemoryParts = append(tb.inmemoryParts, pwNew) + case partFile: + tb.fileParts = append(tb.fileParts, pwNew) + default: + logger.Panicf("BUG: unknown partType=%d", dstPartType) + } + } tb.partsLock.Unlock() + + removedParts := removedInmemoryParts + removedFileParts if removedParts != len(m) { - if !isOuterParts { - logger.Panicf("BUG: unexpected number of parts removed; got %d; want %d", removedParts, len(m)) - } - if removedParts != 0 { - logger.Panicf("BUG: removed non-zero outer parts: %d", removedParts) - } + logger.Panicf("BUG: unexpected number of parts removed; got %d, want %d", removedParts, len(m)) } - // Remove partition references from old parts. + // Remove references from old parts. for _, pw := range pws { pw.decRef() } +} - d := time.Since(startTime) - if d > 30*time.Second { - logger.Infof("merged %d items across %d blocks in %.3f seconds at %d items/sec to %q; sizeBytes: %d", - outItemsCount, outBlocksCount, d.Seconds(), int(float64(outItemsCount)/d.Seconds()), dstPartPath, newPSize) +func getPartsSize(pws []*partWrapper) uint64 { + n := uint64(0) + for _, pw := range pws { + n += pw.p.size } - - return nil + return n } func getCompressLevel(itemsCount uint64) int { @@ -990,7 +1353,7 @@ func (tb *Table) nextMergeIdx() uint64 { return atomic.AddUint64(&tb.mergeIdx, 1) } -var mergeWorkersCount = cgroup.AvailableCPUs() +var mergeWorkersLimitCh = make(chan struct{}, cgroup.AvailableCPUs()) func openParts(path string) ([]*partWrapper, error) { // The path can be missing after restoring from backup, so create it if needed. @@ -1095,7 +1458,7 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error { } // Flush inmemory items to disk. - tb.flushPendingItems(true) + tb.flushInmemoryItems() // The snapshot must be created under the lock in order to prevent from // concurrent modifications via runTransaction. 
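
Editor's note: the swap logic above depends on reference-counted part wrappers — `getParts` bumps refcounts under `partsLock`, `swapSrcWithDstParts` unregisters the merged source parts, and the parts are only closed once the last concurrent search releases them. The following is a minimal, self-contained sketch of that pattern with hypothetical `part`/`table` types (not the actual VictoriaMetrics code), included only to illustrate the lifecycle.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type part struct {
	name     string
	refCount uint64
}

func (p *part) incRef() { atomic.AddUint64(&p.refCount, 1) }

func (p *part) decRef() {
	if atomic.AddUint64(&p.refCount, ^uint64(0)) == 0 {
		// The last reference has been dropped; the part can be closed now.
		fmt.Printf("closing %s\n", p.name)
	}
}

type table struct {
	mu    sync.Mutex
	parts []*part
}

// getParts returns a snapshot of the current parts with incremented refcounts,
// mirroring Table.getParts above.
func (t *table) getParts() []*part {
	t.mu.Lock()
	defer t.mu.Unlock()
	for _, p := range t.parts {
		p.incRef()
	}
	return append([]*part(nil), t.parts...)
}

// putParts releases a snapshot previously obtained via getParts.
func (t *table) putParts(pws []*part) {
	for _, p := range pws {
		p.decRef()
	}
}

// swapParts unregisters the merged source parts and registers the destination
// part under the lock, similar in spirit to swapSrcWithDstParts: old parts are
// closed only after every concurrent reader has released its snapshot.
func (t *table) swapParts(src map[*part]bool, dst *part) {
	t.mu.Lock()
	kept := t.parts[:0]
	for _, p := range t.parts {
		if !src[p] {
			kept = append(kept, p)
		}
	}
	t.parts = append(kept, dst)
	t.mu.Unlock()
	// Drop the table's own reference to the removed parts.
	for p := range src {
		p.decRef()
	}
}

func main() {
	a := &part{name: "part-a", refCount: 1}
	b := &part{name: "part-b", refCount: 1}
	tb := &table{parts: []*part{a, b}}

	snapshot := tb.getParts() // a concurrent search still holds the old parts
	merged := &part{name: "part-merged", refCount: 1}
	tb.swapParts(map[*part]bool{a: true, b: true}, merged)

	tb.putParts(snapshot) // the old parts are closed only at this point
}
```

The key property is that the swap itself is atomic with respect to `partsLock`, while part closing is deferred to the refcount, so searches never observe a half-merged state.
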
@@ -1154,7 +1517,7 @@ func runTransactions(txnLock *sync.RWMutex, path string) error { if os.IsNotExist(err) { return nil } - return fmt.Errorf("cannot open %q: %w", txnDir, err) + return fmt.Errorf("cannot open transaction dir: %w", err) } defer fs.MustClose(d) @@ -1334,8 +1697,7 @@ func appendPartsToMerge(dst, src []*partWrapper, maxPartsToMerge int, maxOutByte } src = tmp - // Sort src parts by size. - sort.Slice(src, func(i, j int) bool { return src[i].p.size < src[j].p.size }) + sortPartsForOptimalMerge(src) maxSrcParts := maxPartsToMerge if maxSrcParts > len(src) { @@ -1386,17 +1748,24 @@ func appendPartsToMerge(dst, src []*partWrapper, maxPartsToMerge int, maxOutByte return append(dst, pws...) } +func sortPartsForOptimalMerge(pws []*partWrapper) { + // Sort src parts by size. + sort.Slice(pws, func(i, j int) bool { + return pws[i].p.size < pws[j].p.size + }) +} + func removeParts(pws []*partWrapper, partsToRemove map[*partWrapper]bool) ([]*partWrapper, int) { - removedParts := 0 dst := pws[:0] for _, pw := range pws { if !partsToRemove[pw] { dst = append(dst, pw) - continue } - removedParts++ } - return dst, removedParts + for i := len(dst); i < len(pws); i++ { + pws[i] = nil + } + return dst, len(pws) - len(dst) } func isSpecialDir(name string) bool { diff --git a/lib/mergeset/table_test.go b/lib/mergeset/table_test.go index 6a79685378..eff20bcb5f 100644 --- a/lib/mergeset/table_test.go +++ b/lib/mergeset/table_test.go @@ -90,8 +90,8 @@ func TestTableAddItemsSerial(t *testing.T) { var m TableMetrics tb.UpdateMetrics(&m) - if m.ItemsCount != itemsCount { - t.Fatalf("unexpected itemsCount; got %d; want %v", m.ItemsCount, itemsCount) + if n := m.TotalItemsCount(); n != itemsCount { + t.Fatalf("unexpected itemsCount; got %d; want %v", n, itemsCount) } tb.MustClose() @@ -235,8 +235,8 @@ func TestTableAddItemsConcurrent(t *testing.T) { var m TableMetrics tb.UpdateMetrics(&m) - if m.ItemsCount != itemsCount { - t.Fatalf("unexpected itemsCount; got %d; want %v", m.ItemsCount, itemsCount) + if n := m.TotalItemsCount(); n != itemsCount { + t.Fatalf("unexpected itemsCount; got %d; want %v", n, itemsCount) } tb.MustClose() @@ -292,8 +292,8 @@ func testReopenTable(t *testing.T, path string, itemsCount int) { } var m TableMetrics tb.UpdateMetrics(&m) - if m.ItemsCount != uint64(itemsCount) { - t.Fatalf("unexpected itemsCount after re-opening; got %d; want %v", m.ItemsCount, itemsCount) + if n := m.TotalItemsCount(); n != uint64(itemsCount) { + t.Fatalf("unexpected itemsCount after re-opening; got %d; want %v", n, itemsCount) } tb.MustClose() } diff --git a/lib/storage/index_db_test.go b/lib/storage/index_db_test.go index 5268a24fb5..4bf20312b2 100644 --- a/lib/storage/index_db_test.go +++ b/lib/storage/index_db_test.go @@ -1480,8 +1480,8 @@ func TestIndexDBRepopulateAfterRotation(t *testing.T) { // verify the storage contains rows. 
var m Metrics s.UpdateMetrics(&m) - if m.TableMetrics.SmallRowsCount < uint64(metricRowsN) { - t.Fatalf("expecting at least %d rows in the table; got %d", metricRowsN, m.TableMetrics.SmallRowsCount) + if rowsCount := m.TableMetrics.TotalRowsCount(); rowsCount < uint64(metricRowsN) { + t.Fatalf("expecting at least %d rows in the table; got %d", metricRowsN, rowsCount) } // check new series were registered in indexDB diff --git a/lib/storage/inmemory_part.go b/lib/storage/inmemory_part.go index b0681c9849..70f05c15fb 100644 --- a/lib/storage/inmemory_part.go +++ b/lib/storage/inmemory_part.go @@ -1,9 +1,13 @@ package storage import ( + "fmt" + "path/filepath" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup" "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" ) @@ -31,6 +35,36 @@ func (mp *inmemoryPart) Reset() { mp.creationTime = 0 } +// StoreToDisk stores the mp to the given path on disk. +func (mp *inmemoryPart) StoreToDisk(path string) error { + if err := fs.MkdirAllIfNotExist(path); err != nil { + return fmt.Errorf("cannot create directory %q: %w", path, err) + } + timestampsPath := path + "/timestamps.bin" + if err := fs.WriteFileAndSync(timestampsPath, mp.timestampsData.B); err != nil { + return fmt.Errorf("cannot store timestamps: %w", err) + } + valuesPath := path + "/values.bin" + if err := fs.WriteFileAndSync(valuesPath, mp.valuesData.B); err != nil { + return fmt.Errorf("cannot store values: %w", err) + } + indexPath := path + "/index.bin" + if err := fs.WriteFileAndSync(indexPath, mp.indexData.B); err != nil { + return fmt.Errorf("cannot store index: %w", err) + } + metaindexPath := path + "/metaindex.bin" + if err := fs.WriteFileAndSync(metaindexPath, mp.metaindexData.B); err != nil { + return fmt.Errorf("cannot store metaindex: %w", err) + } + if err := mp.ph.writeMinDedupInterval(path); err != nil { + return fmt.Errorf("cannot store min dedup interval: %w", err) + } + // Sync parent directory in order to make sure the written files remain visible after hardware reset + parentDirPath := filepath.Dir(path) + fs.MustSyncPath(parentDirPath) + return nil +} + // InitFromRows initializes mp from the given rows. func (mp *inmemoryPart) InitFromRows(rows []rawRow) { if len(rows) == 0 { diff --git a/lib/storage/partition.go b/lib/storage/partition.go index 17e8aa8baa..714a50f8b2 100644 --- a/lib/storage/partition.go +++ b/lib/storage/partition.go @@ -19,33 +19,19 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/memory" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset" "github.com/VictoriaMetrics/VictoriaMetrics/lib/storagepacelimiter" "github.com/VictoriaMetrics/VictoriaMetrics/lib/syncwg" ) -func maxSmallPartSize() uint64 { - // Small parts are cached in the OS page cache, - // so limit their size by the remaining free RAM. - mem := memory.Remaining() - // It is expected no more than defaultPartsToMerge/2 parts exist - // in the OS page cache before they are merged into bigger part. - // Half of the remaining RAM must be left for lib/mergeset parts, - // so the maxItems is calculated using the below code: - maxSize := uint64(mem) / defaultPartsToMerge - if maxSize < 10e6 { - maxSize = 10e6 - } - return maxSize -} - // The maximum size of big part. 
// // This number limits the maximum time required for building big part. // This time shouldn't exceed a few days. const maxBigPartSize = 1e12 -// The maximum number of small parts in the partition. -const maxSmallPartsPerPartition = 256 +// The maximum number of inmemory parts in the partition. +const maxInmemoryPartsPerPartition = 32 // Default number of parts to merge at once. // @@ -65,6 +51,25 @@ const finalPartsToMerge = 3 // Higher number of shards reduces CPU contention and increases the max bandwidth on multi-core systems. var rawRowsShardsPerPartition = (cgroup.AvailableCPUs() + 1) / 2 +// The interval for flushing bufferred rows into parts, so they become visible to search. +const pendingRowsFlushInterval = time.Second + +// The interval for guaranteed flush of recently ingested data from memory to on-disk parts, +// so they survive process crash. +var dataFlushInterval = 5 * time.Second + +// SetDataFlushInterval sets the interval for guaranteed flush of recently ingested data from memory to disk. +// +// The data can be flushed from memory to disk more frequently if it doesn't fit the memory limit. +// +// This function must be called before initializing the storage. +func SetDataFlushInterval(d time.Duration) { + if d > pendingRowsFlushInterval { + dataFlushInterval = d + mergeset.SetDataFlushInterval(d) + } +} + // getMaxRawRowsPerShard returns the maximum number of rows that haven't been converted into parts yet. func getMaxRawRowsPerShard() int { maxRawRowsPerPartitionOnce.Do(func() { @@ -85,32 +90,30 @@ var ( maxRawRowsPerPartitionOnce sync.Once ) -// The interval for flushing (converting) recent raw rows into parts, -// so they become visible to search. -const rawRowsFlushInterval = time.Second - -// The interval for flushing inmemory parts to persistent storage, -// so they survive process crash. -const inmemoryPartsFlushInterval = 5 * time.Second - // partition represents a partition. type partition struct { // Put atomic counters to the top of struct, so they are aligned to 8 bytes on 32-bit arch. // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/212 - activeBigMerges uint64 - activeSmallMerges uint64 - bigMergesCount uint64 - smallMergesCount uint64 - bigRowsMerged uint64 - smallRowsMerged uint64 - bigRowsDeleted uint64 - smallRowsDeleted uint64 + activeInmemoryMerges uint64 + activeSmallMerges uint64 + activeBigMerges uint64 - smallAssistedMerges uint64 + inmemoryMergesCount uint64 + smallMergesCount uint64 + bigMergesCount uint64 - smallMergeNeedFreeDiskSpace uint64 - bigMergeNeedFreeDiskSpace uint64 + inmemoryRowsMerged uint64 + smallRowsMerged uint64 + bigRowsMerged uint64 + + inmemoryRowsDeleted uint64 + smallRowsDeleted uint64 + bigRowsDeleted uint64 + + inmemoryAssistedMerges uint64 + + mergeNeedFreeDiskSpace uint64 mergeIdx uint64 @@ -126,21 +129,24 @@ type partition struct { // The time range for the partition. Usually this is a whole month. tr TimeRange - // partsLock protects smallParts and bigParts. + // rawRows contains recently added rows that haven't been converted into parts yet. + // rawRows are periodically converted into inmemroyParts. + // rawRows aren't used in search for performance reasons. + rawRows rawRowsShards + + // partsLock protects inmemoryParts, smallParts and bigParts. partsLock sync.Mutex - // Contains all the inmemoryPart plus file-based parts - // with small number of items (up to maxRowsCountPerSmallPart). + // Contains inmemory parts with recently ingested data. 
+ // It must be merged into either smallParts or bigParts to become visible to search. + inmemoryParts []*partWrapper + + // Contains file-based parts with small number of items. smallParts []*partWrapper // Contains file-based parts with big number of items. bigParts []*partWrapper - // rawRows contains recently added rows that haven't been converted into parts yet. - // - // rawRows aren't used in search for performance reasons. - rawRows rawRowsShards - snapshotLock sync.RWMutex stopCh chan struct{} @@ -164,6 +170,9 @@ type partWrapper struct { // Whether the part is in merge now. isInMerge bool + + // The deadline when in-memory part must be flushed to disk. + flushToDiskDeadline time.Time } func (pw *partWrapper) incRef() { @@ -213,8 +222,8 @@ func createPartition(timestamp int64, smallPartitionsPath, bigPartitionsPath str func (pt *partition) startBackgroundWorkers() { pt.startMergeWorkers() - pt.startRawRowsFlusher() pt.startInmemoryPartsFlusher() + pt.startPendingRowsFlusher() pt.startStalePartsRemover() } @@ -292,67 +301,83 @@ type partitionMetrics struct { IndexBlocksCacheRequests uint64 IndexBlocksCacheMisses uint64 - BigSizeBytes uint64 - SmallSizeBytes uint64 + InmemorySizeBytes uint64 + SmallSizeBytes uint64 + BigSizeBytes uint64 - BigRowsCount uint64 - SmallRowsCount uint64 + InmemoryRowsCount uint64 + SmallRowsCount uint64 + BigRowsCount uint64 - BigBlocksCount uint64 - SmallBlocksCount uint64 + InmemoryBlocksCount uint64 + SmallBlocksCount uint64 + BigBlocksCount uint64 - BigPartsCount uint64 - SmallPartsCount uint64 + InmemoryPartsCount uint64 + SmallPartsCount uint64 + BigPartsCount uint64 - ActiveBigMerges uint64 - ActiveSmallMerges uint64 + ActiveInmemoryMerges uint64 + ActiveSmallMerges uint64 + ActiveBigMerges uint64 - BigMergesCount uint64 - SmallMergesCount uint64 + InmemoryMergesCount uint64 + SmallMergesCount uint64 + BigMergesCount uint64 - BigRowsMerged uint64 - SmallRowsMerged uint64 + InmemoryRowsMerged uint64 + SmallRowsMerged uint64 + BigRowsMerged uint64 - BigRowsDeleted uint64 - SmallRowsDeleted uint64 + InmemoryRowsDeleted uint64 + SmallRowsDeleted uint64 + BigRowsDeleted uint64 - BigPartsRefCount uint64 - SmallPartsRefCount uint64 + InmemoryPartsRefCount uint64 + SmallPartsRefCount uint64 + BigPartsRefCount uint64 - SmallAssistedMerges uint64 + InmemoryAssistedMerges uint64 - SmallMergeNeedFreeDiskSpace uint64 - BigMergeNeedFreeDiskSpace uint64 + MergeNeedFreeDiskSpace uint64 +} + +// TotalRowsCount returns total number of rows in tm. +func (pm *partitionMetrics) TotalRowsCount() uint64 { + return pm.PendingRows + pm.InmemoryRowsCount + pm.SmallRowsCount + pm.BigRowsCount } // UpdateMetrics updates m with metrics from pt. 
func (pt *partition) UpdateMetrics(m *partitionMetrics) { - rawRowsLen := uint64(pt.rawRows.Len()) - m.PendingRows += rawRowsLen - m.SmallRowsCount += rawRowsLen + m.PendingRows += uint64(pt.rawRows.Len()) pt.partsLock.Lock() + for _, pw := range pt.inmemoryParts { + p := pw.p + m.InmemoryRowsCount += p.ph.RowsCount + m.InmemoryBlocksCount += p.ph.BlocksCount + m.InmemorySizeBytes += p.size + m.InmemoryPartsRefCount += atomic.LoadUint64(&pw.refCount) + } + for _, pw := range pt.smallParts { + p := pw.p + m.SmallRowsCount += p.ph.RowsCount + m.SmallBlocksCount += p.ph.BlocksCount + m.SmallSizeBytes += p.size + m.SmallPartsRefCount += atomic.LoadUint64(&pw.refCount) + } for _, pw := range pt.bigParts { p := pw.p - m.BigRowsCount += p.ph.RowsCount m.BigBlocksCount += p.ph.BlocksCount m.BigSizeBytes += p.size m.BigPartsRefCount += atomic.LoadUint64(&pw.refCount) } - for _, pw := range pt.smallParts { - p := pw.p - - m.SmallRowsCount += p.ph.RowsCount - m.SmallBlocksCount += p.ph.BlocksCount - m.SmallSizeBytes += p.size - m.SmallPartsRefCount += atomic.LoadUint64(&pw.refCount) - } - - m.BigPartsCount += uint64(len(pt.bigParts)) + m.InmemoryPartsCount += uint64(len(pt.inmemoryParts)) m.SmallPartsCount += uint64(len(pt.smallParts)) + m.BigPartsCount += uint64(len(pt.bigParts)) pt.partsLock.Unlock() @@ -362,22 +387,25 @@ func (pt *partition) UpdateMetrics(m *partitionMetrics) { m.IndexBlocksCacheRequests = ibCache.Requests() m.IndexBlocksCacheMisses = ibCache.Misses() - m.ActiveBigMerges += atomic.LoadUint64(&pt.activeBigMerges) + m.ActiveInmemoryMerges += atomic.LoadUint64(&pt.activeInmemoryMerges) m.ActiveSmallMerges += atomic.LoadUint64(&pt.activeSmallMerges) + m.ActiveBigMerges += atomic.LoadUint64(&pt.activeBigMerges) - m.BigMergesCount += atomic.LoadUint64(&pt.bigMergesCount) + m.InmemoryMergesCount += atomic.LoadUint64(&pt.inmemoryMergesCount) m.SmallMergesCount += atomic.LoadUint64(&pt.smallMergesCount) + m.BigMergesCount += atomic.LoadUint64(&pt.bigMergesCount) - m.BigRowsMerged += atomic.LoadUint64(&pt.bigRowsMerged) + m.InmemoryRowsMerged += atomic.LoadUint64(&pt.inmemoryRowsMerged) m.SmallRowsMerged += atomic.LoadUint64(&pt.smallRowsMerged) + m.BigRowsMerged += atomic.LoadUint64(&pt.bigRowsMerged) - m.BigRowsDeleted += atomic.LoadUint64(&pt.bigRowsDeleted) + m.InmemoryRowsDeleted += atomic.LoadUint64(&pt.inmemoryRowsDeleted) m.SmallRowsDeleted += atomic.LoadUint64(&pt.smallRowsDeleted) + m.BigRowsDeleted += atomic.LoadUint64(&pt.bigRowsDeleted) - m.SmallAssistedMerges += atomic.LoadUint64(&pt.smallAssistedMerges) + m.InmemoryAssistedMerges += atomic.LoadUint64(&pt.inmemoryAssistedMerges) - m.SmallMergeNeedFreeDiskSpace += atomic.LoadUint64(&pt.smallMergeNeedFreeDiskSpace) - m.BigMergeNeedFreeDiskSpace += atomic.LoadUint64(&pt.bigMergeNeedFreeDiskSpace) + m.MergeNeedFreeDiskSpace += atomic.LoadUint64(&pt.mergeNeedFreeDiskSpace) } // AddRows adds the given rows to the partition pt. 
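
Editor's note: the hunk below changes `rawRowsShards.addRows` to distribute incoming rows across shards in round-robin order, carrying any leftover rows to the next shard instead of dropping into a single slow path (the same pattern is used for `rawItemsShards.addItems` in `lib/mergeset`). The following sketch uses hypothetical `shard`/`shards` types and a made-up `shardCapacity` — it is not the patch code itself, only an illustration of the loop structure under those assumptions.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

const shardCapacity = 4

type shard struct {
	mu      sync.Mutex
	rows    []int
	flushed int // rows already "flushed" out of this shard
}

// addRows appends as many rows as fit into the shard buffer and returns the
// tail the caller must hand to the next shard. A full buffer is flushed first,
// standing in for the conversion of raw rows into an in-memory part.
func (s *shard) addRows(rows []int) []int {
	s.mu.Lock()
	defer s.mu.Unlock()
	if len(s.rows) == shardCapacity {
		s.flushed += len(s.rows)
		s.rows = s.rows[:0]
	}
	n := shardCapacity - len(s.rows)
	if n > len(rows) {
		n = len(rows)
	}
	s.rows = append(s.rows, rows[:n]...)
	return rows[n:]
}

type shards struct {
	shardIdx uint32
	shards   []shard
}

// addRows distributes rows across shards in round-robin order until every row
// has been placed, mirroring the loop structure of rawRowsShards.addRows.
func (ss *shards) addRows(rows []int) {
	shardsLen := uint32(len(ss.shards))
	for len(rows) > 0 {
		idx := atomic.AddUint32(&ss.shardIdx, 1) % shardsLen
		rows = ss.shards[idx].addRows(rows)
	}
}

func main() {
	ss := &shards{shards: make([]shard, 3)}
	rows := make([]int, 10)
	for i := range rows {
		rows[i] = i
	}
	ss.addRows(rows)
	for i := range ss.shards {
		s := &ss.shards[i]
		fmt.Printf("shard %d: buffered=%v flushed=%d\n", i, s.rows, s.flushed)
	}
}
```

Because each call returns the unconsumed tail, a burst larger than one shard's buffer is spread over several shards in one pass, which reduces contention on any single shard's mutex.
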
@@ -415,11 +443,13 @@ func (rrss *rawRowsShards) init() { } func (rrss *rawRowsShards) addRows(pt *partition, rows []rawRow) { - n := atomic.AddUint32(&rrss.shardIdx, 1) shards := rrss.shards - idx := n % uint32(len(shards)) - shard := &shards[idx] - shard.addRows(pt, rows) + shardsLen := uint32(len(shards)) + for len(rows) > 0 { + n := atomic.AddUint32(&rrss.shardIdx, 1) + idx := n % shardsLen + rows = shards[idx].addRows(pt, rows) + } } func (rrss *rawRowsShards) Len() int { @@ -453,8 +483,8 @@ func (rrs *rawRowsShard) Len() int { return n } -func (rrs *rawRowsShard) addRows(pt *partition, rows []rawRow) { - var rowsToFlush []rawRow +func (rrs *rawRowsShard) addRows(pt *partition, rows []rawRow) []rawRow { + var rrb *rawRowsBlock rrs.mu.Lock() if cap(rrs.rows) == 0 { @@ -464,23 +494,25 @@ func (rrs *rawRowsShard) addRows(pt *partition, rows []rawRow) { rrs.rows = rrs.rows[:len(rrs.rows)+n] rows = rows[n:] if len(rows) > 0 { - // Slow path - rows did't fit rrs.rows capacity. - // Convert rrs.rows to rowsToFlush and convert it to a part, - // then try moving the remaining rows to rrs.rows. - rowsToFlush = rrs.rows - rrs.rows = newRawRowsBlock() - if len(rows) <= n { - rrs.rows = append(rrs.rows[:0], rows...) - } else { - // The slowest path - rows do not fit rrs.rows capacity. - // So append them directly to rowsToFlush. - rowsToFlush = append(rowsToFlush, rows...) - } + rrb = getRawRowsBlock() + rrb.rows, rrs.rows = rrs.rows, rrb.rows + n = copy(rrs.rows[:cap(rrs.rows)], rows) + rrs.rows = rrs.rows[:n] + rows = rows[n:] atomic.StoreUint64(&rrs.lastFlushTime, fasttime.UnixTimestamp()) } rrs.mu.Unlock() - pt.flushRowsToParts(rowsToFlush) + if rrb != nil { + pt.flushRowsToParts(rrb.rows) + putRawRowsBlock(rrb) + } + + return rows +} + +type rawRowsBlock struct { + rows []rawRow } func newRawRowsBlock() []rawRow { @@ -488,8 +520,30 @@ func newRawRowsBlock() []rawRow { return make([]rawRow, 0, n) } +func getRawRowsBlock() *rawRowsBlock { + v := rawRowsBlockPool.Get() + if v == nil { + return &rawRowsBlock{ + rows: newRawRowsBlock(), + } + } + return v.(*rawRowsBlock) +} + +func putRawRowsBlock(rrb *rawRowsBlock) { + rrb.rows = rrb.rows[:0] + rawRowsBlockPool.Put(rrb) +} + +var rawRowsBlockPool sync.Pool + func (pt *partition) flushRowsToParts(rows []rawRow) { + if len(rows) == 0 { + return + } maxRows := getMaxRawRowsPerShard() + var pwsLock sync.Mutex + pws := make([]*partWrapper, 0, (len(rows)+maxRows-1)/maxRows) wg := getWaitGroup() for len(rows) > 0 { n := maxRows @@ -497,14 +551,73 @@ func (pt *partition) flushRowsToParts(rows []rawRow) { n = len(rows) } wg.Add(1) - go func(rowsPart []rawRow) { - defer wg.Done() - pt.addRowsPart(rowsPart) + flushConcurrencyCh <- struct{}{} + go func(rowsChunk []rawRow) { + defer func() { + <-flushConcurrencyCh + wg.Done() + }() + pw := pt.createInmemoryPart(rowsChunk) + if pw == nil { + return + } + pwsLock.Lock() + pws = append(pws, pw) + pwsLock.Unlock() }(rows[:n]) rows = rows[n:] } wg.Wait() putWaitGroup(wg) + + pt.partsLock.Lock() + pt.inmemoryParts = append(pt.inmemoryParts, pws...) + pt.partsLock.Unlock() + + flushConcurrencyCh <- struct{}{} + pt.assistedMergeForInmemoryParts() + <-flushConcurrencyCh + // There is no need in assisted merges for small and big parts, + // since the bottleneck is possible only at inmemory parts. 
+} + +var flushConcurrencyCh = make(chan struct{}, cgroup.AvailableCPUs()) + +func (pt *partition) assistedMergeForInmemoryParts() { + for { + pt.partsLock.Lock() + ok := getNotInMergePartsCount(pt.inmemoryParts) < maxInmemoryPartsPerPartition + pt.partsLock.Unlock() + if ok { + return + } + + // There are too many unmerged inmemory parts. + // This usually means that the app cannot keep up with the data ingestion rate. + // Assist with mering inmemory parts. + // Prioritize assisted merges over searches. + storagepacelimiter.Search.Inc() + err := pt.mergeInmemoryParts() + storagepacelimiter.Search.Dec() + if err == nil { + atomic.AddUint64(&pt.inmemoryAssistedMerges, 1) + continue + } + if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) { + return + } + logger.Panicf("FATAL: cannot merge inmemory parts: %s", err) + } +} + +func getNotInMergePartsCount(pws []*partWrapper) int { + n := 0 + for _, pw := range pws { + if !pw.isInMerge { + n++ + } + } + return n } func getWaitGroup() *sync.WaitGroup { @@ -521,11 +634,10 @@ func putWaitGroup(wg *sync.WaitGroup) { var wgPool sync.Pool -func (pt *partition) addRowsPart(rows []rawRow) { +func (pt *partition) createInmemoryPart(rows []rawRow) *partWrapper { if len(rows) == 0 { - return + return nil } - mp := getInmemoryPart() mp.InitFromRows(rows) @@ -542,40 +654,22 @@ func (pt *partition) addRowsPart(rows []rawRow) { logger.Panicf("BUG: the part %q cannot be added to partition %q because of too big MaxTimestamp; got %d; want at least %d", &mp.ph, pt.smallPartsPath, mp.ph.MaxTimestamp, pt.tr.MaxTimestamp) } + flushToDiskDeadline := time.Now().Add(dataFlushInterval) + return newPartWrapperFromInmemoryPart(mp, flushToDiskDeadline) +} +func newPartWrapperFromInmemoryPart(mp *inmemoryPart, flushToDiskDeadline time.Time) *partWrapper { p, err := mp.NewPart() if err != nil { logger.Panicf("BUG: cannot create part from %q: %s", &mp.ph, err) } - pw := &partWrapper{ - p: p, - mp: mp, - refCount: 1, + p: p, + mp: mp, + refCount: 1, + flushToDiskDeadline: flushToDiskDeadline, } - - pt.partsLock.Lock() - pt.smallParts = append(pt.smallParts, pw) - ok := len(pt.smallParts) <= maxSmallPartsPerPartition - pt.partsLock.Unlock() - if ok { - return - } - - // The added part exceeds available limit. Help merging parts. - // - // Prioritize assisted merges over searches. - storagepacelimiter.Search.Inc() - err = pt.mergeSmallParts(false) - storagepacelimiter.Search.Dec() - if err == nil { - atomic.AddUint64(&pt.smallAssistedMerges, 1) - return - } - if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) || errors.Is(err, errReadOnlyMode) { - return - } - logger.Panicf("FATAL: cannot merge small parts: %s", err) + return pw } // HasTimestamp returns true if the pt contains the given timestamp. @@ -588,6 +682,10 @@ func (pt *partition) HasTimestamp(timestamp int64) bool { // The appended parts must be released with PutParts. func (pt *partition) GetParts(dst []*partWrapper) []*partWrapper { pt.partsLock.Lock() + for _, pw := range pt.inmemoryParts { + pw.incRef() + } + dst = append(dst, pt.inmemoryParts...) for _, pw := range pt.smallParts { pw.incRef() } @@ -625,93 +723,132 @@ func (pt *partition) MustClose() { logger.Infof("flushing inmemory parts to files on %q...", pt.smallPartsPath) startTime = time.Now() - // Flush raw rows the last time before exit. - pt.flushPendingRows(true) + // Flush inmemory rows the last time before exit. + pt.flushInmemoryRows() - // Flush inmemory parts to disk. 
- var pws []*partWrapper - pt.partsLock.Lock() - for _, pw := range pt.smallParts { - if pw.mp == nil { - continue - } - if pw.isInMerge { - logger.Panicf("BUG: the inmemory part %q mustn't be in merge after stopping small parts merger in the partition %q", &pw.mp.ph, pt.smallPartsPath) - } - pw.isInMerge = true - pws = append(pws, pw) - } - pt.partsLock.Unlock() - - if err := pt.mergePartsOptimal(pws, nil); err != nil { - logger.Panicf("FATAL: cannot flush %d inmemory parts to files on %q: %s", len(pws), pt.smallPartsPath, err) - } - logger.Infof("%d inmemory parts have been flushed to files in %.3f seconds on %q", len(pws), time.Since(startTime).Seconds(), pt.smallPartsPath) - - // Remove references to smallParts from the pt, so they may be eventually closed + // Remove references from inmemoryParts, smallParts and bigParts, so they may be eventually closed // after all the searches are done. pt.partsLock.Lock() + inmemoryParts := pt.inmemoryParts smallParts := pt.smallParts - pt.smallParts = nil - pt.partsLock.Unlock() - - for _, pw := range smallParts { - pw.decRef() - } - - // Remove references to bigParts from the pt, so they may be eventually closed - // after all the searches are done. - pt.partsLock.Lock() bigParts := pt.bigParts + pt.inmemoryParts = nil + pt.smallParts = nil pt.bigParts = nil pt.partsLock.Unlock() + for _, pw := range inmemoryParts { + pw.decRef() + } + for _, pw := range smallParts { + pw.decRef() + } for _, pw := range bigParts { pw.decRef() } } -func (pt *partition) startRawRowsFlusher() { +func (pt *partition) startInmemoryPartsFlusher() { pt.wg.Add(1) go func() { - pt.rawRowsFlusher() + pt.inmemoryPartsFlusher() pt.wg.Done() }() } -func (pt *partition) rawRowsFlusher() { - ticker := time.NewTicker(rawRowsFlushInterval) +func (pt *partition) startPendingRowsFlusher() { + pt.wg.Add(1) + go func() { + pt.pendingRowsFlusher() + pt.wg.Done() + }() +} + +func (pt *partition) inmemoryPartsFlusher() { + ticker := time.NewTicker(dataFlushInterval) defer ticker.Stop() for { select { case <-pt.stopCh: return case <-ticker.C: - pt.flushPendingRows(false) + pt.flushInmemoryParts(false) } } } -func (pt *partition) flushPendingRows(isFinal bool) { - pt.rawRows.flush(pt, isFinal) +func (pt *partition) pendingRowsFlusher() { + ticker := time.NewTicker(pendingRowsFlushInterval) + defer ticker.Stop() + var rows []rawRow + for { + select { + case <-pt.stopCh: + return + case <-ticker.C: + rows = pt.flushPendingRows(rows[:0], false) + } + } } -func (rrss *rawRowsShards) flush(pt *partition, isFinal bool) { - var rowsToFlush []rawRow - for i := range rrss.shards { - rowsToFlush = rrss.shards[i].appendRawRowsToFlush(rowsToFlush, pt, isFinal) +func (pt *partition) flushPendingRows(dst []rawRow, isFinal bool) []rawRow { + return pt.rawRows.flush(pt, dst, isFinal) +} + +func (pt *partition) flushInmemoryRows() { + pt.rawRows.flush(pt, nil, true) + pt.flushInmemoryParts(true) +} + +func (pt *partition) flushInmemoryParts(isFinal bool) { + for { + currentTime := time.Now() + var pws []*partWrapper + + pt.partsLock.Lock() + for _, pw := range pt.inmemoryParts { + if !pw.isInMerge && (isFinal || pw.flushToDiskDeadline.Before(currentTime)) { + pw.isInMerge = true + pws = append(pws, pw) + } + } + pt.partsLock.Unlock() + + if err := pt.mergePartsOptimal(pws, nil); err != nil { + logger.Panicf("FATAL: cannot merge in-memory parts: %s", err) + } + if !isFinal { + return + } + pt.partsLock.Lock() + n := len(pt.inmemoryParts) + pt.partsLock.Unlock() + if n == 0 { + // All the in-memory parts 
were flushed to disk. + return + } + // Some parts weren't flushed to disk because they were being merged. + // Sleep for a while and try flushing them again. + time.Sleep(10 * time.Millisecond) } - pt.flushRowsToParts(rowsToFlush) +} + +func (rrss *rawRowsShards) flush(pt *partition, dst []rawRow, isFinal bool) []rawRow { + for i := range rrss.shards { + dst = rrss.shards[i].appendRawRowsToFlush(dst, pt, isFinal) + } + pt.flushRowsToParts(dst) + return dst } func (rrs *rawRowsShard) appendRawRowsToFlush(dst []rawRow, pt *partition, isFinal bool) []rawRow { currentTime := fasttime.UnixTimestamp() - flushSeconds := int64(rawRowsFlushInterval.Seconds()) + flushSeconds := int64(pendingRowsFlushInterval.Seconds()) if flushSeconds <= 0 { flushSeconds = 1 } lastFlushTime := atomic.LoadUint64(&rrs.lastFlushTime) - if !isFinal && currentTime <= lastFlushTime+uint64(flushSeconds) { + if !isFinal && currentTime < lastFlushTime+uint64(flushSeconds) { // Fast path - nothing to flush return dst } @@ -724,112 +861,73 @@ func (rrs *rawRowsShard) appendRawRowsToFlush(dst []rawRow, pt *partition, isFin return dst } -func (pt *partition) startInmemoryPartsFlusher() { - pt.wg.Add(1) - go func() { - pt.inmemoryPartsFlusher() - pt.wg.Done() - }() -} - -func (pt *partition) inmemoryPartsFlusher() { - ticker := time.NewTicker(inmemoryPartsFlushInterval) - defer ticker.Stop() - var pwsBuf []*partWrapper - var err error - for { - select { - case <-pt.stopCh: - return - case <-ticker.C: - pwsBuf, err = pt.flushInmemoryParts(pwsBuf[:0], false) - if err != nil { - logger.Panicf("FATAL: cannot flush inmemory parts: %s", err) - } +func (pt *partition) mergePartsOptimal(pws []*partWrapper, stopCh <-chan struct{}) error { + sortPartsForOptimalMerge(pws) + for len(pws) > 0 { + n := defaultPartsToMerge + if n > len(pws) { + n = len(pws) } - } -} - -func (pt *partition) flushInmemoryParts(dstPws []*partWrapper, force bool) ([]*partWrapper, error) { - currentTime := fasttime.UnixTimestamp() - flushSeconds := int64(inmemoryPartsFlushInterval.Seconds()) - if flushSeconds <= 0 { - flushSeconds = 1 - } - - // Inmemory parts may present only in small parts. - pt.partsLock.Lock() - for _, pw := range pt.smallParts { - if pw.mp == nil || pw.isInMerge { + pwsChunk := pws[:n] + pws = pws[n:] + err := pt.mergeParts(pwsChunk, stopCh, true) + if err == nil { continue } - if force || currentTime-pw.mp.creationTime >= uint64(flushSeconds) { - pw.isInMerge = true - dstPws = append(dstPws, pw) + pt.releasePartsToMerge(pws) + if errors.Is(err, errForciblyStopped) { + return nil } - } - pt.partsLock.Unlock() - - if err := pt.mergePartsOptimal(dstPws, nil); err != nil { - return dstPws, fmt.Errorf("cannot merge %d inmemory parts: %w", len(dstPws), err) - } - return dstPws, nil -} - -func (pt *partition) mergePartsOptimal(pws []*partWrapper, stopCh <-chan struct{}) error { - for len(pws) > defaultPartsToMerge { - pwsChunk := pws[:defaultPartsToMerge] - pws = pws[defaultPartsToMerge:] - if err := pt.mergeParts(pwsChunk, stopCh); err != nil { - pt.releasePartsToMerge(pws) - return fmt.Errorf("cannot merge %d parts: %w", defaultPartsToMerge, err) - } - } - if len(pws) == 0 { - return nil - } - if err := pt.mergeParts(pws, stopCh); err != nil { - return fmt.Errorf("cannot merge %d parts: %w", len(pws), err) + return fmt.Errorf("cannot merge parts optimally: %w", err) } return nil } -// ForceMergeAllParts runs merge for all the parts in pt - small and big. +// ForceMergeAllParts runs merge for all the parts in pt. 
func (pt *partition) ForceMergeAllParts() error { - var pws []*partWrapper - pt.partsLock.Lock() - if !hasActiveMerges(pt.smallParts) && !hasActiveMerges(pt.bigParts) { - pws = appendAllPartsToMerge(pws, pt.smallParts) - pws = appendAllPartsToMerge(pws, pt.bigParts) - } - pt.partsLock.Unlock() - + pws := pt.getAllPartsForMerge() if len(pws) == 0 { // Nothing to merge. return nil } + for { + // Check whether there is enough disk space for merging pws. + newPartSize := getPartsSize(pws) + maxOutBytes := fs.MustGetFreeSpace(pt.bigPartsPath) + if newPartSize > maxOutBytes { + freeSpaceNeededBytes := newPartSize - maxOutBytes + forceMergeLogger.Warnf("cannot initiate force merge for the partition %s; additional space needed: %d bytes", pt.name, freeSpaceNeededBytes) + return nil + } - // Check whether there is enough disk space for merging pws. - newPartSize := getPartsSize(pws) - maxOutBytes := fs.MustGetFreeSpace(pt.bigPartsPath) - if newPartSize > maxOutBytes { - freeSpaceNeededBytes := newPartSize - maxOutBytes - forceMergeLogger.Warnf("cannot initiate force merge for the partition %s; additional space needed: %d bytes", pt.name, freeSpaceNeededBytes) - return nil + // If len(pws) == 1, then the merge must run anyway. + // This allows applying the configured retention, removing the deleted series + // and performing de-duplication if needed. + if err := pt.mergePartsOptimal(pws, pt.stopCh); err != nil { + return fmt.Errorf("cannot force merge %d parts from partition %q: %w", len(pws), pt.name, err) + } + pws = pt.getAllPartsForMerge() + if len(pws) <= 1 { + return nil + } } - - // If len(pws) == 1, then the merge must run anyway. - // This allows applying the configured retention, removing the deleted series - // and performing de-duplication if needed. - if err := pt.mergePartsOptimal(pws, pt.stopCh); err != nil { - return fmt.Errorf("cannot force merge %d parts from partition %q: %w", len(pws), pt.name, err) - } - return nil } var forceMergeLogger = logger.WithThrottler("forceMerge", time.Minute) -func appendAllPartsToMerge(dst, src []*partWrapper) []*partWrapper { +func (pt *partition) getAllPartsForMerge() []*partWrapper { + var pws []*partWrapper + pt.partsLock.Lock() + if !hasActiveMerges(pt.inmemoryParts) && !hasActiveMerges(pt.smallParts) && !hasActiveMerges(pt.bigParts) { + pws = appendAllPartsForMerge(pws, pt.inmemoryParts) + pws = appendAllPartsForMerge(pws, pt.smallParts) + pws = appendAllPartsForMerge(pws, pt.bigParts) + } + pt.partsLock.Unlock() + return pws +} + +func appendAllPartsForMerge(dst, src []*partWrapper) []*partWrapper { for _, pw := range src { if pw.isInMerge { logger.Panicf("BUG: part %q is already in merge", pw.p.path) @@ -849,10 +947,9 @@ func hasActiveMerges(pws []*partWrapper) bool { return false } -var ( - bigMergeWorkersCount = getDefaultMergeConcurrency(4) - smallMergeWorkersCount = getDefaultMergeConcurrency(16) -) +var mergeWorkersLimitCh = make(chan struct{}, getDefaultMergeConcurrency(16)) + +var bigMergeWorkersLimitCh = make(chan struct{}, getDefaultMergeConcurrency(4)) func getDefaultMergeConcurrency(max int) int { v := (cgroup.AvailableCPUs() + 1) / 2 @@ -870,47 +967,28 @@ func SetBigMergeWorkersCount(n int) { // Do nothing return } - bigMergeWorkersCount = n + bigMergeWorkersLimitCh = make(chan struct{}, n) } -// SetSmallMergeWorkersCount sets the maximum number of concurrent mergers for small blocks. +// SetMergeWorkersCount sets the maximum number of concurrent mergers for parts. 
// // The function must be called before opening or creating any storage. -func SetSmallMergeWorkersCount(n int) { +func SetMergeWorkersCount(n int) { if n <= 0 { // Do nothing return } - smallMergeWorkersCount = n + mergeWorkersLimitCh = make(chan struct{}, n) } func (pt *partition) startMergeWorkers() { - for i := 0; i < smallMergeWorkersCount; i++ { + for i := 0; i < cap(mergeWorkersLimitCh); i++ { pt.wg.Add(1) go func() { - pt.smallPartsMerger() + pt.mergeWorker() pt.wg.Done() }() } - for i := 0; i < bigMergeWorkersCount; i++ { - pt.wg.Add(1) - go func() { - pt.bigPartsMerger() - pt.wg.Done() - }() - } -} - -func (pt *partition) bigPartsMerger() { - if err := pt.partsMerger(pt.mergeBigParts); err != nil { - logger.Panicf("FATAL: unrecoverable error when merging big parts in the partition %q: %s", pt.bigPartsPath, err) - } -} - -func (pt *partition) smallPartsMerger() { - if err := pt.partsMerger(pt.mergeSmallParts); err != nil { - logger.Panicf("FATAL: unrecoverable error when merging small parts in the partition %q: %s", pt.smallPartsPath, err) - } } const ( @@ -918,13 +996,16 @@ const ( maxMergeSleepTime = 10 * time.Second ) -func (pt *partition) partsMerger(mergerFunc func(isFinal bool) error) error { +func (pt *partition) mergeWorker() { sleepTime := minMergeSleepTime var lastMergeTime uint64 isFinal := false t := time.NewTimer(sleepTime) for { - err := mergerFunc(isFinal) + // Limit the number of concurrent calls to mergeExistingParts, cine the number of merge + mergeWorkersLimitCh <- struct{}{} + err := pt.mergeExistingParts(isFinal) + <-mergeWorkersLimitCh if err == nil { // Try merging additional parts. sleepTime = minMergeSleepTime @@ -934,10 +1015,11 @@ func (pt *partition) partsMerger(mergerFunc func(isFinal bool) error) error { } if errors.Is(err, errForciblyStopped) { // The merger has been stopped. - return nil + return } if !errors.Is(err, errNothingToMerge) && !errors.Is(err, errReadOnlyMode) { - return err + // Unexpected error. + logger.Panicf("FATAL: unrecoverable error when merging parts in the partition (%q, %q): %s", pt.smallPartsPath, pt.bigPartsPath, err) } if finalMergeDelaySeconds > 0 && fasttime.UnixTimestamp()-lastMergeTime > finalMergeDelaySeconds { // We have free time for merging into bigger parts. @@ -954,7 +1036,7 @@ func (pt *partition) partsMerger(mergerFunc func(isFinal bool) error) error { } select { case <-pt.stopCh: - return nil + return case <-t.C: t.Reset(sleepTime) } @@ -973,6 +1055,40 @@ func SetFinalMergeDelay(delay time.Duration) { return } finalMergeDelaySeconds = uint64(delay.Seconds() + 1) + mergeset.SetFinalMergeDelay(delay) +} + +func getMaxInmemoryPartSize() uint64 { + // Allocate 10% of allowed memory for in-memory parts. + n := uint64(0.1 * float64(memory.Allowed()) / maxInmemoryPartsPerPartition) + if n < 1e6 { + n = 1e6 + } + return n +} + +func (pt *partition) getMaxSmallPartSize() uint64 { + // Small parts are cached in the OS page cache, + // so limit their size by the remaining free RAM. + mem := memory.Remaining() + // It is expected no more than defaultPartsToMerge/2 parts exist + // in the OS page cache before they are merged into bigger part. + // Half of the remaining RAM must be left for lib/mergeset parts, + // so the maxItems is calculated using the below code: + n := uint64(mem) / defaultPartsToMerge + if n < 10e6 { + n = 10e6 + } + // Make sure the output part fits available disk space for small parts. 
+ sizeLimit := getMaxOutBytes(pt.smallPartsPath, cap(mergeWorkersLimitCh)) + if n > sizeLimit { + n = sizeLimit + } + return n +} + +func (pt *partition) getMaxBigPartSize() uint64 { + return getMaxOutBytes(pt.bigPartsPath, cap(bigMergeWorkersLimitCh)) } func getMaxOutBytes(path string, workersCount int) uint64 { @@ -994,56 +1110,35 @@ func (pt *partition) canBackgroundMerge() bool { var errReadOnlyMode = fmt.Errorf("storage is in readonly mode") -func (pt *partition) mergeBigParts(isFinal bool) error { - if !pt.canBackgroundMerge() { - // Do not perform merge in read-only mode, since this may result in disk space shortage. - // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2603 - return errReadOnlyMode - } - maxOutBytes := getMaxOutBytes(pt.bigPartsPath, bigMergeWorkersCount) +func (pt *partition) mergeInmemoryParts() error { + maxOutBytes := pt.getMaxBigPartSize() pt.partsLock.Lock() - pws, needFreeSpace := getPartsToMerge(pt.bigParts, maxOutBytes, isFinal) + pws, needFreeSpace := getPartsToMerge(pt.inmemoryParts, maxOutBytes, false) pt.partsLock.Unlock() - atomicSetBool(&pt.bigMergeNeedFreeDiskSpace, needFreeSpace) - return pt.mergeParts(pws, pt.stopCh) + atomicSetBool(&pt.mergeNeedFreeDiskSpace, needFreeSpace) + return pt.mergeParts(pws, pt.stopCh, false) } -func (pt *partition) mergeSmallParts(isFinal bool) error { +func (pt *partition) mergeExistingParts(isFinal bool) error { if !pt.canBackgroundMerge() { // Do not perform merge in read-only mode, since this may result in disk space shortage. // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2603 return errReadOnlyMode } - // Try merging small parts to a big part at first. - maxBigPartOutBytes := getMaxOutBytes(pt.bigPartsPath, bigMergeWorkersCount) + maxOutBytes := pt.getMaxBigPartSize() + pt.partsLock.Lock() - pws, needFreeSpace := getPartsToMerge(pt.smallParts, maxBigPartOutBytes, isFinal) + dst := make([]*partWrapper, 0, len(pt.inmemoryParts)+len(pt.smallParts)+len(pt.bigParts)) + dst = append(dst, pt.inmemoryParts...) + dst = append(dst, pt.smallParts...) + dst = append(dst, pt.bigParts...) + pws, needFreeSpace := getPartsToMerge(dst, maxOutBytes, isFinal) pt.partsLock.Unlock() - atomicSetBool(&pt.bigMergeNeedFreeDiskSpace, needFreeSpace) - outSize := getPartsSize(pws) - if outSize > maxSmallPartSize() { - // Merge small parts to a big part. - return pt.mergeParts(pws, pt.stopCh) - } - - // Make sure that the output small part fits small parts storage. - maxSmallPartOutBytes := getMaxOutBytes(pt.smallPartsPath, smallMergeWorkersCount) - if outSize <= maxSmallPartOutBytes { - // Merge small parts to a small part. - return pt.mergeParts(pws, pt.stopCh) - } - - // The output small part doesn't fit small parts storage. Try merging small parts according to maxSmallPartOutBytes limit. - pt.releasePartsToMerge(pws) - pt.partsLock.Lock() - pws, needFreeSpace = getPartsToMerge(pt.smallParts, maxSmallPartOutBytes, isFinal) - pt.partsLock.Unlock() - atomicSetBool(&pt.smallMergeNeedFreeDiskSpace, needFreeSpace) - - return pt.mergeParts(pws, pt.stopCh) + atomicSetBool(&pt.mergeNeedFreeDiskSpace, needFreeSpace) + return pt.mergeParts(pws, pt.stopCh, isFinal) } func (pt *partition) releasePartsToMerge(pws []*partWrapper) { @@ -1105,12 +1200,14 @@ func getMinDedupInterval(pws []*partWrapper) int64 { return dMin } -// mergeParts merges pws. +// mergeParts merges pws to a single resulting part. // // Merging is immediately stopped if stopCh is closed. 
// +// if isFinal is set, then the resulting part will be saved to disk. +// // All the parts inside pws must have isInMerge field set to true. -func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) error { +func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isFinal bool) error { if len(pws) == 0 { // Nothing to merge. return errNothingToMerge @@ -1119,164 +1216,332 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro startTime := time.Now() + // Initialize destination paths. + dstPartType := pt.getDstPartType(pws, isFinal) + ptPath, tmpPartPath, mergeIdx := pt.getDstPartPaths(dstPartType) + + if dstPartType == partBig { + bigMergeWorkersLimitCh <- struct{}{} + defer func() { + <-bigMergeWorkersLimitCh + }() + } + + if isFinal && len(pws) == 1 && pws[0].mp != nil { + // Fast path: flush a single in-memory part to disk. + mp := pws[0].mp + if tmpPartPath == "" { + logger.Panicf("BUG: tmpPartPath must be non-empty") + } + if err := mp.StoreToDisk(tmpPartPath); err != nil { + return fmt.Errorf("cannot store in-memory part to %q: %w", tmpPartPath, err) + } + pwNew, err := pt.openCreatedPart(&mp.ph, pws, nil, ptPath, tmpPartPath, mergeIdx) + if err != nil { + return fmt.Errorf("cannot atomically register the created part: %w", err) + } + pt.swapSrcWithDstParts(pws, pwNew, dstPartType) + return nil + } + // Prepare BlockStreamReaders for source parts. - bsrs := make([]*blockStreamReader, 0, len(pws)) - defer func() { + bsrs, err := openBlockStreamReaders(pws) + if err != nil { + return err + } + closeBlockStreamReaders := func() { for _, bsr := range bsrs { putBlockStreamReader(bsr) } - }() + bsrs = nil + } + + // Prepare BlockStreamWriter for destination part. + srcSize := uint64(0) + srcRowsCount := uint64(0) + srcBlocksCount := uint64(0) + for _, pw := range pws { + srcSize += pw.p.size + srcRowsCount += pw.p.ph.RowsCount + srcBlocksCount += pw.p.ph.BlocksCount + } + rowsPerBlock := float64(srcRowsCount) / float64(srcBlocksCount) + compressLevel := getCompressLevel(rowsPerBlock) + bsw := getBlockStreamWriter() + var mpNew *inmemoryPart + if dstPartType == partInmemory { + mpNew = getInmemoryPart() + bsw.InitFromInmemoryPart(mpNew, compressLevel) + } else { + if tmpPartPath == "" { + logger.Panicf("BUG: tmpPartPath must be non-empty") + } + nocache := dstPartType == partBig + if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil { + closeBlockStreamReaders() + return fmt.Errorf("cannot create destination part at %q: %w", tmpPartPath, err) + } + } + + // Merge source parts to destination part. + ph, err := pt.mergePartsInternal(tmpPartPath, bsw, bsrs, dstPartType, stopCh) + putBlockStreamWriter(bsw) + closeBlockStreamReaders() + if err != nil { + return fmt.Errorf("cannot merge %d parts: %w", len(pws), err) + } + if mpNew != nil { + // Update partHeader for destination inmemory part after the merge. + mpNew.ph = *ph + } + + // Atomically move the created part from tmpPartPath to its destination + // and swap the source parts with the newly created part. + pwNew, err := pt.openCreatedPart(ph, pws, mpNew, ptPath, tmpPartPath, mergeIdx) + if err != nil { + return fmt.Errorf("cannot atomically register the created part: %w", err) + } + pt.swapSrcWithDstParts(pws, pwNew, dstPartType) + + d := time.Since(startTime) + if d <= 30*time.Second { + return nil + } + + // Log stats for long merges. 
+ dstRowsCount := uint64(0) + dstBlocksCount := uint64(0) + dstSize := uint64(0) + dstPartPath := "" + if pwNew != nil { + pDst := pwNew.p + dstRowsCount = pDst.ph.RowsCount + dstBlocksCount = pDst.ph.BlocksCount + dstSize = pDst.size + dstPartPath = pDst.String() + } + durationSecs := d.Seconds() + rowsPerSec := int(float64(srcRowsCount) / durationSecs) + logger.Infof("merged (%d parts, %d rows, %d blocks, %d bytes) into (1 part, %d rows, %d blocks, %d bytes) in %.3f seconds at %d rows/sec to %q", + len(pws), srcRowsCount, srcBlocksCount, srcSize, dstRowsCount, dstBlocksCount, dstSize, durationSecs, rowsPerSec, dstPartPath) + + return nil +} + +func getFlushToDiskDeadline(pws []*partWrapper) time.Time { + d := pws[0].flushToDiskDeadline + for _, pw := range pws[1:] { + if pw.flushToDiskDeadline.Before(d) { + d = pw.flushToDiskDeadline + } + } + return d +} + +type partType int + +var ( + partInmemory = partType(0) + partSmall = partType(1) + partBig = partType(2) +) + +func (pt *partition) getDstPartType(pws []*partWrapper, isFinal bool) partType { + dstPartSize := getPartsSize(pws) + if dstPartSize > pt.getMaxSmallPartSize() { + return partBig + } + if isFinal || dstPartSize > getMaxInmemoryPartSize() { + return partSmall + } + if !areAllInmemoryParts(pws) { + // If at least a single source part is located in file, + // then the destination part must be in file for durability reasons. + return partSmall + } + return partInmemory +} + +func (pt *partition) getDstPartPaths(dstPartType partType) (string, string, uint64) { + ptPath := "" + switch dstPartType { + case partSmall: + ptPath = pt.smallPartsPath + case partBig: + ptPath = pt.bigPartsPath + case partInmemory: + ptPath = pt.smallPartsPath + default: + logger.Panicf("BUG: unknown partType=%d", dstPartType) + } + ptPath = filepath.Clean(ptPath) + mergeIdx := pt.nextMergeIdx() + tmpPartPath := "" + if dstPartType != partInmemory { + tmpPartPath = fmt.Sprintf("%s/tmp/%016X", ptPath, mergeIdx) + } + return ptPath, tmpPartPath, mergeIdx +} + +func openBlockStreamReaders(pws []*partWrapper) ([]*blockStreamReader, error) { + bsrs := make([]*blockStreamReader, 0, len(pws)) for _, pw := range pws { bsr := getBlockStreamReader() if pw.mp != nil { bsr.InitFromInmemoryPart(pw.mp) } else { if err := bsr.InitFromFilePart(pw.p.path); err != nil { - return fmt.Errorf("cannot open source part for merging: %w", err) + for _, bsr := range bsrs { + putBlockStreamReader(bsr) + } + return nil, fmt.Errorf("cannot open source part for merging: %w", err) } } bsrs = append(bsrs, bsr) } + return bsrs, nil +} - outSize := uint64(0) - outRowsCount := uint64(0) - outBlocksCount := uint64(0) - for _, pw := range pws { - outSize += pw.p.size - outRowsCount += pw.p.ph.RowsCount - outBlocksCount += pw.p.ph.BlocksCount - } - isBigPart := outSize > maxSmallPartSize() - nocache := isBigPart - - // Prepare BlockStreamWriter for destination part. - ptPath := pt.smallPartsPath - if isBigPart { - ptPath = pt.bigPartsPath - } - ptPath = filepath.Clean(ptPath) - mergeIdx := pt.nextMergeIdx() - tmpPartPath := fmt.Sprintf("%s/tmp/%016X", ptPath, mergeIdx) - bsw := getBlockStreamWriter() - rowsPerBlock := float64(outRowsCount) / float64(outBlocksCount) - compressLevel := getCompressLevel(rowsPerBlock) - if err := bsw.InitFromFilePart(tmpPartPath, nocache, compressLevel); err != nil { - return fmt.Errorf("cannot create destination part %q: %w", tmpPartPath, err) - } - - // Merge parts. 
+func (pt *partition) mergePartsInternal(tmpPartPath string, bsw *blockStreamWriter, bsrs []*blockStreamReader, dstPartType partType, stopCh <-chan struct{}) (*partHeader, error) { var ph partHeader - rowsMerged := &pt.smallRowsMerged - rowsDeleted := &pt.smallRowsDeleted - if isBigPart { + var rowsMerged *uint64 + var rowsDeleted *uint64 + var mergesCount *uint64 + var activeMerges *uint64 + switch dstPartType { + case partInmemory: + rowsMerged = &pt.inmemoryRowsMerged + rowsDeleted = &pt.inmemoryRowsDeleted + mergesCount = &pt.inmemoryMergesCount + activeMerges = &pt.activeInmemoryMerges + case partSmall: + rowsMerged = &pt.smallRowsMerged + rowsDeleted = &pt.smallRowsDeleted + mergesCount = &pt.smallMergesCount + activeMerges = &pt.activeSmallMerges + case partBig: rowsMerged = &pt.bigRowsMerged rowsDeleted = &pt.bigRowsDeleted - atomic.AddUint64(&pt.bigMergesCount, 1) - atomic.AddUint64(&pt.activeBigMerges, 1) - } else { - atomic.AddUint64(&pt.smallMergesCount, 1) - atomic.AddUint64(&pt.activeSmallMerges, 1) + mergesCount = &pt.bigMergesCount + activeMerges = &pt.activeBigMerges + default: + logger.Panicf("BUG: unknown partType=%d", dstPartType) } - retentionDeadline := timestampFromTime(startTime) - pt.s.retentionMsecs + retentionDeadline := timestampFromTime(time.Now()) - pt.s.retentionMsecs + atomic.AddUint64(activeMerges, 1) err := mergeBlockStreams(&ph, bsw, bsrs, stopCh, pt.s, retentionDeadline, rowsMerged, rowsDeleted) - if isBigPart { - atomic.AddUint64(&pt.activeBigMerges, ^uint64(0)) - } else { - atomic.AddUint64(&pt.activeSmallMerges, ^uint64(0)) - } - putBlockStreamWriter(bsw) + atomic.AddUint64(activeMerges, ^uint64(0)) + atomic.AddUint64(mergesCount, 1) if err != nil { - return fmt.Errorf("error when merging parts to %q: %w", tmpPartPath, err) + return nil, fmt.Errorf("cannot merge parts to %q: %w", tmpPartPath, err) } - - // Close bsrs. - for _, bsr := range bsrs { - putBlockStreamReader(bsr) + if tmpPartPath != "" { + ph.MinDedupInterval = GetDedupInterval() + if err := ph.writeMinDedupInterval(tmpPartPath); err != nil { + return nil, fmt.Errorf("cannot store min dedup interval: %w", err) + } } - bsrs = nil + return &ph, nil +} - ph.MinDedupInterval = GetDedupInterval() - if err := ph.writeMinDedupInterval(tmpPartPath); err != nil { - return fmt.Errorf("cannot store min dedup interval for part %q: %w", tmpPartPath, err) +func (pt *partition) openCreatedPart(ph *partHeader, pws []*partWrapper, mpNew *inmemoryPart, ptPath, tmpPartPath string, mergeIdx uint64) (*partWrapper, error) { + dstPartPath := "" + if mpNew == nil || !areAllInmemoryParts(pws) { + // Either source or destination parts are located on disk. + // Create a transaction for atomic deleting of old parts and moving new part to its destination on disk. + var bb bytesutil.ByteBuffer + for _, pw := range pws { + if pw.mp == nil { + fmt.Fprintf(&bb, "%s\n", pw.p.path) + } + } + if ph.RowsCount > 0 { + // The destination part may have no rows if they are deleted during the merge. + dstPartPath = ph.Path(ptPath, mergeIdx) + } + fmt.Fprintf(&bb, "%s -> %s\n", tmpPartPath, dstPartPath) + txnPath := fmt.Sprintf("%s/txn/%016X", ptPath, mergeIdx) + if err := fs.WriteFileAtomically(txnPath, bb.B, false); err != nil { + return nil, fmt.Errorf("cannot create transaction file %q: %w", txnPath, err) + } + + // Run the created transaction. 
+ if err := runTransaction(&pt.snapshotLock, pt.smallPartsPath, pt.bigPartsPath, txnPath); err != nil { + return nil, fmt.Errorf("cannot execute transaction %q: %w", txnPath, err) + } } + // Open the created part. + if ph.RowsCount == 0 { + // The created part is empty. + return nil, nil + } + if mpNew != nil { + // Open the created part from memory. + flushToDiskDeadline := getFlushToDiskDeadline(pws) + pwNew := newPartWrapperFromInmemoryPart(mpNew, flushToDiskDeadline) + return pwNew, nil + } + // Open the created part from disk. + pNew, err := openFilePart(dstPartPath) + if err != nil { + return nil, fmt.Errorf("cannot open merged part %q: %w", dstPartPath, err) + } + pwNew := &partWrapper{ + p: pNew, + refCount: 1, + } + return pwNew, nil +} - // Create a transaction for atomic deleting old parts and moving - // new part to its destination place. - var bb bytesutil.ByteBuffer +func areAllInmemoryParts(pws []*partWrapper) bool { for _, pw := range pws { if pw.mp == nil { - fmt.Fprintf(&bb, "%s\n", pw.p.path) + return false } } - dstPartPath := "" - if ph.RowsCount > 0 { - // The destination part may have no rows if they are deleted - // during the merge due to deleted time series. - dstPartPath = ph.Path(ptPath, mergeIdx) - } - fmt.Fprintf(&bb, "%s -> %s\n", tmpPartPath, dstPartPath) - txnPath := fmt.Sprintf("%s/txn/%016X", ptPath, mergeIdx) - if err := fs.WriteFileAtomically(txnPath, bb.B, false); err != nil { - return fmt.Errorf("cannot create transaction file %q: %w", txnPath, err) - } + return true +} - // Run the created transaction. - if err := runTransaction(&pt.snapshotLock, pt.smallPartsPath, pt.bigPartsPath, txnPath); err != nil { - return fmt.Errorf("cannot execute transaction %q: %w", txnPath, err) - } - - var newPW *partWrapper - var newPSize uint64 - if len(dstPartPath) > 0 { - // Open the merged part if it is non-empty. - newP, err := openFilePart(dstPartPath) - if err != nil { - return fmt.Errorf("cannot open merged part %q: %w", dstPartPath, err) - } - newPSize = newP.size - newPW = &partWrapper{ - p: newP, - refCount: 1, - } - } - - // Atomically remove old parts and add new part. +func (pt *partition) swapSrcWithDstParts(pws []*partWrapper, pwNew *partWrapper, dstPartType partType) { + // Atomically unregister old parts and add new part to pt. 
m := make(map[*partWrapper]bool, len(pws)) for _, pw := range pws { m[pw] = true } if len(m) != len(pws) { - logger.Panicf("BUG: %d duplicate parts found in the merge of %d parts", len(pws)-len(m), len(pws)) + logger.Panicf("BUG: %d duplicate parts found when merging %d parts", len(pws)-len(m), len(pws)) } + removedInmemoryParts := 0 removedSmallParts := 0 removedBigParts := 0 + pt.partsLock.Lock() - pt.smallParts, removedSmallParts = removeParts(pt.smallParts, m, false) - pt.bigParts, removedBigParts = removeParts(pt.bigParts, m, true) - if newPW != nil { - if isBigPart { - pt.bigParts = append(pt.bigParts, newPW) - } else { - pt.smallParts = append(pt.smallParts, newPW) + pt.inmemoryParts, removedInmemoryParts = removeParts(pt.inmemoryParts, m) + pt.smallParts, removedSmallParts = removeParts(pt.smallParts, m) + pt.bigParts, removedBigParts = removeParts(pt.bigParts, m) + if pwNew != nil { + switch dstPartType { + case partInmemory: + pt.inmemoryParts = append(pt.inmemoryParts, pwNew) + case partSmall: + pt.smallParts = append(pt.smallParts, pwNew) + case partBig: + pt.bigParts = append(pt.bigParts, pwNew) + default: + logger.Panicf("BUG: unknown partType=%d", dstPartType) } } pt.partsLock.Unlock() - if removedSmallParts+removedBigParts != len(m) { - logger.Panicf("BUG: unexpected number of parts removed; got %d, want %d", removedSmallParts+removedBigParts, len(m)) + + removedParts := removedInmemoryParts + removedSmallParts + removedBigParts + if removedParts != len(m) { + logger.Panicf("BUG: unexpected number of parts removed; got %d, want %d", removedParts, len(m)) } // Remove partition references from old parts. for _, pw := range pws { pw.decRef() } - - d := time.Since(startTime) - if d > 30*time.Second { - logger.Infof("merged %d rows across %d blocks in %.3f seconds at %d rows/sec to %q; sizeBytes: %d", - outRowsCount, outBlocksCount, d.Seconds(), int(float64(outRowsCount)/d.Seconds()), dstPartPath, newPSize) - } - - return nil } func getCompressLevel(rowsPerBlock float64) int { @@ -1309,17 +1574,17 @@ func (pt *partition) nextMergeIdx() uint64 { return atomic.AddUint64(&pt.mergeIdx, 1) } -func removeParts(pws []*partWrapper, partsToRemove map[*partWrapper]bool, isBig bool) ([]*partWrapper, int) { - removedParts := 0 +func removeParts(pws []*partWrapper, partsToRemove map[*partWrapper]bool) ([]*partWrapper, int) { dst := pws[:0] for _, pw := range pws { if !partsToRemove[pw] { dst = append(dst, pw) - continue } - removedParts++ } - return dst, removedParts + for i := len(dst); i < len(pws); i++ { + pws[i] = nil + } + return dst, len(pws) - len(dst) } func (pt *partition) startStalePartsRemover() { @@ -1349,9 +1614,9 @@ func (pt *partition) removeStaleParts() { retentionDeadline := timestampFromTime(startTime) - pt.s.retentionMsecs pt.partsLock.Lock() - for _, pw := range pt.bigParts { + for _, pw := range pt.inmemoryParts { if !pw.isInMerge && pw.p.ph.MaxTimestamp < retentionDeadline { - atomic.AddUint64(&pt.bigRowsDeleted, pw.p.ph.RowsCount) + atomic.AddUint64(&pt.inmemoryRowsDeleted, pw.p.ph.RowsCount) m[pw] = true } } @@ -1361,28 +1626,38 @@ func (pt *partition) removeStaleParts() { m[pw] = true } } + for _, pw := range pt.bigParts { + if !pw.isInMerge && pw.p.ph.MaxTimestamp < retentionDeadline { + atomic.AddUint64(&pt.bigRowsDeleted, pw.p.ph.RowsCount) + m[pw] = true + } + } + removedInmemoryParts := 0 removedSmallParts := 0 removedBigParts := 0 if len(m) > 0 { - pt.smallParts, removedSmallParts = removeParts(pt.smallParts, m, false) - pt.bigParts, removedBigParts = 
removeParts(pt.bigParts, m, true) + pt.inmemoryParts, removedInmemoryParts = removeParts(pt.inmemoryParts, m) + pt.smallParts, removedSmallParts = removeParts(pt.smallParts, m) + pt.bigParts, removedBigParts = removeParts(pt.bigParts, m) } pt.partsLock.Unlock() - if removedSmallParts+removedBigParts != len(m) { - logger.Panicf("BUG: unexpected number of stale parts removed; got %d, want %d", removedSmallParts+removedBigParts, len(m)) + removedParts := removedInmemoryParts + removedSmallParts + removedBigParts + if removedParts != len(m) { + logger.Panicf("BUG: unexpected number of stale parts removed; got %d, want %d", removedParts, len(m)) } // Physically remove stale parts under snapshotLock in order to provide // consistent snapshots with table.CreateSnapshot(). pt.snapshotLock.RLock() for pw := range m { - logger.Infof("removing part %q, since its data is out of the configured retention (%d secs)", pw.p.path, pt.s.retentionMsecs/1000) - fs.MustRemoveDirAtomic(pw.p.path) + if pw.mp == nil { + logger.Infof("removing part %q, since its data is out of the configured retention (%d secs)", pw.p.path, pt.s.retentionMsecs/1000) + fs.MustRemoveDirAtomic(pw.p.path) + } } // There is no need in calling fs.MustSyncPath() on pt.smallPartsPath and pt.bigPartsPath, // since they should be automatically called inside fs.MustRemoveDirAtomic(). - pt.snapshotLock.RUnlock() // Remove partition references from removed parts. @@ -1458,16 +1733,7 @@ func appendPartsToMerge(dst, src []*partWrapper, maxPartsToMerge int, maxOutByte src = tmp needFreeSpace := skippedBigParts > 1 - // Sort src parts by size and backwards timestamp. - // This should improve adjanced points' locality in the merged parts. - sort.Slice(src, func(i, j int) bool { - a := src[i].p - b := src[j].p - if a.size == b.size { - return a.ph.MinTimestamp > b.ph.MinTimestamp - } - return a.size < b.size - }) + sortPartsForOptimalMerge(src) maxSrcParts := maxPartsToMerge if maxSrcParts > len(src) { @@ -1518,6 +1784,19 @@ func appendPartsToMerge(dst, src []*partWrapper, maxPartsToMerge int, maxOutByte return append(dst, pws...), needFreeSpace } +func sortPartsForOptimalMerge(pws []*partWrapper) { + // Sort src parts by size and backwards timestamp. + // This should improve adjanced points' locality in the merged parts. + sort.Slice(pws, func(i, j int) bool { + a := pws[i].p + b := pws[j].p + if a.size == b.size { + return a.ph.MinTimestamp > b.ph.MinTimestamp + } + return a.size < b.size + }) +} + func getPartsSize(pws []*partWrapper) uint64 { n := uint64(0) for _, pw := range pws { @@ -1534,7 +1813,7 @@ func openParts(pathPrefix1, pathPrefix2, path string) ([]*partWrapper, error) { fs.MustRemoveTemporaryDirs(path) d, err := os.Open(path) if err != nil { - return nil, fmt.Errorf("cannot open directory %q: %w", path, err) + return nil, fmt.Errorf("cannot open partition directory: %w", err) } defer fs.MustClose(d) @@ -1616,10 +1895,7 @@ func (pt *partition) CreateSnapshotAt(smallPath, bigPath string) error { startTime := time.Now() // Flush inmemory data to disk. - pt.flushPendingRows(true) - if _, err := pt.flushInmemoryParts(nil, true); err != nil { - return fmt.Errorf("cannot flush inmemory parts: %w", err) - } + pt.flushInmemoryRows() // The snapshot must be created under the lock in order to prevent from // concurrent modifications via runTransaction. 
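The rewritten removeParts above drops the isBig flag and, more importantly, nils out the tail of the original slice so that the removed *partWrapper pointers do not keep parts reachable until the backing array is reallocated. A self-contained illustration of the same filter-in-place idiom, using a hypothetical item type instead of partWrapper:

package main

import "fmt"

// removeItems keeps only the items that are not present in toRemove,
// filtering the slice in place. The tail of the original backing array is
// zeroed so the dropped pointers become garbage-collectable, mirroring
// removeParts() in partition.go. Sketch for illustration only.
func removeItems(items []*string, toRemove map[*string]bool) ([]*string, int) {
	dst := items[:0]
	for _, it := range items {
		if !toRemove[it] {
			dst = append(dst, it)
		}
	}
	for i := len(dst); i < len(items); i++ {
		items[i] = nil
	}
	return dst, len(items) - len(dst)
}

func main() {
	a, b, c := "a", "b", "c"
	items := []*string{&a, &b, &c}
	kept, removed := removeItems(items, map[*string]bool{&b: true})
	fmt.Println(len(kept), removed) // 2 1
}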
@@ -1645,13 +1921,13 @@ func (pt *partition) createSnapshot(srcDir, dstDir string) error { d, err := os.Open(srcDir) if err != nil { - return fmt.Errorf("cannot open difrectory: %w", err) + return fmt.Errorf("cannot open partition difrectory: %w", err) } defer fs.MustClose(d) fis, err := d.Readdir(-1) if err != nil { - return fmt.Errorf("cannot read directory: %w", err) + return fmt.Errorf("cannot read partition directory: %w", err) } for _, fi := range fis { fn := fi.Name() @@ -1700,7 +1976,7 @@ func runTransactions(txnLock *sync.RWMutex, pathPrefix1, pathPrefix2, path strin if os.IsNotExist(err) { return nil } - return fmt.Errorf("cannot open %q: %w", txnDir, err) + return fmt.Errorf("cannot open transaction directory: %w", err) } defer fs.MustClose(d) @@ -1764,30 +2040,32 @@ func runTransaction(txnLock *sync.RWMutex, pathPrefix1, pathPrefix2, txnPath str // Move the new part to new directory. srcPath := mvPaths[0] dstPath := mvPaths[1] - srcPath, err = validatePath(pathPrefix1, pathPrefix2, srcPath) - if err != nil { - return fmt.Errorf("invalid source path to rename: %w", err) - } - if len(dstPath) > 0 { - // Move srcPath to dstPath. - dstPath, err = validatePath(pathPrefix1, pathPrefix2, dstPath) + if len(srcPath) > 0 { + srcPath, err = validatePath(pathPrefix1, pathPrefix2, srcPath) if err != nil { - return fmt.Errorf("invalid destination path to rename: %w", err) + return fmt.Errorf("invalid source path to rename: %w", err) } - if fs.IsPathExist(srcPath) { - if err := os.Rename(srcPath, dstPath); err != nil { - return fmt.Errorf("cannot rename %q to %q: %w", srcPath, dstPath, err) + if len(dstPath) > 0 { + // Move srcPath to dstPath. + dstPath, err = validatePath(pathPrefix1, pathPrefix2, dstPath) + if err != nil { + return fmt.Errorf("invalid destination path to rename: %w", err) } - } else if !fs.IsPathExist(dstPath) { - // Emit info message for the expected condition after unclean shutdown on NFS disk. - // The dstPath part may be missing because it could be already merged into bigger part - // while old source parts for the current txn weren't still deleted due to NFS locks. - logger.Infof("cannot find both source and destination paths: %q -> %q; this may be the case after unclean shutdown (OOM, `kill -9`, hard reset) on NFS disk", - srcPath, dstPath) + if fs.IsPathExist(srcPath) { + if err := os.Rename(srcPath, dstPath); err != nil { + return fmt.Errorf("cannot rename %q to %q: %w", srcPath, dstPath, err) + } + } else if !fs.IsPathExist(dstPath) { + // Emit info message for the expected condition after unclean shutdown on NFS disk. + // The dstPath part may be missing because it could be already merged into bigger part + // while old source parts for the current txn weren't still deleted due to NFS locks. + logger.Infof("cannot find both source and destination paths: %q -> %q; this may be the case after unclean shutdown "+ + "(OOM, `kill -9`, hard reset) on NFS disk", srcPath, dstPath) + } + } else { + // Just remove srcPath. + fs.MustRemoveDirAtomic(srcPath) } - } else { - // Just remove srcPath. 
- fs.MustRemoveDirAtomic(srcPath) } // Flush pathPrefix* directory metadata to the underying storage, diff --git a/lib/storage/partition_search_test.go b/lib/storage/partition_search_test.go index b0e39ef7d0..afd5b7fce6 100644 --- a/lib/storage/partition_search_test.go +++ b/lib/storage/partition_search_test.go @@ -181,11 +181,12 @@ func testPartitionSearchEx(t *testing.T, ptt int64, tr TimeRange, partsCount, ma t.Fatalf("cannot remove big parts directory: %s", err) } }() + var tmpRows []rawRow for _, rows := range rowss { pt.AddRows(rows) - // Flush just added rows to a separate partition. - pt.flushPendingRows(true) + // Flush just added rows to a separate partitions. + tmpRows = pt.flushPendingRows(tmpRows[:0], true) } testPartitionSearch(t, pt, tsids, tr, rbsExpected, -1) pt.MustClose() @@ -232,8 +233,7 @@ func testPartitionSearchSerial(pt *partition, tsids []TSID, tr TimeRange, rbsExp // due to the race with raw rows flusher. var m partitionMetrics pt.UpdateMetrics(&m) - rowsCount := m.BigRowsCount + m.SmallRowsCount - if rowsCount != uint64(rowsCountExpected) { + if rowsCount := m.TotalRowsCount(); rowsCount != uint64(rowsCountExpected) { return fmt.Errorf("unexpected rows count; got %d; want %d", rowsCount, rowsCountExpected) } } @@ -258,8 +258,7 @@ func testPartitionSearchSerial(pt *partition, tsids []TSID, tr TimeRange, rbsExp if rowsCountExpected >= 0 { var m partitionMetrics pt.UpdateMetrics(&m) - rowsCount := m.BigRowsCount + m.SmallRowsCount - if rowsCount != uint64(rowsCountExpected) { + if rowsCount := m.TotalRowsCount(); rowsCount != uint64(rowsCountExpected) { return fmt.Errorf("unexpected rows count after search; got %d; want %d", rowsCount, rowsCountExpected) } } diff --git a/lib/storage/storage_test.go b/lib/storage/storage_test.go index d4d330a7e7..4dbc627007 100644 --- a/lib/storage/storage_test.go +++ b/lib/storage/storage_test.go @@ -454,7 +454,7 @@ func TestStorageOpenMultipleTimes(t *testing.T) { func TestStorageRandTimestamps(t *testing.T) { path := "TestStorageRandTimestamps" - retentionMsecs := int64(60 * msecsPerMonth) + retentionMsecs := int64(10 * msecsPerMonth) s, err := OpenStorage(path, retentionMsecs, 0, 0) if err != nil { t.Fatalf("cannot open storage: %s", err) @@ -462,10 +462,13 @@ func TestStorageRandTimestamps(t *testing.T) { t.Run("serial", func(t *testing.T) { for i := 0; i < 3; i++ { if err := testStorageRandTimestamps(s); err != nil { - t.Fatal(err) + t.Fatalf("error on iteration %d: %s", i, err) } s.MustClose() s, err = OpenStorage(path, retentionMsecs, 0, 0) + if err != nil { + t.Fatalf("cannot open storage on iteration %d: %s", i, err) + } } }) t.Run("concurrent", func(t *testing.T) { @@ -479,14 +482,15 @@ func TestStorageRandTimestamps(t *testing.T) { ch <- err }() } + tt := time.NewTimer(time.Second * 10) for i := 0; i < cap(ch); i++ { select { case err := <-ch: if err != nil { - t.Fatal(err) + t.Fatalf("error on iteration %d: %s", i, err) } - case <-time.After(time.Second * 10): - t.Fatal("timeout") + case <-tt.C: + t.Fatalf("timeout on iteration %d", i) } } }) @@ -497,9 +501,9 @@ func TestStorageRandTimestamps(t *testing.T) { } func testStorageRandTimestamps(s *Storage) error { - const rowsPerAdd = 1e3 - const addsCount = 2 - typ := reflect.TypeOf(int64(0)) + currentTime := timestampFromTime(time.Now()) + const rowsPerAdd = 5e3 + const addsCount = 3 rnd := rand.New(rand.NewSource(1)) for i := 0; i < addsCount; i++ { @@ -512,15 +516,8 @@ func testStorageRandTimestamps(s *Storage) error { for j := 0; j < rowsPerAdd; j++ { mn.MetricGroup = 
[]byte(fmt.Sprintf("metric_%d", rand.Intn(100))) metricNameRaw := mn.marshalRaw(nil) - timestamp := int64(rnd.NormFloat64() * 1e12) - if j%2 == 0 { - ts, ok := quick.Value(typ, rnd) - if !ok { - return fmt.Errorf("cannot create random timestamp via quick.Value") - } - timestamp = ts.Interface().(int64) - } - value := rnd.NormFloat64() * 1e12 + timestamp := currentTime - int64((rnd.Float64()-0.2)*float64(2*s.retentionMsecs)) + value := rnd.NormFloat64() * 1e11 mr := MetricRow{ MetricNameRaw: metricNameRaw, @@ -540,8 +537,8 @@ func testStorageRandTimestamps(s *Storage) error { // Verify the storage contains rows. var m Metrics s.UpdateMetrics(&m) - if m.TableMetrics.SmallRowsCount == 0 { - return fmt.Errorf("expecting at least one row in the table") + if rowsCount := m.TableMetrics.TotalRowsCount(); rowsCount == 0 { + return fmt.Errorf("expecting at least one row in storage") } return nil } @@ -592,14 +589,15 @@ func TestStorageDeleteSeries(t *testing.T) { ch <- err }(i) } + tt := time.NewTimer(30 * time.Second) for i := 0; i < cap(ch); i++ { select { case err := <-ch: if err != nil { - t.Fatalf("unexpected error: %s", err) + t.Fatalf("unexpected error on iteration %d: %s", i, err) } - case <-time.After(30 * time.Second): - t.Fatalf("timeout") + case <-tt.C: + t.Fatalf("timeout on iteration %d", i) } } }) @@ -932,7 +930,8 @@ func testStorageRegisterMetricNames(s *Storage) error { func TestStorageAddRowsSerial(t *testing.T) { path := "TestStorageAddRowsSerial" - s, err := OpenStorage(path, 0, 1e5, 1e5) + retentionMsecs := int64(msecsPerMonth * 10) + s, err := OpenStorage(path, retentionMsecs, 1e5, 1e5) if err != nil { t.Fatalf("cannot open storage: %s", err) } @@ -947,7 +946,8 @@ func TestStorageAddRowsSerial(t *testing.T) { func TestStorageAddRowsConcurrent(t *testing.T) { path := "TestStorageAddRowsConcurrent" - s, err := OpenStorage(path, 0, 1e5, 1e5) + retentionMsecs := int64(msecsPerMonth * 10) + s, err := OpenStorage(path, retentionMsecs, 1e5, 1e5) if err != nil { t.Fatalf("cannot open storage: %s", err) } @@ -1000,8 +1000,10 @@ func testStorageAddRows(s *Storage) error { const rowsPerAdd = 1e3 const addsCount = 10 + maxTimestamp := timestampFromTime(time.Now()) + minTimestamp := maxTimestamp - s.retentionMsecs for i := 0; i < addsCount; i++ { - mrs := testGenerateMetricRows(rowsPerAdd, 0, 1e10) + mrs := testGenerateMetricRows(rowsPerAdd, minTimestamp, maxTimestamp) if err := s.AddRows(mrs, defaultPrecisionBits); err != nil { return fmt.Errorf("unexpected error when adding mrs: %w", err) } @@ -1011,8 +1013,8 @@ func testStorageAddRows(s *Storage) error { minRowsExpected := uint64(rowsPerAdd * addsCount) var m Metrics s.UpdateMetrics(&m) - if m.TableMetrics.SmallRowsCount < minRowsExpected { - return fmt.Errorf("expecting at least %d rows in the table; got %d", minRowsExpected, m.TableMetrics.SmallRowsCount) + if rowsCount := m.TableMetrics.TotalRowsCount(); rowsCount < minRowsExpected { + return fmt.Errorf("expecting at least %d rows in the table; got %d", minRowsExpected, rowsCount) } // Try creating a snapshot from the storage. 
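The new timestamp formula in testStorageRandTimestamps generates samples relative to the current time instead of via quick.Value: since rnd.Float64() lies in [0, 1), the subtracted offset (r-0.2)*2*retentionMsecs spans roughly [-0.4*retention, 1.6*retention), i.e. samples range from about 0.4*retention in the future to 1.6*retention in the past, and only those with r in [0.2, 0.7] land inside the retention window. This is presumably why the test only asserts that at least one row reached the storage. A tiny check of those bounds, assuming msecsPerMonth = 31*24*3600*1000 as in lib/storage:

package main

import "fmt"

func main() {
	// Retention used by the updated test: 10 * msecsPerMonth.
	retention := float64(10 * 31 * 24 * 3600 * 1000)
	// timestamp = currentTime - int64((r-0.2)*2*retention), with r = rnd.Float64() in [0, 1).
	for _, r := range []float64{0.0, 0.2, 0.7, 0.999999} {
		offsetMs := int64((r - 0.2) * 2 * retention)
		fmt.Printf("r=%.6f -> sample lies %d ms before now\n", r, offsetMs)
	}
	// r=0.0           -> about 0.4*retention in the future (negative offset)
	// r in [0.2, 0.7] -> inside the retention window
	// r close to 1    -> about 1.6*retention in the past, outside retention
}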
@@ -1040,8 +1042,8 @@ func testStorageAddRows(s *Storage) error { // Verify the snapshot contains rows var m1 Metrics s1.UpdateMetrics(&m1) - if m1.TableMetrics.SmallRowsCount < minRowsExpected { - return fmt.Errorf("snapshot %q must contain at least %d rows; got %d", snapshotPath, minRowsExpected, m1.TableMetrics.SmallRowsCount) + if rowsCount := m1.TableMetrics.TotalRowsCount(); rowsCount < minRowsExpected { + return fmt.Errorf("snapshot %q must contain at least %d rows; got %d", snapshotPath, minRowsExpected, rowsCount) } // Verify that force merge for the snapshot leaves only a single part per partition. @@ -1155,22 +1157,25 @@ func testStorageAddMetrics(s *Storage, workerNum int) error { minRowsExpected := uint64(rowsCount) var m Metrics s.UpdateMetrics(&m) - if m.TableMetrics.SmallRowsCount < minRowsExpected { - return fmt.Errorf("expecting at least %d rows in the table; got %d", minRowsExpected, m.TableMetrics.SmallRowsCount) + if rowsCount := m.TableMetrics.TotalRowsCount(); rowsCount < minRowsExpected { + return fmt.Errorf("expecting at least %d rows in the table; got %d", minRowsExpected, rowsCount) } return nil } func TestStorageDeleteStaleSnapshots(t *testing.T) { path := "TestStorageDeleteStaleSnapshots" - s, err := OpenStorage(path, 0, 1e5, 1e5) + retentionMsecs := int64(msecsPerMonth * 10) + s, err := OpenStorage(path, retentionMsecs, 1e5, 1e5) if err != nil { t.Fatalf("cannot open storage: %s", err) } const rowsPerAdd = 1e3 const addsCount = 10 + maxTimestamp := timestampFromTime(time.Now()) + minTimestamp := maxTimestamp - s.retentionMsecs for i := 0; i < addsCount; i++ { - mrs := testGenerateMetricRows(rowsPerAdd, 0, 1e10) + mrs := testGenerateMetricRows(rowsPerAdd, minTimestamp, maxTimestamp) if err := s.AddRows(mrs, defaultPrecisionBits); err != nil { t.Fatalf("unexpected error when adding mrs: %s", err) } diff --git a/lib/storage/table.go b/lib/storage/table.go index e4b6030449..5eca5e4a3a 100644 --- a/lib/storage/table.go +++ b/lib/storage/table.go @@ -215,15 +215,16 @@ func (tb *table) MustClose() { } } -// flushPendingRows flushes all the pending rows, so they become visible to search. +// flushPendingRows flushes all the pending raw rows, so they become visible to search. // // This function is for debug purposes only. 
func (tb *table) flushPendingRows() { ptws := tb.GetPartitions(nil) defer tb.PutPartitions(ptws) + var rows []rawRow for _, ptw := range ptws { - ptw.pt.flushPendingRows(true) + rows = ptw.pt.flushPendingRows(rows[:0], true) } } @@ -524,7 +525,7 @@ func openPartitions(smallPartitionsPath, bigPartitionsPath string, s *Storage) ( func populatePartitionNames(partitionsPath string, ptNames map[string]bool) error { d, err := os.Open(partitionsPath) if err != nil { - return fmt.Errorf("cannot open directory with partitions %q: %w", partitionsPath, err) + return fmt.Errorf("cannot open directory with partitions: %w", err) } defer fs.MustClose(d) diff --git a/lib/storage/table_search_test.go b/lib/storage/table_search_test.go index 17e1f5b86c..fb28939d62 100644 --- a/lib/storage/table_search_test.go +++ b/lib/storage/table_search_test.go @@ -35,7 +35,7 @@ func TestTableSearch(t *testing.T) { MinTimestamp: trData.MinTimestamp + 4e3, MaxTimestamp: trData.MaxTimestamp - 4e3, } - testTableSearchEx(t, trData, trSearch, 12, 100, 1, 10) + testTableSearchEx(t, trData, trSearch, 12, 20, 1, 10) }) t.Run("SingleTSID", func(t *testing.T) { @@ -51,7 +51,7 @@ func TestTableSearch(t *testing.T) { MinTimestamp: trData.MinTimestamp + 4e3, MaxTimestamp: trData.MaxTimestamp - 4e3, } - testTableSearchEx(t, trData, trSearch, 60, 20, 30, 20) + testTableSearchEx(t, trData, trSearch, 20, 10, 30, 20) }) t.Run("ManyTSIDs", func(t *testing.T) { @@ -244,8 +244,7 @@ func testTableSearchSerial(tb *table, tsids []TSID, tr TimeRange, rbsExpected [] // they may race with raw rows flusher. var m TableMetrics tb.UpdateMetrics(&m) - rowsCount := m.BigRowsCount + m.SmallRowsCount - if rowsCount != uint64(rowsCountExpected) { + if rowsCount := m.TotalRowsCount(); rowsCount != uint64(rowsCountExpected) { return fmt.Errorf("unexpected rows count in the table; got %d; want %d", rowsCount, rowsCountExpected) } } @@ -270,8 +269,7 @@ func testTableSearchSerial(tb *table, tsids []TSID, tr TimeRange, rbsExpected [] if rowsCountExpected >= 0 { var m TableMetrics tb.UpdateMetrics(&m) - rowsCount := m.BigRowsCount + m.SmallRowsCount - if rowsCount != uint64(rowsCountExpected) { + if rowsCount := m.TotalRowsCount(); rowsCount != uint64(rowsCountExpected) { return fmt.Errorf("unexpected rows count in the table; got %d; want %d", rowsCount, rowsCountExpected) } } diff --git a/lib/storage/table_search_timing_test.go b/lib/storage/table_search_timing_test.go index cf046c5137..eb1a72b67f 100644 --- a/lib/storage/table_search_timing_test.go +++ b/lib/storage/table_search_timing_test.go @@ -55,9 +55,8 @@ func openBenchTable(b *testing.B, startTimestamp int64, rowsPerInsert, rowsCount rowsCountExpected := insertsCount * uint64(rowsPerInsert) var m TableMetrics tb.UpdateMetrics(&m) - rowsCountActual := m.BigRowsCount + m.SmallRowsCount - if rowsCountActual != rowsCountExpected { - b.Fatalf("unexpected rows count in the table %q; got %d; want %d", path, rowsCountActual, rowsCountExpected) + if rowsCount := m.TotalRowsCount(); rowsCount != rowsCountExpected { + b.Fatalf("unexpected rows count in the table %q; got %d; want %d", path, rowsCount, rowsCountExpected) } return tb diff --git a/lib/storage/table_timing_test.go b/lib/storage/table_timing_test.go index 0f7ae00d1c..11a3766fdd 100644 --- a/lib/storage/table_timing_test.go +++ b/lib/storage/table_timing_test.go @@ -101,8 +101,7 @@ func benchmarkTableAddRows(b *testing.B, rowsPerInsert, tsidsCount int) { } var m TableMetrics tb.UpdateMetrics(&m) - rowsCount := m.BigRowsCount + m.SmallRowsCount - if 
rowsCount != uint64(rowsCountExpected) { + if rowsCount := m.TotalRowsCount(); rowsCount != uint64(rowsCountExpected) { b.Fatalf("unexpected rows count in the final table %q: got %d; want %d", tablePath, rowsCount, rowsCountExpected) } tb.MustClose() From f3e84b4deaa003051d57a72586cdd201c8ce91b0 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 5 Dec 2022 15:59:52 -0800 Subject: [PATCH 25/38] {dashboards,alerts}: subtitute `{type="indexdb"}` with `{type=~"indexdb.*"}` inside queries after 8189770c50165b62867327ad388f2c2ef237ab6f Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3337 --- README.md | 2 +- dashboards/victoriametrics-cluster.json | 18 +++++++++--------- dashboards/victoriametrics.json | 12 ++++++------ deployment/docker/alerts-cluster.yml | 4 ++-- deployment/docker/alerts.yml | 4 ++-- docs/CHANGELOG.md | 2 +- docs/README.md | 2 +- docs/Single-server-VictoriaMetrics.md | 2 +- 8 files changed, 23 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index c19d003e3c..c697791ee8 100644 --- a/README.md +++ b/README.md @@ -1745,7 +1745,7 @@ and [cardinality explorer docs](#cardinality-explorer). by requesting `/internal/force_flush` http handler. This handler is mostly needed for testing and debugging purposes. * The last few seconds of inserted data may be lost on unclean shutdown (i.e. OOM, `kill -9` or hardware reset). The `-inmemoryDataFlushInterval` command-line flag allows controlling the frequency of in-memory data flush to persistent storage. - See [this article for technical details](https://valyala.medium.com/wal-usage-looks-broken-in-modern-time-series-databases-b62a627ab704). + See [storage docs](#storage) and [this article](https://valyala.medium.com/wal-usage-looks-broken-in-modern-time-series-databases-b62a627ab704) for more details. * If VictoriaMetrics works slowly and eats more than a CPU core per 100K ingested data points per second, then it is likely you have too many [active time series](https://docs.victoriametrics.com/FAQ.html#what-is-an-active-time-series) for the current amount of RAM. 
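Every dashboard and alert change below follows the same pattern: selectors that previously matched the single indexdb type are switched from an exact match to a regexp match, presumably because after commit 8189770 the index data is reported under more specific type values that all share the indexdb prefix. A before/after pair taken from the queries below:

# before: matches only the old single indexdb type
sum(vm_data_size_bytes{job=~"$job", instance=~"$instance", type="indexdb"})

# after: matches every indexdb.* type reported by newer releases
sum(vm_data_size_bytes{job=~"$job", instance=~"$instance", type=~"indexdb.*"})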
diff --git a/dashboards/victoriametrics-cluster.json b/dashboards/victoriametrics-cluster.json index 1274cd6f27..98d9f20acb 100644 --- a/dashboards/victoriametrics-cluster.json +++ b/dashboards/victoriametrics-cluster.json @@ -179,7 +179,7 @@ "uid": "$ds" }, "exemplar": true, - "expr": "sum(vm_rows{job=~\"$job_storage\", type!=\"indexdb\"})", + "expr": "sum(vm_rows{job=~\"$job_storage\", type!~\"indexdb.*\"})", "format": "time_series", "instant": true, "interval": "", @@ -599,7 +599,7 @@ "uid": "$ds" }, "exemplar": true, - "expr": "sum(vm_data_size_bytes{job=~\"$job_storage\", type!=\"indexdb\"}) / sum(vm_rows{job=~\"$job_storage\", type!=\"indexdb\"})", + "expr": "sum(vm_data_size_bytes{job=~\"$job_storage\", type!~\"indexdb.*\"}) / sum(vm_rows{job=~\"$job_storage\", type!~\"indexdb.*\"})", "format": "time_series", "instant": true, "interval": "", @@ -4484,7 +4484,7 @@ "uid": "$ds" }, "editorMode": "code", - "expr": "min(vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n - \n ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job_storage\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!=\"indexdb\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!=\"indexdb\"})\n )\n))", + "expr": "min(vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n - \n ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job_storage\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n))", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -5584,7 +5584,7 @@ "uid": "$ds" }, "editorMode": "code", - "expr": "max(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type=\"indexdb\"}) by(job, instance)\n / \n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n)", + "expr": "max(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type=~\"indexdb.*\"}) by(job, instance)\n / \n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n)", "format": "time_series", "intervalFactor": 1, "legendFormat": "indexdb", @@ -5597,7 +5597,7 @@ "uid": "$ds" }, "editorMode": "code", - "expr": "max(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"}) by(job, instance)\n / \n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n)", + "expr": "max(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"}) by(job, instance)\n / \n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\"}) by(job, instance)\n)", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -8374,7 +8374,7 @@ "uid": "$ds" }, "editorMode": "code", - "expr": "vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n - \n ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job_storage\", instance=~\"$instance\", 
type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!=\"indexdb\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!=\"indexdb\"})\n )\n)", + "expr": "vm_free_disk_space_bytes{job=~\"$job_storage\", instance=~\"$instance\"} \n/ \nignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job_storage\", instance=~\"$instance\"}[1d])\n - \n ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job_storage\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n / \n sum(vm_rows{job=~\"$job_storage\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n)", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -8579,7 +8579,7 @@ "uid": "$ds" }, "editorMode": "code", - "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type=\"indexdb\"}) by(job, instance)", + "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type=~\"indexdb.*\"}) by(job, instance)", "format": "time_series", "intervalFactor": 1, "legendFormat": "{{job}}:{{instance}} (indexdb)", @@ -8592,7 +8592,7 @@ "uid": "$ds" }, "editorMode": "code", - "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"}) by(job, instance)", + "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"}) by(job, instance)", "format": "time_series", "hide": false, "intervalFactor": 1, @@ -8791,4 +8791,4 @@ "uid": "oS7Bi_0Wz", "version": 1, "weekStart": "" -} \ No newline at end of file +} diff --git a/dashboards/victoriametrics.json b/dashboards/victoriametrics.json index f5bce5bd0a..9975145de5 100644 --- a/dashboards/victoriametrics.json +++ b/dashboards/victoriametrics.json @@ -225,7 +225,7 @@ "uid": "$ds" }, "exemplar": false, - "expr": "sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"})", + "expr": "sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"})", "format": "time_series", "instant": true, "interval": "", @@ -3767,7 +3767,7 @@ "uid": "$ds" }, "editorMode": "code", - "expr": "vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"} \n/ ignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job\", instance=~\"$instance\"}[1d]) \n - ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"}) \n / sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"})\n )\n )", + "expr": "vm_free_disk_space_bytes{job=~\"$job\", instance=~\"$instance\"} \n/ ignoring(path) (\n (\n rate(vm_rows_added_to_storage_total{job=~\"$job\", instance=~\"$instance\"}[1d]) \n - ignoring(type) rate(vm_deduplicated_samples_total{job=~\"$job\", instance=~\"$instance\", type=\"merge\"}[1d])\n ) * scalar(\n sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"}) \n / sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"})\n )\n )", "format": "time_series", "hide": false, "interval": "", @@ -3874,7 +3874,7 @@ "uid": "$ds" }, "editorMode": "code", - "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!=\"indexdb\"})", + "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"})", "format": "time_series", "interval": "", "intervalFactor": 1, @@ 
-3900,7 +3900,7 @@ "uid": "$ds" }, "editorMode": "code", - "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type=\"indexdb\"})", + "expr": "sum(vm_data_size_bytes{job=~\"$job\", instance=~\"$instance\", type=~\"indexdb.*\"})", "format": "time_series", "hide": false, "interval": "", @@ -4156,7 +4156,7 @@ "type": "prometheus", "uid": "$ds" }, - "expr": "sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type != \"indexdb\"})", + "expr": "sum(vm_rows{job=~\"$job\", instance=~\"$instance\", type!~\"indexdb.*\"})", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -5306,4 +5306,4 @@ "uid": "wNf0q_kZk", "version": 1, "weekStart": "" -} \ No newline at end of file +} diff --git a/deployment/docker/alerts-cluster.yml b/deployment/docker/alerts-cluster.yml index 15c305452c..3e68bd6e36 100644 --- a/deployment/docker/alerts-cluster.yml +++ b/deployment/docker/alerts-cluster.yml @@ -18,8 +18,8 @@ groups: ignoring(type) rate(vm_deduplicated_samples_total{type="merge"}[1d]) ) * scalar( - sum(vm_data_size_bytes{type!="indexdb"}) / - sum(vm_rows{type!="indexdb"}) + sum(vm_data_size_bytes{type!~"indexdb.*"}) / + sum(vm_rows{type!~"indexdb.*"}) ) ) < 3 * 24 * 3600 > 0 for: 30m diff --git a/deployment/docker/alerts.yml b/deployment/docker/alerts.yml index 5d478f0c76..efa3c5f7e7 100644 --- a/deployment/docker/alerts.yml +++ b/deployment/docker/alerts.yml @@ -18,8 +18,8 @@ groups: ignoring(type) rate(vm_deduplicated_samples_total{type="merge"}[1d]) ) * scalar( - sum(vm_data_size_bytes{type!="indexdb"}) / - sum(vm_rows{type!="indexdb"}) + sum(vm_data_size_bytes{type!~"indexdb.*"}) / + sum(vm_rows{type!~"indexdb.*"}) ) ) < 3 * 24 * 3600 > 0 for: 30m diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 96694cc09d..e6a92f3b5c 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -17,7 +17,7 @@ The following tip changes can be tested by building VictoriaMetrics components f **Update note 1:** this release drops support for direct upgrade from VictoriaMetrics versions prior [v1.28.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.28.0). Please upgrade to `v1.84.0`, wait until `finished round 2 of background conversion` line is emitted to log by single-node VictoriaMetrics or by `vmstorage`, and then upgrade to newer releases. -**Update note 2:** this release splits `type="indexdb"` metrics into `type="indexdb/inmemory"` and `type="indexdb/file"` metrics. This may break old dashboards and alerting rules, which contain label filters on `{type="indexdb"}`. It is recommended upgrading to the latest available dashboards and alerting rules mentioned in [these docs](https://docs.victoriametrics.com/#monitoring). +**Update note 2:** this release splits `type="indexdb"` metrics into `type="indexdb/inmemory"` and `type="indexdb/file"` metrics. This may break old dashboards and alerting rules, which contain [label filter](https://docs.victoriametrics.com/keyConcepts.html#filtering) on `{type="indexdb"}`. Such label filter must be substituted with `{type=~"indexdb.*"}`, so it matches `indexdb` from the previous releases and `indexdb/inmemory` + `indexdb/file` from new releases. It is recommended upgrading to the latest available dashboards and alerting rules mentioned in [these docs](https://docs.victoriametrics.com/#monitoring), since they already contain fixed label filters. * FEATURE: add `-inmemoryDataFlushInterval` command-line flag, which can be used for controlling the frequency of in-memory data flush to disk. 
The data flush frequency can be reduced when VictoriaMetrics stores data to low-end flash device with limited number of write cycles (for example, on Raspberry PI). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3337). * FEATURE: expose additional metrics for `indexdb` and `storage` parts stored in memory and for `indexdb` parts stored in files (see [storage docs](https://docs.victoriametrics.com/#storage) for technical details): diff --git a/docs/README.md b/docs/README.md index 1fe27c686d..7a716a12eb 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1746,7 +1746,7 @@ and [cardinality explorer docs](#cardinality-explorer). by requesting `/internal/force_flush` http handler. This handler is mostly needed for testing and debugging purposes. * The last few seconds of inserted data may be lost on unclean shutdown (i.e. OOM, `kill -9` or hardware reset). The `-inmemoryDataFlushInterval` command-line flag allows controlling the frequency of in-memory data flush to persistent storage. - See [this article for technical details](https://valyala.medium.com/wal-usage-looks-broken-in-modern-time-series-databases-b62a627ab704). + See [storage docs](#storage) and [this article](https://valyala.medium.com/wal-usage-looks-broken-in-modern-time-series-databases-b62a627ab704) for more details. * If VictoriaMetrics works slowly and eats more than a CPU core per 100K ingested data points per second, then it is likely you have too many [active time series](https://docs.victoriametrics.com/FAQ.html#what-is-an-active-time-series) for the current amount of RAM. diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md index ed764040bc..8fe2a4103f 100644 --- a/docs/Single-server-VictoriaMetrics.md +++ b/docs/Single-server-VictoriaMetrics.md @@ -1749,7 +1749,7 @@ and [cardinality explorer docs](#cardinality-explorer). by requesting `/internal/force_flush` http handler. This handler is mostly needed for testing and debugging purposes. * The last few seconds of inserted data may be lost on unclean shutdown (i.e. OOM, `kill -9` or hardware reset). The `-inmemoryDataFlushInterval` command-line flag allows controlling the frequency of in-memory data flush to persistent storage. - See [this article for technical details](https://valyala.medium.com/wal-usage-looks-broken-in-modern-time-series-databases-b62a627ab704). + See [storage docs](#storage) and [this article](https://valyala.medium.com/wal-usage-looks-broken-in-modern-time-series-databases-b62a627ab704) for more details. * If VictoriaMetrics works slowly and eats more than a CPU core per 100K ingested data points per second, then it is likely you have too many [active time series](https://docs.victoriametrics.com/FAQ.html#what-is-an-active-time-series) for the current amount of RAM. 
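Existing user-managed dashboards and alerting rules may still carry the old `{type="indexdb"}` / `{type!="indexdb"}` filters mentioned in the update note above. A rough one-off helper sketch, not part of this patch series, that widens such filters in local rule files (the file paths are placeholders, and the plain-text replacement only covers the exact spellings shown):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Placeholder paths - point these at your own dashboards/alerting rules.
	files := []string{
		"deployment/docker/alerts.yml",
		"deployment/docker/alerts-cluster.yml",
	}
	for _, path := range files {
		data, err := os.ReadFile(path)
		if err != nil {
			fmt.Fprintf(os.Stderr, "skipping %s: %v\n", path, err)
			continue
		}
		// Widen the old exact-match filters to the regexp form used by the new metrics.
		fixed := strings.ReplaceAll(string(data), `type!="indexdb"`, `type!~"indexdb.*"`)
		fixed = strings.ReplaceAll(fixed, `type="indexdb"`, `type=~"indexdb.*"`)
		if fixed == string(data) {
			continue
		}
		if err := os.WriteFile(path, []byte(fixed), 0o644); err != nil {
			fmt.Fprintf(os.Stderr, "cannot write %s: %v\n", path, err)
		}
	}
}
```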
From 86c31f2955c291da5bb652bffa9e0d5bfd59c96e Mon Sep 17 00:00:00 2001 From: Zakhar Bessarab Date: Tue, 6 Dec 2022 05:18:09 +0400 Subject: [PATCH 26/38] app/vmctl: add option to migrate between clusters with automatic tenants discovery (#3450) --- app/vmctl/README.md | 74 +++++++++++++++++++++++++++++ app/vmctl/flags.go | 8 ++++ app/vmctl/main.go | 3 +- app/vmctl/vm_native.go | 104 +++++++++++++++++++++++++++++++++-------- docs/CHANGELOG.md | 1 + 5 files changed, 169 insertions(+), 21 deletions(-) diff --git a/app/vmctl/README.md b/app/vmctl/README.md index 21a03138fa..38be9a1c72 100644 --- a/app/vmctl/README.md +++ b/app/vmctl/README.md @@ -833,6 +833,80 @@ Total: 16 B ↗ Speed: 186.32 KiB p/s 2022/08/30 19:48:24 Total time: 12.680582ms ``` +#### Cluster-to-cluster migration mode + +Using cluster-to-cluster migration mode helps to migrate all tenants data in a single `vmctl` run. + +Cluster-to-cluster uses `/admin/tenants` endpoint (available starting from [v1.84.0](https://docs.victoriametrics.com/CHANGELOG.html#v1840)) to discover list of tenants from source cluster. + +To use this mode you need to set `--vm-intercluster` flag to `true`, `--vm-native-src-addr` flag to 'http://vmselect:8481/' and `--vm-native-dst-addr` value to http://vminsert:8480/: + +```console +./bin/vmctl vm-native --vm-intercluster=true --vm-native-src-addr=http://localhost:8481/ --vm-native-dst-addr=http://172.17.0.3:8480/ +VictoriaMetrics Native import mode +2022/12/05 21:20:06 Discovered tenants: [123:1 12812919:1 1289198:1 1289:1283 12:1 1:0 1:1 1:1231231 1:1271727 1:12819 1:281 812891298:1] +2022/12/05 21:20:06 Initing export pipe from "http://localhost:8481/select/123:1/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/123:1/prometheus/api/v1/import/native": +Total: 61.13 MiB ↖ Speed: 2.05 MiB p/s +Total: 61.13 MiB ↗ Speed: 2.30 MiB p/s +2022/12/05 21:20:33 Initing export pipe from "http://localhost:8481/select/12812919:1/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/12812919:1/prometheus/api/v1/import/native": +Total: 43.14 MiB ↘ Speed: 1.86 MiB p/s +Total: 43.14 MiB ↙ Speed: 2.36 MiB p/s +2022/12/05 21:20:51 Initing export pipe from "http://localhost:8481/select/1289198:1/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/1289198:1/prometheus/api/v1/import/native": +Total: 16.64 MiB ↗ Speed: 2.66 MiB p/s +Total: 16.64 MiB ↘ Speed: 2.19 MiB p/s +2022/12/05 21:20:59 Initing export pipe from "http://localhost:8481/select/1289:1283/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/1289:1283/prometheus/api/v1/import/native": +Total: 43.33 MiB ↙ Speed: 1.94 MiB p/s +Total: 43.33 MiB ↖ Speed: 2.35 MiB p/s +2022/12/05 21:21:18 Initing export pipe from "http://localhost:8481/select/12:1/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/12:1/prometheus/api/v1/import/native": +Total: 63.78 MiB ↙ Speed: 1.96 MiB p/s +Total: 63.78 MiB ↖ Speed: 2.28 MiB p/s +2022/12/05 21:21:46 Initing export pipe from "http://localhost:8481/select/1:0/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to 
"http://172.17.0.3:8480/insert/1:0/prometheus/api/v1/import/native": +2022/12/05 21:21:46 Import finished! +Total: 330 B ↗ Speed: 3.53 MiB p/s +2022/12/05 21:21:46 Initing export pipe from "http://localhost:8481/select/1:1/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/1:1/prometheus/api/v1/import/native": +Total: 63.81 MiB ↙ Speed: 1.96 MiB p/s +Total: 63.81 MiB ↖ Speed: 2.28 MiB p/s +2022/12/05 21:22:14 Initing export pipe from "http://localhost:8481/select/1:1231231/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/1:1231231/prometheus/api/v1/import/native": +Total: 63.84 MiB ↙ Speed: 1.93 MiB p/s +Total: 63.84 MiB ↖ Speed: 2.29 MiB p/s +2022/12/05 21:22:42 Initing export pipe from "http://localhost:8481/select/1:1271727/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/1:1271727/prometheus/api/v1/import/native": +Total: 54.37 MiB ↘ Speed: 1.90 MiB p/s +Total: 54.37 MiB ↙ Speed: 2.37 MiB p/s +2022/12/05 21:23:05 Initing export pipe from "http://localhost:8481/select/1:12819/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/1:12819/prometheus/api/v1/import/native": +Total: 17.01 MiB ↙ Speed: 1.75 MiB p/s +Total: 17.01 MiB ↖ Speed: 2.15 MiB p/s +2022/12/05 21:23:13 Initing export pipe from "http://localhost:8481/select/1:281/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/1:281/prometheus/api/v1/import/native": +Total: 63.89 MiB ↘ Speed: 1.90 MiB p/s +Total: 63.89 MiB ↙ Speed: 2.29 MiB p/s +2022/12/05 21:23:42 Initing export pipe from "http://localhost:8481/select/812891298:1/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/812891298:1/prometheus/api/v1/import/native": +Total: 63.84 MiB ↖ Speed: 1.99 MiB p/s +Total: 63.84 MiB ↗ Speed: 2.26 MiB p/s +2022/12/05 21:24:10 Total time: 4m4.1466565s +``` ## Verifying exported blocks from VictoriaMetrics diff --git a/app/vmctl/flags.go b/app/vmctl/flags.go index b6254351bd..dccfb1024d 100644 --- a/app/vmctl/flags.go +++ b/app/vmctl/flags.go @@ -44,6 +44,8 @@ const ( // also used in vm-native vmExtraLabel = "vm-extra-label" vmRateLimit = "vm-rate-limit" + + vmInterCluster = "vm-intercluster" ) var ( @@ -398,6 +400,12 @@ var ( Usage: "Optional data transfer rate limit in bytes per second.\n" + "By default the rate limit is disabled. It can be useful for limiting load on source or destination databases.", }, + &cli.BoolFlag{ + Name: vmInterCluster, + Usage: "Enables cluster-to-cluster migration mode with automatic tenants data migration.\n" + + fmt.Sprintf(" In this mode --%s flag format is: 'http://vmselect:8481/'. --%s flag format is: http://vminsert:8480/. 
\n", vmNativeSrcAddr, vmNativeDstAddr) + + " TenantID will be appended automatically after discovering tenants from src.", + }, } ) diff --git a/app/vmctl/main.go b/app/vmctl/main.go index 19406d5118..51ac55c515 100644 --- a/app/vmctl/main.go +++ b/app/vmctl/main.go @@ -200,7 +200,8 @@ func main() { } p := vmNativeProcessor{ - rateLimit: c.Int64(vmRateLimit), + rateLimit: c.Int64(vmRateLimit), + interCluster: c.Bool(vmInterCluster), filter: filter{ match: c.String(vmNativeFilterMatch), timeStart: c.String(vmNativeFilterTimeStart), diff --git a/app/vmctl/vm_native.go b/app/vmctl/vm_native.go index d2f8013bbd..dd85d8b748 100644 --- a/app/vmctl/vm_native.go +++ b/app/vmctl/vm_native.go @@ -2,6 +2,7 @@ package main import ( "context" + "encoding/json" "fmt" "io" "log" @@ -19,8 +20,9 @@ type vmNativeProcessor struct { filter filter rateLimit int64 - dst *vmNativeClient - src *vmNativeClient + dst *vmNativeClient + src *vmNativeClient + interCluster bool } type vmNativeClient struct { @@ -49,15 +51,16 @@ func (f filter) String() string { } const ( - nativeExportAddr = "api/v1/export/native" - nativeImportAddr = "api/v1/import/native" + nativeExportAddr = "api/v1/export/native" + nativeImportAddr = "api/v1/import/native" + nativeTenantsAddr = "admin/tenants" nativeBarTpl = `Total: {{counters . }} {{ cycle . "↖" "↗" "↘" "↙" }} Speed: {{speed . }} {{string . "suffix"}}` ) func (p *vmNativeProcessor) run(ctx context.Context) error { if p.filter.chunk == "" { - return p.runSingle(ctx, p.filter) + return p.runWithFilter(ctx, p.filter) } startOfRange, err := time.Parse(time.RFC3339, p.filter.timeStart) @@ -89,7 +92,7 @@ func (p *vmNativeProcessor) run(ctx context.Context) error { timeStart: formattedStartTime, timeEnd: formattedEndTime, } - err := p.runSingle(ctx, f) + err := p.runWithFilter(ctx, f) if err != nil { log.Printf("processing failed for range %d/%d: %s - %s \n", rangeIdx+1, len(ranges), formattedStartTime, formattedEndTime) @@ -99,25 +102,52 @@ func (p *vmNativeProcessor) run(ctx context.Context) error { return nil } -func (p *vmNativeProcessor) runSingle(ctx context.Context, f filter) error { - pr, pw := io.Pipe() +func (p *vmNativeProcessor) runWithFilter(ctx context.Context, f filter) error { + nativeImportAddr, err := vm.AddExtraLabelsToImportPath(nativeImportAddr, p.dst.extraLabels) - log.Printf("Initing export pipe from %q with filters: %s\n", p.src.addr, f) - exportReader, err := p.exportPipe(ctx, f) + if err != nil { + return fmt.Errorf("failed to add labels to import path: %s", err) + } + + if !p.interCluster { + srcURL := fmt.Sprintf("%s/%s", p.src.addr, nativeExportAddr) + dstURL := fmt.Sprintf("%s/%s", p.dst.addr, nativeImportAddr) + + return p.runSingle(ctx, f, srcURL, dstURL) + } + + tenants, err := p.getSourceTenants(ctx, f) + if err != nil { + return fmt.Errorf("failed to get source tenants: %s", err) + } + + log.Printf("Discovered tenants: %v", tenants) + for _, tenant := range tenants { + // src and dst expected formats: http://vminsert:8480/ and http://vmselect:8481/ + srcURL := fmt.Sprintf("%s/select/%s/prometheus/%s", p.src.addr, tenant, nativeExportAddr) + dstURL := fmt.Sprintf("%s/insert/%s/prometheus/%s", p.dst.addr, tenant, nativeImportAddr) + + if err := p.runSingle(ctx, f, srcURL, dstURL); err != nil { + return fmt.Errorf("failed to migrate data for tenant %q: %s", tenant, err) + } + } + + return nil +} + +func (p *vmNativeProcessor) runSingle(ctx context.Context, f filter, srcURL, dstURL string) error { + log.Printf("Initing export pipe from %q with filters: 
%s\n", srcURL, f) + + exportReader, err := p.exportPipe(ctx, srcURL, f) if err != nil { return fmt.Errorf("failed to init export pipe: %s", err) } - nativeImportAddr, err := vm.AddExtraLabelsToImportPath(nativeImportAddr, p.dst.extraLabels) - if err != nil { - return err - } - + pr, pw := io.Pipe() sync := make(chan struct{}) go func() { defer func() { close(sync) }() - u := fmt.Sprintf("%s/%s", p.dst.addr, nativeImportAddr) - req, err := http.NewRequestWithContext(ctx, "POST", u, pr) + req, err := http.NewRequestWithContext(ctx, "POST", dstURL, pr) if err != nil { log.Fatalf("cannot create import request to %q: %s", p.dst.addr, err) } @@ -130,7 +160,7 @@ func (p *vmNativeProcessor) runSingle(ctx context.Context, f filter) error { } }() - fmt.Printf("Initing import process to %q:\n", p.dst.addr) + fmt.Printf("Initing import process to %q:\n", dstURL) pool := pb.NewPool() bar := pb.ProgressBarTemplate(nativeBarTpl).New(0) pool.Add(bar) @@ -166,9 +196,43 @@ func (p *vmNativeProcessor) runSingle(ctx context.Context, f filter) error { return nil } -func (p *vmNativeProcessor) exportPipe(ctx context.Context, f filter) (io.ReadCloser, error) { - u := fmt.Sprintf("%s/%s", p.src.addr, nativeExportAddr) +func (p *vmNativeProcessor) getSourceTenants(ctx context.Context, f filter) ([]string, error) { + u := fmt.Sprintf("%s/%s", p.src.addr, nativeTenantsAddr) req, err := http.NewRequestWithContext(ctx, "GET", u, nil) + if err != nil { + return nil, fmt.Errorf("cannot create request to %q: %s", u, err) + } + + params := req.URL.Query() + if f.timeStart != "" { + params.Set("start", f.timeStart) + } + if f.timeEnd != "" { + params.Set("end", f.timeEnd) + } + req.URL.RawQuery = params.Encode() + + resp, err := p.src.do(req, http.StatusOK) + if err != nil { + return nil, fmt.Errorf("tenants request failed: %s", err) + } + + var r struct { + Tenants []string `json:"data"` + } + if err := json.NewDecoder(resp.Body).Decode(&r); err != nil { + return nil, fmt.Errorf("cannot decode tenants response: %s", err) + } + + if err := resp.Body.Close(); err != nil { + return nil, fmt.Errorf("cannot close tenants response body: %s", err) + } + + return r.Tenants, nil +} + +func (p *vmNativeProcessor) exportPipe(ctx context.Context, url string, f filter) (io.ReadCloser, error) { + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) if err != nil { return nil, fmt.Errorf("cannot create request to %q: %s", p.src.addr, err) } diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index e6a92f3b5c..a13a6273c1 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -50,6 +50,7 @@ The following tip changes can be tested by building VictoriaMetrics components f * FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): improve error message when the requested path cannot be properly parsed, so users could identify the issue and properly fix the path. Now the error message links to [url format docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3402). * FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to copy data from sources via Prometheus `remote_read` protocol. See [these docs](https://docs.victoriametrics.com/vmctl.html#migrating-data-by-remote-read-protocol). The related issues: [one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3132) and [two](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1101). 
* FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `-remoteWrite.sendTimeout` command-line flag, which allows configuring timeout for sending data to `-remoteWrite.url`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3408). +* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to migrate data between VictoriaMetrics clusters with automatic tenants discovery. See [these docs](https://docs.victoriametrics.com/vmctl.html#cluster-to-cluster-migration-mode) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2930) * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): properly pass HTTP headers during the alert state restore procedure. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3418). * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): properly specify rule evaluation step during the [replay mode](https://docs.victoriametrics.com/vmalert.html#rules-backfilling). The `step` value was previously overriden by `-datasource.queryStep` command-line flag. From eed32b368c2bd971095cfaebde7c54d735f2ddd4 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 5 Dec 2022 17:24:08 -0800 Subject: [PATCH 27/38] docs/vmctl.md: `make docs-sync` after 86c31f2955c291da5bb652bffa9e0d5bfd59c96e Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2930 --- docs/vmctl.md | 74 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/docs/vmctl.md b/docs/vmctl.md index 6f77c01f34..dbbaf6e2cd 100644 --- a/docs/vmctl.md +++ b/docs/vmctl.md @@ -837,6 +837,80 @@ Total: 16 B ↗ Speed: 186.32 KiB p/s 2022/08/30 19:48:24 Total time: 12.680582ms ``` +#### Cluster-to-cluster migration mode + +Using cluster-to-cluster migration mode helps to migrate all tenants data in a single `vmctl` run. + +Cluster-to-cluster uses `/admin/tenants` endpoint (available starting from [v1.84.0](https://docs.victoriametrics.com/CHANGELOG.html#v1840)) to discover list of tenants from source cluster. 
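For reference, the tenant discovery step can be reproduced outside of `vmctl`. A minimal standalone sketch (the vmselect address is a placeholder; the response envelope with the `data` key mirrors what the vmctl code in the previous patch decodes):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	// Placeholder source address - a cluster vmselect reachable on port 8481.
	const srcAddr = "http://localhost:8481"

	resp, err := http.Get(srcAddr + "/admin/tenants")
	if err != nil {
		log.Fatalf("tenants request failed: %s", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		log.Fatalf("unexpected status code: %d", resp.StatusCode)
	}

	// The tenant list is returned under the "data" key, e.g. {"data":["0:0","1:0"]}.
	var r struct {
		Tenants []string `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
		log.Fatalf("cannot decode tenants response: %s", err)
	}
	for _, tenant := range r.Tenants {
		fmt.Printf("export URL: %s/select/%s/prometheus/api/v1/export/native\n", srcAddr, tenant)
	}
}
```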
+ +To use this mode you need to set `--vm-intercluster` flag to `true`, `--vm-native-src-addr` flag to 'http://vmselect:8481/' and `--vm-native-dst-addr` value to http://vminsert:8480/: + +```console +./bin/vmctl vm-native --vm-intercluster=true --vm-native-src-addr=http://localhost:8481/ --vm-native-dst-addr=http://172.17.0.3:8480/ +VictoriaMetrics Native import mode +2022/12/05 21:20:06 Discovered tenants: [123:1 12812919:1 1289198:1 1289:1283 12:1 1:0 1:1 1:1231231 1:1271727 1:12819 1:281 812891298:1] +2022/12/05 21:20:06 Initing export pipe from "http://localhost:8481/select/123:1/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/123:1/prometheus/api/v1/import/native": +Total: 61.13 MiB ↖ Speed: 2.05 MiB p/s +Total: 61.13 MiB ↗ Speed: 2.30 MiB p/s +2022/12/05 21:20:33 Initing export pipe from "http://localhost:8481/select/12812919:1/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/12812919:1/prometheus/api/v1/import/native": +Total: 43.14 MiB ↘ Speed: 1.86 MiB p/s +Total: 43.14 MiB ↙ Speed: 2.36 MiB p/s +2022/12/05 21:20:51 Initing export pipe from "http://localhost:8481/select/1289198:1/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/1289198:1/prometheus/api/v1/import/native": +Total: 16.64 MiB ↗ Speed: 2.66 MiB p/s +Total: 16.64 MiB ↘ Speed: 2.19 MiB p/s +2022/12/05 21:20:59 Initing export pipe from "http://localhost:8481/select/1289:1283/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/1289:1283/prometheus/api/v1/import/native": +Total: 43.33 MiB ↙ Speed: 1.94 MiB p/s +Total: 43.33 MiB ↖ Speed: 2.35 MiB p/s +2022/12/05 21:21:18 Initing export pipe from "http://localhost:8481/select/12:1/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/12:1/prometheus/api/v1/import/native": +Total: 63.78 MiB ↙ Speed: 1.96 MiB p/s +Total: 63.78 MiB ↖ Speed: 2.28 MiB p/s +2022/12/05 21:21:46 Initing export pipe from "http://localhost:8481/select/1:0/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/1:0/prometheus/api/v1/import/native": +2022/12/05 21:21:46 Import finished! 
+Total: 330 B ↗ Speed: 3.53 MiB p/s +2022/12/05 21:21:46 Initing export pipe from "http://localhost:8481/select/1:1/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/1:1/prometheus/api/v1/import/native": +Total: 63.81 MiB ↙ Speed: 1.96 MiB p/s +Total: 63.81 MiB ↖ Speed: 2.28 MiB p/s +2022/12/05 21:22:14 Initing export pipe from "http://localhost:8481/select/1:1231231/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/1:1231231/prometheus/api/v1/import/native": +Total: 63.84 MiB ↙ Speed: 1.93 MiB p/s +Total: 63.84 MiB ↖ Speed: 2.29 MiB p/s +2022/12/05 21:22:42 Initing export pipe from "http://localhost:8481/select/1:1271727/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/1:1271727/prometheus/api/v1/import/native": +Total: 54.37 MiB ↘ Speed: 1.90 MiB p/s +Total: 54.37 MiB ↙ Speed: 2.37 MiB p/s +2022/12/05 21:23:05 Initing export pipe from "http://localhost:8481/select/1:12819/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/1:12819/prometheus/api/v1/import/native": +Total: 17.01 MiB ↙ Speed: 1.75 MiB p/s +Total: 17.01 MiB ↖ Speed: 2.15 MiB p/s +2022/12/05 21:23:13 Initing export pipe from "http://localhost:8481/select/1:281/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/1:281/prometheus/api/v1/import/native": +Total: 63.89 MiB ↘ Speed: 1.90 MiB p/s +Total: 63.89 MiB ↙ Speed: 2.29 MiB p/s +2022/12/05 21:23:42 Initing export pipe from "http://localhost:8481/select/812891298:1/prometheus/api/v1/export/native" with filters: + filter: match[]={__name__!=""} +Initing import process to "http://172.17.0.3:8480/insert/812891298:1/prometheus/api/v1/import/native": +Total: 63.84 MiB ↖ Speed: 1.99 MiB p/s +Total: 63.84 MiB ↗ Speed: 2.26 MiB p/s +2022/12/05 21:24:10 Total time: 4m4.1466565s +``` ## Verifying exported blocks from VictoriaMetrics From d99d222f0ad54bd95f8c1a95f682881591861823 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 5 Dec 2022 21:30:48 -0800 Subject: [PATCH 28/38] lib/{storage,mergeset}: log the duration for flushing in-memory parts on graceful shutdown --- lib/mergeset/table.go | 3 +-- lib/storage/partition.go | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/mergeset/table.go b/lib/mergeset/table.go index ea18b78add..08b1e4e378 100644 --- a/lib/mergeset/table.go +++ b/lib/mergeset/table.go @@ -374,9 +374,8 @@ func (tb *Table) MustClose() { logger.Infof("flushing inmemory parts to files on %q...", tb.path) startTime = time.Now() - - // Flush inmemory items the last time before exit. tb.flushInmemoryItems() + logger.Infof("inmemory parts have been successfully flushed to files in %.3f seconds at %q", time.Since(startTime).Seconds(), tb.path) logger.Infof("waiting for flush callback worker to stop on %q...", tb.path) startTime = time.Now() diff --git a/lib/storage/partition.go b/lib/storage/partition.go index 714a50f8b2..020e6a3201 100644 --- a/lib/storage/partition.go +++ b/lib/storage/partition.go @@ -722,9 +722,8 @@ func (pt *partition) MustClose() { logger.Infof("flushing inmemory parts to files on %q...", pt.smallPartsPath) startTime = time.Now() - - // Flush inmemory rows the last time before exit. 
pt.flushInmemoryRows() + logger.Infof("inmemory parts have been flushed to files in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath) // Remove references from inmemoryParts, smallParts and bigParts, so they may be eventually closed // after all the searches are done. From 5eae9a9914d4272582514ba69a9dc2196a3ecd29 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 5 Dec 2022 21:55:01 -0800 Subject: [PATCH 29/38] app/vmselect/promql: add range_trim_spikes(phi, q) function for trimming phi percent of largest spikes per each time series returned by q --- app/vmselect/promql/exec_test.go | 12 +++++ app/vmselect/promql/transform.go | 49 +++++++++++++++++++ docs/CHANGELOG.md | 3 +- docs/MetricsQL.md | 5 ++ go.mod | 2 +- go.sum | 4 +- .../VictoriaMetrics/metricsql/optimizer.go | 2 +- .../VictoriaMetrics/metricsql/transform.go | 1 + vendor/modules.txt | 2 +- 9 files changed, 74 insertions(+), 6 deletions(-) diff --git a/app/vmselect/promql/exec_test.go b/app/vmselect/promql/exec_test.go index 49eb8d5402..0ca7c6224b 100644 --- a/app/vmselect/promql/exec_test.go +++ b/app/vmselect/promql/exec_test.go @@ -6385,6 +6385,17 @@ func TestExecSuccess(t *testing.T) { resultExpected := []netstorage.Result{r1, r2} f(q, resultExpected) }) + t.Run(`range_trim_spikes()`, func(t *testing.T) { + t.Parallel() + q := `range_trim_spikes(0.2, time())` + r := netstorage.Result{ + MetricName: metricNameExpected, + Values: []float64{nan, 1200, 1400, 1600, 1800, nan}, + Timestamps: timestampsExpected, + } + resultExpected := []netstorage.Result{r} + f(q, resultExpected) + }) t.Run(`range_quantile(0.5)`, func(t *testing.T) { t.Parallel() q := `range_quantile(0.5, time())` @@ -8189,6 +8200,7 @@ func TestExecError(t *testing.T) { f(`step(1)`) f(`running_sum(1, 2)`) f(`range_sum(1, 2)`) + f(`range_trim_spikes()`) f(`range_first(1, 2)`) f(`range_last(1, 2)`) f(`range_linear_regression(1, 2)`) diff --git a/app/vmselect/promql/transform.go b/app/vmselect/promql/transform.go index f4d1512954..752775b4ed 100644 --- a/app/vmselect/promql/transform.go +++ b/app/vmselect/promql/transform.go @@ -96,6 +96,7 @@ var transformFuncs = map[string]transformFunc{ "range_stddev": transformRangeStddev, "range_stdvar": transformRangeStdvar, "range_sum": newTransformFuncRange(runningSum), + "range_trim_spikes": transformRangeTrimSpikes, "remove_resets": transformRemoveResets, "round": transformRound, "running_avg": newTransformFuncRunning(runningAvg), @@ -1274,6 +1275,54 @@ func transformRangeNormalize(tfa *transformFuncArg) ([]*timeseries, error) { return rvs, nil } +func transformRangeTrimSpikes(tfa *transformFuncArg) ([]*timeseries, error) { + args := tfa.args + if err := expectTransformArgsNum(args, 2); err != nil { + return nil, err + } + phis, err := getScalar(args[0], 0) + if err != nil { + return nil, err + } + phi := float64(0) + if len(phis) > 0 { + phi = phis[0] + } + // Trim 100% * (phi / 2) samples with the lowest / highest values per each time series + phi /= 2 + phiUpper := 1 - phi + phiLower := phi + rvs := args[1] + a := getFloat64s() + values := a.A[:0] + for _, ts := range rvs { + values := values[:0] + originValues := ts.Values + for _, v := range originValues { + if math.IsNaN(v) { + continue + } + values = append(values, v) + } + sort.Float64s(values) + vMax := quantileSorted(phiUpper, values) + vMin := quantileSorted(phiLower, values) + for i, v := range originValues { + if math.IsNaN(v) { + continue + } + if v > vMax { + originValues[i] = nan + } else if v < vMin { + originValues[i] = nan 
+ } + } + } + a.A = values + putFloat64s(a) + return rvs, nil +} + func transformRangeLinearRegression(tfa *transformFuncArg) ([]*timeseries, error) { args := tfa.args if err := expectTransformArgsNum(args, 1); err != nil { diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index a13a6273c1..af033101da 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -48,9 +48,10 @@ The following tip changes can be tested by building VictoriaMetrics components f * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): improve [service discovery](https://docs.victoriametrics.com/sd_configs.html) performance when discovering big number of targets (10K and more). * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add `exported_` prefix to metric names exported by scrape targets if these metric names clash with [automatically generated metrics](https://docs.victoriametrics.com/vmagent.html#automatically-generated-metrics) such as `up`, `scrape_samples_scraped`, etc. This prevents from corruption of automatically generated metrics. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3406). * FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): improve error message when the requested path cannot be properly parsed, so users could identify the issue and properly fix the path. Now the error message links to [url format docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3402). -* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to copy data from sources via Prometheus `remote_read` protocol. See [these docs](https://docs.victoriametrics.com/vmctl.html#migrating-data-by-remote-read-protocol). The related issues: [one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3132) and [two](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1101). * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `-remoteWrite.sendTimeout` command-line flag, which allows configuring timeout for sending data to `-remoteWrite.url`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3408). * FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to migrate data between VictoriaMetrics clusters with automatic tenants discovery. See [these docs](https://docs.victoriametrics.com/vmctl.html#cluster-to-cluster-migration-mode) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2930) +* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to copy data from sources via Prometheus `remote_read` protocol. See [these docs](https://docs.victoriametrics.com/vmctl.html#migrating-data-by-remote-read-protocol). The related issues: [one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3132) and [two](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1101). +* FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): add `range_trim_spikes(phi, q)` function for trimming `phi` percent of the largest spikes per each time series returned by `q`. See [these docs](https://docs.victoriametrics.com/MetricsQL.html#range_trim_spikes). * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): properly pass HTTP headers during the alert state restore procedure. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3418). 
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): properly specify rule evaluation step during the [replay mode](https://docs.victoriametrics.com/vmalert.html#rules-backfilling). The `step` value was previously overriden by `-datasource.queryStep` command-line flag. diff --git a/docs/MetricsQL.md b/docs/MetricsQL.md index 67bd9aa610..9b0a8c447f 100644 --- a/docs/MetricsQL.md +++ b/docs/MetricsQL.md @@ -1247,6 +1247,11 @@ per each time series returned by `q` on the selected time range. `range_sum(q)` is a [transform function](#transform-functions), which calculates the sum of points per each time series returned by `q`. +#### range_trim_spikes + +`range_trim_spikes(phi, q)` is a [transform function](#transform-functions), which drops `phi` percent of biggest spikes from time series returned by `q`. +The `phi` must be in the range `[0..1]`, where `0` means `0%` and `1` means `100%`. + #### remove_resets `remove_resets(q)` is a [transform function](#transform-functions), which removes counter resets from time series returned by `q`. diff --git a/go.mod b/go.mod index 164b550bd8..35f09f6f9d 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( // like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b github.com/VictoriaMetrics/fasthttp v1.1.0 github.com/VictoriaMetrics/metrics v1.23.0 - github.com/VictoriaMetrics/metricsql v0.49.1 + github.com/VictoriaMetrics/metricsql v0.50.0 github.com/aws/aws-sdk-go-v2 v1.17.2 github.com/aws/aws-sdk-go-v2/config v1.18.4 github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.43 diff --git a/go.sum b/go.sum index a942cb491d..44d818e924 100644 --- a/go.sum +++ b/go.sum @@ -71,8 +71,8 @@ github.com/VictoriaMetrics/fasthttp v1.1.0/go.mod h1:/7DMcogqd+aaD3G3Hg5kFgoFwlR github.com/VictoriaMetrics/metrics v1.18.1/go.mod h1:ArjwVz7WpgpegX/JpB0zpNF2h2232kErkEnzH1sxMmA= github.com/VictoriaMetrics/metrics v1.23.0 h1:WzfqyzCaxUZip+OBbg1+lV33WChDSu4ssYII3nxtpeA= github.com/VictoriaMetrics/metrics v1.23.0/go.mod h1:rAr/llLpEnAdTehiNlUxKgnjcOuROSzpw0GvjpEbvFc= -github.com/VictoriaMetrics/metricsql v0.49.1 h1:9JAbpiZhlQnylclcf5xNtYRaBd5dr2CTPQ85RIoruuk= -github.com/VictoriaMetrics/metricsql v0.49.1/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0= +github.com/VictoriaMetrics/metricsql v0.50.0 h1:MCBhjn1qlfMqPGP6HiR9JgmEw7oTRGm/O8YwSeoaI1E= +github.com/VictoriaMetrics/metricsql v0.50.0/go.mod h1:6pP1ZeLVJHqJrHlF6Ij3gmpQIznSsgktEcZgsAWYel0= github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= diff --git a/vendor/github.com/VictoriaMetrics/metricsql/optimizer.go b/vendor/github.com/VictoriaMetrics/metricsql/optimizer.go index 3a432e63e0..3415285863 100644 --- a/vendor/github.com/VictoriaMetrics/metricsql/optimizer.go +++ b/vendor/github.com/VictoriaMetrics/metricsql/optimizer.go @@ -392,7 +392,7 @@ func getTransformArgIdxForOptimization(funcName string, args []Expr) int { return -1 case "limit_offset": return 2 - case "buckets_limit", "histogram_quantile", "histogram_share", "range_quantile": + case "buckets_limit", "histogram_quantile", "histogram_share", "range_quantile", "range_trim_spikes": return 1 case "histogram_quantiles": return len(args) - 1 diff --git a/vendor/github.com/VictoriaMetrics/metricsql/transform.go b/vendor/github.com/VictoriaMetrics/metricsql/transform.go index 5876c82908..31029f2c3e 
100644 --- a/vendor/github.com/VictoriaMetrics/metricsql/transform.go +++ b/vendor/github.com/VictoriaMetrics/metricsql/transform.go @@ -81,6 +81,7 @@ var transformFuncs = map[string]bool{ "range_stddev": true, "range_stdvar": true, "range_sum": true, + "range_trim_spikes": true, "remove_resets": true, "round": true, "running_avg": true, diff --git a/vendor/modules.txt b/vendor/modules.txt index dfb5e08c68..4bc16ef084 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -69,7 +69,7 @@ github.com/VictoriaMetrics/fasthttp/stackless # github.com/VictoriaMetrics/metrics v1.23.0 ## explicit; go 1.15 github.com/VictoriaMetrics/metrics -# github.com/VictoriaMetrics/metricsql v0.49.1 +# github.com/VictoriaMetrics/metricsql v0.50.0 ## explicit; go 1.13 github.com/VictoriaMetrics/metricsql github.com/VictoriaMetrics/metricsql/binaryop From fd43b5bad022bdca9fbfef9a9831559f0299018f Mon Sep 17 00:00:00 2001 From: Yury Molodov Date: Tue, 6 Dec 2022 06:56:54 +0100 Subject: [PATCH 30/38] vmui: fix multi-line query (#3448) * fix: remove prevent nav by up/down keys for multi-line query * fix: add query params encode in URL --- .../vmui/src/components/Main/Autocomplete/Autocomplete.tsx | 5 +++-- app/vmui/packages/vmui/src/utils/query-string.ts | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/app/vmui/packages/vmui/src/components/Main/Autocomplete/Autocomplete.tsx b/app/vmui/packages/vmui/src/components/Main/Autocomplete/Autocomplete.tsx index 021e05797c..3cb15557e8 100644 --- a/app/vmui/packages/vmui/src/components/Main/Autocomplete/Autocomplete.tsx +++ b/app/vmui/packages/vmui/src/components/Main/Autocomplete/Autocomplete.tsx @@ -56,13 +56,14 @@ const Autocomplete: FC = ({ const handleKeyDown = (e: KeyboardEvent) => { const { key, ctrlKey, metaKey, shiftKey } = e; const modifiers = ctrlKey || metaKey || shiftKey; + const hasOptions = foundOptions.length; - if (key === "ArrowUp" && !modifiers) { + if (key === "ArrowUp" && !modifiers && hasOptions) { e.preventDefault(); setFocusOption((prev) => prev <= 0 ? 0 : prev - 1); } - if (key === "ArrowDown" && !modifiers) { + if (key === "ArrowDown" && !modifiers && hasOptions) { e.preventDefault(); const lastIndex = foundOptions.length - 1; setFocusOption((prev) => prev >= lastIndex ? lastIndex : prev + 1); diff --git a/app/vmui/packages/vmui/src/utils/query-string.ts b/app/vmui/packages/vmui/src/utils/query-string.ts index 170a163795..333bd93550 100644 --- a/app/vmui/packages/vmui/src/utils/query-string.ts +++ b/app/vmui/packages/vmui/src/utils/query-string.ts @@ -5,7 +5,7 @@ import { MAX_QUERY_FIELDS } from "../constants/graph"; export const setQueryStringWithoutPageReload = (params: Record): void => { const w = window; if (w) { - const qsValue = Object.entries(params).map(([k, v]) => `${k}=${v}`).join("&"); + const qsValue = Object.entries(params).map(([k, v]) => `${k}=${encodeURIComponent(String(v))}`).join("&"); const qs = qsValue ? 
`?${qsValue}` : ""; const newurl = `${w.location.protocol}//${w.location.host}${w.location.pathname}${qs}${w.location.hash}`; w.history.pushState({ path: newurl }, "", newurl); From 718d1d90b6bc784069ec6e7cb12bbeee5498e353 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 5 Dec 2022 22:01:39 -0800 Subject: [PATCH 31/38] docs/CHANGELOG.md: document fd43b5bad022bdca9fbfef9a9831559f0299018f Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3444 Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3445 --- docs/CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index af033101da..468e3f6ac1 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -49,12 +49,14 @@ The following tip changes can be tested by building VictoriaMetrics components f * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add `exported_` prefix to metric names exported by scrape targets if these metric names clash with [automatically generated metrics](https://docs.victoriametrics.com/vmagent.html#automatically-generated-metrics) such as `up`, `scrape_samples_scraped`, etc. This prevents from corruption of automatically generated metrics. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3406). * FEATURE: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): improve error message when the requested path cannot be properly parsed, so users could identify the issue and properly fix the path. Now the error message links to [url format docs](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#url-format). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3402). * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `-remoteWrite.sendTimeout` command-line flag, which allows configuring timeout for sending data to `-remoteWrite.url`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3408). -* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to migrate data between VictoriaMetrics clusters with automatic tenants discovery. See [these docs](https://docs.victoriametrics.com/vmctl.html#cluster-to-cluster-migration-mode) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2930) +* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to migrate data between VictoriaMetrics clusters with automatic tenants discovery. See [these docs](https://docs.victoriametrics.com/vmctl.html#cluster-to-cluster-migration-mode) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2930). * FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): add ability to copy data from sources via Prometheus `remote_read` protocol. See [these docs](https://docs.victoriametrics.com/vmctl.html#migrating-data-by-remote-read-protocol). The related issues: [one](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3132) and [two](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1101). * FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): add `range_trim_spikes(phi, q)` function for trimming `phi` percent of the largest spikes per each time series returned by `q`. See [these docs](https://docs.victoriametrics.com/MetricsQL.html#range_trim_spikes). 
* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): properly pass HTTP headers during the alert state restore procedure. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3418). * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): properly specify rule evaluation step during the [replay mode](https://docs.victoriametrics.com/vmalert.html#rules-backfilling). The `step` value was previously overriden by `-datasource.queryStep` command-line flag. +* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): properly put multi-line queries in the url, so it could be copy-n-pasted and opened without issues in a new browser tab. Previously the url for multi-line query couldn't be opened. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3444). +* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): correctly handle `up` and `down` keypresses when editing multi-line queries. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3445). ## [v1.84.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.84.0) From 71f0bbbe39973c2962e9ee459931d5bc2b7085b7 Mon Sep 17 00:00:00 2001 From: Roman Khavronenko Date: Tue, 6 Dec 2022 07:05:31 +0100 Subject: [PATCH 32/38] deployment: update the README (#3447) Signed-off-by: hagen1778 Signed-off-by: hagen1778 --- deployment/docker/README.md | 52 ++++++++++++++++++------------------- 1 file changed, 25 insertions(+), 27 deletions(-) diff --git a/deployment/docker/README.md b/deployment/docker/README.md index e1fa97d18c..342f592e98 100644 --- a/deployment/docker/README.md +++ b/deployment/docker/README.md @@ -5,14 +5,14 @@ Docker compose environment for VictoriaMetrics includes VictoriaMetrics componen and [Grafana](https://grafana.com/). For starting the docker-compose environment ensure you have docker installed and running and access to the Internet. -All commands should be executed from the root directory of this repo. 
+**All commands should be executed from the root directory of [the repo](https://github.com/VictoriaMetrics/VictoriaMetrics).** -To spin-up environment for single server VictoriaMetrics run the following command : +To spin-up environment for single server VictoriaMetrics run the following command: ``` make docker-single-up ``` -To shutdown the docker compose environment for single server run the following command: +To shut down the docker-compose environment for single server run the following command: ``` make docker-single-down ``` @@ -22,7 +22,7 @@ For cluster version the command will be the following: make docker-cluster-up ``` -To shutdown the docker compose environment for cluster version run the following command: +To shut down the docker compose environment for cluster version run the following command: ``` make docker-cluster-down ``` @@ -36,51 +36,49 @@ VictoriaMetrics will be accessible on the following ports: * `--httpListenAddr=:8428` The communication scheme between components is the following: -* [vmagent](#vmagent) sends scraped metrics to VictoriaMetrics; -* [grafana](#grafana) is configured with datasource pointing to VictoriaMetrics; -* [vmalert](#vmalert) is configured to query VictoriaMetrics and send alerts state +* [vmagent](#vmagent) sends scraped metrics to `single server VictoriaMetrics`; +* [grafana](#grafana) is configured with datasource pointing to `single server VictoriaMetrics`; +* [vmalert](#vmalert) is configured to query `single server VictoriaMetrics` and send alerts state and recording rules back to it; -* [alertmanager](#alertmanager) is configured to receive notifications from vmalert. +* [alertmanager](#alertmanager) is configured to receive notifications from `vmalert`. -To access `vmalert` via `vmselect` -use link [http://localhost:8428/vmalert](http://localhost:8428/vmalert/). +To access `vmalert` use link [http://localhost:8428/vmalert](http://localhost:8428/vmalert/). To access [vmui](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#vmui) use link [http://localhost:8428/vmui](http://localhost:8428/vmui). ## VictoriaMetrics cluster -VictoriaMetrics cluster environemnt consists of vminsert, vmstorage and vmselect components. vmselect -has exposed port `:8481`, vminsert has exposed port `:8480` and the rest of components are available -only inside of environment. +VictoriaMetrics cluster environment consists of `vminsert`, `vmstorage` and `vmselect` components. +`vmselect` has exposed port `:8481`, `vminsert` has exposed port `:8480` and the rest of components +are available only inside the environment. The communication scheme between components is the following: -* [vmagent](#vmagent) sends scraped metrics to vminsert; -* vminsert forwards data to vmstorage; -* vmselect is connected to vmstorage for querying data; -* [grafana](#grafana) is configured with datasource pointing to vmselect; -* [vmalert](#vmalert) is configured to query vmselect and send alerts state - and recording rules to vminsert; -* [alertmanager](#alertmanager) is configured to receive notifications from vmalert. +* [vmagent](#vmagent) sends scraped metrics to `vminsert`; +* `vminsert` forwards data to `vmstorage`; +* `vmselect` is connected to `vmstorage` for querying data; +* [grafana](#grafana) is configured with datasource pointing to `vmselect`; +* [vmalert](#vmalert) is configured to query `vmselect` and send alerts state + and recording rules to `vminsert`; +* [alertmanager](#alertmanager) is configured to receive notifications from `vmalert`. 
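The exposed ports listed above also allow a quick liveness check of the cluster environment. A small hedged sketch, assuming the docker-compose default ports from this README and the standard `/health` endpoint served by VictoriaMetrics components:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Ports follow the docker-compose defaults described above;
	// /health is assumed to be served by each VictoriaMetrics component.
	targets := map[string]string{
		"vminsert": "http://localhost:8480/health",
		"vmselect": "http://localhost:8481/health",
	}
	client := &http.Client{Timeout: 5 * time.Second}
	for name, url := range targets {
		resp, err := client.Get(url)
		if err != nil {
			fmt.Printf("%s: unreachable: %v\n", name, err)
			continue
		}
		fmt.Printf("%s: %s\n", name, resp.Status)
		resp.Body.Close()
	}
}
```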
-To access `vmalert` via `vmselect`
-use link [http://localhost:8481/select/0/prometheus/vmalert](http://localhost:8481/select/0/prometheus/vmalert/).
+To access `vmalert` use link [http://localhost:8481/select/0/prometheus/vmalert](http://localhost:8481/select/0/prometheus/vmalert/).

 To access [vmui](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#vmui) use link [http://localhost:8481/select/0/prometheus/vmui](http://localhost:8481/select/0/prometheus/vmui).

 ## vmagent

-vmagent is used for scraping and pushing timeseries to
-VictoriaMetrics instance. It accepts Prometheus-compatible
-configuration `prometheus.yml` with listed targets for scraping.
+vmagent is used for scraping and pushing time series to a VictoriaMetrics instance.
+It accepts Prometheus-compatible configuration [prometheus.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/prometheus.yml)
+with listed targets for scraping.

 [Web interface link](http://localhost:8429/).

 ## vmalert

-vmalert evaluates alerting rules (`alerts.yml`) to track VictoriaMetrics
-health state. It is connected with AlertManager for firing alerts,
+vmalert evaluates alerting rules [alerts.yml](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/deployment/docker/alerts.yml)
+to track VictoriaMetrics health state. It is connected with AlertManager for firing alerts,
 and with VictoriaMetrics for executing queries and storing alert's state.

 [Web interface link](http://localhost:8880/).

From 01a9b36a95ee10374d21b959a7a4db2b80b0cc11 Mon Sep 17 00:00:00 2001
From: Yury Molodov
Date: Tue, 6 Dec 2022 07:44:31 +0100
Subject: [PATCH 33/38] vmui: timezone select (#3414)

* feat: add timezone selection

* vmui: provide feature timezone select

* fix: correct timezone with relative time

Co-authored-by: Aliaksandr Valialkin
---
 .../Chart/ChartTooltip/ChartTooltip.tsx | 2 +-
 .../components/Chart/LineChart/LineChart.tsx | 9 +-
 .../CardinalityDatePicker.tsx | 2 +-
 .../GlobalSettings/GlobalSettings.tsx | 12 ++
 .../LimitsConfigurator/LimitsConfigurator.tsx | 2 +-
 .../LimitsConfigurator/style.scss | 7 -
 .../ServerConfigurator/ServerConfigurator.tsx | 20 ++-
 .../GlobalSettings/Timezones/Timezones.tsx | 143 ++++++++++++++++++
 .../GlobalSettings/Timezones/style.scss | 96 ++++++++++++
 .../Configurators/GlobalSettings/style.scss | 9 ++
 .../TimeDurationSelector/style.scss | 2 +-
 .../TimeSelector/TimeSelector.tsx | 41 +++--
 .../TimeRangeSettings/TimeSelector/style.scss | 24 +++
 .../Main/DatePicker/Calendar/Calendar.tsx | 6 +-
 .../Calendar/CalendarBody/CalendarBody.tsx | 2 +-
 .../Main/DatePicker/Calendar/style.scss | 1 +
 .../components/Main/DatePicker/DatePicker.tsx | 2 +-
 .../components/Views/GraphView/GraphView.tsx | 4 +-
 .../vmui/src/constants/dayjsPlugins.ts | 8 +
 .../packages/vmui/src/contexts/Snackbar.tsx | 2 +-
 .../vmui/src/hooks/useClickOutside.ts | 8 +-
 app/vmui/packages/vmui/src/index.tsx | 1 +
 .../vmui/src/state/cardinality/reducer.ts | 2 +-
 .../packages/vmui/src/state/time/reducer.ts | 23 ++-
 app/vmui/packages/vmui/src/types/index.ts | 6 +
 app/vmui/packages/vmui/src/utils/storage.ts | 1 +
 app/vmui/packages/vmui/src/utils/time.ts | 70 ++++++---
 .../packages/vmui/src/utils/uplot/axes.ts | 14 +-
 docs/CHANGELOG.md | 1 +
 29 files changed, 453 insertions(+), 67 deletions(-)
 create mode 100644 app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/Timezones/Timezones.tsx
 create mode 100644 app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/Timezones/style.scss
 create mode 100644
app/vmui/packages/vmui/src/constants/dayjsPlugins.ts diff --git a/app/vmui/packages/vmui/src/components/Chart/ChartTooltip/ChartTooltip.tsx b/app/vmui/packages/vmui/src/components/Chart/ChartTooltip/ChartTooltip.tsx index 0997719467..f8a38280d1 100644 --- a/app/vmui/packages/vmui/src/components/Chart/ChartTooltip/ChartTooltip.tsx +++ b/app/vmui/packages/vmui/src/components/Chart/ChartTooltip/ChartTooltip.tsx @@ -49,7 +49,7 @@ const ChartTooltip: FC = ({ const value = useMemo(() => get(u, ["data", seriesIdx, dataIdx], 0), [u, seriesIdx, dataIdx]); const valueFormat = useMemo(() => formatPrettyNumber(value), [value]); const dataTime = useMemo(() => u.data[0][dataIdx], [u, dataIdx]); - const date = useMemo(() => dayjs(new Date(dataTime * 1000)).format(DATE_FULL_TIMEZONE_FORMAT), [dataTime]); + const date = useMemo(() => dayjs(dataTime * 1000).tz().format(DATE_FULL_TIMEZONE_FORMAT), [dataTime]); const color = useMemo(() => getColorLine(series[seriesIdx]?.label || ""), [series, seriesIdx]); diff --git a/app/vmui/packages/vmui/src/components/Chart/LineChart/LineChart.tsx b/app/vmui/packages/vmui/src/components/Chart/LineChart/LineChart.tsx index 1e7e4ff7c0..b7d8e475f8 100644 --- a/app/vmui/packages/vmui/src/components/Chart/LineChart/LineChart.tsx +++ b/app/vmui/packages/vmui/src/components/Chart/LineChart/LineChart.tsx @@ -11,7 +11,7 @@ import { defaultOptions } from "../../../utils/uplot/helpers"; import { dragChart } from "../../../utils/uplot/events"; import { getAxes, getMinMaxBuffer } from "../../../utils/uplot/axes"; import { MetricResult } from "../../../api/types"; -import { limitsDurations } from "../../../utils/time"; +import { dateFromSeconds, formatDateForNativeInput, limitsDurations } from "../../../utils/time"; import throttle from "lodash.throttle"; import useResize from "../../../hooks/useResize"; import { TimeParams } from "../../../types"; @@ -20,6 +20,7 @@ import "uplot/dist/uPlot.min.css"; import "./style.scss"; import classNames from "classnames"; import ChartTooltip, { ChartTooltipProps } from "../ChartTooltip/ChartTooltip"; +import dayjs from "dayjs"; export interface LineChartProps { metrics: MetricResult[]; @@ -57,7 +58,10 @@ const LineChart: FC = ({ const tooltipId = useMemo(() => `${tooltipIdx.seriesIdx}_${tooltipIdx.dataIdx}`, [tooltipIdx]); const setScale = ({ min, max }: { min: number, max: number }): void => { - setPeriod({ from: new Date(min * 1000), to: new Date(max * 1000) }); + setPeriod({ + from: dayjs(min * 1000).toDate(), + to: dayjs(max * 1000).toDate() + }); }; const throttledSetScale = useCallback(throttle(setScale, 500), []); const setPlotScale = ({ u, min, max }: { u: uPlot, min: number, max: number }) => { @@ -163,6 +167,7 @@ const LineChart: FC = ({ const options: uPlotOptions = { ...defaultOptions, + tzDate: ts => dayjs(formatDateForNativeInput(dateFromSeconds(ts))).local().toDate(), series, axes: getAxes( [{}, { scale: "1" }], unit), scales: { ...getScales() }, diff --git a/app/vmui/packages/vmui/src/components/Configurators/CardinalityDatePicker/CardinalityDatePicker.tsx b/app/vmui/packages/vmui/src/components/Configurators/CardinalityDatePicker/CardinalityDatePicker.tsx index ed6b5bb489..04fa6c3c83 100644 --- a/app/vmui/packages/vmui/src/components/Configurators/CardinalityDatePicker/CardinalityDatePicker.tsx +++ b/app/vmui/packages/vmui/src/components/Configurators/CardinalityDatePicker/CardinalityDatePicker.tsx @@ -15,7 +15,7 @@ const CardinalityDatePicker: FC = () => { const { date } = useCardinalityState(); const cardinalityDispatch = 
useCardinalityDispatch(); - const dateFormatted = useMemo(() => dayjs(date).format(DATE_FORMAT), [date]); + const dateFormatted = useMemo(() => dayjs.tz(date).format(DATE_FORMAT), [date]); const handleChangeDate = (val: string) => { cardinalityDispatch({ type: "SET_DATE", payload: val }); diff --git a/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/GlobalSettings.tsx b/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/GlobalSettings.tsx index 09e351f678..589b353f34 100644 --- a/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/GlobalSettings.tsx +++ b/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/GlobalSettings.tsx @@ -11,6 +11,8 @@ import { SeriesLimits } from "../../../types"; import { useCustomPanelDispatch, useCustomPanelState } from "../../../state/customPanel/CustomPanelStateContext"; import { getAppModeEnable } from "../../../utils/app-mode"; import classNames from "classnames"; +import Timezones from "./Timezones/Timezones"; +import { useTimeDispatch, useTimeState } from "../../../state/time/TimeStateContext"; const title = "Settings"; @@ -18,13 +20,16 @@ const GlobalSettings: FC = () => { const appModeEnable = getAppModeEnable(); const { serverUrl: stateServerUrl } = useAppState(); + const { timezone: stateTimezone } = useTimeState(); const { seriesLimits } = useCustomPanelState(); const dispatch = useAppDispatch(); + const timeDispatch = useTimeDispatch(); const customPanelDispatch = useCustomPanelDispatch(); const [serverUrl, setServerUrl] = useState(stateServerUrl); const [limits, setLimits] = useState(seriesLimits); + const [timezone, setTimezone] = useState(stateTimezone); const [open, setOpen] = useState(false); const handleOpen = () => setOpen(true); @@ -32,6 +37,7 @@ const GlobalSettings: FC = () => { const handlerApply = () => { dispatch({ type: "SET_SERVER", payload: serverUrl }); + timeDispatch({ type: "SET_TIMEZONE", payload: timezone }); customPanelDispatch({ type: "SET_SERIES_LIMITS", payload: limits }); handleClose(); }; @@ -70,6 +76,12 @@ const GlobalSettings: FC = () => { onEnter={handlerApply} /> +
+ +
+ ); +}; + +export default Timezones; diff --git a/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/Timezones/style.scss b/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/Timezones/style.scss new file mode 100644 index 0000000000..185d7e845a --- /dev/null +++ b/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/Timezones/style.scss @@ -0,0 +1,96 @@ +@use "src/styles/variables" as *; + +.vm-timezones { + + &-item { + display: flex; + align-items: center; + justify-content: space-between; + gap: $padding-small; + cursor: pointer; + + &_selected { + border: $border-divider; + padding: $padding-small $padding-global; + border-radius: $border-radius-small; + } + + &__title { + text-transform: capitalize; + } + + &__utc { + display: inline-flex; + align-items: center; + justify-content: center; + background-color: rgba($color-black, 0.06); + padding: calc($padding-small/2); + border-radius: $border-radius-small; + } + + &__icon { + display: inline-flex; + align-items: center; + justify-content: flex-end; + margin: 0 0 0 auto; + transition: transform 200ms ease-in; + + svg { + width: 14px; + } + + &_open { + transform: rotate(180deg); + } + } + } + + &-list { + min-width: 600px; + max-height: 300px; + background-color: $color-background-block; + border-radius: $border-radius-medium; + overflow: auto; + + &-header { + position: sticky; + top: 0; + background-color: $color-background-block; + z-index: 2; + border-bottom: $border-divider; + + &__search { + padding: $padding-small; + } + } + + &-group { + padding: $padding-small 0; + border-bottom: $border-divider; + + &:last-child { + border-bottom: none; + } + + &__title { + font-weight: bold; + color: $color-text-secondary; + padding: $padding-small $padding-global; + } + + &-options { + display: grid; + align-items: flex-start; + + &__item { + padding: $padding-small $padding-global; + transition: background-color 200ms ease; + + &:hover { + background-color: rgba($color-black, 0.1); + } + } + } + } + } +} diff --git a/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/style.scss b/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/style.scss index 5f40cf32d2..ec0d5c391d 100644 --- a/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/style.scss +++ b/app/vmui/packages/vmui/src/components/Configurators/GlobalSettings/style.scss @@ -10,6 +10,15 @@ } + &__title { + display: flex; + align-items: center; + justify-content: flex-start; + font-size: $font-size; + font-weight: bold; + margin-bottom: $padding-global; + } + &__footer { display: inline-grid; grid-template-columns: repeat(2, 1fr); diff --git a/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeDurationSelector/style.scss b/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeDurationSelector/style.scss index 79eb3be594..e00038492d 100644 --- a/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeDurationSelector/style.scss +++ b/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeDurationSelector/style.scss @@ -1,7 +1,7 @@ @use "src/styles/variables" as *; .vm-time-duration { - max-height: 168px; + max-height: 200px; overflow: auto; font-size: $font-size; } diff --git a/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeSelector/TimeSelector.tsx b/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeSelector/TimeSelector.tsx index 75d5a82cfc..d362277fc2 100644 --- 
a/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeSelector/TimeSelector.tsx +++ b/app/vmui/packages/vmui/src/components/Configurators/TimeRangeSettings/TimeSelector/TimeSelector.tsx @@ -1,5 +1,5 @@ import React, { FC, useEffect, useState, useMemo, useRef } from "preact/compat"; -import { dateFromSeconds, formatDateForNativeInput } from "../../../../utils/time"; +import { dateFromSeconds, formatDateForNativeInput, getRelativeTime, getUTCByTimezone } from "../../../../utils/time"; import TimeDurationSelector from "../TimeDurationSelector/TimeDurationSelector"; import dayjs from "dayjs"; import { getAppModeEnable } from "../../../../utils/app-mode"; @@ -22,20 +22,25 @@ export const TimeSelector: FC = () => { const [until, setUntil] = useState(); const [from, setFrom] = useState(); - const formFormat = useMemo(() => dayjs(from).format(DATE_TIME_FORMAT), [from]); - const untilFormat = useMemo(() => dayjs(until).format(DATE_TIME_FORMAT), [until]); + const formFormat = useMemo(() => dayjs.tz(from).format(DATE_TIME_FORMAT), [from]); + const untilFormat = useMemo(() => dayjs.tz(until).format(DATE_TIME_FORMAT), [until]); - const { period: { end, start }, relativeTime } = useTimeState(); + const { period: { end, start }, relativeTime, timezone, duration } = useTimeState(); const dispatch = useTimeDispatch(); const appModeEnable = getAppModeEnable(); + const activeTimezone = useMemo(() => ({ + region: timezone, + utc: getUTCByTimezone(timezone) + }), [timezone]); + useEffect(() => { setUntil(formatDateForNativeInput(dateFromSeconds(end))); - }, [end]); + }, [timezone, end]); useEffect(() => { setFrom(formatDateForNativeInput(dateFromSeconds(start))); - }, [start]); + }, [timezone, start]); const setDuration = ({ duration, until, id }: {duration: string, until: Date, id: string}) => { dispatch({ type: "SET_RELATIVE_TIME", payload: { duration, until, id } }); @@ -43,13 +48,13 @@ export const TimeSelector: FC = () => { }; const formatRange = useMemo(() => { - const startFormat = dayjs(dateFromSeconds(start)).format(DATE_TIME_FORMAT); - const endFormat = dayjs(dateFromSeconds(end)).format(DATE_TIME_FORMAT); + const startFormat = dayjs.tz(dateFromSeconds(start)).format(DATE_TIME_FORMAT); + const endFormat = dayjs.tz(dateFromSeconds(end)).format(DATE_TIME_FORMAT); return { start: startFormat, end: endFormat }; - }, [start, end]); + }, [start, end, timezone]); const dateTitle = useMemo(() => { const isRelativeTime = relativeTime && relativeTime !== "none"; @@ -65,7 +70,10 @@ export const TimeSelector: FC = () => { const setTimeAndClosePicker = () => { if (from && until) { - dispatch({ type: "SET_PERIOD", payload: { from: new Date(from), to: new Date(until) } }); + dispatch({ type: "SET_PERIOD", payload: { + from: dayjs(from).toDate(), + to: dayjs(until).toDate() + } }); } setOpenOptions(false); }; @@ -91,6 +99,15 @@ export const TimeSelector: FC = () => { setOpenOptions(false); }; + useEffect(() => { + const value = getRelativeTime({ + relativeTimeId: relativeTime, + defaultDuration: duration, + defaultEndInput: dateFromSeconds(end), + }); + setDuration({ id: value.relativeTimeId, duration: value.duration, until: value.endInput }); + }, [timezone]); + useClickOutside(wrapperRef, (e) => { const target = e.target as HTMLElement; const isFromButton = fromRef?.current && fromRef.current.contains(target); @@ -159,6 +176,10 @@ export const TimeSelector: FC = () => { /> +
+        {/* timezone indicator rendering {activeTimezone.region} and {activeTimezone.utc}; original JSX markup not preserved in this extract */}
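
The diffstat for this patch lists a new 8-line `app/vmui/packages/vmui/src/constants/dayjsPlugins.ts`, presumably imported from `index.tsx` (which gains one line), but its body is not included in this excerpt. The `.tz()` and `dayjs.tz(...)` calls introduced throughout the diff (ChartTooltip, CardinalityDatePicker, TimeSelector) only work once the dayjs `utc` and `timezone` plugins are registered and a default timezone is applied. A minimal sketch of what such a setup typically looks like follows; the helper name and the way the default is applied are assumptions, not the actual file contents:

```ts
// dayjsPlugins.ts (sketch): registers the plugins that make dayjs.tz() and .tz() available.
// The real file added by this patch may differ; only the plugin registration is essential.
import dayjs from "dayjs";
import utc from "dayjs/plugin/utc";
import timezone from "dayjs/plugin/timezone";

dayjs.extend(utc);
dayjs.extend(timezone);

// Hypothetical helper: apply the timezone chosen in the new settings panel
// (the "SET_TIMEZONE" action wired up in GlobalSettings.tsx) as the default
// for every subsequent .tz() call, falling back to the browser's zone.
export const setDayjsTimezone = (tz?: string): void => {
  dayjs.tz.setDefault(tz || dayjs.tz.guess());
};

// With the default set, code like the ChartTooltip change can format a unix
// timestamp in the selected zone without passing the zone explicitly:
//   dayjs(dataTime * 1000).tz().format(DATE_FULL_TIMEZONE_FORMAT)
```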