lib/storage: consistently use OS-independent separator in file paths

This is needed for Windows support, which uses `\` instead of `/` as file separator

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/70
This commit is contained in:
Aliaksandr Valialkin 2023-03-25 14:33:54 -07:00
parent 36bbdd7d4b
commit c8f2febaa1
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
18 changed files with 135 additions and 97 deletions

View file

@ -8,6 +8,7 @@ import (
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/backupnames"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/common" "github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/common"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fscommon" "github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fscommon"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fslocal" "github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/fslocal"
@ -209,7 +210,7 @@ func (sw *statWriter) Write(p []byte) (int, error) {
} }
func createRestoreLock(dstDir string) error { func createRestoreLock(dstDir string) error {
lockF := path.Join(dstDir, "restore-in-progress") lockF := path.Join(dstDir, backupnames.RestoreInProgressFilename)
f, err := os.Create(lockF) f, err := os.Create(lockF)
if err != nil { if err != nil {
return fmt.Errorf("cannot create restore lock file %q: %w", lockF, err) return fmt.Errorf("cannot create restore lock file %q: %w", lockF, err)
@ -218,7 +219,7 @@ func createRestoreLock(dstDir string) error {
} }
func removeRestoreLock(dstDir string) error { func removeRestoreLock(dstDir string) error {
lockF := path.Join(dstDir, "restore-in-progress") lockF := path.Join(dstDir, backupnames.RestoreInProgressFilename)
if err := os.Remove(lockF); err != nil { if err := os.Remove(lockF); err != nil {
return fmt.Errorf("cannot remove restore lock file %q: %w", lockF, err) return fmt.Errorf("cannot remove restore lock file %q: %w", lockF, err)
} }

View file

@ -0,0 +1,9 @@
package backupnames
const (
	// RestoreInProgressFilename is the name of the marker file created
	// when a restore operation starts and removed once it finishes.
	//
	// While this file exists the storage contents may be incomplete,
	// so it is unsafe to read the storage data.
	RestoreInProgressFilename = "restore-in-progress"
)

View file

@ -6,6 +6,7 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/backupnames"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
) )
@ -135,7 +136,7 @@ func appendFilesInternal(dst []string, d *os.File) ([]string, error) {
} }
func isSpecialFile(name string) bool { func isSpecialFile(name string) bool {
return name == "flock.lock" || name == "restore-in-progress" return name == "flock.lock" || name == backupnames.RestoreInProgressFilename
} }
// RemoveEmptyDirs recursively removes empty directories under the given dir. // RemoveEmptyDirs recursively removes empty directories under the given dir.
@ -249,7 +250,7 @@ func removeEmptyDirsInternal(d *os.File) (bool, error) {
if dirEntries > 0 { if dirEntries > 0 {
return false, nil return false, nil
} }
// Use os.RemoveAll() instead of os.Remove(), since the dir may contain special files such as flock.lock and restore-in-progress, // Use os.RemoveAll() instead of os.Remove(), since the dir may contain special files such as flock.lock and backupnames.RestoreInProgressFilename,
// which must be ignored. // which must be ignored.
if err := os.RemoveAll(dir); err != nil { if err := os.RemoveAll(dir); err != nil {
return false, fmt.Errorf("cannot remove %q: %w", dir, err) return false, fmt.Errorf("cannot remove %q: %w", dir, err)

View file

@ -1529,9 +1529,9 @@ func mustReadPartNames(srcDir string) []string {
return partNames return partNames
} }
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
logger.Panicf("FATAL: cannot read parts.json file: %s", err) logger.Panicf("FATAL: cannot read %s file: %s", partsFilename, err)
} }
// The parts.json is missing. This is the upgrade from versions previous to v1.90.0. // The partsFilename is missing. This is the upgrade from versions previous to v1.90.0.
// Read part names from directories under srcDir // Read part names from directories under srcDir
des, err := os.ReadDir(srcDir) des, err := os.ReadDir(srcDir)
if err != nil { if err != nil {

View file

@ -145,20 +145,20 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error {
return fmt.Errorf("cannot parse path to part: %w", err) return fmt.Errorf("cannot parse path to part: %w", err)
} }
timestampsPath := path + "/timestamps.bin" timestampsPath := filepath.Join(path, timestampsFilename)
timestampsFile, err := filestream.Open(timestampsPath, true) timestampsFile, err := filestream.Open(timestampsPath, true)
if err != nil { if err != nil {
return fmt.Errorf("cannot open timestamps file in stream mode: %w", err) return fmt.Errorf("cannot open timestamps file in stream mode: %w", err)
} }
valuesPath := path + "/values.bin" valuesPath := filepath.Join(path, valuesFilename)
valuesFile, err := filestream.Open(valuesPath, true) valuesFile, err := filestream.Open(valuesPath, true)
if err != nil { if err != nil {
timestampsFile.MustClose() timestampsFile.MustClose()
return fmt.Errorf("cannot open values file in stream mode: %w", err) return fmt.Errorf("cannot open values file in stream mode: %w", err)
} }
indexPath := path + "/index.bin" indexPath := filepath.Join(path, indexFilename)
indexFile, err := filestream.Open(indexPath, true) indexFile, err := filestream.Open(indexPath, true)
if err != nil { if err != nil {
timestampsFile.MustClose() timestampsFile.MustClose()
@ -166,7 +166,7 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error {
return fmt.Errorf("cannot open index file in stream mode: %w", err) return fmt.Errorf("cannot open index file in stream mode: %w", err)
} }
metaindexPath := path + "/metaindex.bin" metaindexPath := filepath.Join(path, metaindexFilename)
metaindexFile, err := filestream.Open(metaindexPath, true) metaindexFile, err := filestream.Open(metaindexPath, true)
if err != nil { if err != nil {
timestampsFile.MustClose() timestampsFile.MustClose()

View file

@ -104,14 +104,14 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
} }
// Create part files in the directory. // Create part files in the directory.
timestampsPath := path + "/timestamps.bin" timestampsPath := filepath.Join(path, timestampsFilename)
timestampsFile, err := filestream.Create(timestampsPath, nocache) timestampsFile, err := filestream.Create(timestampsPath, nocache)
if err != nil { if err != nil {
fs.MustRemoveDirAtomic(path) fs.MustRemoveDirAtomic(path)
return fmt.Errorf("cannot create timestamps file: %w", err) return fmt.Errorf("cannot create timestamps file: %w", err)
} }
valuesPath := path + "/values.bin" valuesPath := filepath.Join(path, valuesFilename)
valuesFile, err := filestream.Create(valuesPath, nocache) valuesFile, err := filestream.Create(valuesPath, nocache)
if err != nil { if err != nil {
timestampsFile.MustClose() timestampsFile.MustClose()
@ -119,7 +119,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
return fmt.Errorf("cannot create values file: %w", err) return fmt.Errorf("cannot create values file: %w", err)
} }
indexPath := path + "/index.bin" indexPath := filepath.Join(path, indexFilename)
indexFile, err := filestream.Create(indexPath, nocache) indexFile, err := filestream.Create(indexPath, nocache)
if err != nil { if err != nil {
timestampsFile.MustClose() timestampsFile.MustClose()
@ -130,7 +130,7 @@ func (bsw *blockStreamWriter) InitFromFilePart(path string, nocache bool, compre
// Always cache metaindex file in OS page cache, since it is immediately // Always cache metaindex file in OS page cache, since it is immediately
// read after the merge. // read after the merge.
metaindexPath := path + "/metaindex.bin" metaindexPath := filepath.Join(path, metaindexFilename)
metaindexFile, err := filestream.Create(metaindexPath, false) metaindexFile, err := filestream.Create(metaindexPath, false)
if err != nil { if err != nil {
timestampsFile.MustClose() timestampsFile.MustClose()

24
lib/storage/filenames.go Normal file
View file

@ -0,0 +1,24 @@
package storage
// Names of well-known files used by the storage.
const (
	// Per-part data files.
	timestampsFilename = "timestamps.bin"
	valuesFilename     = "values.bin"
	indexFilename      = "index.bin"
	metaindexFilename  = "metaindex.bin"

	// Per-partition metadata files.
	partsFilename    = "parts.json"
	metadataFilename = "metadata.json"

	// File with the already applied retention; created by VictoriaMetrics enterprise.
	// See https://docs.victoriametrics.com/#retention-filters
	appliedRetentionFilename = "appliedRetention.txt"

	// Marker file requesting removal of the cache directory on startup.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1447
	resetCacheOnStartupFilename = "reset_cache_on_startup"
)
// Names of well-known directories in the storage layout.
const (
	// Subdirectories of the data directory holding small and big parts.
	smallDirname = "small"
	bigDirname   = "big"

	// Directories created under the storage root path.
	indexdbDirname   = "indexdb"
	dataDirname      = "data"
	metadataDirname  = "metadata"
	snapshotsDirname = "snapshots" // also used under indexdbDirname
	cacheDirname     = "cache"
)

View file

@ -40,19 +40,19 @@ func (mp *inmemoryPart) StoreToDisk(path string) error {
if err := fs.MkdirAllIfNotExist(path); err != nil { if err := fs.MkdirAllIfNotExist(path); err != nil {
return fmt.Errorf("cannot create directory %q: %w", path, err) return fmt.Errorf("cannot create directory %q: %w", path, err)
} }
timestampsPath := path + "/timestamps.bin" timestampsPath := filepath.Join(path, timestampsFilename)
if err := fs.WriteFileAndSync(timestampsPath, mp.timestampsData.B); err != nil { if err := fs.WriteFileAndSync(timestampsPath, mp.timestampsData.B); err != nil {
return fmt.Errorf("cannot store timestamps: %w", err) return fmt.Errorf("cannot store timestamps: %w", err)
} }
valuesPath := path + "/values.bin" valuesPath := filepath.Join(path, valuesFilename)
if err := fs.WriteFileAndSync(valuesPath, mp.valuesData.B); err != nil { if err := fs.WriteFileAndSync(valuesPath, mp.valuesData.B); err != nil {
return fmt.Errorf("cannot store values: %w", err) return fmt.Errorf("cannot store values: %w", err)
} }
indexPath := path + "/index.bin" indexPath := filepath.Join(path, indexFilename)
if err := fs.WriteFileAndSync(indexPath, mp.indexData.B); err != nil { if err := fs.WriteFileAndSync(indexPath, mp.indexData.B); err != nil {
return fmt.Errorf("cannot store index: %w", err) return fmt.Errorf("cannot store index: %w", err)
} }
metaindexPath := path + "/metaindex.bin" metaindexPath := filepath.Join(path, metaindexFilename)
if err := fs.WriteFileAndSync(metaindexPath, mp.metaindexData.B); err != nil { if err := fs.WriteFileAndSync(metaindexPath, mp.metaindexData.B); err != nil {
return fmt.Errorf("cannot store metaindex: %w", err) return fmt.Errorf("cannot store metaindex: %w", err)
} }

View file

@ -54,19 +54,19 @@ func openFilePart(path string) (*part, error) {
return nil, fmt.Errorf("cannot parse path to part: %w", err) return nil, fmt.Errorf("cannot parse path to part: %w", err)
} }
timestampsPath := path + "/timestamps.bin" timestampsPath := filepath.Join(path, timestampsFilename)
timestampsFile := fs.MustOpenReaderAt(timestampsPath) timestampsFile := fs.MustOpenReaderAt(timestampsPath)
timestampsSize := fs.MustFileSize(timestampsPath) timestampsSize := fs.MustFileSize(timestampsPath)
valuesPath := path + "/values.bin" valuesPath := filepath.Join(path, valuesFilename)
valuesFile := fs.MustOpenReaderAt(valuesPath) valuesFile := fs.MustOpenReaderAt(valuesPath)
valuesSize := fs.MustFileSize(valuesPath) valuesSize := fs.MustFileSize(valuesPath)
indexPath := path + "/index.bin" indexPath := filepath.Join(path, indexFilename)
indexFile := fs.MustOpenReaderAt(indexPath) indexFile := fs.MustOpenReaderAt(indexPath)
indexSize := fs.MustFileSize(indexPath) indexSize := fs.MustFileSize(indexPath)
metaindexPath := path + "/metaindex.bin" metaindexPath := filepath.Join(path, metaindexFilename)
metaindexFile, err := filestream.Open(metaindexPath, true) metaindexFile, err := filestream.Open(metaindexPath, true)
if err != nil { if err != nil {
timestampsFile.MustClose() timestampsFile.MustClose()

View file

@ -48,7 +48,7 @@ func (ph *partHeader) Reset() {
} }
func (ph *partHeader) readMinDedupInterval(partPath string) error { func (ph *partHeader) readMinDedupInterval(partPath string) error {
filePath := partPath + "/min_dedup_interval" filePath := filepath.Join(partPath, "min_dedup_interval")
data, err := os.ReadFile(filePath) data, err := os.ReadFile(filePath)
if err != nil { if err != nil {
if errors.Is(err, os.ErrNotExist) { if errors.Is(err, os.ErrNotExist) {
@ -134,7 +134,7 @@ func (ph *partHeader) ParseFromPath(path string) error {
func (ph *partHeader) ReadMetadata(partPath string) error { func (ph *partHeader) ReadMetadata(partPath string) error {
ph.Reset() ph.Reset()
metadataPath := partPath + "/metadata.json" metadataPath := filepath.Join(partPath, metadataFilename)
metadata, err := os.ReadFile(metadataPath) metadata, err := os.ReadFile(metadataPath)
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
@ -170,7 +170,7 @@ func (ph *partHeader) WriteMetadata(partPath string) error {
if err != nil { if err != nil {
logger.Panicf("BUG: cannot marshal partHeader metadata: %s", err) logger.Panicf("BUG: cannot marshal partHeader metadata: %s", err)
} }
metadataPath := partPath + "/metadata.json" metadataPath := filepath.Join(partPath, metadataFilename)
if err := fs.WriteFileAtomically(metadataPath, metadata, false); err != nil { if err := fs.WriteFileAtomically(metadataPath, metadata, false); err != nil {
return fmt.Errorf("cannot create %q: %w", metadataPath, err) return fmt.Errorf("cannot create %q: %w", metadataPath, err)
} }

View file

@ -216,8 +216,8 @@ func (pw *partWrapper) decRef() {
// to small and big partitions. // to small and big partitions.
func createPartition(timestamp int64, smallPartitionsPath, bigPartitionsPath string, s *Storage) (*partition, error) { func createPartition(timestamp int64, smallPartitionsPath, bigPartitionsPath string, s *Storage) (*partition, error) {
name := timestampToPartitionName(timestamp) name := timestampToPartitionName(timestamp)
smallPartsPath := filepath.Clean(smallPartitionsPath) + "/" + name smallPartsPath := filepath.Join(filepath.Clean(smallPartitionsPath), name)
bigPartsPath := filepath.Clean(bigPartitionsPath) + "/" + name bigPartsPath := filepath.Join(filepath.Clean(bigPartitionsPath), name)
logger.Infof("creating a partition %q with smallPartsPath=%q, bigPartsPath=%q", name, smallPartsPath, bigPartsPath) logger.Infof("creating a partition %q with smallPartsPath=%q, bigPartsPath=%q", name, smallPartsPath, bigPartsPath)
if err := fs.MkdirAllFailIfExist(smallPartsPath); err != nil { if err := fs.MkdirAllFailIfExist(smallPartsPath); err != nil {
@ -1422,7 +1422,7 @@ func (pt *partition) getDstPartPath(dstPartType partType, mergeIdx uint64) strin
} }
dstPartPath := "" dstPartPath := ""
if dstPartType != partInmemory { if dstPartType != partInmemory {
dstPartPath = fmt.Sprintf("%s/%016X", ptPath, mergeIdx) dstPartPath = filepath.Join(ptPath, fmt.Sprintf("%016X", mergeIdx))
} }
return dstPartPath return dstPartPath
} }
@ -1821,8 +1821,8 @@ func openParts(path string, partNames []string) ([]*partWrapper, error) {
// Remove txn and tmp directories, which may be left after the upgrade // Remove txn and tmp directories, which may be left after the upgrade
// to v1.90.0 and newer versions. // to v1.90.0 and newer versions.
fs.MustRemoveAll(path + "/txn") fs.MustRemoveAll(filepath.Join(path, "txn"))
fs.MustRemoveAll(path + "/tmp") fs.MustRemoveAll(filepath.Join(path, "tmp"))
// Remove dirs missing in partNames. These dirs may be left after unclean shutdown // Remove dirs missing in partNames. These dirs may be left after unclean shutdown
// or after the update from versions prior to v1.90.0. // or after the update from versions prior to v1.90.0.
@ -1841,7 +1841,7 @@ func openParts(path string, partNames []string) ([]*partWrapper, error) {
} }
fn := de.Name() fn := de.Name()
if _, ok := m[fn]; !ok { if _, ok := m[fn]; !ok {
deletePath := path + "/" + fn deletePath := filepath.Join(path, fn)
fs.MustRemoveAll(deletePath) fs.MustRemoveAll(deletePath)
} }
} }
@ -1850,7 +1850,7 @@ func openParts(path string, partNames []string) ([]*partWrapper, error) {
// Open parts // Open parts
var pws []*partWrapper var pws []*partWrapper
for _, partName := range partNames { for _, partName := range partNames {
partPath := path + "/" + partName partPath := filepath.Join(path, partName)
p, err := openFilePart(partPath) p, err := openFilePart(partPath)
if err != nil { if err != nil {
mustCloseParts(pws) mustCloseParts(pws)
@ -1926,19 +1926,19 @@ func (pt *partition) createSnapshot(srcDir, dstDir string, pws []*partWrapper) e
// Make hardlinks for pws at dstDir // Make hardlinks for pws at dstDir
for _, pw := range pws { for _, pw := range pws {
srcPartPath := pw.p.path srcPartPath := pw.p.path
dstPartPath := dstDir + "/" + filepath.Base(srcPartPath) dstPartPath := filepath.Join(dstDir, filepath.Base(srcPartPath))
if err := fs.HardLinkFiles(srcPartPath, dstPartPath); err != nil { if err := fs.HardLinkFiles(srcPartPath, dstPartPath); err != nil {
return fmt.Errorf("cannot create hard links from %q to %q: %w", srcPartPath, dstPartPath, err) return fmt.Errorf("cannot create hard links from %q to %q: %w", srcPartPath, dstPartPath, err)
} }
} }
// Copy the appliedRetention.txt file to dstDir. // Copy the appliedRetentionFilename to dstDir.
// This file can be created by VictoriaMetrics enterprise. // This file can be created by VictoriaMetrics enterprise.
// See https://docs.victoriametrics.com/#retention-filters . // See https://docs.victoriametrics.com/#retention-filters .
// Do not make hard link to this file, since it can be modified over time. // Do not make hard link to this file, since it can be modified over time.
srcPath := srcDir + "/appliedRetention.txt" srcPath := filepath.Join(srcDir, appliedRetentionFilename)
if fs.IsPathExist(srcPath) { if fs.IsPathExist(srcPath) {
dstPath := dstDir + "/" + filepath.Base(srcPath) dstPath := filepath.Join(dstDir, filepath.Base(srcPath))
if err := fs.CopyFile(srcPath, dstPath); err != nil { if err := fs.CopyFile(srcPath, dstPath); err != nil {
return fmt.Errorf("cannot copy %q to %q: %w", srcPath, dstPath, err) return fmt.Errorf("cannot copy %q to %q: %w", srcPath, dstPath, err)
} }
@ -1967,7 +1967,7 @@ func mustWritePartNames(pwsSmall, pwsBig []*partWrapper, dstDir string) {
if err != nil { if err != nil {
logger.Panicf("BUG: cannot marshal partNames to JSON: %s", err) logger.Panicf("BUG: cannot marshal partNames to JSON: %s", err)
} }
partNamesPath := dstDir + "/parts.json" partNamesPath := filepath.Join(dstDir, partsFilename)
if err := fs.WriteFileAtomically(partNamesPath, data, true); err != nil { if err := fs.WriteFileAtomically(partNamesPath, data, true); err != nil {
logger.Panicf("FATAL: cannot update %s: %s", partNamesPath, err) logger.Panicf("FATAL: cannot update %s: %s", partNamesPath, err)
} }
@ -1988,7 +1988,7 @@ func getPartNames(pws []*partWrapper) []string {
} }
func mustReadPartNames(smallPartsPath, bigPartsPath string) ([]string, []string) { func mustReadPartNames(smallPartsPath, bigPartsPath string) ([]string, []string) {
partNamesPath := smallPartsPath + "/parts.json" partNamesPath := filepath.Join(smallPartsPath, partsFilename)
data, err := os.ReadFile(partNamesPath) data, err := os.ReadFile(partNamesPath)
if err == nil { if err == nil {
var partNames partNamesJSON var partNames partNamesJSON
@ -1998,9 +1998,9 @@ func mustReadPartNames(smallPartsPath, bigPartsPath string) ([]string, []string)
return partNames.Small, partNames.Big return partNames.Small, partNames.Big
} }
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
logger.Panicf("FATAL: cannot read parts.json file: %s", err) logger.Panicf("FATAL: cannot read %s file: %s", partsFilename, err)
} }
// The parts.json is missing. This is the upgrade from versions previous to v1.90.0. // The partsFilename is missing. This is the upgrade from versions previous to v1.90.0.
// Read part names from smallPartsPath and bigPartsPath directories // Read part names from smallPartsPath and bigPartsPath directories
partNamesSmall := mustReadPartNamesFromDir(smallPartsPath) partNamesSmall := mustReadPartNamesFromDir(smallPartsPath)
partNamesBig := mustReadPartNamesFromDir(bigPartsPath) partNamesBig := mustReadPartNamesFromDir(bigPartsPath)
@ -2032,5 +2032,5 @@ func mustReadPartNamesFromDir(srcDir string) []string {
} }
func isSpecialDir(name string) bool { func isSpecialDir(name string) bool {
return name == "tmp" || name == "txn" || name == "snapshots" || fs.IsScheduledForRemoval(name) return name == "tmp" || name == "txn" || name == snapshotsDirname || fs.IsScheduledForRemoval(name)
} }

View file

@ -169,17 +169,17 @@ func testPartitionSearchEx(t *testing.T, ptt int64, tr TimeRange, partsCount, ma
// Create partition from rowss and test search on it. // Create partition from rowss and test search on it.
strg := newTestStorage() strg := newTestStorage()
strg.retentionMsecs = timestampFromTime(time.Now()) - ptr.MinTimestamp + 3600*1000 strg.retentionMsecs = timestampFromTime(time.Now()) - ptr.MinTimestamp + 3600*1000
pt, err := createPartition(ptt, "./small-table", "./big-table", strg) pt, err := createPartition(ptt, "small-table", "big-table", strg)
if err != nil { if err != nil {
t.Fatalf("cannot create partition: %s", err) t.Fatalf("cannot create partition: %s", err)
} }
smallPartsPath := pt.smallPartsPath smallPartsPath := pt.smallPartsPath
bigPartsPath := pt.bigPartsPath bigPartsPath := pt.bigPartsPath
defer func() { defer func() {
if err := os.RemoveAll("./small-table"); err != nil { if err := os.RemoveAll("small-table"); err != nil {
t.Fatalf("cannot remove small parts directory: %s", err) t.Fatalf("cannot remove small parts directory: %s", err)
} }
if err := os.RemoveAll("./big-table"); err != nil { if err := os.RemoveAll("big-table"); err != nil {
t.Fatalf("cannot remove big parts directory: %s", err) t.Fatalf("cannot remove big parts directory: %s", err)
} }
}() }()

View file

@ -16,6 +16,7 @@ import (
"time" "time"
"unsafe" "unsafe"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/backup/backupnames"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bloomfilter" "github.com/VictoriaMetrics/VictoriaMetrics/lib/bloomfilter"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal" "github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
@ -147,7 +148,7 @@ func OpenStorage(path string, retentionMsecs int64, maxHourlySeries, maxDailySer
} }
s := &Storage{ s := &Storage{
path: path, path: path,
cachePath: path + "/cache", cachePath: filepath.Join(path, cacheDirname),
retentionMsecs: retentionMsecs, retentionMsecs: retentionMsecs,
stop: make(chan struct{}), stop: make(chan struct{}),
} }
@ -156,10 +157,10 @@ func OpenStorage(path string, retentionMsecs int64, maxHourlySeries, maxDailySer
} }
// Check whether the cache directory must be removed // Check whether the cache directory must be removed
// It is removed if it contains reset_cache_on_startup file. // It is removed if it contains resetCacheOnStartupFilename.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1447 for details. // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1447 for details.
if fs.IsPathExist(s.cachePath + "/reset_cache_on_startup") { if fs.IsPathExist(filepath.Join(s.cachePath, resetCacheOnStartupFilename)) {
logger.Infof("removing cache directory at %q, since it contains `reset_cache_on_startup` file...", s.cachePath) logger.Infof("removing cache directory at %q, since it contains `%s` file...", s.cachePath, resetCacheOnStartupFilename)
// Do not use fs.MustRemoveAll() here, since the cache directory may be mounted // Do not use fs.MustRemoveAll() here, since the cache directory may be mounted
// to a separate filesystem. In this case the fs.MustRemoveAll() will fail while // to a separate filesystem. In this case the fs.MustRemoveAll() will fail while
// trying to remove the mount root. // trying to remove the mount root.
@ -176,13 +177,13 @@ func OpenStorage(path string, retentionMsecs int64, maxHourlySeries, maxDailySer
s.flockF = flockF s.flockF = flockF
// Check whether restore process finished successfully // Check whether restore process finished successfully
restoreLockF := path + "/restore-in-progress" restoreLockF := filepath.Join(path, backupnames.RestoreInProgressFilename)
if fs.IsPathExist(restoreLockF) { if fs.IsPathExist(restoreLockF) {
return nil, fmt.Errorf("restore lock file exists, incomplete vmrestore run. Run vmrestore again or remove lock file %q", restoreLockF) return nil, fmt.Errorf("restore lock file exists, incomplete vmrestore run. Run vmrestore again or remove lock file %q", restoreLockF)
} }
// Pre-create snapshots directory if it is missing. // Pre-create snapshots directory if it is missing.
snapshotsPath := path + "/snapshots" snapshotsPath := filepath.Join(path, snapshotsDirname)
if err := fs.MkdirAllIfNotExist(snapshotsPath); err != nil { if err := fs.MkdirAllIfNotExist(snapshotsPath); err != nil {
return nil, fmt.Errorf("cannot create %q: %w", snapshotsPath, err) return nil, fmt.Errorf("cannot create %q: %w", snapshotsPath, err)
} }
@ -218,16 +219,16 @@ func OpenStorage(path string, retentionMsecs int64, maxHourlySeries, maxDailySer
s.prefetchedMetricIDs.Store(&uint64set.Set{}) s.prefetchedMetricIDs.Store(&uint64set.Set{})
// Load metadata // Load metadata
metadataDir := path + "/metadata" metadataDir := filepath.Join(path, metadataDirname)
isEmptyDB := !fs.IsPathExist(path + "/indexdb") isEmptyDB := !fs.IsPathExist(filepath.Join(path, indexdbDirname))
if err := fs.MkdirAllIfNotExist(metadataDir); err != nil { if err := fs.MkdirAllIfNotExist(metadataDir); err != nil {
return nil, fmt.Errorf("cannot create %q: %w", metadataDir, err) return nil, fmt.Errorf("cannot create %q: %w", metadataDir, err)
} }
s.minTimestampForCompositeIndex = mustGetMinTimestampForCompositeIndex(metadataDir, isEmptyDB) s.minTimestampForCompositeIndex = mustGetMinTimestampForCompositeIndex(metadataDir, isEmptyDB)
// Load indexdb // Load indexdb
idbPath := path + "/indexdb" idbPath := filepath.Join(path, indexdbDirname)
idbSnapshotsPath := idbPath + "/snapshots" idbSnapshotsPath := filepath.Join(idbPath, snapshotsDirname)
if err := fs.MkdirAllIfNotExist(idbSnapshotsPath); err != nil { if err := fs.MkdirAllIfNotExist(idbSnapshotsPath); err != nil {
return nil, fmt.Errorf("cannot create %q: %w", idbSnapshotsPath, err) return nil, fmt.Errorf("cannot create %q: %w", idbSnapshotsPath, err)
} }
@ -252,7 +253,7 @@ func OpenStorage(path string, retentionMsecs int64, maxHourlySeries, maxDailySer
s.updateDeletedMetricIDs(dmisPrev) s.updateDeletedMetricIDs(dmisPrev)
// Load data // Load data
tablePath := path + "/data" tablePath := filepath.Join(path, dataDirname)
tb, err := openTable(tablePath, s) tb, err := openTable(tablePath, s)
if err != nil { if err != nil {
s.idb().MustClose() s.idb().MustClose()
@ -322,7 +323,7 @@ func (s *Storage) CreateSnapshot(deadline uint64) (string, error) {
snapshotName := snapshot.NewName() snapshotName := snapshot.NewName()
srcDir := s.path srcDir := s.path
dstDir := fmt.Sprintf("%s/snapshots/%s", srcDir, snapshotName) dstDir := filepath.Join(srcDir, snapshotsDirname, snapshotName)
if err := fs.MkdirAllFailIfExist(dstDir); err != nil { if err := fs.MkdirAllFailIfExist(dstDir); err != nil {
return "", fmt.Errorf("cannot create dir %q: %w", dstDir, err) return "", fmt.Errorf("cannot create dir %q: %w", dstDir, err)
} }
@ -334,42 +335,42 @@ func (s *Storage) CreateSnapshot(deadline uint64) (string, error) {
} }
dirsToRemoveOnError = append(dirsToRemoveOnError, smallDir, bigDir) dirsToRemoveOnError = append(dirsToRemoveOnError, smallDir, bigDir)
dstDataDir := dstDir + "/data" dstDataDir := filepath.Join(dstDir, dataDirname)
if err := fs.MkdirAllFailIfExist(dstDataDir); err != nil { if err := fs.MkdirAllFailIfExist(dstDataDir); err != nil {
return "", fmt.Errorf("cannot create dir %q: %w", dstDataDir, err) return "", fmt.Errorf("cannot create dir %q: %w", dstDataDir, err)
} }
dstSmallDir := dstDataDir + "/small" dstSmallDir := filepath.Join(dstDataDir, smallDirname)
if err := fs.SymlinkRelative(smallDir, dstSmallDir); err != nil { if err := fs.SymlinkRelative(smallDir, dstSmallDir); err != nil {
return "", fmt.Errorf("cannot create symlink from %q to %q: %w", smallDir, dstSmallDir, err) return "", fmt.Errorf("cannot create symlink from %q to %q: %w", smallDir, dstSmallDir, err)
} }
dstBigDir := dstDataDir + "/big" dstBigDir := filepath.Join(dstDataDir, bigDirname)
if err := fs.SymlinkRelative(bigDir, dstBigDir); err != nil { if err := fs.SymlinkRelative(bigDir, dstBigDir); err != nil {
return "", fmt.Errorf("cannot create symlink from %q to %q: %w", bigDir, dstBigDir, err) return "", fmt.Errorf("cannot create symlink from %q to %q: %w", bigDir, dstBigDir, err)
} }
fs.MustSyncPath(dstDataDir) fs.MustSyncPath(dstDataDir)
srcMetadataDir := srcDir + "/metadata" srcMetadataDir := filepath.Join(srcDir, metadataDirname)
dstMetadataDir := dstDir + "/metadata" dstMetadataDir := filepath.Join(dstDir, metadataDirname)
if err := fs.CopyDirectory(srcMetadataDir, dstMetadataDir); err != nil { if err := fs.CopyDirectory(srcMetadataDir, dstMetadataDir); err != nil {
return "", fmt.Errorf("cannot copy metadata: %w", err) return "", fmt.Errorf("cannot copy metadata: %w", err)
} }
idbSnapshot := fmt.Sprintf("%s/indexdb/snapshots/%s", srcDir, snapshotName) idbSnapshot := filepath.Join(srcDir, indexdbDirname, snapshotsDirname, snapshotName)
idb := s.idb() idb := s.idb()
currSnapshot := idbSnapshot + "/" + idb.name currSnapshot := filepath.Join(idbSnapshot, idb.name)
if err := idb.tb.CreateSnapshotAt(currSnapshot, deadline); err != nil { if err := idb.tb.CreateSnapshotAt(currSnapshot, deadline); err != nil {
return "", fmt.Errorf("cannot create curr indexDB snapshot: %w", err) return "", fmt.Errorf("cannot create curr indexDB snapshot: %w", err)
} }
dirsToRemoveOnError = append(dirsToRemoveOnError, idbSnapshot) dirsToRemoveOnError = append(dirsToRemoveOnError, idbSnapshot)
ok := idb.doExtDB(func(extDB *indexDB) { ok := idb.doExtDB(func(extDB *indexDB) {
prevSnapshot := idbSnapshot + "/" + extDB.name prevSnapshot := filepath.Join(idbSnapshot, extDB.name)
err = extDB.tb.CreateSnapshotAt(prevSnapshot, deadline) err = extDB.tb.CreateSnapshotAt(prevSnapshot, deadline)
}) })
if ok && err != nil { if ok && err != nil {
return "", fmt.Errorf("cannot create prev indexDB snapshot: %w", err) return "", fmt.Errorf("cannot create prev indexDB snapshot: %w", err)
} }
dstIdbDir := dstDir + "/indexdb" dstIdbDir := filepath.Join(dstDir, indexdbDirname)
if err := fs.SymlinkRelative(idbSnapshot, dstIdbDir); err != nil { if err := fs.SymlinkRelative(idbSnapshot, dstIdbDir); err != nil {
return "", fmt.Errorf("cannot create symlink from %q to %q: %w", idbSnapshot, dstIdbDir, err) return "", fmt.Errorf("cannot create symlink from %q to %q: %w", idbSnapshot, dstIdbDir, err)
} }
@ -383,7 +384,7 @@ func (s *Storage) CreateSnapshot(deadline uint64) (string, error) {
// ListSnapshots returns sorted list of existing snapshots for s. // ListSnapshots returns sorted list of existing snapshots for s.
func (s *Storage) ListSnapshots() ([]string, error) { func (s *Storage) ListSnapshots() ([]string, error) {
snapshotsPath := s.path + "/snapshots" snapshotsPath := filepath.Join(s.path, snapshotsDirname)
d, err := os.Open(snapshotsPath) d, err := os.Open(snapshotsPath)
if err != nil { if err != nil {
return nil, fmt.Errorf("cannot open snapshots directory: %w", err) return nil, fmt.Errorf("cannot open snapshots directory: %w", err)
@ -410,13 +411,13 @@ func (s *Storage) DeleteSnapshot(snapshotName string) error {
if err := snapshot.Validate(snapshotName); err != nil { if err := snapshot.Validate(snapshotName); err != nil {
return fmt.Errorf("invalid snapshotName %q: %w", snapshotName, err) return fmt.Errorf("invalid snapshotName %q: %w", snapshotName, err)
} }
snapshotPath := s.path + "/snapshots/" + snapshotName snapshotPath := filepath.Join(s.path, snapshotsDirname, snapshotName)
logger.Infof("deleting snapshot %q...", snapshotPath) logger.Infof("deleting snapshot %q...", snapshotPath)
startTime := time.Now() startTime := time.Now()
s.tb.MustDeleteSnapshot(snapshotName) s.tb.MustDeleteSnapshot(snapshotName)
idbPath := fmt.Sprintf("%s/indexdb/snapshots/%s", s.path, snapshotName) idbPath := filepath.Join(s.path, indexdbDirname, snapshotsDirname, snapshotName)
fs.MustRemoveDirAtomic(idbPath) fs.MustRemoveDirAtomic(idbPath)
fs.MustRemoveDirAtomic(snapshotPath) fs.MustRemoveDirAtomic(snapshotPath)
@ -723,7 +724,7 @@ func (s *Storage) nextDayMetricIDsUpdater() {
func (s *Storage) mustRotateIndexDB() { func (s *Storage) mustRotateIndexDB() {
// Create new indexdb table. // Create new indexdb table.
newTableName := nextIndexDBTableName() newTableName := nextIndexDBTableName()
idbNewPath := s.path + "/indexdb/" + newTableName idbNewPath := filepath.Join(s.path, indexdbDirname, newTableName)
rotationTimestamp := fasttime.UnixTimestamp() rotationTimestamp := fasttime.UnixTimestamp()
idbNew, err := openIndexDB(idbNewPath, s, rotationTimestamp, &s.isReadOnly) idbNew, err := openIndexDB(idbNewPath, s, rotationTimestamp, &s.isReadOnly)
if err != nil { if err != nil {
@ -835,7 +836,7 @@ func (s *Storage) mustLoadNextDayMetricIDs(date uint64) *byDateMetricIDEntry {
date: date, date: date,
} }
name := "next_day_metric_ids" name := "next_day_metric_ids"
path := s.cachePath + "/" + name path := filepath.Join(s.cachePath, name)
logger.Infof("loading %s from %q...", name, path) logger.Infof("loading %s from %q...", name, path)
startTime := time.Now() startTime := time.Now()
if !fs.IsPathExist(path) { if !fs.IsPathExist(path) {
@ -879,7 +880,7 @@ func (s *Storage) mustLoadHourMetricIDs(hour uint64, name string) *hourMetricIDs
hm := &hourMetricIDs{ hm := &hourMetricIDs{
hour: hour, hour: hour,
} }
path := s.cachePath + "/" + name path := filepath.Join(s.cachePath, name)
logger.Infof("loading %s from %q...", name, path) logger.Infof("loading %s from %q...", name, path)
startTime := time.Now() startTime := time.Now()
if !fs.IsPathExist(path) { if !fs.IsPathExist(path) {
@ -921,7 +922,7 @@ func (s *Storage) mustLoadHourMetricIDs(hour uint64, name string) *hourMetricIDs
func (s *Storage) mustSaveNextDayMetricIDs(e *byDateMetricIDEntry) { func (s *Storage) mustSaveNextDayMetricIDs(e *byDateMetricIDEntry) {
name := "next_day_metric_ids" name := "next_day_metric_ids"
path := s.cachePath + "/" + name path := filepath.Join(s.cachePath, name)
logger.Infof("saving %s to %q...", name, path) logger.Infof("saving %s to %q...", name, path)
startTime := time.Now() startTime := time.Now()
dst := make([]byte, 0, e.v.Len()*8+16) dst := make([]byte, 0, e.v.Len()*8+16)
@ -939,7 +940,7 @@ func (s *Storage) mustSaveNextDayMetricIDs(e *byDateMetricIDEntry) {
} }
func (s *Storage) mustSaveHourMetricIDs(hm *hourMetricIDs, name string) { func (s *Storage) mustSaveHourMetricIDs(hm *hourMetricIDs, name string) {
path := s.cachePath + "/" + name path := filepath.Join(s.cachePath, name)
logger.Infof("saving %s to %q...", name, path) logger.Infof("saving %s to %q...", name, path)
startTime := time.Now() startTime := time.Now()
dst := make([]byte, 0, hm.m.Len()*8+24) dst := make([]byte, 0, hm.m.Len()*8+24)
@ -983,7 +984,7 @@ func marshalUint64Set(dst []byte, m *uint64set.Set) []byte {
} }
func mustGetMinTimestampForCompositeIndex(metadataDir string, isEmptyDB bool) int64 { func mustGetMinTimestampForCompositeIndex(metadataDir string, isEmptyDB bool) int64 {
path := metadataDir + "/minTimestampForCompositeIndex" path := filepath.Join(metadataDir, "minTimestampForCompositeIndex")
minTimestamp, err := loadMinTimestampForCompositeIndex(path) minTimestamp, err := loadMinTimestampForCompositeIndex(path)
if err == nil { if err == nil {
return minTimestamp return minTimestamp
@ -1019,7 +1020,7 @@ func loadMinTimestampForCompositeIndex(path string) (int64, error) {
} }
func (s *Storage) mustLoadCache(info, name string, sizeBytes int) *workingsetcache.Cache { func (s *Storage) mustLoadCache(info, name string, sizeBytes int) *workingsetcache.Cache {
path := s.cachePath + "/" + name path := filepath.Join(s.cachePath, name)
logger.Infof("loading %s cache from %q...", info, path) logger.Infof("loading %s cache from %q...", info, path)
startTime := time.Now() startTime := time.Now()
c := workingsetcache.Load(path, sizeBytes) c := workingsetcache.Load(path, sizeBytes)
@ -1034,7 +1035,7 @@ func (s *Storage) mustSaveCache(c *workingsetcache.Cache, info, name string) {
saveCacheLock.Lock() saveCacheLock.Lock()
defer saveCacheLock.Unlock() defer saveCacheLock.Unlock()
path := s.cachePath + "/" + name path := filepath.Join(s.cachePath, name)
logger.Infof("saving %s cache to %q...", info, path) logger.Infof("saving %s cache to %q...", info, path)
startTime := time.Now() startTime := time.Now()
if err := c.Save(path); err != nil { if err := c.Save(path); err != nil {
@ -2375,7 +2376,7 @@ func (s *Storage) openIndexDBTables(path string) (curr, prev *indexDB, err error
// Remove all the tables except two last tables. // Remove all the tables except two last tables.
for _, tn := range tableNames[:len(tableNames)-2] { for _, tn := range tableNames[:len(tableNames)-2] {
pathToRemove := path + "/" + tn pathToRemove := filepath.Join(path, tn)
logger.Infof("removing obsolete indexdb dir %q...", pathToRemove) logger.Infof("removing obsolete indexdb dir %q...", pathToRemove)
fs.MustRemoveAll(pathToRemove) fs.MustRemoveAll(pathToRemove)
logger.Infof("removed obsolete indexdb dir %q", pathToRemove) logger.Infof("removed obsolete indexdb dir %q", pathToRemove)
@ -2385,13 +2386,13 @@ func (s *Storage) openIndexDBTables(path string) (curr, prev *indexDB, err error
fs.MustSyncPath(path) fs.MustSyncPath(path)
// Open the last two tables. // Open the last two tables.
currPath := path + "/" + tableNames[len(tableNames)-1] currPath := filepath.Join(path, tableNames[len(tableNames)-1])
curr, err = openIndexDB(currPath, s, 0, &s.isReadOnly) curr, err = openIndexDB(currPath, s, 0, &s.isReadOnly)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("cannot open curr indexdb table at %q: %w", currPath, err) return nil, nil, fmt.Errorf("cannot open curr indexdb table at %q: %w", currPath, err)
} }
prevPath := path + "/" + tableNames[len(tableNames)-2] prevPath := filepath.Join(path, tableNames[len(tableNames)-2])
prev, err = openIndexDB(prevPath, s, 0, &s.isReadOnly) prev, err = openIndexDB(prevPath, s, 0, &s.isReadOnly)
if err != nil { if err != nil {
curr.MustClose() curr.MustClose()

View file

@ -4,6 +4,7 @@ import (
"fmt" "fmt"
"math/rand" "math/rand"
"os" "os"
"path/filepath"
"reflect" "reflect"
"sort" "sort"
"strings" "strings"
@ -1036,7 +1037,7 @@ func testStorageAddRows(rng *rand.Rand, s *Storage) error {
} }
// Try opening the storage from snapshot. // Try opening the storage from snapshot.
snapshotPath := s.path + "/snapshots/" + snapshotName snapshotPath := filepath.Join(s.path, snapshotsDirname, snapshotName)
s1, err := OpenStorage(snapshotPath, 0, 0, 0) s1, err := OpenStorage(snapshotPath, 0, 0, 0)
if err != nil { if err != nil {
return fmt.Errorf("cannot open storage from snapshot: %w", err) return fmt.Errorf("cannot open storage from snapshot: %w", err)

View file

@ -95,23 +95,23 @@ func openTable(path string, s *Storage) (*table, error) {
} }
// Create directories for small and big partitions if they don't exist yet. // Create directories for small and big partitions if they don't exist yet.
smallPartitionsPath := path + "/small" smallPartitionsPath := filepath.Join(path, smallDirname)
if err := fs.MkdirAllIfNotExist(smallPartitionsPath); err != nil { if err := fs.MkdirAllIfNotExist(smallPartitionsPath); err != nil {
return nil, fmt.Errorf("cannot create directory for small partitions %q: %w", smallPartitionsPath, err) return nil, fmt.Errorf("cannot create directory for small partitions %q: %w", smallPartitionsPath, err)
} }
fs.MustRemoveTemporaryDirs(smallPartitionsPath) fs.MustRemoveTemporaryDirs(smallPartitionsPath)
smallSnapshotsPath := smallPartitionsPath + "/snapshots" smallSnapshotsPath := filepath.Join(smallPartitionsPath, snapshotsDirname)
if err := fs.MkdirAllIfNotExist(smallSnapshotsPath); err != nil { if err := fs.MkdirAllIfNotExist(smallSnapshotsPath); err != nil {
return nil, fmt.Errorf("cannot create %q: %w", smallSnapshotsPath, err) return nil, fmt.Errorf("cannot create %q: %w", smallSnapshotsPath, err)
} }
fs.MustRemoveTemporaryDirs(smallSnapshotsPath) fs.MustRemoveTemporaryDirs(smallSnapshotsPath)
bigPartitionsPath := path + "/big" bigPartitionsPath := filepath.Join(path, bigDirname)
if err := fs.MkdirAllIfNotExist(bigPartitionsPath); err != nil { if err := fs.MkdirAllIfNotExist(bigPartitionsPath); err != nil {
return nil, fmt.Errorf("cannot create directory for big partitions %q: %w", bigPartitionsPath, err) return nil, fmt.Errorf("cannot create directory for big partitions %q: %w", bigPartitionsPath, err)
} }
fs.MustRemoveTemporaryDirs(bigPartitionsPath) fs.MustRemoveTemporaryDirs(bigPartitionsPath)
bigSnapshotsPath := bigPartitionsPath + "/snapshots" bigSnapshotsPath := filepath.Join(bigPartitionsPath, snapshotsDirname)
if err := fs.MkdirAllIfNotExist(bigSnapshotsPath); err != nil { if err := fs.MkdirAllIfNotExist(bigSnapshotsPath); err != nil {
return nil, fmt.Errorf("cannot create %q: %w", bigSnapshotsPath, err) return nil, fmt.Errorf("cannot create %q: %w", bigSnapshotsPath, err)
} }
@ -151,11 +151,11 @@ func (tb *table) CreateSnapshot(snapshotName string, deadline uint64) (string, s
ptws := tb.GetPartitions(nil) ptws := tb.GetPartitions(nil)
defer tb.PutPartitions(ptws) defer tb.PutPartitions(ptws)
dstSmallDir := fmt.Sprintf("%s/small/snapshots/%s", tb.path, snapshotName) dstSmallDir := filepath.Join(tb.path, smallDirname, snapshotsDirname, snapshotName)
if err := fs.MkdirAllFailIfExist(dstSmallDir); err != nil { if err := fs.MkdirAllFailIfExist(dstSmallDir); err != nil {
return "", "", fmt.Errorf("cannot create dir %q: %w", dstSmallDir, err) return "", "", fmt.Errorf("cannot create dir %q: %w", dstSmallDir, err)
} }
dstBigDir := fmt.Sprintf("%s/big/snapshots/%s", tb.path, snapshotName) dstBigDir := filepath.Join(tb.path, bigDirname, snapshotsDirname, snapshotName)
if err := fs.MkdirAllFailIfExist(dstBigDir); err != nil { if err := fs.MkdirAllFailIfExist(dstBigDir); err != nil {
fs.MustRemoveAll(dstSmallDir) fs.MustRemoveAll(dstSmallDir)
return "", "", fmt.Errorf("cannot create dir %q: %w", dstBigDir, err) return "", "", fmt.Errorf("cannot create dir %q: %w", dstBigDir, err)
@ -168,8 +168,8 @@ func (tb *table) CreateSnapshot(snapshotName string, deadline uint64) (string, s
return "", "", fmt.Errorf("cannot create snapshot for %q: timeout exceeded", tb.path) return "", "", fmt.Errorf("cannot create snapshot for %q: timeout exceeded", tb.path)
} }
smallPath := dstSmallDir + "/" + ptw.pt.name smallPath := filepath.Join(dstSmallDir, ptw.pt.name)
bigPath := dstBigDir + "/" + ptw.pt.name bigPath := filepath.Join(dstBigDir, ptw.pt.name)
if err := ptw.pt.CreateSnapshotAt(smallPath, bigPath); err != nil { if err := ptw.pt.CreateSnapshotAt(smallPath, bigPath); err != nil {
fs.MustRemoveAll(dstSmallDir) fs.MustRemoveAll(dstSmallDir)
fs.MustRemoveAll(dstBigDir) fs.MustRemoveAll(dstBigDir)
@ -188,9 +188,9 @@ func (tb *table) CreateSnapshot(snapshotName string, deadline uint64) (string, s
// MustDeleteSnapshot deletes snapshot with the given snapshotName. // MustDeleteSnapshot deletes snapshot with the given snapshotName.
func (tb *table) MustDeleteSnapshot(snapshotName string) { func (tb *table) MustDeleteSnapshot(snapshotName string) {
smallDir := fmt.Sprintf("%s/small/snapshots/%s", tb.path, snapshotName) smallDir := filepath.Join(tb.path, smallDirname, snapshotsDirname, snapshotName)
fs.MustRemoveDirAtomic(smallDir) fs.MustRemoveDirAtomic(smallDir)
bigDir := fmt.Sprintf("%s/big/snapshots/%s", tb.path, snapshotName) bigDir := filepath.Join(tb.path, bigDirname, snapshotsDirname, snapshotName)
fs.MustRemoveDirAtomic(bigDir) fs.MustRemoveDirAtomic(bigDir)
} }
@ -545,7 +545,7 @@ func populatePartitionNames(partitionsPath string, ptNames map[string]bool) erro
continue continue
} }
ptName := de.Name() ptName := de.Name()
if ptName == "snapshots" { if ptName == snapshotsDirname {
// Skip directory with snapshots // Skip directory with snapshots
continue continue
} }

View file

@ -184,12 +184,12 @@ func testTableSearchEx(t *testing.T, rng *rand.Rand, trData, trSearch TimeRange,
// Create a table from rowss and test search on it. // Create a table from rowss and test search on it.
strg := newTestStorage() strg := newTestStorage()
tb, err := openTable("./test-table", strg) tb, err := openTable("test-table", strg)
if err != nil { if err != nil {
t.Fatalf("cannot create table: %s", err) t.Fatalf("cannot create table: %s", err)
} }
defer func() { defer func() {
if err := os.RemoveAll("./test-table"); err != nil { if err := os.RemoveAll("test-table"); err != nil {
t.Fatalf("cannot remove table directory: %s", err) t.Fatalf("cannot remove table directory: %s", err)
} }
}() }()
@ -205,7 +205,7 @@ func testTableSearchEx(t *testing.T, rng *rand.Rand, trData, trSearch TimeRange,
tb.MustClose() tb.MustClose()
// Open the created table and test search on it. // Open the created table and test search on it.
tb, err = openTable("./test-table", strg) tb, err = openTable("test-table", strg)
if err != nil { if err != nil {
t.Fatalf("cannot open table: %s", err) t.Fatalf("cannot open table: %s", err)
} }

View file

@ -4,6 +4,7 @@ import (
"fmt" "fmt"
"math/rand" "math/rand"
"os" "os"
"path/filepath"
"sync" "sync"
"sync/atomic" "sync/atomic"
"testing" "testing"
@ -14,7 +15,7 @@ import (
func TestMain(m *testing.M) { func TestMain(m *testing.M) {
n := m.Run() n := m.Run()
if err := os.RemoveAll("./benchmarkTableSearch"); err != nil { if err := os.RemoveAll("benchmarkTableSearch"); err != nil {
panic(fmt.Errorf("cannot remove benchmark tables: %w", err)) panic(fmt.Errorf("cannot remove benchmark tables: %w", err))
} }
os.Exit(n) os.Exit(n)
@ -39,7 +40,7 @@ func BenchmarkTableSearch(b *testing.B) {
func openBenchTable(b *testing.B, startTimestamp int64, rowsPerInsert, rowsCount, tsidsCount int) *table { func openBenchTable(b *testing.B, startTimestamp int64, rowsPerInsert, rowsCount, tsidsCount int) *table {
b.Helper() b.Helper()
path := fmt.Sprintf("./benchmarkTableSearch/rows%d_tsids%d", rowsCount, tsidsCount) path := filepath.Join("benchmarkTableSearch", fmt.Sprintf("rows%d_tsids%d", rowsCount, tsidsCount))
if !createdBenchTables[path] { if !createdBenchTables[path] {
createBenchTable(b, path, startTimestamp, rowsPerInsert, rowsCount, tsidsCount) createBenchTable(b, path, startTimestamp, rowsPerInsert, rowsCount, tsidsCount)
createdBenchTables[path] = true createdBenchTables[path] = true

View file

@ -45,7 +45,7 @@ func benchmarkTableAddRows(b *testing.B, rowsPerInsert, tsidsCount int) {
b.ResetTimer() b.ResetTimer()
b.ReportAllocs() b.ReportAllocs()
b.SetBytes(int64(rowsCountExpected)) b.SetBytes(int64(rowsCountExpected))
tablePath := "./benchmarkTableAddRows" tablePath := "benchmarkTableAddRows"
strg := newTestStorage() strg := newTestStorage()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
tb, err := openTable(tablePath, strg) tb, err := openTable(tablePath, strg)