lib/fs: add MustReadDir() function

Use fs.MustReadDir() instead of os.ReadDir() across the code in order to reduce code verbosity.
On error, fs.MustReadDir() logs the error together with the directory name and the call stack
before exiting. This information should be enough for debugging the cause of the error.
This commit is contained in:
Aliaksandr Valialkin 2023-04-14 22:08:43 -07:00
parent 60d92894c5
commit 3727251910
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
14 changed files with 71 additions and 210 deletions

View file

@ -4,7 +4,6 @@ import (
"flag" "flag"
"fmt" "fmt"
"net/url" "net/url"
"os"
"path/filepath" "path/filepath"
"strconv" "strconv"
"sync" "sync"
@ -265,10 +264,7 @@ func newRemoteWriteCtxs(at *auth.Token, urls []string) []*remoteWriteCtx {
} }
queuesDir := filepath.Join(*tmpDataPath, persistentQueueDirname) queuesDir := filepath.Join(*tmpDataPath, persistentQueueDirname)
files, err := os.ReadDir(queuesDir) files := fs.MustReadDir(queuesDir)
if err != nil {
logger.Fatalf("cannot read queues dir %q: %s", queuesDir, err)
}
removed := 0 removed := 0
for _, f := range files { for _, f := range files {
dirname := f.Name() dirname := f.Name()

View file

@ -9,7 +9,6 @@ import (
"path/filepath" "path/filepath"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs" "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
) )
var ( var (
@ -76,18 +75,11 @@ func collectDashboardsSettings(path string) ([]byte, error) {
if !fs.IsPathExist(path) { if !fs.IsPathExist(path) {
return nil, fmt.Errorf("cannot find folder %q", path) return nil, fmt.Errorf("cannot find folder %q", path)
} }
files, err := os.ReadDir(path) files := fs.MustReadDir(path)
if err != nil {
return nil, fmt.Errorf("cannot read folder %q", path)
}
var dss []dashboardSettings var dss []dashboardSettings
for _, file := range files { for _, file := range files {
filename := file.Name() filename := file.Name()
if err != nil {
logger.Errorf("skipping %q at -vmui.customDashboardsPath=%q, since the info for this file cannot be obtained: %s", filename, path, err)
continue
}
if filepath.Ext(filename) != ".json" { if filepath.Ext(filename) != ".json" {
continue continue
} }

View file

@ -234,14 +234,20 @@ func MustRemoveDirAtomic(dir string) {
var atomicDirRemoveCounter = uint64(time.Now().UnixNano()) var atomicDirRemoveCounter = uint64(time.Now().UnixNano())
// MustReadDir reads directory entries at the given dir.
//
// Unlike os.ReadDir, it does not return an error to the caller: on failure it
// calls logger.Panicf with a FATAL message and the process terminates, so call
// sites need no error handling. The error returned by os.ReadDir is a
// *fs.PathError, so the logged message already contains the directory name.
func MustReadDir(dir string) []os.DirEntry {
	des, err := os.ReadDir(dir)
	if err != nil {
		// NOTE(review): the commit message states that the call stack is also
		// logged — that behavior lives in logger.Panicf; confirm in lib/logger.
		logger.Panicf("FATAL: cannot read directory contents: %s", err)
	}
	return des
}
// MustRemoveTemporaryDirs removes all the subdirectories with ".must-remove.<XYZ>" suffix. // MustRemoveTemporaryDirs removes all the subdirectories with ".must-remove.<XYZ>" suffix.
// //
// Such directories may be left on unclean shutdown during MustRemoveDirAtomic call. // Such directories may be left on unclean shutdown during MustRemoveDirAtomic call.
func MustRemoveTemporaryDirs(dir string) { func MustRemoveTemporaryDirs(dir string) {
des, err := os.ReadDir(dir) des := MustReadDir(dir)
if err != nil {
logger.Panicf("FATAL: cannot read dir: %s", err)
}
for _, de := range des { for _, de := range des {
if !IsDirOrSymlink(de) { if !IsDirOrSymlink(de) {
// Skip non-directories // Skip non-directories
@ -260,10 +266,7 @@ func MustRemoveTemporaryDirs(dir string) {
func MustHardLinkFiles(srcDir, dstDir string) { func MustHardLinkFiles(srcDir, dstDir string) {
mustMkdirSync(dstDir) mustMkdirSync(dstDir)
des, err := os.ReadDir(srcDir) des := MustReadDir(srcDir)
if err != nil {
logger.Panicf("FATAL: cannot read files in scrDir: %s", err)
}
for _, de := range des { for _, de := range des {
if IsDirOrSymlink(de) { if IsDirOrSymlink(de) {
// Skip directories. // Skip directories.
@ -299,10 +302,7 @@ func MustSymlinkRelative(srcPath, dstPath string) {
// MustCopyDirectory copies all the files in srcPath to dstPath. // MustCopyDirectory copies all the files in srcPath to dstPath.
func MustCopyDirectory(srcPath, dstPath string) { func MustCopyDirectory(srcPath, dstPath string) {
des, err := os.ReadDir(srcPath) des := MustReadDir(srcPath)
if err != nil {
logger.Panicf("FATAL: cannot read srcDir: %s", err)
}
MustMkdirIfNotExist(dstPath) MustMkdirIfNotExist(dstPath)
for _, de := range des { for _, de := range des {
if !de.Type().IsRegular() { if !de.Type().IsRegular() {

View file

@ -313,7 +313,7 @@ func (pw *partWrapper) decRef() {
} }
} }
// OpenTable opens a table on the given path. // MustOpenTable opens a table on the given path.
// //
// Optional flushCallback is called every time new data batch is flushed // Optional flushCallback is called every time new data batch is flushed
// to the underlying storage and becomes visible to search. // to the underlying storage and becomes visible to search.
@ -322,7 +322,7 @@ func (pw *partWrapper) decRef() {
// to persistent storage. // to persistent storage.
// //
// The table is created if it doesn't exist yet. // The table is created if it doesn't exist yet.
func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallback, isReadOnly *uint32) (*Table, error) { func MustOpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallback, isReadOnly *uint32) *Table {
path = filepath.Clean(path) path = filepath.Clean(path)
logger.Infof("opening table %q...", path) logger.Infof("opening table %q...", path)
startTime := time.Now() startTime := time.Now()
@ -334,10 +334,7 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb
flockF := fs.MustCreateFlockFile(path) flockF := fs.MustCreateFlockFile(path)
// Open table parts. // Open table parts.
pws, err := openParts(path) pws := mustOpenParts(path)
if err != nil {
return nil, fmt.Errorf("cannot open table parts at %q: %w", path, err)
}
tb := &Table{ tb := &Table{
path: path, path: path,
@ -382,7 +379,7 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb
}() }()
} }
return tb, nil return tb
} }
func (tb *Table) startBackgroundWorkers() { func (tb *Table) startBackgroundWorkers() {
@ -1354,7 +1351,7 @@ func getWorkersCount() int {
return n return n
} }
func openParts(path string) ([]*partWrapper, error) { func mustOpenParts(path string) []*partWrapper {
// The path can be missing after restoring from backup, so create it if needed. // The path can be missing after restoring from backup, so create it if needed.
fs.MustMkdirIfNotExist(path) fs.MustMkdirIfNotExist(path)
fs.MustRemoveTemporaryDirs(path) fs.MustRemoveTemporaryDirs(path)
@ -1368,10 +1365,7 @@ func openParts(path string) ([]*partWrapper, error) {
// Remove dirs missing in partNames. These dirs may be left after unclean shutdown // Remove dirs missing in partNames. These dirs may be left after unclean shutdown
// or after the update from versions prior to v1.90.0. // or after the update from versions prior to v1.90.0.
des, err := os.ReadDir(path) des := fs.MustReadDir(path)
if err != nil {
return nil, fmt.Errorf("cannot read mergetree table dir: %w", err)
}
m := make(map[string]struct{}, len(partNames)) m := make(map[string]struct{}, len(partNames))
for _, partName := range partNames { for _, partName := range partNames {
m[partName] = struct{}{} m[partName] = struct{}{}
@ -1401,7 +1395,7 @@ func openParts(path string) ([]*partWrapper, error) {
pws = append(pws, pw) pws = append(pws, pw)
} }
return pws, nil return pws
} }
// CreateSnapshotAt creates tb snapshot in the given dstDir. // CreateSnapshotAt creates tb snapshot in the given dstDir.
@ -1496,10 +1490,7 @@ func mustReadPartNames(srcDir string) []string {
} }
// The partsFilename is missing. This is the upgrade from versions previous to v1.90.0. // The partsFilename is missing. This is the upgrade from versions previous to v1.90.0.
// Read part names from directories under srcDir // Read part names from directories under srcDir
des, err := os.ReadDir(srcDir) des := fs.MustReadDir(srcDir)
if err != nil {
logger.Panicf("FATAL: cannot read mergeset table dir: %s", err)
}
var partNames []string var partNames []string
for _, de := range des { for _, de := range des {
if !fs.IsDirOrSymlink(de) { if !fs.IsDirOrSymlink(de) {

View file

@ -42,10 +42,7 @@ func TestTableSearchSerial(t *testing.T) {
func() { func() {
// Re-open the table and verify the search works. // Re-open the table and verify the search works.
var isReadOnly uint32 var isReadOnly uint32
tb, err := OpenTable(path, nil, nil, &isReadOnly) tb := MustOpenTable(path, nil, nil, &isReadOnly)
if err != nil {
t.Fatalf("cannot open table: %s", err)
}
defer tb.MustClose() defer tb.MustClose()
if err := testTableSearchSerial(tb, items); err != nil { if err := testTableSearchSerial(tb, items); err != nil {
t.Fatalf("unexpected error: %s", err) t.Fatalf("unexpected error: %s", err)
@ -79,10 +76,7 @@ func TestTableSearchConcurrent(t *testing.T) {
// Re-open the table and verify the search works. // Re-open the table and verify the search works.
func() { func() {
var isReadOnly uint32 var isReadOnly uint32
tb, err := OpenTable(path, nil, nil, &isReadOnly) tb := MustOpenTable(path, nil, nil, &isReadOnly)
if err != nil {
t.Fatalf("cannot open table: %s", err)
}
defer tb.MustClose() defer tb.MustClose()
if err := testTableSearchConcurrent(tb, items); err != nil { if err := testTableSearchConcurrent(tb, items); err != nil {
t.Fatalf("unexpected error: %s", err) t.Fatalf("unexpected error: %s", err)
@ -156,10 +150,7 @@ func newTestTable(r *rand.Rand, path string, itemsCount int) (*Table, []string,
atomic.AddUint64(&flushes, 1) atomic.AddUint64(&flushes, 1)
} }
var isReadOnly uint32 var isReadOnly uint32
tb, err := OpenTable(path, flushCallback, nil, &isReadOnly) tb := MustOpenTable(path, flushCallback, nil, &isReadOnly)
if err != nil {
return nil, nil, fmt.Errorf("cannot open table: %w", err)
}
items := make([]string, itemsCount) items := make([]string, itemsCount)
for i := 0; i < itemsCount; i++ { for i := 0; i < itemsCount; i++ {
item := fmt.Sprintf("%d:%d", r.Intn(1e9), i) item := fmt.Sprintf("%d:%d", r.Intn(1e9), i)

View file

@ -35,10 +35,7 @@ func benchmarkTableSearch(b *testing.B, itemsCount int) {
// Force finishing pending merges // Force finishing pending merges
tb.MustClose() tb.MustClose()
var isReadOnly uint32 var isReadOnly uint32
tb, err = OpenTable(path, nil, nil, &isReadOnly) tb = MustOpenTable(path, nil, nil, &isReadOnly)
if err != nil {
b.Fatalf("unexpected error when re-opening table %q: %s", path, err)
}
defer tb.MustClose() defer tb.MustClose()
keys := make([][]byte, len(items)) keys := make([][]byte, len(items))

View file

@ -21,20 +21,14 @@ func TestTableOpenClose(t *testing.T) {
// Create a new table // Create a new table
var isReadOnly uint32 var isReadOnly uint32
tb, err := OpenTable(path, nil, nil, &isReadOnly) tb := MustOpenTable(path, nil, nil, &isReadOnly)
if err != nil {
t.Fatalf("cannot create new table: %s", err)
}
// Close it // Close it
tb.MustClose() tb.MustClose()
// Re-open created table multiple times. // Re-open created table multiple times.
for i := 0; i < 4; i++ { for i := 0; i < 4; i++ {
tb, err := OpenTable(path, nil, nil, &isReadOnly) tb := MustOpenTable(path, nil, nil, &isReadOnly)
if err != nil {
t.Fatalf("cannot open created table: %s", err)
}
tb.MustClose() tb.MustClose()
} }
} }
@ -54,10 +48,7 @@ func TestTableAddItemsSerial(t *testing.T) {
atomic.AddUint64(&flushes, 1) atomic.AddUint64(&flushes, 1)
} }
var isReadOnly uint32 var isReadOnly uint32
tb, err := OpenTable(path, flushCallback, nil, &isReadOnly) tb := MustOpenTable(path, flushCallback, nil, &isReadOnly)
if err != nil {
t.Fatalf("cannot open %q: %s", path, err)
}
const itemsCount = 10e3 const itemsCount = 10e3
testAddItemsSerial(r, tb, itemsCount) testAddItemsSerial(r, tb, itemsCount)
@ -80,10 +71,7 @@ func TestTableAddItemsSerial(t *testing.T) {
testReopenTable(t, path, itemsCount) testReopenTable(t, path, itemsCount)
// Add more items in order to verify merge between inmemory parts and file-based parts. // Add more items in order to verify merge between inmemory parts and file-based parts.
tb, err = OpenTable(path, nil, nil, &isReadOnly) tb = MustOpenTable(path, nil, nil, &isReadOnly)
if err != nil {
t.Fatalf("cannot open %q: %s", path, err)
}
const moreItemsCount = itemsCount * 3 const moreItemsCount = itemsCount * 3
testAddItemsSerial(r, tb, moreItemsCount) testAddItemsSerial(r, tb, moreItemsCount)
tb.MustClose() tb.MustClose()
@ -112,10 +100,7 @@ func TestTableCreateSnapshotAt(t *testing.T) {
}() }()
var isReadOnly uint32 var isReadOnly uint32
tb, err := OpenTable(path, nil, nil, &isReadOnly) tb := MustOpenTable(path, nil, nil, &isReadOnly)
if err != nil {
t.Fatalf("cannot open %q: %s", path, err)
}
defer tb.MustClose() defer tb.MustClose()
// Write a lot of items into the table, so background merges would start. // Write a lot of items into the table, so background merges would start.
@ -141,16 +126,10 @@ func TestTableCreateSnapshotAt(t *testing.T) {
}() }()
// Verify snapshots contain all the data. // Verify snapshots contain all the data.
tb1, err := OpenTable(snapshot1, nil, nil, &isReadOnly) tb1 := MustOpenTable(snapshot1, nil, nil, &isReadOnly)
if err != nil {
t.Fatalf("cannot open %q: %s", path, err)
}
defer tb1.MustClose() defer tb1.MustClose()
tb2, err := OpenTable(snapshot2, nil, nil, &isReadOnly) tb2 := MustOpenTable(snapshot2, nil, nil, &isReadOnly)
if err != nil {
t.Fatalf("cannot open %q: %s", path, err)
}
defer tb2.MustClose() defer tb2.MustClose()
var ts, ts1, ts2 TableSearch var ts, ts1, ts2 TableSearch
@ -199,10 +178,7 @@ func TestTableAddItemsConcurrent(t *testing.T) {
return data, items return data, items
} }
var isReadOnly uint32 var isReadOnly uint32
tb, err := OpenTable(path, flushCallback, prepareBlock, &isReadOnly) tb := MustOpenTable(path, flushCallback, prepareBlock, &isReadOnly)
if err != nil {
t.Fatalf("cannot open %q: %s", path, err)
}
const itemsCount = 10e3 const itemsCount = 10e3
testAddItemsConcurrent(tb, itemsCount) testAddItemsConcurrent(tb, itemsCount)
@ -225,10 +201,7 @@ func TestTableAddItemsConcurrent(t *testing.T) {
testReopenTable(t, path, itemsCount) testReopenTable(t, path, itemsCount)
// Add more items in order to verify merge between inmemory parts and file-based parts. // Add more items in order to verify merge between inmemory parts and file-based parts.
tb, err = OpenTable(path, nil, nil, &isReadOnly) tb = MustOpenTable(path, nil, nil, &isReadOnly)
if err != nil {
t.Fatalf("cannot open %q: %s", path, err)
}
const moreItemsCount = itemsCount * 3 const moreItemsCount = itemsCount * 3
testAddItemsConcurrent(tb, moreItemsCount) testAddItemsConcurrent(tb, moreItemsCount)
tb.MustClose() tb.MustClose()
@ -267,10 +240,7 @@ func testReopenTable(t *testing.T, path string, itemsCount int) {
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
var isReadOnly uint32 var isReadOnly uint32
tb, err := OpenTable(path, nil, nil, &isReadOnly) tb := MustOpenTable(path, nil, nil, &isReadOnly)
if err != nil {
t.Fatalf("cannot re-open %q: %s", path, err)
}
var m TableMetrics var m TableMetrics
tb.UpdateMetrics(&m) tb.UpdateMetrics(&m)
if n := m.TotalItemsCount(); n != uint64(itemsCount) { if n := m.TotalItemsCount(); n != uint64(itemsCount) {

View file

@ -203,10 +203,7 @@ func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize, maxPendingB
} }
// Locate reader and writer chunks in the path. // Locate reader and writer chunks in the path.
des, err := os.ReadDir(path) des := fs.MustReadDir(path)
if err != nil {
return nil, fmt.Errorf("cannot read contents of the directory %q: %w", path, err)
}
for _, de := range des { for _, de := range des {
fname := de.Name() fname := de.Name()
filepath := filepath.Join(path, fname) filepath := filepath.Join(path, fname)

View file

@ -125,14 +125,14 @@ func getTagFiltersCacheSize() int {
return maxTagFiltersCacheSize return maxTagFiltersCacheSize
} }
// openIndexDB opens index db from the given path. // mustOpenIndexDB opens index db from the given path.
// //
// The last segment of the path should contain unique hex value which // The last segment of the path should contain unique hex value which
// will be then used as indexDB.generation // will be then used as indexDB.generation
// //
// The rotationTimestamp must be set to the current unix timestamp when openIndexDB // The rotationTimestamp must be set to the current unix timestamp when mustOpenIndexDB
// is called when creating new indexdb during indexdb rotation. // is called when creating new indexdb during indexdb rotation.
func openIndexDB(path string, s *Storage, rotationTimestamp uint64, isReadOnly *uint32) (*indexDB, error) { func mustOpenIndexDB(path string, s *Storage, rotationTimestamp uint64, isReadOnly *uint32) *indexDB {
if s == nil { if s == nil {
logger.Panicf("BUG: Storage must be nin-nil") logger.Panicf("BUG: Storage must be nin-nil")
} }
@ -140,13 +140,10 @@ func openIndexDB(path string, s *Storage, rotationTimestamp uint64, isReadOnly *
name := filepath.Base(path) name := filepath.Base(path)
gen, err := strconv.ParseUint(name, 16, 64) gen, err := strconv.ParseUint(name, 16, 64)
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to parse indexdb path %q: %w", path, err) logger.Panicf("FATAL: cannot parse indexdb path %q: %s", path, err)
} }
tb, err := mergeset.OpenTable(path, invalidateTagFiltersCache, mergeTagToMetricIDsRows, isReadOnly) tb := mergeset.MustOpenTable(path, invalidateTagFiltersCache, mergeTagToMetricIDsRows, isReadOnly)
if err != nil {
return nil, fmt.Errorf("cannot open indexDB %q: %w", path, err)
}
// Do not persist tagFiltersToMetricIDsCache in files, since it is very volatile. // Do not persist tagFiltersToMetricIDsCache in files, since it is very volatile.
mem := memory.Allowed() mem := memory.Allowed()
@ -163,7 +160,7 @@ func openIndexDB(path string, s *Storage, rotationTimestamp uint64, isReadOnly *
s: s, s: s,
loopsPerDateTagFilterCache: workingsetcache.New(mem / 128), loopsPerDateTagFilterCache: workingsetcache.New(mem / 128),
} }
return db, nil return db
} }
const noDeadline = 1<<64 - 1 const noDeadline = 1<<64 - 1
@ -599,7 +596,7 @@ func (is *indexSearch) createTSIDByName(dst *TSID, metricName, metricNameRaw []b
is.createPerDayIndexes(date, dst.MetricID, mn) is.createPerDayIndexes(date, dst.MetricID, mn)
// There is no need in invalidating tag cache, since it is invalidated // There is no need in invalidating tag cache, since it is invalidated
// on db.tb flush via invalidateTagFiltersCache flushCallback passed to OpenTable. // on db.tb flush via invalidateTagFiltersCache flushCallback passed to mergeset.MustOpenTable.
if created { if created {
// Increase the newTimeseriesCreated counter only if tsid wasn't found in indexDB // Increase the newTimeseriesCreated counter only if tsid wasn't found in indexDB

View file

@ -496,10 +496,7 @@ func TestIndexDBOpenClose(t *testing.T) {
tableName := nextIndexDBTableName() tableName := nextIndexDBTableName()
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
var isReadOnly uint32 var isReadOnly uint32
db, err := openIndexDB(tableName, s, 0, &isReadOnly) db := mustOpenIndexDB(tableName, s, 0, &isReadOnly)
if err != nil {
t.Fatalf("cannot open indexDB: %s", err)
}
db.MustClose() db.MustClose()
} }
if err := os.RemoveAll(tableName); err != nil { if err := os.RemoveAll(tableName); err != nil {
@ -516,10 +513,7 @@ func TestIndexDB(t *testing.T) {
dbName := nextIndexDBTableName() dbName := nextIndexDBTableName()
var isReadOnly uint32 var isReadOnly uint32
db, err := openIndexDB(dbName, s, 0, &isReadOnly) db := mustOpenIndexDB(dbName, s, 0, &isReadOnly)
if err != nil {
t.Fatalf("cannot open indexDB: %s", err)
}
defer func() { defer func() {
db.MustClose() db.MustClose()
if err := os.RemoveAll(dbName); err != nil { if err := os.RemoveAll(dbName); err != nil {
@ -537,10 +531,7 @@ func TestIndexDB(t *testing.T) {
// Re-open the db and verify it works as expected. // Re-open the db and verify it works as expected.
db.MustClose() db.MustClose()
db, err = openIndexDB(dbName, s, 0, &isReadOnly) db = mustOpenIndexDB(dbName, s, 0, &isReadOnly)
if err != nil {
t.Fatalf("cannot open indexDB: %s", err)
}
if err := testIndexDBCheckTSIDByName(db, mns, tsids, false); err != nil { if err := testIndexDBCheckTSIDByName(db, mns, tsids, false); err != nil {
t.Fatalf("unexpected error: %s", err) t.Fatalf("unexpected error: %s", err)
} }
@ -552,10 +543,7 @@ func TestIndexDB(t *testing.T) {
dbName := nextIndexDBTableName() dbName := nextIndexDBTableName()
var isReadOnly uint32 var isReadOnly uint32
db, err := openIndexDB(dbName, s, 0, &isReadOnly) db := mustOpenIndexDB(dbName, s, 0, &isReadOnly)
if err != nil {
t.Fatalf("cannot open indexDB: %s", err)
}
defer func() { defer func() {
db.MustClose() db.MustClose()
if err := os.RemoveAll(dbName); err != nil { if err := os.RemoveAll(dbName); err != nil {
@ -1562,10 +1550,7 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
dbName := nextIndexDBTableName() dbName := nextIndexDBTableName()
var isReadOnly uint32 var isReadOnly uint32
db, err := openIndexDB(dbName, s, 0, &isReadOnly) db := mustOpenIndexDB(dbName, s, 0, &isReadOnly)
if err != nil {
t.Fatalf("cannot open indexDB: %s", err)
}
defer func() { defer func() {
db.MustClose() db.MustClose()
if err := os.RemoveAll(dbName); err != nil { if err := os.RemoveAll(dbName); err != nil {

View file

@ -45,10 +45,7 @@ func BenchmarkIndexDBAddTSIDs(b *testing.B) {
dbName := nextIndexDBTableName() dbName := nextIndexDBTableName()
var isReadOnly uint32 var isReadOnly uint32
db, err := openIndexDB(dbName, s, 0, &isReadOnly) db := mustOpenIndexDB(dbName, s, 0, &isReadOnly)
if err != nil {
b.Fatalf("cannot open indexDB: %s", err)
}
defer func() { defer func() {
db.MustClose() db.MustClose()
if err := os.RemoveAll(dbName); err != nil { if err := os.RemoveAll(dbName); err != nil {
@ -109,10 +106,7 @@ func BenchmarkHeadPostingForMatchers(b *testing.B) {
dbName := nextIndexDBTableName() dbName := nextIndexDBTableName()
var isReadOnly uint32 var isReadOnly uint32
db, err := openIndexDB(dbName, s, 0, &isReadOnly) db := mustOpenIndexDB(dbName, s, 0, &isReadOnly)
if err != nil {
b.Fatalf("cannot open indexDB: %s", err)
}
defer func() { defer func() {
db.MustClose() db.MustClose()
if err := os.RemoveAll(dbName); err != nil { if err := os.RemoveAll(dbName); err != nil {
@ -289,10 +283,7 @@ func BenchmarkIndexDBGetTSIDs(b *testing.B) {
dbName := nextIndexDBTableName() dbName := nextIndexDBTableName()
var isReadOnly uint32 var isReadOnly uint32
db, err := openIndexDB(dbName, s, 0, &isReadOnly) db := mustOpenIndexDB(dbName, s, 0, &isReadOnly)
if err != nil {
b.Fatalf("cannot open indexDB: %s", err)
}
defer func() { defer func() {
db.MustClose() db.MustClose()
if err := os.RemoveAll(dbName); err != nil { if err := os.RemoveAll(dbName); err != nil {

View file

@ -262,15 +262,8 @@ func openPartition(smallPartsPath, bigPartsPath string, s *Storage) (*partition,
partNamesSmall, partNamesBig := mustReadPartNames(smallPartsPath, bigPartsPath) partNamesSmall, partNamesBig := mustReadPartNames(smallPartsPath, bigPartsPath)
smallParts, err := openParts(smallPartsPath, partNamesSmall) smallParts := mustOpenParts(smallPartsPath, partNamesSmall)
if err != nil { bigParts := mustOpenParts(bigPartsPath, partNamesBig)
return nil, fmt.Errorf("cannot open small parts from %q: %w", smallPartsPath, err)
}
bigParts, err := openParts(bigPartsPath, partNamesBig)
if err != nil {
mustCloseParts(smallParts)
return nil, fmt.Errorf("cannot open big parts from %q: %w", bigPartsPath, err)
}
pt := newPartition(name, smallPartsPath, bigPartsPath, s) pt := newPartition(name, smallPartsPath, bigPartsPath, s)
pt.smallParts = smallParts pt.smallParts = smallParts
@ -1770,7 +1763,7 @@ func getPartsSize(pws []*partWrapper) uint64 {
return n return n
} }
func openParts(path string, partNames []string) ([]*partWrapper, error) { func mustOpenParts(path string, partNames []string) []*partWrapper {
// The path can be missing after restoring from backup, so create it if needed. // The path can be missing after restoring from backup, so create it if needed.
fs.MustMkdirIfNotExist(path) fs.MustMkdirIfNotExist(path)
fs.MustRemoveTemporaryDirs(path) fs.MustRemoveTemporaryDirs(path)
@ -1782,10 +1775,7 @@ func openParts(path string, partNames []string) ([]*partWrapper, error) {
// Remove dirs missing in partNames. These dirs may be left after unclean shutdown // Remove dirs missing in partNames. These dirs may be left after unclean shutdown
// or after the update from versions prior to v1.90.0. // or after the update from versions prior to v1.90.0.
des, err := os.ReadDir(path) des := fs.MustReadDir(path)
if err != nil {
return nil, fmt.Errorf("cannot read partition dir: %w", err)
}
m := make(map[string]struct{}, len(partNames)) m := make(map[string]struct{}, len(partNames))
for _, partName := range partNames { for _, partName := range partNames {
m[partName] = struct{}{} m[partName] = struct{}{}
@ -1815,16 +1805,7 @@ func openParts(path string, partNames []string) ([]*partWrapper, error) {
pws = append(pws, pw) pws = append(pws, pw)
} }
return pws, nil return pws
}
func mustCloseParts(pws []*partWrapper) {
for _, pw := range pws {
if pw.refCount != 1 {
logger.Panicf("BUG: unexpected refCount when closing part %q: %d; want 1", &pw.p.ph, pw.refCount)
}
pw.p.MustClose()
}
} }
// MustCreateSnapshotAt creates pt snapshot at the given smallPath and bigPath dirs. // MustCreateSnapshotAt creates pt snapshot at the given smallPath and bigPath dirs.
@ -1941,13 +1922,10 @@ func mustReadPartNames(smallPartsPath, bigPartsPath string) ([]string, []string)
} }
func mustReadPartNamesFromDir(srcDir string) []string { func mustReadPartNamesFromDir(srcDir string) []string {
des, err := os.ReadDir(srcDir) if !fs.IsPathExist(srcDir) {
if err != nil { return nil
if os.IsNotExist(err) {
return nil
}
logger.Panicf("FATAL: cannot read partition dir: %s", err)
} }
des := fs.MustReadDir(srcDir)
var partNames []string var partNames []string
for _, de := range des { for _, de := range des {
if !fs.IsDirOrSymlink(de) { if !fs.IsDirOrSymlink(de) {

View file

@ -220,10 +220,7 @@ func OpenStorage(path string, retentionMsecs int64, maxHourlySeries, maxDailySer
idbSnapshotsPath := filepath.Join(idbPath, snapshotsDirname) idbSnapshotsPath := filepath.Join(idbPath, snapshotsDirname)
fs.MustMkdirIfNotExist(idbSnapshotsPath) fs.MustMkdirIfNotExist(idbSnapshotsPath)
fs.MustRemoveTemporaryDirs(idbSnapshotsPath) fs.MustRemoveTemporaryDirs(idbSnapshotsPath)
idbCurr, idbPrev, err := s.openIndexDBTables(idbPath) idbCurr, idbPrev := s.mustOpenIndexDBTables(idbPath)
if err != nil {
return nil, fmt.Errorf("cannot open indexdb tables at %q: %w", idbPath, err)
}
idbCurr.SetExtDB(idbPrev) idbCurr.SetExtDB(idbPrev)
s.idbCurr.Store(idbCurr) s.idbCurr.Store(idbCurr)
@ -707,10 +704,7 @@ func (s *Storage) mustRotateIndexDB() {
newTableName := nextIndexDBTableName() newTableName := nextIndexDBTableName()
idbNewPath := filepath.Join(s.path, indexdbDirname, newTableName) idbNewPath := filepath.Join(s.path, indexdbDirname, newTableName)
rotationTimestamp := fasttime.UnixTimestamp() rotationTimestamp := fasttime.UnixTimestamp()
idbNew, err := openIndexDB(idbNewPath, s, rotationTimestamp, &s.isReadOnly) idbNew := mustOpenIndexDB(idbNewPath, s, rotationTimestamp, &s.isReadOnly)
if err != nil {
logger.Panicf("FATAL: cannot create new indexDB at %q: %s", idbNewPath, err)
}
// Drop extDB // Drop extDB
idbCurr := s.idb() idbCurr := s.idb()
@ -2320,16 +2314,13 @@ func (s *Storage) putTSIDToCache(tsid *generationTSID, metricName []byte) {
s.tsidCache.Set(metricName, buf) s.tsidCache.Set(metricName, buf)
} }
func (s *Storage) openIndexDBTables(path string) (curr, prev *indexDB, err error) { func (s *Storage) mustOpenIndexDBTables(path string) (curr, prev *indexDB) {
fs.MustMkdirIfNotExist(path) fs.MustMkdirIfNotExist(path)
fs.MustRemoveTemporaryDirs(path) fs.MustRemoveTemporaryDirs(path)
// Search for the two most recent tables - the last one is active, // Search for the two most recent tables - the last one is active,
// the previous one contains backup data. // the previous one contains backup data.
des, err := os.ReadDir(path) des := fs.MustReadDir(path)
if err != nil {
return nil, nil, fmt.Errorf("cannot read directory: %w", err)
}
var tableNames []string var tableNames []string
for _, de := range des { for _, de := range des {
if !fs.IsDirOrSymlink(de) { if !fs.IsDirOrSymlink(de) {
@ -2372,18 +2363,11 @@ func (s *Storage) openIndexDBTables(path string) (curr, prev *indexDB, err error
// Open the last two tables. // Open the last two tables.
currPath := filepath.Join(path, tableNames[len(tableNames)-1]) currPath := filepath.Join(path, tableNames[len(tableNames)-1])
curr, err = openIndexDB(currPath, s, 0, &s.isReadOnly) curr = mustOpenIndexDB(currPath, s, 0, &s.isReadOnly)
if err != nil {
return nil, nil, fmt.Errorf("cannot open curr indexdb table at %q: %w", currPath, err)
}
prevPath := filepath.Join(path, tableNames[len(tableNames)-2]) prevPath := filepath.Join(path, tableNames[len(tableNames)-2])
prev, err = openIndexDB(prevPath, s, 0, &s.isReadOnly) prev = mustOpenIndexDB(prevPath, s, 0, &s.isReadOnly)
if err != nil {
curr.MustClose()
return nil, nil, fmt.Errorf("cannot open prev indexdb table at %q: %w", prevPath, err)
}
return curr, prev, nil return curr, prev
} }
var indexDBTableNameRegexp = regexp.MustCompile("^[0-9A-F]{16}$") var indexDBTableNameRegexp = regexp.MustCompile("^[0-9A-F]{16}$")

View file

@ -486,12 +486,8 @@ func openPartitions(smallPartitionsPath, bigPartitionsPath string, s *Storage) (
// Certain partition directories in either `big` or `small` dir may be missing // Certain partition directories in either `big` or `small` dir may be missing
// after restoring from backup. So populate partition names from both dirs. // after restoring from backup. So populate partition names from both dirs.
ptNames := make(map[string]bool) ptNames := make(map[string]bool)
if err := populatePartitionNames(smallPartitionsPath, ptNames); err != nil { mustPopulatePartitionNames(smallPartitionsPath, ptNames)
return nil, err mustPopulatePartitionNames(bigPartitionsPath, ptNames)
}
if err := populatePartitionNames(bigPartitionsPath, ptNames); err != nil {
return nil, err
}
var pts []*partition var pts []*partition
for ptName := range ptNames { for ptName := range ptNames {
smallPartsPath := filepath.Join(smallPartitionsPath, ptName) smallPartsPath := filepath.Join(smallPartitionsPath, ptName)
@ -506,11 +502,8 @@ func openPartitions(smallPartitionsPath, bigPartitionsPath string, s *Storage) (
return pts, nil return pts, nil
} }
func populatePartitionNames(partitionsPath string, ptNames map[string]bool) error { func mustPopulatePartitionNames(partitionsPath string, ptNames map[string]bool) {
des, err := os.ReadDir(partitionsPath) des := fs.MustReadDir(partitionsPath)
if err != nil {
return fmt.Errorf("cannot read directory with partitions: %w", err)
}
for _, de := range des { for _, de := range des {
if !fs.IsDirOrSymlink(de) { if !fs.IsDirOrSymlink(de) {
// Skip non-directories // Skip non-directories
@ -523,7 +516,6 @@ func populatePartitionNames(partitionsPath string, ptNames map[string]bool) erro
} }
ptNames[ptName] = true ptNames[ptName] = true
} }
return nil
} }
func mustClosePartitions(pts []*partition) { func mustClosePartitions(pts []*partition) {