Add flagutil.Duration to avoid conversion bugs (#4835)

* Introduce flagutil.Duration

To avoid conversion bugs

* Fix tests

* Comment why not .Seconds()
This commit is contained in:
Dima Lazerka 2023-09-01 10:27:51 +03:00 committed by GitHub
parent 4bcc086965
commit e0e856d2e7
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
9 changed files with 61 additions and 37 deletions

View file

@ -39,13 +39,13 @@ func Init() {
logger.Panicf("BUG: Init() has been already called") logger.Panicf("BUG: Init() has been already called")
} }
if retentionPeriod.Msecs < 24*3600*1000 { if retentionPeriod.Duration() < 24*time.Hour {
logger.Fatalf("-retentionPeriod cannot be smaller than a day; got %s", retentionPeriod) logger.Fatalf("-retentionPeriod cannot be smaller than a day; got %s", retentionPeriod)
} }
cfg := &logstorage.StorageConfig{ cfg := &logstorage.StorageConfig{
Retention: time.Millisecond * time.Duration(retentionPeriod.Msecs), Retention: retentionPeriod.Duration(),
FlushInterval: *inmemoryDataFlushInterval, FlushInterval: *inmemoryDataFlushInterval,
FutureRetention: time.Millisecond * time.Duration(futureRetention.Msecs), FutureRetention: futureRetention.Duration(),
LogNewStreams: *logNewStreams, LogNewStreams: *logNewStreams,
LogIngestedRows: *logIngestedRows, LogIngestedRows: *logIngestedRows,
} }

View file

@ -102,13 +102,13 @@ func Init(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
mergeset.SetIndexBlocksCacheSize(cacheSizeIndexDBIndexBlocks.IntN()) mergeset.SetIndexBlocksCacheSize(cacheSizeIndexDBIndexBlocks.IntN())
mergeset.SetDataBlocksCacheSize(cacheSizeIndexDBDataBlocks.IntN()) mergeset.SetDataBlocksCacheSize(cacheSizeIndexDBDataBlocks.IntN())
if retentionPeriod.Msecs < 24*3600*1000 { if retentionPeriod.Duration() < 24*time.Hour {
logger.Fatalf("-retentionPeriod cannot be smaller than a day; got %s", retentionPeriod) logger.Fatalf("-retentionPeriod cannot be smaller than a day; got %s", retentionPeriod)
} }
logger.Infof("opening storage at %q with -retentionPeriod=%s", *DataPath, retentionPeriod) logger.Infof("opening storage at %q with -retentionPeriod=%s", *DataPath, retentionPeriod)
startTime := time.Now() startTime := time.Now()
WG = syncwg.WaitGroup{} WG = syncwg.WaitGroup{}
strg := storage.MustOpenStorage(*DataPath, retentionPeriod.Msecs, *maxHourlySeries, *maxDailySeries) strg := storage.MustOpenStorage(*DataPath, retentionPeriod.Duration(), *maxHourlySeries, *maxDailySeries)
Storage = strg Storage = strg
initStaleSnapshotsRemover(strg) initStaleSnapshotsRemover(strg)
@ -380,10 +380,10 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
func initStaleSnapshotsRemover(strg *storage.Storage) { func initStaleSnapshotsRemover(strg *storage.Storage) {
staleSnapshotsRemoverCh = make(chan struct{}) staleSnapshotsRemoverCh = make(chan struct{})
if snapshotsMaxAge.Msecs <= 0 { if snapshotsMaxAge.Duration() <= 0 {
return return
} }
snapshotsMaxAgeDur := time.Duration(snapshotsMaxAge.Msecs) * time.Millisecond snapshotsMaxAgeDur := snapshotsMaxAge.Duration()
staleSnapshotsRemoverWG.Add(1) staleSnapshotsRemoverWG.Add(1)
go func() { go func() {
defer staleSnapshotsRemoverWG.Done() defer staleSnapshotsRemoverWG.Done()

View file

@ -5,6 +5,7 @@ import (
"fmt" "fmt"
"strconv" "strconv"
"strings" "strings"
"time"
"github.com/VictoriaMetrics/metricsql" "github.com/VictoriaMetrics/metricsql"
) )
@ -30,6 +31,11 @@ type Duration struct {
valueString string valueString string
} }
// Duration returns the duration in d as a time.Duration.
//
// The result is derived from d.Msecs, so its precision is limited to milliseconds.
func (d *Duration) Duration() time.Duration {
	return time.Millisecond * time.Duration(d.Msecs)
}
// String implements flag.Value interface // String implements flag.Value interface
func (d *Duration) String() string { func (d *Duration) String() string {
return d.valueString return d.valueString

View file

@ -3,6 +3,7 @@ package flagutil
import ( import (
"strings" "strings"
"testing" "testing"
"time"
) )
func TestDurationSetFailure(t *testing.T) { func TestDurationSetFailure(t *testing.T) {
@ -59,3 +60,22 @@ func TestDurationSetSuccess(t *testing.T) {
f("1w", 7*24*3600*1000) f("1w", 7*24*3600*1000)
f("0.25y", 0.25*365*24*3600*1000) f("0.25y", 0.25*365*24*3600*1000)
} }
// TestDurationDuration verifies that Duration.Duration() converts the parsed
// flag value to the equivalent time.Duration.
func TestDurationDuration(t *testing.T) {
	f := func(value string, expected time.Duration) {
		t.Helper()
		var d Duration
		if err := d.Set(value); err != nil {
			t.Fatalf("unexpected error in d.Set(%q): %s", value, err)
		}
		// Call Duration() once; let %s format the durations directly
		// instead of pre-converting with .String() and printing via %v.
		if got := d.Duration(); got != expected {
			t.Fatalf("unexpected result; got %s; want %s", got, expected)
		}
	}
	f("0", 0)
	// A bare number is interpreted as months; one month is 31 days.
	f("1", 31*24*time.Hour)
	f("1h", time.Hour)
	f("1.5d", 1.5*24*time.Hour)
	f("1w", 7*24*time.Hour)
	f("0.25y", 0.25*365*24*time.Hour)
}

View file

@ -508,7 +508,7 @@ func TestIndexDB(t *testing.T) {
t.Run("serial", func(t *testing.T) { t.Run("serial", func(t *testing.T) {
const path = "TestIndexDB-serial" const path = "TestIndexDB-serial"
s := MustOpenStorage(path, maxRetentionMsecs, 0, 0) s := MustOpenStorage(path, retentionMax, 0, 0)
db := s.idb() db := s.idb()
mns, tsids, err := testIndexDBGetOrCreateTSIDByName(db, metricGroups) mns, tsids, err := testIndexDBGetOrCreateTSIDByName(db, metricGroups)
@ -521,7 +521,7 @@ func TestIndexDB(t *testing.T) {
// Re-open the storage and verify it works as expected. // Re-open the storage and verify it works as expected.
s.MustClose() s.MustClose()
s = MustOpenStorage(path, maxRetentionMsecs, 0, 0) s = MustOpenStorage(path, retentionMax, 0, 0)
db = s.idb() db = s.idb()
if err := testIndexDBCheckTSIDByName(db, mns, tsids, false); err != nil { if err := testIndexDBCheckTSIDByName(db, mns, tsids, false); err != nil {
@ -534,7 +534,7 @@ func TestIndexDB(t *testing.T) {
t.Run("concurrent", func(t *testing.T) { t.Run("concurrent", func(t *testing.T) {
const path = "TestIndexDB-concurrent" const path = "TestIndexDB-concurrent"
s := MustOpenStorage(path, maxRetentionMsecs, 0, 0) s := MustOpenStorage(path, retentionMax, 0, 0)
db := s.idb() db := s.idb()
ch := make(chan error, 3) ch := make(chan error, 3)
@ -1429,7 +1429,7 @@ func TestMatchTagFilters(t *testing.T) {
func TestIndexDBRepopulateAfterRotation(t *testing.T) { func TestIndexDBRepopulateAfterRotation(t *testing.T) {
r := rand.New(rand.NewSource(1)) r := rand.New(rand.NewSource(1))
path := "TestIndexRepopulateAfterRotation" path := "TestIndexRepopulateAfterRotation"
s := MustOpenStorage(path, msecsPerMonth, 1e5, 1e5) s := MustOpenStorage(path, retentionMonth, 1e5, 1e5)
db := s.idb() db := s.idb()
if db.generation == 0 { if db.generation == 0 {
@ -1516,7 +1516,7 @@ func TestIndexDBRepopulateAfterRotation(t *testing.T) {
func TestSearchTSIDWithTimeRange(t *testing.T) { func TestSearchTSIDWithTimeRange(t *testing.T) {
const path = "TestSearchTSIDWithTimeRange" const path = "TestSearchTSIDWithTimeRange"
s := MustOpenStorage(path, maxRetentionMsecs, 0, 0) s := MustOpenStorage(path, retentionMax, 0, 0)
db := s.idb() db := s.idb()
is := db.getIndexSearch(noDeadline) is := db.getIndexSearch(noDeadline)
@ -1966,7 +1966,7 @@ func newTestStorage() *Storage {
metricNameCache: workingsetcache.New(1234), metricNameCache: workingsetcache.New(1234),
tsidCache: workingsetcache.New(1234), tsidCache: workingsetcache.New(1234),
dateMetricIDCache: newDateMetricIDCache(), dateMetricIDCache: newDateMetricIDCache(),
retentionMsecs: maxRetentionMsecs, retentionMsecs: retentionMax.Milliseconds(),
} }
s.setDeletedMetricIDs(&uint64set.Set{}) s.setDeletedMetricIDs(&uint64set.Set{})
return s return s

View file

@ -40,7 +40,7 @@ func BenchmarkRegexpFilterMismatch(b *testing.B) {
func BenchmarkIndexDBAddTSIDs(b *testing.B) { func BenchmarkIndexDBAddTSIDs(b *testing.B) {
const path = "BenchmarkIndexDBAddTSIDs" const path = "BenchmarkIndexDBAddTSIDs"
s := MustOpenStorage(path, maxRetentionMsecs, 0, 0) s := MustOpenStorage(path, retentionMax, 0, 0)
db := s.idb() db := s.idb()
const recordsPerLoop = 1e3 const recordsPerLoop = 1e3
@ -94,7 +94,7 @@ func BenchmarkHeadPostingForMatchers(b *testing.B) {
// This benchmark is equivalent to https://github.com/prometheus/prometheus/blob/23c0299d85bfeb5d9b59e994861553a25ca578e5/tsdb/head_bench_test.go#L52 // This benchmark is equivalent to https://github.com/prometheus/prometheus/blob/23c0299d85bfeb5d9b59e994861553a25ca578e5/tsdb/head_bench_test.go#L52
// See https://www.robustperception.io/evaluating-performance-and-correctness for more details. // See https://www.robustperception.io/evaluating-performance-and-correctness for more details.
const path = "BenchmarkHeadPostingForMatchers" const path = "BenchmarkHeadPostingForMatchers"
s := MustOpenStorage(path, maxRetentionMsecs, 0, 0) s := MustOpenStorage(path, retentionMax, 0, 0)
db := s.idb() db := s.idb()
// Fill the db with data as in https://github.com/prometheus/prometheus/blob/23c0299d85bfeb5d9b59e994861553a25ca578e5/tsdb/head_bench_test.go#L66 // Fill the db with data as in https://github.com/prometheus/prometheus/blob/23c0299d85bfeb5d9b59e994861553a25ca578e5/tsdb/head_bench_test.go#L66
@ -261,7 +261,7 @@ func BenchmarkHeadPostingForMatchers(b *testing.B) {
func BenchmarkIndexDBGetTSIDs(b *testing.B) { func BenchmarkIndexDBGetTSIDs(b *testing.B) {
const path = "BenchmarkIndexDBGetTSIDs" const path = "BenchmarkIndexDBGetTSIDs"
s := MustOpenStorage(path, maxRetentionMsecs, 0, 0) s := MustOpenStorage(path, retentionMax, 0, 0)
db := s.idb() db := s.idb()
const recordsPerLoop = 1000 const recordsPerLoop = 1000

View file

@ -33,8 +33,8 @@ import (
) )
const ( const (
msecsPerMonth = 31 * 24 * 3600 * 1000 retentionMonth = 31 * 24 * time.Hour
maxRetentionMsecs = 100 * 12 * msecsPerMonth retentionMax = 100 * 12 * retentionMonth
) )
// Storage represents TSDB storage. // Storage represents TSDB storage.
@ -154,21 +154,18 @@ type Storage struct {
} }
// MustOpenStorage opens storage on the given path with the given retentionMsecs. // MustOpenStorage opens storage on the given path with the given retention.
func MustOpenStorage(path string, retentionMsecs int64, maxHourlySeries, maxDailySeries int) *Storage { func MustOpenStorage(path string, retention time.Duration, maxHourlySeries, maxDailySeries int) *Storage {
path, err := filepath.Abs(path) path, err := filepath.Abs(path)
if err != nil { if err != nil {
logger.Panicf("FATAL: cannot determine absolute path for %q: %s", path, err) logger.Panicf("FATAL: cannot determine absolute path for %q: %s", path, err)
} }
if retentionMsecs <= 0 { if retention <= 0 || retention > retentionMax {
retentionMsecs = maxRetentionMsecs retention = retentionMax
}
if retentionMsecs > maxRetentionMsecs {
retentionMsecs = maxRetentionMsecs
} }
s := &Storage{ s := &Storage{
path: path, path: path,
cachePath: filepath.Join(path, cacheDirname), cachePath: filepath.Join(path, cacheDirname),
retentionMsecs: retentionMsecs, retentionMsecs: retention.Milliseconds(),
stop: make(chan struct{}), stop: make(chan struct{}),
} }
fs.MustMkdirIfNotExist(path) fs.MustMkdirIfNotExist(path)
@ -246,7 +243,8 @@ func MustOpenStorage(path string, retentionMsecs int64, maxHourlySeries, maxDail
// Initialize nextRotationTimestamp // Initialize nextRotationTimestamp
nowSecs := time.Now().UnixNano() / 1e9 nowSecs := time.Now().UnixNano() / 1e9
nextRotationTimestamp := nextRetentionDeadlineSeconds(nowSecs, retentionMsecs/1000, retentionTimezoneOffsetSecs) retentionSecs := retention.Milliseconds() / 1000 // not .Seconds(), which would introduce an unnecessary float64 conversion
nextRotationTimestamp := nextRetentionDeadlineSeconds(nowSecs, retentionSecs, retentionTimezoneOffsetSecs)
atomic.StoreInt64(&s.nextRotationTimestamp, nextRotationTimestamp) atomic.StoreInt64(&s.nextRotationTimestamp, nextRotationTimestamp)
// Load nextDayMetricIDs cache // Load nextDayMetricIDs cache

View file

@ -478,15 +478,15 @@ func TestStorageOpenClose(t *testing.T) {
func TestStorageRandTimestamps(t *testing.T) { func TestStorageRandTimestamps(t *testing.T) {
path := "TestStorageRandTimestamps" path := "TestStorageRandTimestamps"
retentionMsecs := int64(10 * msecsPerMonth) retention := 10 * retentionMonth
s := MustOpenStorage(path, retentionMsecs, 0, 0) s := MustOpenStorage(path, retention, 0, 0)
t.Run("serial", func(t *testing.T) { t.Run("serial", func(t *testing.T) {
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
if err := testStorageRandTimestamps(s); err != nil { if err := testStorageRandTimestamps(s); err != nil {
t.Fatalf("error on iteration %d: %s", i, err) t.Fatalf("error on iteration %d: %s", i, err)
} }
s.MustClose() s.MustClose()
s = MustOpenStorage(path, retentionMsecs, 0, 0) s = MustOpenStorage(path, retention, 0, 0)
} }
}) })
t.Run("concurrent", func(t *testing.T) { t.Run("concurrent", func(t *testing.T) {
@ -936,8 +936,8 @@ func testStorageRegisterMetricNames(s *Storage) error {
func TestStorageAddRowsSerial(t *testing.T) { func TestStorageAddRowsSerial(t *testing.T) {
rng := rand.New(rand.NewSource(1)) rng := rand.New(rand.NewSource(1))
path := "TestStorageAddRowsSerial" path := "TestStorageAddRowsSerial"
retentionMsecs := int64(msecsPerMonth * 10) retention := 10 * retentionMonth
s := MustOpenStorage(path, retentionMsecs, 1e5, 1e5) s := MustOpenStorage(path, retention, 1e5, 1e5)
if err := testStorageAddRows(rng, s); err != nil { if err := testStorageAddRows(rng, s); err != nil {
t.Fatalf("unexpected error: %s", err) t.Fatalf("unexpected error: %s", err)
} }
@ -949,8 +949,8 @@ func TestStorageAddRowsSerial(t *testing.T) {
func TestStorageAddRowsConcurrent(t *testing.T) { func TestStorageAddRowsConcurrent(t *testing.T) {
path := "TestStorageAddRowsConcurrent" path := "TestStorageAddRowsConcurrent"
retentionMsecs := int64(msecsPerMonth * 10) retention := 10 * retentionMonth
s := MustOpenStorage(path, retentionMsecs, 1e5, 1e5) s := MustOpenStorage(path, retention, 1e5, 1e5)
ch := make(chan error, 3) ch := make(chan error, 3)
for i := 0; i < cap(ch); i++ { for i := 0; i < cap(ch); i++ {
go func(n int) { go func(n int) {
@ -1164,8 +1164,8 @@ func testStorageAddMetrics(s *Storage, workerNum int) error {
func TestStorageDeleteStaleSnapshots(t *testing.T) { func TestStorageDeleteStaleSnapshots(t *testing.T) {
rng := rand.New(rand.NewSource(1)) rng := rand.New(rand.NewSource(1))
path := "TestStorageDeleteStaleSnapshots" path := "TestStorageDeleteStaleSnapshots"
retentionMsecs := int64(msecsPerMonth * 10) retention := 10 * retentionMonth
s := MustOpenStorage(path, retentionMsecs, 1e5, 1e5) s := MustOpenStorage(path, retention, 1e5, 1e5)
const rowsPerAdd = 1e3 const rowsPerAdd = 1e3
const addsCount = 10 const addsCount = 10
maxTimestamp := timestampFromTime(time.Now()) maxTimestamp := timestampFromTime(time.Now())

View file

@ -7,7 +7,7 @@ import (
func TestTableOpenClose(t *testing.T) { func TestTableOpenClose(t *testing.T) {
const path = "TestTableOpenClose" const path = "TestTableOpenClose"
const retentionMsecs = 123 * msecsPerMonth const retention = 123 * retentionMonth
if err := os.RemoveAll(path); err != nil { if err := os.RemoveAll(path); err != nil {
t.Fatalf("cannot remove %q: %s", path, err) t.Fatalf("cannot remove %q: %s", path, err)
@ -18,7 +18,7 @@ func TestTableOpenClose(t *testing.T) {
// Create a new table // Create a new table
strg := newTestStorage() strg := newTestStorage()
strg.retentionMsecs = retentionMsecs strg.retentionMsecs = retention.Milliseconds()
tb := mustOpenTable(path, strg) tb := mustOpenTable(path, strg)
// Close it // Close it