all: consistently log durations in seconds with millisecond precision
This should improve log readability.
This commit is contained in:
parent 6eddce1d15
commit ea53a21b02
11 changed files with 50 additions and 47 deletions
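The whole diff follows one pattern: durations that used to be logged with the %s verb on a time.Duration are now logged with %.3f on Duration.Seconds(), so every duration appears in seconds with millisecond precision. A minimal standalone Go sketch (not part of the repository) contrasting the two formats:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Pretend an operation started roughly 1.5 seconds ago.
	startTime := time.Now().Add(-1537 * time.Millisecond)

	// Old format: %s on a time.Duration chooses its own unit and precision,
	// e.g. "1.537216789s" or "153.2µs" depending on magnitude.
	fmt.Printf("finished in %s\n", time.Since(startTime))

	// New format: %.3f on Duration.Seconds() always prints seconds with
	// millisecond precision, e.g. "1.537 seconds".
	fmt.Printf("finished in %.3f seconds\n", time.Since(startTime).Seconds())
}

Note that %.3f rounds, so sub-millisecond durations print as 0.000 seconds; for the startup, shutdown, and merge paths touched below this is harmless.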
@@ -53,7 +53,7 @@ func main() {
 logger.Fatalf("missing -storageNode arg")
 }
 netstorage.InitStorageNodes(*storageNodes)
-logger.Infof("successfully initialized netstorage in %s", time.Since(startTime))
+logger.Infof("successfully initialized netstorage in %.3f seconds", time.Since(startTime).Seconds())

 storage.SetMaxLabelsPerTimeseries(*maxLabelsPerTimeseries)

@@ -80,7 +80,7 @@ func main() {
 if err := httpserver.Stop(*httpListenAddr); err != nil {
 logger.Fatalf("cannot stop the service: %s", err)
 }
-logger.Infof("successfully shut down the service in %s", time.Since(startTime))
+logger.Infof("successfully shut down the service in %.3f seconds", time.Since(startTime).Seconds())

 if len(*graphiteListenAddr) > 0 {
 graphiteServer.MustStop()

@@ -95,7 +95,7 @@ func main() {
 logger.Infof("shutting down neststorage...")
 startTime = time.Now()
 netstorage.Stop()
-logger.Infof("successfully stopped netstorage in %s", time.Since(startTime))
+logger.Infof("successfully stopped netstorage in %.3f seconds", time.Since(startTime).Seconds())

 fs.MustStopDirRemover()

@@ -56,7 +56,7 @@ func main() {
 logger.Fatalf("missing -storageNode arg")
 }
 netstorage.InitStorageNodes(*storageNodes)
-logger.Infof("started netstorage in %s", time.Since(startTime))
+logger.Infof("started netstorage in %.3f seconds", time.Since(startTime).Seconds())

 if len(*cacheDataPath) > 0 {
 tmpDataPath := *cacheDataPath + "/tmp"

@@ -81,7 +81,7 @@ func main() {
 if err := httpserver.Stop(*httpListenAddr); err != nil {
 logger.Fatalf("cannot stop the service: %s", err)
 }
-logger.Infof("successfully shut down the service in %s", time.Since(startTime))
+logger.Infof("successfully shut down the service in %.3f seconds", time.Since(startTime).Seconds())

 logger.Infof("shutting down neststorage...")
 startTime = time.Now()

@@ -89,7 +89,7 @@ func main() {
 if len(*cacheDataPath) > 0 {
 promql.StopRollupResultCache()
 }
-logger.Infof("successfully stopped netstorage in %s", time.Since(startTime))
+logger.Infof("successfully stopped netstorage in %.3f seconds", time.Since(startTime).Seconds())

 fs.MustStopDirRemover()

@@ -26,8 +26,8 @@ func Exec(ec *EvalConfig, q string, isFirstPointOnly bool) ([]netstorage.Result,
 defer func() {
 d := time.Since(startTime)
 if d >= *logSlowQueryDuration {
-logger.Infof("slow query according to -search.logSlowQueryDuration=%s: duration=%s, start=%d, end=%d, step=%d, accountID=%d, projectID=%d, query=%q",
-*logSlowQueryDuration, d, ec.Start/1000, ec.End/1000, ec.Step/1000, ec.AuthToken.AccountID, ec.AuthToken.ProjectID, q)
+logger.Infof("slow query according to -search.logSlowQueryDuration=%s: duration=%.3f seconds, start=%d, end=%d, step=%d, accountID=%d, projectID=%d, query=%q",
+*logSlowQueryDuration, d.Seconds(), ec.Start/1000, ec.End/1000, ec.Step/1000, ec.AuthToken.AccountID, ec.AuthToken.ProjectID, q)
 slowQueries.Inc()
 }
 }()

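The hunk above only changes the formatting; the surrounding slow-query pattern, a deferred duration check against the -search.logSlowQueryDuration flag, is worth spelling out. A simplified sketch of that pattern, with illustrative names rather than the repository's actual API:

package main

import (
	"flag"
	"log"
	"time"
)

// logSlowQueryDuration mirrors the -search.logSlowQueryDuration flag referenced above;
// the default value here is arbitrary.
var logSlowQueryDuration = flag.Duration("search.logSlowQueryDuration", 5*time.Second,
	"Log queries that take longer than this duration")

// execQuery stands in for the real query execution path.
func execQuery(q string) {
	startTime := time.Now()
	defer func() {
		d := time.Since(startTime)
		if d >= *logSlowQueryDuration {
			// Report the duration in seconds with millisecond precision,
			// matching the format introduced by this commit.
			log.Printf("slow query: duration=%.3f seconds, query=%q", d.Seconds(), q)
		}
	}()
	time.Sleep(10 * time.Millisecond) // placeholder for the actual work
}

func main() {
	flag.Parse()
	execQuery(`up{job="node"}`)
}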
@@ -77,8 +77,8 @@ func InitRollupResultCache(cachePath string) {
 return stats
 }
 if len(rollupResultCachePath) > 0 {
-logger.Infof("loaded rollupResult cache from %q in %s; entriesCount: %d, sizeBytes: %d",
-rollupResultCachePath, time.Since(startTime), fcs().EntriesCount, fcs().BytesSize)
+logger.Infof("loaded rollupResult cache from %q in %.3f seconds; entriesCount: %d, sizeBytes: %d",
+rollupResultCachePath, time.Since(startTime).Seconds(), fcs().EntriesCount, fcs().BytesSize)
 }

 metrics.NewGauge(`vm_cache_entries{type="promql/rollupResult"}`, func() float64 {

@@ -116,8 +116,8 @@ func StopRollupResultCache() {
 rollupResultCacheV.c.UpdateStats(&fcs)
 rollupResultCacheV.c.Stop()
 rollupResultCacheV.c = nil
-logger.Infof("saved rollupResult cache to %q in %s; entriesCount: %d, sizeBytes: %d",
-rollupResultCachePath, time.Since(startTime), fcs.EntriesCount, fcs.BytesSize)
+logger.Infof("saved rollupResult cache to %q in %.3f seconds; entriesCount: %d, sizeBytes: %d",
+rollupResultCachePath, time.Since(startTime).Seconds(), fcs.EntriesCount, fcs.BytesSize)
 }

 // TODO: convert this cache to distributed cache shared among vmselect

@@ -52,8 +52,8 @@ func main() {
 blocksCount := tm.SmallBlocksCount + tm.BigBlocksCount
 rowsCount := tm.SmallRowsCount + tm.BigRowsCount
 sizeBytes := tm.SmallSizeBytes + tm.BigSizeBytes
-logger.Infof("successfully opened storage %q in %s; partsCount: %d; blocksCount: %d; rowsCount: %d; sizeBytes: %d",
-*storageDataPath, time.Since(startTime), partsCount, blocksCount, rowsCount, sizeBytes)
+logger.Infof("successfully opened storage %q in %.3f seconds; partsCount: %d; blocksCount: %d; rowsCount: %d; sizeBytes: %d",
+*storageDataPath, time.Since(startTime).Seconds(), partsCount, blocksCount, rowsCount, sizeBytes)

 registerStorageMetrics(strg)

@@ -76,12 +76,12 @@ func main() {
 logger.Infof("gracefully shutting down the service")
 startTime = time.Now()
 srv.MustClose()
-logger.Infof("successfully shut down the service in %s", time.Since(startTime))
+logger.Infof("successfully shut down the service in %.3f seconds", time.Since(startTime).Seconds())

 logger.Infof("gracefully closing the storage at %s", *storageDataPath)
 startTime = time.Now()
 strg.MustClose()
-logger.Infof("successfully closed the storage in %s", time.Since(startTime))
+logger.Infof("successfully closed the storage in %.3f seconds", time.Since(startTime).Seconds())

 fs.MustStopDirRemover()

@@ -166,8 +166,8 @@ func runBackup(src *fslocal.FS, dst common.RemoteFS, origin common.OriginFS, con
 }
 }

-logger.Infof("backed up %d bytes in %s; deleted %d bytes; server-side copied %d bytes; uploaded %d bytes",
-backupSize, time.Since(startTime), deleteSize, copySize, uploadSize)
+logger.Infof("backed up %d bytes in %.3f seconds; deleted %d bytes; server-side copied %d bytes; uploaded %d bytes",
+backupSize, time.Since(startTime).Seconds(), deleteSize, copySize, uploadSize)

 return nil
 }

@@ -182,7 +182,8 @@ func (r *Restore) Run() error {
 }
 }

-logger.Infof("restored %d bytes from backup in %s; deleted %d bytes; downloaded %d bytes", backupSize, time.Since(startTime), deleteSize, downloadSize)
+logger.Infof("restored %d bytes from backup in %.3f seconds; deleted %d bytes; downloaded %d bytes",
+backupSize, time.Since(startTime).Seconds(), deleteSize, downloadSize)

 return nil
 }

@@ -187,8 +187,8 @@ func OpenTable(path string, flushCallback func(), prepareBlock PrepareBlockCallb

 var m TableMetrics
 tb.UpdateMetrics(&m)
-logger.Infof("table %q has been opened in %s; partsCount: %d; blocksCount: %d, itemsCount: %d; sizeBytes: %d",
-path, time.Since(startTime), m.PartsCount, m.BlocksCount, m.ItemsCount, m.SizeBytes)
+logger.Infof("table %q has been opened in %.3f seconds; partsCount: %d; blocksCount: %d, itemsCount: %d; sizeBytes: %d",
+path, time.Since(startTime).Seconds(), m.PartsCount, m.BlocksCount, m.ItemsCount, m.SizeBytes)

 tb.convertersWG.Add(1)
 go func() {

@@ -206,17 +206,17 @@ func (tb *Table) MustClose() {
 logger.Infof("waiting for raw items flusher to stop on %q...", tb.path)
 startTime := time.Now()
 tb.rawItemsFlusherWG.Wait()
-logger.Infof("raw items flusher stopped in %s on %q", time.Since(startTime), tb.path)
+logger.Infof("raw items flusher stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), tb.path)

 logger.Infof("waiting for converters to stop on %q...", tb.path)
 startTime = time.Now()
 tb.convertersWG.Wait()
-logger.Infof("converters stopped in %s on %q", time.Since(startTime), tb.path)
+logger.Infof("converters stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), tb.path)

 logger.Infof("waiting for part mergers to stop on %q...", tb.path)
 startTime = time.Now()
 tb.partMergersWG.Wait()
-logger.Infof("part mergers stopped in %s on %q", time.Since(startTime), tb.path)
+logger.Infof("part mergers stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), tb.path)

 logger.Infof("flushing inmemory parts to files on %q...", tb.path)
 startTime = time.Now()

@@ -242,7 +242,7 @@ func (tb *Table) MustClose() {
 if err := tb.mergePartsOptimal(pws, nil); err != nil {
 logger.Panicf("FATAL: cannot flush inmemory parts to files in %q: %s", tb.path, err)
 }
-logger.Infof("%d inmemory parts have been flushed to files in %s on %q", len(pws), time.Since(startTime), tb.path)
+logger.Infof("%d inmemory parts have been flushed to files in %.3f seconds on %q", len(pws), time.Since(startTime).Seconds(), tb.path)

 // Remove references to parts from the tb, so they may be eventually closed
 // after all the searches are done.

@@ -447,7 +447,7 @@ func (tb *Table) convertToV1280() {
 logger.Errorf("failed round 1 of background conversion of %q to v1.28.0 format: %s", tb.path, err)
 return
 }
-logger.Infof("finished round 1 of background conversion of %q to v1.28.0 format in %s", tb.path, time.Since(startTime))
+logger.Infof("finished round 1 of background conversion of %q to v1.28.0 format in %.3f seconds", tb.path, time.Since(startTime).Seconds())

 // The second round is needed in order to merge small blocks
 // with tag->metricIDs rows left after the first round.

@@ -460,7 +460,7 @@ func (tb *Table) convertToV1280() {
 return
 }
 }
-logger.Infof("finished round 2 of background conversion of %q to v1.28.0 format in %s", tb.path, time.Since(startTime))
+logger.Infof("finished round 2 of background conversion of %q to v1.28.0 format in %.3f seconds", tb.path, time.Since(startTime).Seconds())
 }

 if err := fs.WriteFileAtomically(flagFilePath, []byte("ok")); err != nil {

@@ -853,7 +853,8 @@ func (tb *Table) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isOuterP

 d := time.Since(startTime)
 if d > 10*time.Second {
-logger.Infof("merged %d items in %s at %d items/sec to %q; sizeBytes: %d", outItemsCount, d, int(float64(outItemsCount)/d.Seconds()), dstPartPath, newPSize)
+logger.Infof("merged %d items in %.3f seconds at %d items/sec to %q; sizeBytes: %d",
+outItemsCount, d.Seconds(), int(float64(outItemsCount)/d.Seconds()), dstPartPath, newPSize)
 }

 return nil

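The merge hunks also derive a throughput figure from the same duration. A small sketch of that calculation, assuming (as the condition above guarantees) that the rate is only reported for merges longer than ten seconds, so dividing by d.Seconds() is safe:

package main

import (
	"fmt"
	"time"
)

// reportMerge logs a merge the way the hunk above does: only when it took longer
// than 10 seconds, with the duration in seconds and a derived items/sec rate.
// The function and its parameters are illustrative, not the repository's API.
func reportMerge(outItemsCount uint64, startTime time.Time, dstPartPath string, sizeBytes uint64) {
	d := time.Since(startTime)
	if d > 10*time.Second {
		itemsPerSec := int(float64(outItemsCount) / d.Seconds()) // d > 10s here, so no division by zero
		fmt.Printf("merged %d items in %.3f seconds at %d items/sec to %q; sizeBytes: %d\n",
			outItemsCount, d.Seconds(), itemsPerSec, dstPartPath, sizeBytes)
	}
}

func main() {
	// Simulate a merge that started 12 seconds ago.
	startTime := time.Now().Add(-12 * time.Second)
	reportMerge(50000000, startTime, "/path/to/part", 123456789)
}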
@@ -1057,7 +1058,7 @@ func (tb *Table) CreateSnapshotAt(dstDir string) error {
 parentDir := filepath.Dir(dstDir)
 fs.MustSyncPath(parentDir)

-logger.Infof("created Table snapshot of %q at %q in %s", srcDir, dstDir, time.Since(startTime))
+logger.Infof("created Table snapshot of %q at %q in %.3f seconds", srcDir, dstDir, time.Since(startTime).Seconds())
 return nil
 }

@@ -613,22 +613,22 @@ func (pt *partition) MustClose() {
 logger.Infof("waiting for inmemory parts flusher to stop on %q...", pt.smallPartsPath)
 startTime := time.Now()
 pt.inmemoryPartsFlusherWG.Wait()
-logger.Infof("inmemory parts flusher stopped in %s on %q", time.Since(startTime), pt.smallPartsPath)
+logger.Infof("inmemory parts flusher stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath)

 logger.Infof("waiting for raw rows flusher to stop on %q...", pt.smallPartsPath)
 startTime = time.Now()
 pt.rawRowsFlusherWG.Wait()
-logger.Infof("raw rows flusher stopped in %s on %q", time.Since(startTime), pt.smallPartsPath)
+logger.Infof("raw rows flusher stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath)

 logger.Infof("waiting for small part mergers to stop on %q...", pt.smallPartsPath)
 startTime = time.Now()
 pt.smallPartsMergerWG.Wait()
-logger.Infof("small part mergers stopped in %s on %q", time.Since(startTime), pt.smallPartsPath)
+logger.Infof("small part mergers stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath)

 logger.Infof("waiting for big part mergers to stop on %q...", pt.bigPartsPath)
 startTime = time.Now()
 pt.bigPartsMergerWG.Wait()
-logger.Infof("big part mergers stopped in %s on %q", time.Since(startTime), pt.bigPartsPath)
+logger.Infof("big part mergers stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.bigPartsPath)

 logger.Infof("flushing inmemory parts to files on %q...", pt.smallPartsPath)
 startTime = time.Now()

@@ -654,7 +654,7 @@ func (pt *partition) MustClose() {
 if err := pt.mergePartsOptimal(pws); err != nil {
 logger.Panicf("FATAL: cannot flush %d inmemory parts to files on %q: %s", len(pws), pt.smallPartsPath, err)
 }
-logger.Infof("%d inmemory parts have been flushed to files in %s on %q", len(pws), time.Since(startTime), pt.smallPartsPath)
+logger.Infof("%d inmemory parts have been flushed to files in %.3f seconds on %q", len(pws), time.Since(startTime).Seconds(), pt.smallPartsPath)

 // Remove references to smallParts from the pt, so they may be eventually closed
 // after all the searches are done.

@@ -1167,7 +1167,8 @@ func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}) erro

 d := time.Since(startTime)
 if d > 10*time.Second {
-logger.Infof("merged %d rows in %s at %d rows/sec to %q; sizeBytes: %d", outRowsCount, d, int(float64(outRowsCount)/d.Seconds()), dstPartPath, newPSize)
+logger.Infof("merged %d rows in %.3f seconds at %d rows/sec to %q; sizeBytes: %d",
+outRowsCount, d.Seconds(), int(float64(outRowsCount)/d.Seconds()), dstPartPath, newPSize)
 }

 return nil

@@ -1359,8 +1360,7 @@ func openParts(pathPrefix1, pathPrefix2, path string) ([]*partWrapper, error) {
 mustCloseParts(pws)
 return nil, fmt.Errorf("cannot open part %q: %s", partPath, err)
 }
-d := time.Since(startTime)
-logger.Infof("opened part %q in %s", partPath, d)
+logger.Infof("opened part %q in %.3f seconds", partPath, time.Since(startTime).Seconds())

 pw := &partWrapper{
 p: p,

@@ -1407,7 +1407,8 @@ func (pt *partition) CreateSnapshotAt(smallPath, bigPath string) error {
 return fmt.Errorf("cannot create snapshot for %q: %s", pt.bigPartsPath, err)
 }

-logger.Infof("created partition snapshot of %q and %q at %q and %q in %s", pt.smallPartsPath, pt.bigPartsPath, smallPath, bigPath, time.Since(startTime))
+logger.Infof("created partition snapshot of %q and %q at %q and %q in %.3f seconds",
+pt.smallPartsPath, pt.bigPartsPath, smallPath, bigPath, time.Since(startTime).Seconds())
 return nil
 }

@@ -226,7 +226,7 @@ func (s *Storage) CreateSnapshot() (string, error) {
 fs.MustSyncPath(dstDir)
 fs.MustSyncPath(srcDir + "/snapshots")

-logger.Infof("created Storage snapshot for %q at %q in %s", srcDir, dstDir, time.Since(startTime))
+logger.Infof("created Storage snapshot for %q at %q in %.3f seconds", srcDir, dstDir, time.Since(startTime).Seconds())
 return snapshotName, nil
 }

@@ -271,7 +271,7 @@ func (s *Storage) DeleteSnapshot(snapshotName string) error {
 fs.MustRemoveAll(idbPath)
 fs.MustRemoveAll(snapshotPath)

-logger.Infof("deleted snapshot %q in %s", snapshotPath, time.Since(startTime))
+logger.Infof("deleted snapshot %q in %.3f seconds", snapshotPath, time.Since(startTime).Seconds())

 return nil
 }

@@ -575,7 +575,7 @@ func (s *Storage) mustLoadHourMetricIDs(hour uint64, name string) *hourMetricIDs
 byTenant[k] = m
 }

-logger.Infof("loaded %s from %q in %s; entriesCount: %d; sizeBytes: %d", name, path, time.Since(startTime), hmLen, srcOrigLen)
+logger.Infof("loaded %s from %q in %.3f seconds; entriesCount: %d; sizeBytes: %d", name, path, time.Since(startTime).Seconds(), hmLen, srcOrigLen)
 return &hourMetricIDs{
 m: m,
 byTenant: byTenant,

@@ -623,7 +623,7 @@ func (s *Storage) mustSaveHourMetricIDs(hm *hourMetricIDs, name string) {
 if err := ioutil.WriteFile(path, dst, 0644); err != nil {
 logger.Panicf("FATAL: cannot write %d bytes to %q: %s", len(dst), path, err)
 }
-logger.Infof("saved %s to %q in %s; entriesCount: %d; sizeBytes: %d", name, path, time.Since(startTime), hm.m.Len(), len(dst))
+logger.Infof("saved %s to %q in %.3f seconds; entriesCount: %d; sizeBytes: %d", name, path, time.Since(startTime).Seconds(), hm.m.Len(), len(dst))
 }

 func (s *Storage) mustLoadCache(info, name string, sizeBytes int) *workingsetcache.Cache {

@@ -633,8 +633,8 @@ func (s *Storage) mustLoadCache(info, name string, sizeBytes int) *workingsetcac
 c := workingsetcache.Load(path, sizeBytes, time.Hour)
 var cs fastcache.Stats
 c.UpdateStats(&cs)
-logger.Infof("loaded %s cache from %q in %s; entriesCount: %d; sizeBytes: %d",
-info, path, time.Since(startTime), cs.EntriesCount, cs.BytesSize)
+logger.Infof("loaded %s cache from %q in %.3f seconds; entriesCount: %d; sizeBytes: %d",
+info, path, time.Since(startTime).Seconds(), cs.EntriesCount, cs.BytesSize)
 return c
 }

@@ -648,8 +648,8 @@ func (s *Storage) mustSaveAndStopCache(c *workingsetcache.Cache, info, name stri
 var cs fastcache.Stats
 c.UpdateStats(&cs)
 c.Stop()
-logger.Infof("saved %s cache to %q in %s; entriesCount: %d; sizeBytes: %d",
-info, path, time.Since(startTime), cs.EntriesCount, cs.BytesSize)
+logger.Infof("saved %s cache to %q in %.3f seconds; entriesCount: %d; sizeBytes: %d",
+info, path, time.Since(startTime).Seconds(), cs.EntriesCount, cs.BytesSize)
 }

 func nextRetentionDuration(retentionMonths int) time.Duration {

@@ -170,7 +170,7 @@ func (tb *table) CreateSnapshot(snapshotName string) (string, string, error) {
 fs.MustSyncPath(filepath.Dir(dstSmallDir))
 fs.MustSyncPath(filepath.Dir(dstBigDir))

-logger.Infof("created table snapshot for %q at (%q, %q) in %s", tb.path, dstSmallDir, dstBigDir, time.Since(startTime))
+logger.Infof("created table snapshot for %q at (%q, %q) in %.3f seconds", tb.path, dstSmallDir, dstBigDir, time.Since(startTime).Seconds())
 return dstSmallDir, dstBigDir, nil
 }