lib/storage: reduce the unimportant logging during Storage start / stop

This should improve the visibility of potentially important logs.
Aliaksandr Valialkin 2023-05-16 15:14:18 -07:00
parent d330c7e6fc
commit 278278af95
No known key found for this signature in database
GPG key ID: A72BEC6CD3D0DED1
3 changed files with 13 additions and 44 deletions

View file

@@ -31,6 +31,7 @@ The following tip changes can be tested by building VictoriaMetrics components f
* FEATURE: do not execute the incoming request if it has been canceled by the client before the execution start. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4223).
* FEATURE: support time formats with timezones. For example, `2024-01-02+02:00` means `January 2, 2024` at `+02:00` time zone. See [these docs](https://docs.victoriametrics.com/#timestamp-formats).
* FEATURE: expose `process_*` metrics at `/metrics` page of all the VictoriaMetrics components under Windows OS. See [this pull request](https://github.com/VictoriaMetrics/metrics/pull/47).
+* FEATURE: reduce the amount of unimportant `INFO` logging during VictoriaMetrics startup / shutdown. This should improve the visibility of potentially important logs.
* FEATURE: upgrade base docker image (alpine) from 3.17.3 to 3.18.0. See [alpine 3.18.0 release notes](https://www.alpinelinux.org/posts/Alpine-3.18.0-released.html).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): support the ability to filter [consul_sd_configs](https://docs.victoriametrics.com/sd_configs.html#consul_sd_configs) targets in more optimal way via new `filter` option. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4183).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for [consulagent_sd_configs](https://docs.victoriametrics.com/sd_configs.html#consulagent_sd_configs). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3953).

View file

@@ -772,15 +772,10 @@ func incRefForParts(pws []*partWrapper) {
func (pt *partition) MustClose() {
close(pt.stopCh)
logger.Infof("waiting for service workers to stop on %q...", pt.smallPartsPath)
startTime := time.Now()
// Waiting for service workers to stop
pt.wg.Wait()
logger.Infof("service workers stopped in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath)
logger.Infof("flushing inmemory parts to files on %q...", pt.smallPartsPath)
startTime = time.Now()
pt.flushInmemoryRows()
logger.Infof("inmemory parts have been flushed to files in %.3f seconds on %q", time.Since(startTime).Seconds(), pt.smallPartsPath)
// Remove references from inmemoryParts, smallParts and bigParts, so they may be eventually closed
// after all the searches are done.
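For reference, the shutdown prologue of partition.MustClose then reads roughly as follows. This is a sketch reassembled from the hunk above, not the full function; the remainder of the body is unchanged and omitted.

func (pt *partition) MustClose() {
	close(pt.stopCh)

	// Waiting for service workers to stop
	pt.wg.Wait()

	pt.flushInmemoryRows()

	// Remove references from inmemoryParts, smallParts and bigParts, so they may be eventually closed
	// after all the searches are done.
	// ... (rest of the function unchanged)
}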

View file

@@ -190,9 +190,9 @@ func MustOpenStorage(path string, retentionMsecs int64, maxHourlySeries, maxDail
// Load caches.
mem := memory.Allowed()
s.tsidCache = s.mustLoadCache("MetricName->TSID", "metricName_tsid", getTSIDCacheSize())
s.metricIDCache = s.mustLoadCache("MetricID->TSID", "metricID_tsid", mem/16)
s.metricNameCache = s.mustLoadCache("MetricID->MetricName", "metricID_metricName", mem/10)
s.tsidCache = s.mustLoadCache("metricName_tsid", getTSIDCacheSize())
s.metricIDCache = s.mustLoadCache("metricID_tsid", mem/16)
s.metricNameCache = s.mustLoadCache("metricID_metricName", mem/10)
s.dateMetricIDCache = newDateMetricIDCache()
hour := fasttime.UnixHour()
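The call sites simply drop the human-readable label and keep the on-disk cache name, so the cache-loading block in MustOpenStorage becomes (a sketch assembled from the hunk above; getTSIDCacheSize, memory.Allowed and newDateMetricIDCache are the existing helpers referenced there):

	// Load caches.
	mem := memory.Allowed()
	s.tsidCache = s.mustLoadCache("metricName_tsid", getTSIDCacheSize())
	s.metricIDCache = s.mustLoadCache("metricID_tsid", mem/16)
	s.metricNameCache = s.mustLoadCache("metricID_metricName", mem/10)

	s.dateMetricIDCache = newDateMetricIDCache()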
@@ -762,7 +762,7 @@ func (s *Storage) resetAndSaveTSIDCache() {
// from inconsistent behaviour after possible unclean shutdown.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1347
s.tsidCache.Reset()
s.mustSaveCache(s.tsidCache, "MetricName->TSID", "metricName_tsid")
s.mustSaveCache(s.tsidCache, "metricName_tsid")
}
// MustClose closes the storage.
@@ -780,11 +780,11 @@ func (s *Storage) MustClose() {
s.idb().MustClose()
// Save caches.
-s.mustSaveCache(s.tsidCache, "MetricName->TSID", "metricName_tsid")
+s.mustSaveCache(s.tsidCache, "metricName_tsid")
s.tsidCache.Stop()
-s.mustSaveCache(s.metricIDCache, "MetricID->TSID", "metricID_tsid")
+s.mustSaveCache(s.metricIDCache, "metricID_tsid")
s.metricIDCache.Stop()
-s.mustSaveCache(s.metricNameCache, "MetricID->MetricName", "metricID_metricName")
+s.mustSaveCache(s.metricNameCache, "metricID_metricName")
s.metricNameCache.Stop()
hmCurr := s.currHourMetricIDs.Load().(*hourMetricIDs)
@@ -814,8 +814,6 @@ func (s *Storage) mustLoadNextDayMetricIDs(date uint64) *byDateMetricIDEntry {
}
name := "next_day_metric_ids"
path := filepath.Join(s.cachePath, name)
logger.Infof("loading %s from %q...", name, path)
startTime := time.Now()
if !fs.IsPathExist(path) {
logger.Infof("nothing to load from %q", path)
return e
@@ -824,7 +822,6 @@ func (s *Storage) mustLoadNextDayMetricIDs(date uint64) *byDateMetricIDEntry {
if err != nil {
logger.Panicf("FATAL: cannot read %s: %s", path, err)
}
-srcOrigLen := len(src)
if len(src) < 16 {
logger.Errorf("discarding %s, since it has broken header; got %d bytes; want %d bytes", path, len(src), 16)
return e
@@ -849,7 +846,6 @@ func (s *Storage) mustLoadNextDayMetricIDs(date uint64) *byDateMetricIDEntry {
return e
}
e.v = *m
logger.Infof("loaded %s from %q in %.3f seconds; entriesCount: %d; sizeBytes: %d", name, path, time.Since(startTime).Seconds(), m.Len(), srcOrigLen)
return e
}
@@ -858,8 +854,6 @@ func (s *Storage) mustLoadHourMetricIDs(hour uint64, name string) *hourMetricIDs
hour: hour,
}
path := filepath.Join(s.cachePath, name)
logger.Infof("loading %s from %q...", name, path)
startTime := time.Now()
if !fs.IsPathExist(path) {
logger.Infof("nothing to load from %q", path)
return hm
@@ -868,7 +862,6 @@ func (s *Storage) mustLoadHourMetricIDs(hour uint64, name string) *hourMetricIDs
if err != nil {
logger.Panicf("FATAL: cannot read %s: %s", path, err)
}
-srcOrigLen := len(src)
if len(src) < 16 {
logger.Errorf("discarding %s, since it has broken header; got %d bytes; want %d bytes", path, len(src), 16)
return hm
@@ -893,15 +886,12 @@ func (s *Storage) mustLoadHourMetricIDs(hour uint64, name string) *hourMetricIDs
return hm
}
hm.m = m
logger.Infof("loaded %s from %q in %.3f seconds; entriesCount: %d; sizeBytes: %d", name, path, time.Since(startTime).Seconds(), m.Len(), srcOrigLen)
return hm
}
func (s *Storage) mustSaveNextDayMetricIDs(e *byDateMetricIDEntry) {
name := "next_day_metric_ids"
path := filepath.Join(s.cachePath, name)
logger.Infof("saving %s to %q...", name, path)
startTime := time.Now()
dst := make([]byte, 0, e.v.Len()*8+16)
// Marshal header
@@ -913,13 +903,10 @@ func (s *Storage) mustSaveNextDayMetricIDs(e *byDateMetricIDEntry) {
if err := os.WriteFile(path, dst, 0644); err != nil {
logger.Panicf("FATAL: cannot write %d bytes to %q: %s", len(dst), path, err)
}
logger.Infof("saved %s to %q in %.3f seconds; entriesCount: %d; sizeBytes: %d", name, path, time.Since(startTime).Seconds(), e.v.Len(), len(dst))
}
func (s *Storage) mustSaveHourMetricIDs(hm *hourMetricIDs, name string) {
path := filepath.Join(s.cachePath, name)
logger.Infof("saving %s to %q...", name, path)
startTime := time.Now()
dst := make([]byte, 0, hm.m.Len()*8+24)
// Marshal header
@@ -931,7 +918,6 @@ func (s *Storage) mustSaveHourMetricIDs(hm *hourMetricIDs, name string) {
if err := os.WriteFile(path, dst, 0644); err != nil {
logger.Panicf("FATAL: cannot write %d bytes to %q: %s", len(dst), path, err)
}
logger.Infof("saved %s to %q in %.3f seconds; entriesCount: %d; sizeBytes: %d", name, path, time.Since(startTime).Seconds(), hm.m.Len(), len(dst))
}
func unmarshalUint64Set(src []byte) (*uint64set.Set, []byte, error) {
@@ -994,32 +980,19 @@ func loadMinTimestampForCompositeIndex(path string) (int64, error) {
return encoding.UnmarshalInt64(data), nil
}
-func (s *Storage) mustLoadCache(info, name string, sizeBytes int) *workingsetcache.Cache {
+func (s *Storage) mustLoadCache(name string, sizeBytes int) *workingsetcache.Cache {
path := filepath.Join(s.cachePath, name)
logger.Infof("loading %s cache from %q...", info, path)
startTime := time.Now()
c := workingsetcache.Load(path, sizeBytes)
var cs fastcache.Stats
c.UpdateStats(&cs)
logger.Infof("loaded %s cache from %q in %.3f seconds; entriesCount: %d; sizeBytes: %d",
info, path, time.Since(startTime).Seconds(), cs.EntriesCount, cs.BytesSize)
return c
return workingsetcache.Load(path, sizeBytes)
}
-func (s *Storage) mustSaveCache(c *workingsetcache.Cache, info, name string) {
+func (s *Storage) mustSaveCache(c *workingsetcache.Cache, name string) {
saveCacheLock.Lock()
defer saveCacheLock.Unlock()
path := filepath.Join(s.cachePath, name)
logger.Infof("saving %s cache to %q...", info, path)
startTime := time.Now()
if err := c.Save(path); err != nil {
logger.Panicf("FATAL: cannot save %s cache to %q: %s", info, path, err)
logger.Panicf("FATAL: cannot save cache to %q: %s", path, err)
}
-var cs fastcache.Stats
-c.UpdateStats(&cs)
-logger.Infof("saved %s cache to %q in %.3f seconds; entriesCount: %d; sizeBytes: %d",
-	info, path, time.Since(startTime).Seconds(), cs.EntriesCount, cs.BytesSize)
}
// saveCacheLock prevents from data races when multiple concurrent goroutines save the same cache.
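Taken together, the two cache helpers shrink to the following (a sketch assembled from the hunks above; error handling and the saveCacheLock guard are kept, only the progress/timing logging and the now-unused info label are gone):

func (s *Storage) mustLoadCache(name string, sizeBytes int) *workingsetcache.Cache {
	path := filepath.Join(s.cachePath, name)
	return workingsetcache.Load(path, sizeBytes)
}

func (s *Storage) mustSaveCache(c *workingsetcache.Cache, name string) {
	saveCacheLock.Lock()
	defer saveCacheLock.Unlock()
	path := filepath.Join(s.cachePath, name)
	if err := c.Save(path); err != nil {
		logger.Panicf("FATAL: cannot save cache to %q: %s", path, err)
	}
}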