Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2024-11-21 14:44:00 +00:00
app/vmstorage: expose proper types for storage metrics when -metrics.exposeMetadata command-line flag is set

This is a follow-up for 326a77c697

Parent: bfa73ebdf3
Commit: 7fc2bd0412

7 changed files with 206 additions and 463 deletions
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"flag"
 	"fmt"
+	"io"
 	"net/http"
 	"strings"
 	"sync"
@@ -123,7 +124,10 @@ func Init(resetCacheIfNeeded func(mrs []storage.MetricRow)) {
 		*DataPath, time.Since(startTime).Seconds(), partsCount, blocksCount, rowsCount, sizeBytes)

 	// register storage metrics
-	storageMetrics = newStorageMetrics(Storage)
+	storageMetrics = metrics.NewSet()
+	storageMetrics.RegisterMetricsWriter(func(w io.Writer) {
+		writeStorageMetrics(w, strg)
+	})
 	metrics.RegisterSet(storageMetrics)
 }
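The hunk above replaces dozens of per-metric gauge callbacks with a single writer callback that is invoked on every /metrics scrape and emits all storage metrics in one pass. A minimal, self-contained sketch of the same pattern outside VictoriaMetrics (the metric names and the :8428 address are illustrative, not part of this commit):

package main

import (
	"io"
	"log"
	"net/http"

	"github.com/VictoriaMetrics/metrics"
)

// writeAppMetrics is called on every scrape and writes metrics in
// Prometheus text exposition format. The metric names are made up.
func writeAppMetrics(w io.Writer) {
	metrics.WriteGaugeUint64(w, `app_queue_size`, 42)
	metrics.WriteCounterUint64(w, `app_items_processed_total`, 12345)
}

func main() {
	set := metrics.NewSet()
	set.RegisterMetricsWriter(writeAppMetrics)
	metrics.RegisterSet(set)

	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		metrics.WritePrometheus(w, false)
	})
	log.Fatal(http.ListenAndServe(":8428", nil))
}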
@@ -438,501 +442,194 @@ var (
 	snapshotsDeleteAllErrorsTotal = metrics.NewCounter(`vm_http_request_errors_total{path="/snapshot/delete_all"}`)
 )

-func newStorageMetrics(strg *storage.Storage) *metrics.Set {
-	storageMetrics := metrics.NewSet()
+func writeStorageMetrics(w io.Writer, strg *storage.Storage) {
+	var m storage.Metrics
+	strg.UpdateMetrics(&m)
+	tm := &m.TableMetrics
+	idbm := &m.IndexDBMetrics

-	mCache := &storage.Metrics{}
-	var mCacheLock sync.Mutex
-	var lastUpdateTime time.Time
+	metrics.WriteGaugeUint64(w, fmt.Sprintf(`vm_free_disk_space_bytes{path=%q}`, *DataPath), fs.MustGetFreeSpace(*DataPath))
+	metrics.WriteGaugeUint64(w, fmt.Sprintf(`vm_free_disk_space_limit_bytes{path=%q}`, *DataPath), uint64(minFreeDiskSpaceBytes.N))

-	m := func() *storage.Metrics {
-		mCacheLock.Lock()
-		defer mCacheLock.Unlock()
-		if time.Since(lastUpdateTime) < time.Second {
-			return mCache
-		}
-		var mc storage.Metrics
-		strg.UpdateMetrics(&mc)
-		mCache = &mc
-		lastUpdateTime = time.Now()
-		return mCache
-	}
-	tm := func() *storage.TableMetrics {
-		sm := m()
-		return &sm.TableMetrics
-	}
-	idbm := func() *storage.IndexDBMetrics {
-		sm := m()
-		return &sm.IndexDBMetrics
-	}

-	storageMetrics.NewGauge(fmt.Sprintf(`vm_free_disk_space_bytes{path=%q}`, *DataPath), func() float64 {
-		return float64(fs.MustGetFreeSpace(*DataPath))
-	})
-	storageMetrics.NewGauge(fmt.Sprintf(`vm_free_disk_space_limit_bytes{path=%q}`, *DataPath), func() float64 {
-		return float64(minFreeDiskSpaceBytes.N)
-	})
-	storageMetrics.NewGauge(fmt.Sprintf(`vm_storage_is_read_only{path=%q}`, *DataPath), func() float64 {
+	isReadOnly := 0
 	if strg.IsReadOnly() {
-		return 1
+		isReadOnly = 1
 	}
-		return 0
-	})
+	metrics.WriteGaugeUint64(w, fmt.Sprintf(`vm_storage_is_read_only{path=%q}`, *DataPath), uint64(isReadOnly))

-	storageMetrics.NewGauge(`vm_active_merges{type="storage/inmemory"}`, func() float64 {
-		return float64(tm().ActiveInmemoryMerges)
-	})
-	storageMetrics.NewGauge(`vm_active_merges{type="storage/small"}`, func() float64 {
-		return float64(tm().ActiveSmallMerges)
-	})
-	storageMetrics.NewGauge(`vm_active_merges{type="storage/big"}`, func() float64 {
-		return float64(tm().ActiveBigMerges)
-	})
-	storageMetrics.NewGauge(`vm_active_merges{type="indexdb/inmemory"}`, func() float64 {
-		return float64(idbm().ActiveInmemoryMerges)
-	})
-	storageMetrics.NewGauge(`vm_active_merges{type="indexdb/file"}`, func() float64 {
-		return float64(idbm().ActiveFileMerges)
-	})
+	metrics.WriteGaugeUint64(w, `vm_active_merges{type="storage/inmemory"}`, tm.ActiveInmemoryMerges)
+	metrics.WriteGaugeUint64(w, `vm_active_merges{type="storage/small"}`, tm.ActiveSmallMerges)
+	metrics.WriteGaugeUint64(w, `vm_active_merges{type="storage/big"}`, tm.ActiveBigMerges)
+	metrics.WriteGaugeUint64(w, `vm_active_merges{type="indexdb/inmemory"}`, idbm.ActiveInmemoryMerges)
+	metrics.WriteGaugeUint64(w, `vm_active_merges{type="indexdb/file"}`, idbm.ActiveFileMerges)

-	storageMetrics.NewGauge(`vm_merges_total{type="storage/inmemory"}`, func() float64 {
-		return float64(tm().InmemoryMergesCount)
-	})
-	storageMetrics.NewGauge(`vm_merges_total{type="storage/small"}`, func() float64 {
-		return float64(tm().SmallMergesCount)
-	})
-	storageMetrics.NewGauge(`vm_merges_total{type="storage/big"}`, func() float64 {
-		return float64(tm().BigMergesCount)
-	})
-	storageMetrics.NewGauge(`vm_merges_total{type="indexdb/inmemory"}`, func() float64 {
-		return float64(idbm().InmemoryMergesCount)
-	})
-	storageMetrics.NewGauge(`vm_merges_total{type="indexdb/file"}`, func() float64 {
-		return float64(idbm().FileMergesCount)
-	})
+	metrics.WriteCounterUint64(w, `vm_merges_total{type="storage/inmemory"}`, tm.InmemoryMergesCount)
+	metrics.WriteCounterUint64(w, `vm_merges_total{type="storage/small"}`, tm.SmallMergesCount)
+	metrics.WriteCounterUint64(w, `vm_merges_total{type="storage/big"}`, tm.BigMergesCount)
+	metrics.WriteCounterUint64(w, `vm_merges_total{type="indexdb/inmemory"}`, idbm.InmemoryMergesCount)
+	metrics.WriteCounterUint64(w, `vm_merges_total{type="indexdb/file"}`, idbm.FileMergesCount)

-	storageMetrics.NewGauge(`vm_rows_merged_total{type="storage/inmemory"}`, func() float64 {
-		return float64(tm().InmemoryRowsMerged)
-	})
-	storageMetrics.NewGauge(`vm_rows_merged_total{type="storage/small"}`, func() float64 {
-		return float64(tm().SmallRowsMerged)
-	})
-	storageMetrics.NewGauge(`vm_rows_merged_total{type="storage/big"}`, func() float64 {
-		return float64(tm().BigRowsMerged)
-	})
-	storageMetrics.NewGauge(`vm_rows_merged_total{type="indexdb/inmemory"}`, func() float64 {
-		return float64(idbm().InmemoryItemsMerged)
-	})
-	storageMetrics.NewGauge(`vm_rows_merged_total{type="indexdb/file"}`, func() float64 {
-		return float64(idbm().FileItemsMerged)
-	})
+	metrics.WriteCounterUint64(w, `vm_rows_merged_total{type="storage/inmemory"}`, tm.InmemoryRowsMerged)
+	metrics.WriteCounterUint64(w, `vm_rows_merged_total{type="storage/small"}`, tm.SmallRowsMerged)
+	metrics.WriteCounterUint64(w, `vm_rows_merged_total{type="storage/big"}`, tm.BigRowsMerged)
+	metrics.WriteCounterUint64(w, `vm_rows_merged_total{type="indexdb/inmemory"}`, idbm.InmemoryItemsMerged)
+	metrics.WriteCounterUint64(w, `vm_rows_merged_total{type="indexdb/file"}`, idbm.FileItemsMerged)

-	storageMetrics.NewGauge(`vm_rows_deleted_total{type="storage/inmemory"}`, func() float64 {
-		return float64(tm().InmemoryRowsDeleted)
-	})
-	storageMetrics.NewGauge(`vm_rows_deleted_total{type="storage/small"}`, func() float64 {
-		return float64(tm().SmallRowsDeleted)
-	})
-	storageMetrics.NewGauge(`vm_rows_deleted_total{type="storage/big"}`, func() float64 {
-		return float64(tm().BigRowsDeleted)
-	})
+	metrics.WriteCounterUint64(w, `vm_rows_deleted_total{type="storage/inmemory"}`, tm.InmemoryRowsDeleted)
+	metrics.WriteCounterUint64(w, `vm_rows_deleted_total{type="storage/small"}`, tm.SmallRowsDeleted)
+	metrics.WriteCounterUint64(w, `vm_rows_deleted_total{type="storage/big"}`, tm.BigRowsDeleted)

-	storageMetrics.NewGauge(`vm_part_references{type="storage/inmemory"}`, func() float64 {
-		return float64(tm().InmemoryPartsRefCount)
-	})
-	storageMetrics.NewGauge(`vm_part_references{type="storage/small"}`, func() float64 {
-		return float64(tm().SmallPartsRefCount)
-	})
-	storageMetrics.NewGauge(`vm_part_references{type="storage/big"}`, func() float64 {
-		return float64(tm().BigPartsRefCount)
-	})
-	storageMetrics.NewGauge(`vm_partition_references{type="storage"}`, func() float64 {
-		return float64(tm().PartitionsRefCount)
-	})
-	storageMetrics.NewGauge(`vm_object_references{type="indexdb"}`, func() float64 {
-		return float64(idbm().IndexDBRefCount)
-	})
-	storageMetrics.NewGauge(`vm_part_references{type="indexdb"}`, func() float64 {
-		return float64(idbm().PartsRefCount)
-	})
+	metrics.WriteGaugeUint64(w, `vm_part_references{type="storage/inmemory"}`, tm.InmemoryPartsRefCount)
+	metrics.WriteGaugeUint64(w, `vm_part_references{type="storage/small"}`, tm.SmallPartsRefCount)
+	metrics.WriteGaugeUint64(w, `vm_part_references{type="storage/big"}`, tm.BigPartsRefCount)
+	metrics.WriteGaugeUint64(w, `vm_partition_references{type="storage"}`, tm.PartitionsRefCount)
+	metrics.WriteGaugeUint64(w, `vm_object_references{type="indexdb"}`, idbm.IndexDBRefCount)
+	metrics.WriteGaugeUint64(w, `vm_part_references{type="indexdb"}`, idbm.PartsRefCount)

-	storageMetrics.NewGauge(`vm_missing_tsids_for_metric_id_total`, func() float64 {
-		return float64(idbm().MissingTSIDsForMetricID)
-	})
-	storageMetrics.NewGauge(`vm_index_blocks_with_metric_ids_processed_total`, func() float64 {
-		return float64(idbm().IndexBlocksWithMetricIDsProcessed)
-	})
-	storageMetrics.NewGauge(`vm_index_blocks_with_metric_ids_incorrect_order_total`, func() float64 {
-		return float64(idbm().IndexBlocksWithMetricIDsIncorrectOrder)
-	})
-	storageMetrics.NewGauge(`vm_composite_index_min_timestamp`, func() float64 {
-		return float64(idbm().MinTimestampForCompositeIndex) / 1e3
-	})
-	storageMetrics.NewGauge(`vm_composite_filter_success_conversions_total`, func() float64 {
-		return float64(idbm().CompositeFilterSuccessConversions)
-	})
-	storageMetrics.NewGauge(`vm_composite_filter_missing_conversions_total`, func() float64 {
-		return float64(idbm().CompositeFilterMissingConversions)
-	})
+	metrics.WriteCounterUint64(w, `vm_missing_tsids_for_metric_id_total`, idbm.MissingTSIDsForMetricID)
+	metrics.WriteCounterUint64(w, `vm_index_blocks_with_metric_ids_processed_total`, idbm.IndexBlocksWithMetricIDsProcessed)
+	metrics.WriteCounterUint64(w, `vm_index_blocks_with_metric_ids_incorrect_order_total`, idbm.IndexBlocksWithMetricIDsIncorrectOrder)
+	metrics.WriteGaugeUint64(w, `vm_composite_index_min_timestamp`, idbm.MinTimestampForCompositeIndex/1e3)
+	metrics.WriteCounterUint64(w, `vm_composite_filter_success_conversions_total`, idbm.CompositeFilterSuccessConversions)
+	metrics.WriteCounterUint64(w, `vm_composite_filter_missing_conversions_total`, idbm.CompositeFilterMissingConversions)

-	storageMetrics.NewGauge(`vm_assisted_merges_total{type="storage/inmemory"}`, func() float64 {
-		return float64(tm().InmemoryAssistedMerges)
-	})
-	storageMetrics.NewGauge(`vm_assisted_merges_total{type="storage/small"}`, func() float64 {
-		return float64(tm().SmallAssistedMerges)
-	})
+	metrics.WriteCounterUint64(w, `vm_assisted_merges_total{type="storage/inmemory"}`, tm.InmemoryAssistedMerges)
+	metrics.WriteCounterUint64(w, `vm_assisted_merges_total{type="storage/small"}`, tm.SmallAssistedMerges)

-	storageMetrics.NewGauge(`vm_assisted_merges_total{type="indexdb/inmemory"}`, func() float64 {
-		return float64(idbm().InmemoryAssistedMerges)
-	})
-	storageMetrics.NewGauge(`vm_assisted_merges_total{type="indexdb/file"}`, func() float64 {
-		return float64(idbm().FileAssistedMerges)
-	})
+	metrics.WriteCounterUint64(w, `vm_assisted_merges_total{type="indexdb/inmemory"}`, idbm.InmemoryAssistedMerges)
+	metrics.WriteCounterUint64(w, `vm_assisted_merges_total{type="indexdb/file"}`, idbm.FileAssistedMerges)

-	storageMetrics.NewGauge(`vm_indexdb_items_added_total`, func() float64 {
-		return float64(idbm().ItemsAdded)
-	})
-	storageMetrics.NewGauge(`vm_indexdb_items_added_size_bytes_total`, func() float64 {
-		return float64(idbm().ItemsAddedSizeBytes)
-	})
+	metrics.WriteCounterUint64(w, `vm_indexdb_items_added_total`, idbm.ItemsAdded)
+	metrics.WriteCounterUint64(w, `vm_indexdb_items_added_size_bytes_total`, idbm.ItemsAddedSizeBytes)

-	storageMetrics.NewGauge(`vm_pending_rows{type="storage"}`, func() float64 {
-		return float64(tm().PendingRows)
-	})
-	storageMetrics.NewGauge(`vm_pending_rows{type="indexdb"}`, func() float64 {
-		return float64(idbm().PendingItems)
-	})
+	metrics.WriteGaugeUint64(w, `vm_pending_rows{type="storage"}`, tm.PendingRows)
+	metrics.WriteGaugeUint64(w, `vm_pending_rows{type="indexdb"}`, idbm.PendingItems)

-	storageMetrics.NewGauge(`vm_parts{type="storage/inmemory"}`, func() float64 {
-		return float64(tm().InmemoryPartsCount)
-	})
-	storageMetrics.NewGauge(`vm_parts{type="storage/small"}`, func() float64 {
-		return float64(tm().SmallPartsCount)
-	})
-	storageMetrics.NewGauge(`vm_parts{type="storage/big"}`, func() float64 {
-		return float64(tm().BigPartsCount)
-	})
-	storageMetrics.NewGauge(`vm_parts{type="indexdb/inmemory"}`, func() float64 {
-		return float64(idbm().InmemoryPartsCount)
-	})
-	storageMetrics.NewGauge(`vm_parts{type="indexdb/file"}`, func() float64 {
-		return float64(idbm().FilePartsCount)
-	})
+	metrics.WriteGaugeUint64(w, `vm_parts{type="storage/inmemory"}`, tm.InmemoryPartsCount)
+	metrics.WriteGaugeUint64(w, `vm_parts{type="storage/small"}`, tm.SmallPartsCount)
+	metrics.WriteGaugeUint64(w, `vm_parts{type="storage/big"}`, tm.BigPartsCount)
+	metrics.WriteGaugeUint64(w, `vm_parts{type="indexdb/inmemory"}`, idbm.InmemoryPartsCount)
+	metrics.WriteGaugeUint64(w, `vm_parts{type="indexdb/file"}`, idbm.FilePartsCount)

-	storageMetrics.NewGauge(`vm_blocks{type="storage/inmemory"}`, func() float64 {
-		return float64(tm().InmemoryBlocksCount)
-	})
-	storageMetrics.NewGauge(`vm_blocks{type="storage/small"}`, func() float64 {
-		return float64(tm().SmallBlocksCount)
-	})
-	storageMetrics.NewGauge(`vm_blocks{type="storage/big"}`, func() float64 {
-		return float64(tm().BigBlocksCount)
-	})
-	storageMetrics.NewGauge(`vm_blocks{type="indexdb/inmemory"}`, func() float64 {
-		return float64(idbm().InmemoryBlocksCount)
-	})
-	storageMetrics.NewGauge(`vm_blocks{type="indexdb/file"}`, func() float64 {
-		return float64(idbm().FileBlocksCount)
-	})
+	metrics.WriteGaugeUint64(w, `vm_blocks{type="storage/inmemory"}`, tm.InmemoryBlocksCount)
+	metrics.WriteGaugeUint64(w, `vm_blocks{type="storage/small"}`, tm.SmallBlocksCount)
+	metrics.WriteGaugeUint64(w, `vm_blocks{type="storage/big"}`, tm.BigBlocksCount)
+	metrics.WriteGaugeUint64(w, `vm_blocks{type="indexdb/inmemory"}`, idbm.InmemoryBlocksCount)
+	metrics.WriteGaugeUint64(w, `vm_blocks{type="indexdb/file"}`, idbm.FileBlocksCount)

-	storageMetrics.NewGauge(`vm_data_size_bytes{type="storage/inmemory"}`, func() float64 {
-		return float64(tm().InmemorySizeBytes)
-	})
-	storageMetrics.NewGauge(`vm_data_size_bytes{type="storage/small"}`, func() float64 {
-		return float64(tm().SmallSizeBytes)
-	})
-	storageMetrics.NewGauge(`vm_data_size_bytes{type="storage/big"}`, func() float64 {
-		return float64(tm().BigSizeBytes)
-	})
-	storageMetrics.NewGauge(`vm_data_size_bytes{type="indexdb/inmemory"}`, func() float64 {
-		return float64(idbm().InmemorySizeBytes)
-	})
-	storageMetrics.NewGauge(`vm_data_size_bytes{type="indexdb/file"}`, func() float64 {
-		return float64(idbm().FileSizeBytes)
-	})
+	metrics.WriteGaugeUint64(w, `vm_data_size_bytes{type="storage/inmemory"}`, tm.InmemorySizeBytes)
+	metrics.WriteGaugeUint64(w, `vm_data_size_bytes{type="storage/small"}`, tm.SmallSizeBytes)
+	metrics.WriteGaugeUint64(w, `vm_data_size_bytes{type="storage/big"}`, tm.BigSizeBytes)
+	metrics.WriteGaugeUint64(w, `vm_data_size_bytes{type="indexdb/inmemory"}`, idbm.InmemorySizeBytes)
+	metrics.WriteGaugeUint64(w, `vm_data_size_bytes{type="indexdb/file"}`, idbm.FileSizeBytes)

-	storageMetrics.NewGauge(`vm_rows_added_to_storage_total`, func() float64 {
-		return float64(m().RowsAddedTotal)
-	})
-	storageMetrics.NewGauge(`vm_deduplicated_samples_total{type="merge"}`, func() float64 {
-		return float64(m().DedupsDuringMerge)
-	})
+	metrics.WriteCounterUint64(w, `vm_rows_added_to_storage_total`, m.RowsAddedTotal)
+	metrics.WriteCounterUint64(w, `vm_deduplicated_samples_total{type="merge"}`, m.DedupsDuringMerge)

-	storageMetrics.NewGauge(`vm_rows_ignored_total{reason="big_timestamp"}`, func() float64 {
-		return float64(m().TooBigTimestampRows)
-	})
-	storageMetrics.NewGauge(`vm_rows_ignored_total{reason="small_timestamp"}`, func() float64 {
-		return float64(m().TooSmallTimestampRows)
-	})
+	metrics.WriteCounterUint64(w, `vm_rows_ignored_total{reason="big_timestamp"}`, m.TooBigTimestampRows)
+	metrics.WriteCounterUint64(w, `vm_rows_ignored_total{reason="small_timestamp"}`, m.TooSmallTimestampRows)

-	storageMetrics.NewGauge(`vm_timeseries_repopulated_total`, func() float64 {
-		return float64(m().TimeseriesRepopulated)
-	})
-	storageMetrics.NewGauge(`vm_timeseries_precreated_total`, func() float64 {
-		return float64(m().TimeseriesPreCreated)
-	})
-	storageMetrics.NewGauge(`vm_new_timeseries_created_total`, func() float64 {
-		return float64(m().NewTimeseriesCreated)
-	})
-	storageMetrics.NewGauge(`vm_slow_row_inserts_total`, func() float64 {
-		return float64(m().SlowRowInserts)
-	})
-	storageMetrics.NewGauge(`vm_slow_per_day_index_inserts_total`, func() float64 {
-		return float64(m().SlowPerDayIndexInserts)
-	})
-	storageMetrics.NewGauge(`vm_slow_metric_name_loads_total`, func() float64 {
-		return float64(m().SlowMetricNameLoads)
-	})
+	metrics.WriteCounterUint64(w, `vm_timeseries_repopulated_total`, m.TimeseriesRepopulated)
+	metrics.WriteCounterUint64(w, `vm_timeseries_precreated_total`, m.TimeseriesPreCreated)
+	metrics.WriteCounterUint64(w, `vm_new_timeseries_created_total`, m.NewTimeseriesCreated)
+	metrics.WriteCounterUint64(w, `vm_slow_row_inserts_total`, m.SlowRowInserts)
+	metrics.WriteCounterUint64(w, `vm_slow_per_day_index_inserts_total`, m.SlowPerDayIndexInserts)
+	metrics.WriteCounterUint64(w, `vm_slow_metric_name_loads_total`, m.SlowMetricNameLoads)

 	if *maxHourlySeries > 0 {
-		storageMetrics.NewGauge(`vm_hourly_series_limit_current_series`, func() float64 {
-			return float64(m().HourlySeriesLimitCurrentSeries)
-		})
-		storageMetrics.NewGauge(`vm_hourly_series_limit_max_series`, func() float64 {
-			return float64(m().HourlySeriesLimitMaxSeries)
-		})
-		storageMetrics.NewGauge(`vm_hourly_series_limit_rows_dropped_total`, func() float64 {
-			return float64(m().HourlySeriesLimitRowsDropped)
-		})
+		metrics.WriteGaugeUint64(w, `vm_hourly_series_limit_current_series`, m.HourlySeriesLimitCurrentSeries)
+		metrics.WriteGaugeUint64(w, `vm_hourly_series_limit_max_series`, m.HourlySeriesLimitMaxSeries)
+		metrics.WriteCounterUint64(w, `vm_hourly_series_limit_rows_dropped_total`, m.HourlySeriesLimitRowsDropped)
 	}

 	if *maxDailySeries > 0 {
-		storageMetrics.NewGauge(`vm_daily_series_limit_current_series`, func() float64 {
-			return float64(m().DailySeriesLimitCurrentSeries)
-		})
-		storageMetrics.NewGauge(`vm_daily_series_limit_max_series`, func() float64 {
-			return float64(m().DailySeriesLimitMaxSeries)
-		})
-		storageMetrics.NewGauge(`vm_daily_series_limit_rows_dropped_total`, func() float64 {
-			return float64(m().DailySeriesLimitRowsDropped)
-		})
+		metrics.WriteGaugeUint64(w, `vm_daily_series_limit_current_series`, m.DailySeriesLimitCurrentSeries)
+		metrics.WriteGaugeUint64(w, `vm_daily_series_limit_max_series`, m.DailySeriesLimitMaxSeries)
+		metrics.WriteCounterUint64(w, `vm_daily_series_limit_rows_dropped_total`, m.DailySeriesLimitRowsDropped)
 	}

-	storageMetrics.NewGauge(`vm_timestamps_blocks_merged_total`, func() float64 {
-		return float64(m().TimestampsBlocksMerged)
-	})
-	storageMetrics.NewGauge(`vm_timestamps_bytes_saved_total`, func() float64 {
-		return float64(m().TimestampsBytesSaved)
-	})
+	metrics.WriteCounterUint64(w, `vm_timestamps_blocks_merged_total`, m.TimestampsBlocksMerged)
+	metrics.WriteCounterUint64(w, `vm_timestamps_bytes_saved_total`, m.TimestampsBytesSaved)

-	storageMetrics.NewGauge(`vm_rows{type="storage/inmemory"}`, func() float64 {
-		return float64(tm().InmemoryRowsCount)
-	})
-	storageMetrics.NewGauge(`vm_rows{type="storage/small"}`, func() float64 {
-		return float64(tm().SmallRowsCount)
-	})
-	storageMetrics.NewGauge(`vm_rows{type="storage/big"}`, func() float64 {
-		return float64(tm().BigRowsCount)
-	})
-	storageMetrics.NewGauge(`vm_rows{type="indexdb/inmemory"}`, func() float64 {
-		return float64(idbm().InmemoryItemsCount)
-	})
-	storageMetrics.NewGauge(`vm_rows{type="indexdb/file"}`, func() float64 {
-		return float64(idbm().FileItemsCount)
-	})
+	metrics.WriteGaugeUint64(w, `vm_rows{type="storage/inmemory"}`, tm.InmemoryRowsCount)
+	metrics.WriteGaugeUint64(w, `vm_rows{type="storage/small"}`, tm.SmallRowsCount)
+	metrics.WriteGaugeUint64(w, `vm_rows{type="storage/big"}`, tm.BigRowsCount)
+	metrics.WriteGaugeUint64(w, `vm_rows{type="indexdb/inmemory"}`, idbm.InmemoryItemsCount)
+	metrics.WriteGaugeUint64(w, `vm_rows{type="indexdb/file"}`, idbm.FileItemsCount)

-	storageMetrics.NewGauge(`vm_date_range_search_calls_total`, func() float64 {
-		return float64(idbm().DateRangeSearchCalls)
-	})
-	storageMetrics.NewGauge(`vm_date_range_hits_total`, func() float64 {
-		return float64(idbm().DateRangeSearchHits)
-	})
-	storageMetrics.NewGauge(`vm_global_search_calls_total`, func() float64 {
-		return float64(idbm().GlobalSearchCalls)
-	})
+	metrics.WriteCounterUint64(w, `vm_date_range_search_calls_total`, idbm.DateRangeSearchCalls)
+	metrics.WriteCounterUint64(w, `vm_date_range_hits_total`, idbm.DateRangeSearchHits)
+	metrics.WriteCounterUint64(w, `vm_global_search_calls_total`, idbm.GlobalSearchCalls)

-	storageMetrics.NewGauge(`vm_missing_metric_names_for_metric_id_total`, func() float64 {
-		return float64(idbm().MissingMetricNamesForMetricID)
-	})
+	metrics.WriteCounterUint64(w, `vm_missing_metric_names_for_metric_id_total`, idbm.MissingMetricNamesForMetricID)

-	storageMetrics.NewGauge(`vm_date_metric_id_cache_syncs_total`, func() float64 {
-		return float64(m().DateMetricIDCacheSyncsCount)
-	})
-	storageMetrics.NewGauge(`vm_date_metric_id_cache_resets_total`, func() float64 {
-		return float64(m().DateMetricIDCacheResetsCount)
-	})
+	metrics.WriteCounterUint64(w, `vm_date_metric_id_cache_syncs_total`, m.DateMetricIDCacheSyncsCount)
+	metrics.WriteCounterUint64(w, `vm_date_metric_id_cache_resets_total`, m.DateMetricIDCacheResetsCount)

-	storageMetrics.NewGauge(`vm_cache_entries{type="storage/tsid"}`, func() float64 {
-		return float64(m().TSIDCacheSize)
-	})
-	storageMetrics.NewGauge(`vm_cache_entries{type="storage/metricIDs"}`, func() float64 {
-		return float64(m().MetricIDCacheSize)
-	})
-	storageMetrics.NewGauge(`vm_cache_entries{type="storage/metricName"}`, func() float64 {
-		return float64(m().MetricNameCacheSize)
-	})
-	storageMetrics.NewGauge(`vm_cache_entries{type="storage/date_metricID"}`, func() float64 {
-		return float64(m().DateMetricIDCacheSize)
-	})
-	storageMetrics.NewGauge(`vm_cache_entries{type="storage/hour_metric_ids"}`, func() float64 {
-		return float64(m().HourMetricIDCacheSize)
-	})
-	storageMetrics.NewGauge(`vm_cache_entries{type="storage/next_day_metric_ids"}`, func() float64 {
-		return float64(m().NextDayMetricIDCacheSize)
-	})
-	storageMetrics.NewGauge(`vm_cache_entries{type="storage/indexBlocks"}`, func() float64 {
-		return float64(tm().IndexBlocksCacheSize)
-	})
-	storageMetrics.NewGauge(`vm_cache_entries{type="indexdb/dataBlocks"}`, func() float64 {
-		return float64(idbm().DataBlocksCacheSize)
-	})
-	storageMetrics.NewGauge(`vm_cache_entries{type="indexdb/indexBlocks"}`, func() float64 {
-		return float64(idbm().IndexBlocksCacheSize)
-	})
-	storageMetrics.NewGauge(`vm_cache_entries{type="indexdb/tagFiltersToMetricIDs"}`, func() float64 {
-		return float64(idbm().TagFiltersToMetricIDsCacheSize)
-	})
-	storageMetrics.NewGauge(`vm_cache_entries{type="storage/regexps"}`, func() float64 {
-		return float64(storage.RegexpCacheSize())
-	})
-	storageMetrics.NewGauge(`vm_cache_entries{type="storage/regexpPrefixes"}`, func() float64 {
-		return float64(storage.RegexpPrefixesCacheSize())
-	})
+	metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/tsid"}`, m.TSIDCacheSize)
+	metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/metricIDs"}`, m.MetricIDCacheSize)
+	metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/metricName"}`, m.MetricNameCacheSize)
+	metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/date_metricID"}`, m.DateMetricIDCacheSize)
+	metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/hour_metric_ids"}`, m.HourMetricIDCacheSize)
+	metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/next_day_metric_ids"}`, m.NextDayMetricIDCacheSize)
+	metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/indexBlocks"}`, tm.IndexBlocksCacheSize)
+	metrics.WriteGaugeUint64(w, `vm_cache_entries{type="indexdb/dataBlocks"}`, idbm.DataBlocksCacheSize)
+	metrics.WriteGaugeUint64(w, `vm_cache_entries{type="indexdb/indexBlocks"}`, idbm.IndexBlocksCacheSize)
+	metrics.WriteGaugeUint64(w, `vm_cache_entries{type="indexdb/tagFiltersToMetricIDs"}`, idbm.TagFiltersToMetricIDsCacheSize)
+	metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/regexps"}`, uint64(storage.RegexpCacheSize()))
+	metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/regexpPrefixes"}`, uint64(storage.RegexpPrefixesCacheSize()))
+	metrics.WriteGaugeUint64(w, `vm_cache_entries{type="storage/prefetchedMetricIDs"}`, m.PrefetchedMetricIDsSize)

-	storageMetrics.NewGauge(`vm_cache_entries{type="storage/prefetchedMetricIDs"}`, func() float64 {
-		return float64(m().PrefetchedMetricIDsSize)
-	})
+	metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/tsid"}`, m.TSIDCacheSizeBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/metricIDs"}`, m.MetricIDCacheSizeBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/metricName"}`, m.MetricNameCacheSizeBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/indexBlocks"}`, tm.IndexBlocksCacheSizeBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="indexdb/dataBlocks"}`, idbm.DataBlocksCacheSizeBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="indexdb/indexBlocks"}`, idbm.IndexBlocksCacheSizeBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/date_metricID"}`, m.DateMetricIDCacheSizeBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/hour_metric_ids"}`, m.HourMetricIDCacheSizeBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/next_day_metric_ids"}`, m.NextDayMetricIDCacheSizeBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="indexdb/tagFiltersToMetricIDs"}`, idbm.TagFiltersToMetricIDsCacheSizeBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/regexps"}`, uint64(storage.RegexpCacheSizeBytes()))
+	metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/regexpPrefixes"}`, uint64(storage.RegexpPrefixesCacheSizeBytes()))
+	metrics.WriteGaugeUint64(w, `vm_cache_size_bytes{type="storage/prefetchedMetricIDs"}`, m.PrefetchedMetricIDsSizeBytes)

-	storageMetrics.NewGauge(`vm_cache_size_bytes{type="storage/tsid"}`, func() float64 {
-		return float64(m().TSIDCacheSizeBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_bytes{type="storage/metricIDs"}`, func() float64 {
-		return float64(m().MetricIDCacheSizeBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_bytes{type="storage/metricName"}`, func() float64 {
-		return float64(m().MetricNameCacheSizeBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_bytes{type="storage/indexBlocks"}`, func() float64 {
-		return float64(tm().IndexBlocksCacheSizeBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_bytes{type="indexdb/dataBlocks"}`, func() float64 {
-		return float64(idbm().DataBlocksCacheSizeBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_bytes{type="indexdb/indexBlocks"}`, func() float64 {
-		return float64(idbm().IndexBlocksCacheSizeBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_bytes{type="storage/date_metricID"}`, func() float64 {
-		return float64(m().DateMetricIDCacheSizeBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_bytes{type="storage/hour_metric_ids"}`, func() float64 {
-		return float64(m().HourMetricIDCacheSizeBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_bytes{type="storage/next_day_metric_ids"}`, func() float64 {
-		return float64(m().NextDayMetricIDCacheSizeBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_bytes{type="indexdb/tagFiltersToMetricIDs"}`, func() float64 {
-		return float64(idbm().TagFiltersToMetricIDsCacheSizeBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_bytes{type="storage/regexps"}`, func() float64 {
-		return float64(storage.RegexpCacheSizeBytes())
-	})
-	storageMetrics.NewGauge(`vm_cache_size_bytes{type="storage/regexpPrefixes"}`, func() float64 {
-		return float64(storage.RegexpPrefixesCacheSizeBytes())
-	})
-	storageMetrics.NewGauge(`vm_cache_size_bytes{type="storage/prefetchedMetricIDs"}`, func() float64 {
-		return float64(m().PrefetchedMetricIDsSizeBytes)
-	})
+	metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="storage/tsid"}`, m.TSIDCacheSizeMaxBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="storage/metricIDs"}`, m.MetricIDCacheSizeMaxBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="storage/metricName"}`, m.MetricNameCacheSizeMaxBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="storage/indexBlocks"}`, tm.IndexBlocksCacheSizeMaxBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="indexdb/dataBlocks"}`, idbm.DataBlocksCacheSizeMaxBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="indexdb/indexBlocks"}`, idbm.IndexBlocksCacheSizeMaxBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="indexdb/tagFiltersToMetricIDs"}`, idbm.TagFiltersToMetricIDsCacheSizeMaxBytes)
+	metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="storage/regexps"}`, uint64(storage.RegexpCacheMaxSizeBytes()))
+	metrics.WriteGaugeUint64(w, `vm_cache_size_max_bytes{type="storage/regexpPrefixes"}`, uint64(storage.RegexpPrefixesCacheMaxSizeBytes()))

-	storageMetrics.NewGauge(`vm_cache_size_max_bytes{type="storage/tsid"}`, func() float64 {
-		return float64(m().TSIDCacheSizeMaxBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_max_bytes{type="storage/metricIDs"}`, func() float64 {
-		return float64(m().MetricIDCacheSizeMaxBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_max_bytes{type="storage/metricName"}`, func() float64 {
-		return float64(m().MetricNameCacheSizeMaxBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_max_bytes{type="storage/indexBlocks"}`, func() float64 {
-		return float64(tm().IndexBlocksCacheSizeMaxBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_max_bytes{type="indexdb/dataBlocks"}`, func() float64 {
-		return float64(idbm().DataBlocksCacheSizeMaxBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_max_bytes{type="indexdb/indexBlocks"}`, func() float64 {
-		return float64(idbm().IndexBlocksCacheSizeMaxBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_max_bytes{type="indexdb/tagFiltersToMetricIDs"}`, func() float64 {
-		return float64(idbm().TagFiltersToMetricIDsCacheSizeMaxBytes)
-	})
-	storageMetrics.NewGauge(`vm_cache_size_max_bytes{type="storage/regexps"}`, func() float64 {
-		return float64(storage.RegexpCacheMaxSizeBytes())
-	})
-	storageMetrics.NewGauge(`vm_cache_size_max_bytes{type="storage/regexpPrefixes"}`, func() float64 {
-		return float64(storage.RegexpPrefixesCacheMaxSizeBytes())
-	})
+	metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="storage/tsid"}`, m.TSIDCacheRequests)
+	metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="storage/metricIDs"}`, m.MetricIDCacheRequests)
+	metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="storage/metricName"}`, m.MetricNameCacheRequests)
+	metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="storage/indexBlocks"}`, tm.IndexBlocksCacheRequests)
+	metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="indexdb/dataBlocks"}`, idbm.DataBlocksCacheRequests)
+	metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="indexdb/indexBlocks"}`, idbm.IndexBlocksCacheRequests)
+	metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="indexdb/tagFiltersToMetricIDs"}`, idbm.TagFiltersToMetricIDsCacheRequests)
+	metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="storage/regexps"}`, storage.RegexpCacheRequests())
+	metrics.WriteCounterUint64(w, `vm_cache_requests_total{type="storage/regexpPrefixes"}`, storage.RegexpPrefixesCacheRequests())

-	storageMetrics.NewGauge(`vm_cache_requests_total{type="storage/tsid"}`, func() float64 {
-		return float64(m().TSIDCacheRequests)
-	})
-	storageMetrics.NewGauge(`vm_cache_requests_total{type="storage/metricIDs"}`, func() float64 {
-		return float64(m().MetricIDCacheRequests)
-	})
-	storageMetrics.NewGauge(`vm_cache_requests_total{type="storage/metricName"}`, func() float64 {
-		return float64(m().MetricNameCacheRequests)
-	})
-	storageMetrics.NewGauge(`vm_cache_requests_total{type="storage/indexBlocks"}`, func() float64 {
-		return float64(tm().IndexBlocksCacheRequests)
-	})
-	storageMetrics.NewGauge(`vm_cache_requests_total{type="indexdb/dataBlocks"}`, func() float64 {
-		return float64(idbm().DataBlocksCacheRequests)
-	})
-	storageMetrics.NewGauge(`vm_cache_requests_total{type="indexdb/indexBlocks"}`, func() float64 {
-		return float64(idbm().IndexBlocksCacheRequests)
-	})
-	storageMetrics.NewGauge(`vm_cache_requests_total{type="indexdb/tagFiltersToMetricIDs"}`, func() float64 {
-		return float64(idbm().TagFiltersToMetricIDsCacheRequests)
-	})
-	storageMetrics.NewGauge(`vm_cache_requests_total{type="storage/regexps"}`, func() float64 {
-		return float64(storage.RegexpCacheRequests())
-	})
-	storageMetrics.NewGauge(`vm_cache_requests_total{type="storage/regexpPrefixes"}`, func() float64 {
-		return float64(storage.RegexpPrefixesCacheRequests())
-	})
+	metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="storage/tsid"}`, m.TSIDCacheMisses)
+	metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="storage/metricIDs"}`, m.MetricIDCacheMisses)
+	metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="storage/metricName"}`, m.MetricNameCacheMisses)
+	metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="storage/indexBlocks"}`, tm.IndexBlocksCacheMisses)
+	metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="indexdb/dataBlocks"}`, idbm.DataBlocksCacheMisses)
+	metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="indexdb/indexBlocks"}`, idbm.IndexBlocksCacheMisses)
+	metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="indexdb/tagFiltersToMetricIDs"}`, idbm.TagFiltersToMetricIDsCacheMisses)
+	metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="storage/regexps"}`, storage.RegexpCacheMisses())
+	metrics.WriteCounterUint64(w, `vm_cache_misses_total{type="storage/regexpPrefixes"}`, storage.RegexpPrefixesCacheMisses())

-	storageMetrics.NewGauge(`vm_cache_misses_total{type="storage/tsid"}`, func() float64 {
-		return float64(m().TSIDCacheMisses)
-	})
-	storageMetrics.NewGauge(`vm_cache_misses_total{type="storage/metricIDs"}`, func() float64 {
-		return float64(m().MetricIDCacheMisses)
-	})
-	storageMetrics.NewGauge(`vm_cache_misses_total{type="storage/metricName"}`, func() float64 {
-		return float64(m().MetricNameCacheMisses)
-	})
-	storageMetrics.NewGauge(`vm_cache_misses_total{type="storage/indexBlocks"}`, func() float64 {
-		return float64(tm().IndexBlocksCacheMisses)
-	})
-	storageMetrics.NewGauge(`vm_cache_misses_total{type="indexdb/dataBlocks"}`, func() float64 {
-		return float64(idbm().DataBlocksCacheMisses)
-	})
-	storageMetrics.NewGauge(`vm_cache_misses_total{type="indexdb/indexBlocks"}`, func() float64 {
-		return float64(idbm().IndexBlocksCacheMisses)
-	})
-	storageMetrics.NewGauge(`vm_cache_misses_total{type="indexdb/tagFiltersToMetricIDs"}`, func() float64 {
-		return float64(idbm().TagFiltersToMetricIDsCacheMisses)
-	})
-	storageMetrics.NewGauge(`vm_cache_misses_total{type="storage/regexps"}`, func() float64 {
-		return float64(storage.RegexpCacheMisses())
-	})
-	storageMetrics.NewGauge(`vm_cache_misses_total{type="storage/regexpPrefixes"}`, func() float64 {
-		return float64(storage.RegexpPrefixesCacheMisses())
-	})
+	metrics.WriteCounterUint64(w, `vm_deleted_metrics_total{type="indexdb"}`, idbm.DeletedMetricsCount)

-	storageMetrics.NewGauge(`vm_deleted_metrics_total{type="indexdb"}`, func() float64 {
-		return float64(idbm().DeletedMetricsCount)
-	})
+	metrics.WriteCounterUint64(w, `vm_cache_collisions_total{type="storage/tsid"}`, m.TSIDCacheCollisions)
+	metrics.WriteCounterUint64(w, `vm_cache_collisions_total{type="storage/metricName"}`, m.MetricNameCacheCollisions)

-	storageMetrics.NewGauge(`vm_cache_collisions_total{type="storage/tsid"}`, func() float64 {
-		return float64(m().TSIDCacheCollisions)
-	})
-	storageMetrics.NewGauge(`vm_cache_collisions_total{type="storage/metricName"}`, func() float64 {
-		return float64(m().MetricNameCacheCollisions)
-	})

-	storageMetrics.NewGauge(`vm_next_retention_seconds`, func() float64 {
-		return float64(m().NextRetentionSeconds)
-	})

-	return storageMetrics
+	metrics.WriteGaugeUint64(w, `vm_next_retention_seconds`, m.NextRetentionSeconds)
 }

 func jsonResponseError(w http.ResponseWriter, err error) {
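The visible effect of the rewrite above: metrics that are logically counters (vm_merges_total, vm_rows_merged_total, etc.) are now written with WriteCounterUint64, so when the -metrics.exposeMetadata command-line flag is set they are exposed with a counter TYPE hint instead of the gauge hint implied by the old NewGauge registrations. Illustrative scrape output (metric names from the diff, values made up):

# TYPE vm_merges_total counter
vm_merges_total{type="storage/small"} 1234
# TYPE vm_rows gauge
vm_rows{type="storage/big"} 567890123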
go.mod (2 changed lines)

@@ -12,7 +12,7 @@ require (
 	// Do not use the original github.com/valyala/fasthttp because of issues
 	// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
 	github.com/VictoriaMetrics/fasthttp v1.2.0
-	github.com/VictoriaMetrics/metrics v1.30.0
+	github.com/VictoriaMetrics/metrics v1.31.0
 	github.com/VictoriaMetrics/metricsql v0.70.0
 	github.com/aws/aws-sdk-go-v2 v1.24.0
 	github.com/aws/aws-sdk-go-v2/config v1.26.1
go.sum (4 changed lines)

@@ -65,8 +65,8 @@ github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkT
 github.com/VictoriaMetrics/fasthttp v1.2.0 h1:nd9Wng4DlNtaI27WlYh5mGXCJOmee/2c2blTJwfyU9I=
 github.com/VictoriaMetrics/fasthttp v1.2.0/go.mod h1:zv5YSmasAoSyv8sBVexfArzFDIGGTN4TfCKAtAw7IfE=
 github.com/VictoriaMetrics/metrics v1.24.0/go.mod h1:eFT25kvsTidQFHb6U0oa0rTrDRdz4xTYjpL8+UPohys=
-github.com/VictoriaMetrics/metrics v1.30.0 h1:m8o1sEDTpvFGwvliAmcaxxCDrIYS16rJPmOhwQNgavo=
-github.com/VictoriaMetrics/metrics v1.30.0/go.mod h1:r7hveu6xMdUACXvB8TYdAj8WEsKzWB0EkpJN+RDtOf8=
+github.com/VictoriaMetrics/metrics v1.31.0 h1:X6+nBvAP0UB+GjR0Ht9hhQ3pjL1AN4b8dt9zFfzTsUo=
+github.com/VictoriaMetrics/metrics v1.31.0/go.mod h1:r7hveu6xMdUACXvB8TYdAj8WEsKzWB0EkpJN+RDtOf8=
 github.com/VictoriaMetrics/metricsql v0.70.0 h1:G0k/m1yAF6pmk0dM3VT9/XI5PZ8dL7EbcLhREf4bgeI=
 github.com/VictoriaMetrics/metricsql v0.70.0/go.mod h1:k4UaP/+CjuZslIjd+kCigNG9TQmUqh5v0TP/nMEy90I=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
vendor/github.com/VictoriaMetrics/metrics/metrics.go (generated, vendored; 16 changed lines)

@@ -62,9 +62,21 @@ func UnregisterSet(s *Set) {
 	registeredSetsLock.Unlock()
 }

-// WritePrometheus writes all the metrics from default set and all the registered sets in Prometheus format to w.
+// RegisterMetricsWriter registers writeMetrics callback for including metrics in the output generated by WritePrometheus.
+//
+// The writeMetrics callback must write metrics to w in Prometheus text exposition format without timestamps and trailing comments.
+// The last line generated by writeMetrics must end with \n.
+// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
+//
+// It is OK to register multiple writeMetrics callbacks - all of them will be called sequentially for gererating the output at WritePrometheus.
+func RegisterMetricsWriter(writeMetrics func(w io.Writer)) {
+	defaultSet.RegisterMetricsWriter(writeMetrics)
+}
+
+// WritePrometheus writes all the metrics in Prometheus format from the default set, all the added sets and metrics writers to w.
 //
 // Additional sets can be registered via RegisterSet() call.
+// Additional metric writers can be registered via RegisterMetricsWriter() call.
+//
 // If exposeProcessMetrics is true, then various `go_*` and `process_*` metrics
 // are exposed for the current process.

@@ -232,6 +244,8 @@ func UnregisterMetric(name string) bool {
 }

 // UnregisterAllMetrics unregisters all the metrics from default set.
+//
+// It also unregisters writeMetrics callbacks passed to RegisterMetricsWriter.
 func UnregisterAllMetrics() {
 	defaultSet.UnregisterAllMetrics()
 }
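For illustration, a minimal sketch of the package-level callback documented above (the metric name myapp_build_info and the :8080 address are made up):

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	// The callback is added to the default set and called at the end of
	// every WritePrometheus call.
	metrics.RegisterMetricsWriter(func(w io.Writer) {
		// Prometheus text format, no timestamps; the last line must end with \n.
		fmt.Fprintf(w, "myapp_build_info{version=%q} 1\n", "1.2.3")
	})

	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		metrics.WritePrometheus(w, true)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}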
vendor/github.com/VictoriaMetrics/metrics/push.go (generated, vendored; 3 changed lines)

@@ -39,6 +39,7 @@ type PushOptions struct {
 // InitPushWithOptions sets up periodic push for globally registered metrics to the given pushURL with the given interval.
 //
 // The periodic push is stopped when ctx is canceled.
+// It is possible to wait until the background metrics push worker is stopped on a WaitGroup passed via opts.WaitGroup.
 //
 // If pushProcessMetrics is set to true, then 'process_*' and `go_*` metrics are also pushed to pushURL.
 //

@@ -116,6 +117,7 @@ func PushMetrics(ctx context.Context, pushURL string, pushProcessMetrics bool, o
 // InitPushWithOptions sets up periodic push for metrics from s to the given pushURL with the given interval.
 //
 // The periodic push is stopped when the ctx is canceled.
+// It is possible to wait until the background metrics push worker is stopped on a WaitGroup passed via opts.WaitGroup.
 //
 // opts may contain additional configuration options if non-nil.
 //

@@ -187,6 +189,7 @@ func InitPushExt(pushURL string, interval time.Duration, extraLabels string, wri
 // See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
 //
 // The periodic push is stopped when the ctx is canceled.
+// It is possible to wait until the background metrics push worker is stopped on a WaitGroup passed via opts.WaitGroup.
 //
 // opts may contain additional configuration options if non-nil.
 //
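A hedged sketch of the push workflow documented above, assuming the v1.31.0 signature InitPushWithOptions(ctx, pushURL, interval, pushProcessMetrics, opts) and a *sync.WaitGroup field in PushOptions (the push URL is illustrative):

package main

import (
	"context"
	"log"
	"sync"
	"time"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup

	// Push all globally registered metrics every 10 seconds; the push loop
	// stops when ctx is canceled, and wg is released once the worker exits.
	err := metrics.InitPushWithOptions(ctx, "http://localhost:8428/api/v1/import/prometheus",
		10*time.Second, true, &metrics.PushOptions{WaitGroup: &wg})
	if err != nil {
		log.Fatal(err)
	}

	// ... application runs ...

	cancel()  // stop the periodic push
	wg.Wait() // wait for the background push worker to exit
}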
vendor/github.com/VictoriaMetrics/metrics/set.go (generated, vendored; 29 changed lines)

@@ -19,6 +19,8 @@ type Set struct {
 	a []*namedMetric
 	m map[string]*namedMetric
 	summaries []*Summary
+
+	metricsWriters []func(w io.Writer)
 }

 // NewSet creates new set of metrics.

@@ -45,6 +47,7 @@ func (s *Set) WritePrometheus(w io.Writer) {
 		sort.Slice(s.a, lessFunc)
 	}
 	sa := append([]*namedMetric(nil), s.a...)
+	metricsWriters := s.metricsWriters
 	s.mu.Unlock()

 	prevMetricFamily := ""

@@ -61,6 +64,10 @@ func (s *Set) WritePrometheus(w io.Writer) {
 		nm.metric.marshalTo(nm.name, &bb)
 	}
 	w.Write(bb.Bytes())
+
+	for _, writeMetrics := range metricsWriters {
+		writeMetrics(w)
+	}
 }

 // NewHistogram creates and returns new histogram in s with the given name.

@@ -523,14 +530,22 @@ func (s *Set) unregisterMetricLocked(nm *namedMetric) bool {
 }

 // UnregisterAllMetrics de-registers all metrics registered in s.
+//
+// It also de-registers writeMetrics callbacks passed to RegisterMetricsWriter.
 func (s *Set) UnregisterAllMetrics() {
 	metricNames := s.ListMetricNames()
 	for _, name := range metricNames {
 		s.UnregisterMetric(name)
 	}
+
+	s.mu.Lock()
+	s.metricsWriters = nil
+	s.mu.Unlock()
 }

 // ListMetricNames returns sorted list of all the metrics in s.
+//
+// The returned list doesn't include metrics generated by metricsWriter passed to RegisterMetricsWriter.
 func (s *Set) ListMetricNames() []string {
 	s.mu.Lock()
 	defer s.mu.Unlock()

@@ -544,3 +559,17 @@ func (s *Set) ListMetricNames() []string {
 	sort.Strings(metricNames)
 	return metricNames
 }
+
+// RegisterMetricsWriter registers writeMetrics callback for including metrics in the output generated by s.WritePrometheus.
+//
+// The writeMetrics callback must write metrics to w in Prometheus text exposition format without timestamps and trailing comments.
+// The last line generated by writeMetrics must end with \n.
+// See https://github.com/prometheus/docs/blob/main/content/docs/instrumenting/exposition_formats.md#text-based-format
+//
+// It is OK to reguster multiple writeMetrics callbacks - all of them will be called sequentially for gererating the output at s.WritePrometheus.
+func (s *Set) RegisterMetricsWriter(writeMetrics func(w io.Writer)) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	s.metricsWriters = append(s.metricsWriters, writeMetrics)
+}
vendor/modules.txt (vendored; 2 changed lines)

@@ -100,7 +100,7 @@ github.com/VictoriaMetrics/fastcache
 github.com/VictoriaMetrics/fasthttp
 github.com/VictoriaMetrics/fasthttp/fasthttputil
 github.com/VictoriaMetrics/fasthttp/stackless
-# github.com/VictoriaMetrics/metrics v1.30.0
+# github.com/VictoriaMetrics/metrics v1.31.0
 ## explicit; go 1.17
 github.com/VictoriaMetrics/metrics
 # github.com/VictoriaMetrics/metricsql v0.70.0