From f76598594754b79060f631bf515a468f0dd343a0 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 23 Nov 2020 09:55:38 +0200 Subject: [PATCH 01/21] lib/fs: replace fs.OpenReaderAt with fs.MustOpenReaderAt All the callers for fs.OpenReaderAt expect that the file will be opened. So it is better to log fatal error inside fs.MustOpenReaderAt instead of leaving this to the caller. --- app/vmselect/netstorage/tmp_blocks_file.go | 5 +---- lib/fs/reader_at.go | 13 +++++++------ lib/fs/reader_at_test.go | 5 +---- lib/fs/reader_at_timing_test.go | 5 +---- lib/mergeset/part.go | 21 +++------------------ lib/storage/part.go | 18 +++--------------- 6 files changed, 16 insertions(+), 51 deletions(-) diff --git a/app/vmselect/netstorage/tmp_blocks_file.go b/app/vmselect/netstorage/tmp_blocks_file.go index 71bd7561c..7a16131a3 100644 --- a/app/vmselect/netstorage/tmp_blocks_file.go +++ b/app/vmselect/netstorage/tmp_blocks_file.go @@ -133,10 +133,7 @@ func (tbf *tmpBlocksFile) Finalize() error { return fmt.Errorf("cannot write the remaining %d bytes to %q: %w", len(tbf.buf), fname, err) } tbf.buf = tbf.buf[:0] - r, err := fs.OpenReaderAt(fname) - if err != nil { - logger.Panicf("FATAL: cannot open %q: %s", fname, err) - } + r := fs.MustOpenReaderAt(fname) // Hint the OS that the file is read almost sequentiallly. // This should reduce the number of disk seeks, which is important // for HDDs. diff --git a/lib/fs/reader_at.go b/lib/fs/reader_at.go index 175292cc0..00b05d24e 100644 --- a/lib/fs/reader_at.go +++ b/lib/fs/reader_at.go @@ -158,13 +158,13 @@ func (r *ReaderAt) MustFadviseSequentialRead(prefetch bool) { } } -// OpenReaderAt opens ReaderAt for reading from filename. +// MustOpenReaderAt opens ReaderAt for reading from filename. // // MustClose must be called on the returned ReaderAt when it is no longer needed. 
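// A minimal sketch of the "Must" wrapper pattern this patch applies: the error
// is handled inside the constructor itself, since every caller treats it as
// fatal anyway. The helper name mustOpenFile below is illustrative and not part
// of this package; logger.Panicf is the fatal-logging helper from lib/logger.
func mustOpenFile(path string) *os.File {
	f, err := os.Open(path)
	if err != nil {
		logger.Panicf("FATAL: cannot open %q for reading: %s", path, err)
	}
	return f
}
// Callers can then use the returned value directly instead of repeating the
// same "if err != nil { logger.Panicf(...) }" block at every call site.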
-func OpenReaderAt(path string) (*ReaderAt, error) { +func MustOpenReaderAt(path string) *ReaderAt { f, err := os.Open(path) if err != nil { - return nil, fmt.Errorf("cannot open file %q for reader: %w", path, err) + logger.Panicf("FATAL: cannot open file %q for reading: %s", path, err) } var r ReaderAt r.f = f @@ -172,7 +172,8 @@ func OpenReaderAt(path string) (*ReaderAt, error) { if !*disableMmap { fi, err := f.Stat() if err != nil { - return nil, fmt.Errorf("error in stat: %w", err) + MustClose(f) + logger.Panicf("FATAL: error in fstat(%q): %s", path, err) } size := fi.Size() bm := &pageCacheBitmap{ @@ -188,12 +189,12 @@ func OpenReaderAt(path string) (*ReaderAt, error) { data, err := mmapFile(f, size) if err != nil { MustClose(f) - return nil, fmt.Errorf("cannot init reader for %q: %w", path, err) + logger.Panicf("FATAL: cannot mmap %q: %s", path, err) } r.mmapData = data } readersCount.Inc() - return &r, nil + return &r } func pageCacheBitmapCleaner(pcbm *atomic.Value, stopCh <-chan struct{}) { diff --git a/lib/fs/reader_at_test.go b/lib/fs/reader_at_test.go index 10d84c738..4747030e7 100644 --- a/lib/fs/reader_at_test.go +++ b/lib/fs/reader_at_test.go @@ -22,10 +22,7 @@ func testReaderAt(t *testing.T, bufSize int) { t.Fatalf("cannot create %q: %s", path, err) } defer MustRemoveAll(path) - r, err := OpenReaderAt(path) - if err != nil { - t.Fatalf("error in OpenReaderAt(%q): %s", path, err) - } + r := MustOpenReaderAt(path) defer r.MustClose() buf := make([]byte, bufSize) diff --git a/lib/fs/reader_at_timing_test.go b/lib/fs/reader_at_timing_test.go index 84eb1e65a..87b012e97 100644 --- a/lib/fs/reader_at_timing_test.go +++ b/lib/fs/reader_at_timing_test.go @@ -29,10 +29,7 @@ func benchmarkReaderAtMustReadAt(b *testing.B, isMmap bool) { b.Fatalf("cannot create %q: %s", path, err) } defer MustRemoveAll(path) - r, err := OpenReaderAt(path) - if err != nil { - b.Fatalf("error in OpenReaderAt(%q): %s", path, err) - } + r := MustOpenReaderAt(path) defer r.MustClose() b.ResetTimer() diff --git a/lib/mergeset/part.go b/lib/mergeset/part.go index 86bbc5bb8..0ead8d97c 100644 --- a/lib/mergeset/part.go +++ b/lib/mergeset/part.go @@ -78,30 +78,15 @@ func openFilePart(path string) (*part, error) { metaindexSize := fs.MustFileSize(metaindexPath) indexPath := path + "/index.bin" - indexFile, err := fs.OpenReaderAt(indexPath) - if err != nil { - metaindexFile.MustClose() - return nil, fmt.Errorf("cannot open %q: %w", indexPath, err) - } + indexFile := fs.MustOpenReaderAt(indexPath) indexSize := fs.MustFileSize(indexPath) itemsPath := path + "/items.bin" - itemsFile, err := fs.OpenReaderAt(itemsPath) - if err != nil { - metaindexFile.MustClose() - indexFile.MustClose() - return nil, fmt.Errorf("cannot open %q: %w", itemsPath, err) - } + itemsFile := fs.MustOpenReaderAt(itemsPath) itemsSize := fs.MustFileSize(itemsPath) lensPath := path + "/lens.bin" - lensFile, err := fs.OpenReaderAt(lensPath) - if err != nil { - metaindexFile.MustClose() - indexFile.MustClose() - itemsFile.MustClose() - return nil, fmt.Errorf("cannot open %q: %w", lensPath, err) - } + lensFile := fs.MustOpenReaderAt(lensPath) lensSize := fs.MustFileSize(lensPath) size := metaindexSize + indexSize + itemsSize + lensSize diff --git a/lib/storage/part.go b/lib/storage/part.go index 2eb62b180..1fcd969d5 100644 --- a/lib/storage/part.go +++ b/lib/storage/part.go @@ -60,27 +60,15 @@ func openFilePart(path string) (*part, error) { } timestampsPath := path + "/timestamps.bin" - timestampsFile, err := fs.OpenReaderAt(timestampsPath) - if err 
!= nil { - return nil, fmt.Errorf("cannot open timestamps file: %w", err) - } + timestampsFile := fs.MustOpenReaderAt(timestampsPath) timestampsSize := fs.MustFileSize(timestampsPath) valuesPath := path + "/values.bin" - valuesFile, err := fs.OpenReaderAt(valuesPath) - if err != nil { - timestampsFile.MustClose() - return nil, fmt.Errorf("cannot open values file: %w", err) - } + valuesFile := fs.MustOpenReaderAt(valuesPath) valuesSize := fs.MustFileSize(valuesPath) indexPath := path + "/index.bin" - indexFile, err := fs.OpenReaderAt(indexPath) - if err != nil { - timestampsFile.MustClose() - valuesFile.MustClose() - return nil, fmt.Errorf("cannot open index file: %w", err) - } + indexFile := fs.MustOpenReaderAt(indexPath) indexSize := fs.MustFileSize(indexPath) metaindexPath := path + "/metaindex.bin" From d3794eb994de606e47b76cc6245b9375c8d327d9 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 23 Nov 2020 12:33:17 +0200 Subject: [PATCH 02/21] app/{vminsert,vmselect}: move `/tags/tagSeries` and `/tags/tagMultiSeries` api from vminsert to vmselect This is needed for consistency, since all the `/tags*` api handlers are located in vmselect. --- app/vminsert/graphite/tags.go | 102 ------------------ .../tags_tag_multi_series_response.qtpl.go | 75 ------------- app/vminsert/main.go | 22 ---- app/vmselect/graphite/tags_api.go | 90 ++++++++++++++++ .../tags_tag_multi_series_response.qtpl | 0 .../tags_tag_multi_series_response.qtpl.go | 75 +++++++++++++ app/vmselect/main.go | 22 ++++ 7 files changed, 187 insertions(+), 199 deletions(-) delete mode 100644 app/vminsert/graphite/tags.go delete mode 100644 app/vminsert/graphite/tags_tag_multi_series_response.qtpl.go rename app/{vminsert => vmselect}/graphite/tags_tag_multi_series_response.qtpl (100%) create mode 100644 app/vmselect/graphite/tags_tag_multi_series_response.qtpl.go diff --git a/app/vminsert/graphite/tags.go b/app/vminsert/graphite/tags.go deleted file mode 100644 index fe49fcebf..000000000 --- a/app/vminsert/graphite/tags.go +++ /dev/null @@ -1,102 +0,0 @@ -package graphite - -import ( - "fmt" - "net/http" - "sort" - "time" - - "github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb" - graphiteparser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" - "github.com/VictoriaMetrics/metrics" -) - -// TagsTagSeriesHandler implements /tags/tagSeries handler. -// -// See https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb -func TagsTagSeriesHandler(w http.ResponseWriter, r *http.Request) error { - return registerMetrics(w, r, false) -} - -// TagsTagMultiSeriesHandler implements /tags/tagMultiSeries handler. 
-// -// See https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb -func TagsTagMultiSeriesHandler(w http.ResponseWriter, r *http.Request) error { - return registerMetrics(w, r, true) -} - -func registerMetrics(w http.ResponseWriter, r *http.Request, isJSONResponse bool) error { - startTime := time.Now() - if err := r.ParseForm(); err != nil { - return fmt.Errorf("cannot parse form values: %w", err) - } - paths := r.Form["path"] - var row graphiteparser.Row - var labels []prompb.Label - var b []byte - var tagsPool []graphiteparser.Tag - mrs := make([]storage.MetricRow, len(paths)) - ct := time.Now().UnixNano() / 1e6 - canonicalPaths := make([]string, len(paths)) - for i, path := range paths { - var err error - tagsPool, err = row.UnmarshalMetricAndTags(path, tagsPool[:0]) - if err != nil { - return fmt.Errorf("cannot parse path=%q: %w", path, err) - } - - // Construct canonical path according to https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb - sort.Slice(row.Tags, func(i, j int) bool { - return row.Tags[i].Key < row.Tags[j].Key - }) - b = append(b[:0], row.Metric...) - for _, tag := range row.Tags { - b = append(b, ';') - b = append(b, tag.Key...) - b = append(b, '=') - b = append(b, tag.Value...) - } - canonicalPaths[i] = string(b) - - // Convert parsed metric and tags to labels. - labels = append(labels[:0], prompb.Label{ - Name: []byte("__name__"), - Value: []byte(row.Metric), - }) - for _, tag := range row.Tags { - labels = append(labels, prompb.Label{ - Name: []byte(tag.Key), - Value: []byte(tag.Value), - }) - } - - // Put labels with the current timestamp to MetricRow - mr := &mrs[i] - mr.MetricNameRaw = storage.MarshalMetricNameRaw(mr.MetricNameRaw[:0], labels) - mr.Timestamp = ct - } - if err := vmstorage.RegisterMetricNames(mrs); err != nil { - return fmt.Errorf("cannot register paths: %w", err) - } - - // Return response - contentType := "text/plain; charset=utf-8" - if isJSONResponse { - contentType = "application/json; charset=utf-8" - } - w.Header().Set("Content-Type", contentType) - WriteTagsTagMultiSeriesResponse(w, canonicalPaths, isJSONResponse) - if isJSONResponse { - tagsTagMultiSeriesDuration.UpdateDuration(startTime) - } else { - tagsTagSeriesDuration.UpdateDuration(startTime) - } - return nil -} - -var ( - tagsTagSeriesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/tags/tagSeries"}`) - tagsTagMultiSeriesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/tags/tagMultiSeries"}`) -) diff --git a/app/vminsert/graphite/tags_tag_multi_series_response.qtpl.go b/app/vminsert/graphite/tags_tag_multi_series_response.qtpl.go deleted file mode 100644 index 773a6ce4e..000000000 --- a/app/vminsert/graphite/tags_tag_multi_series_response.qtpl.go +++ /dev/null @@ -1,75 +0,0 @@ -// Code generated by qtc from "tags_tag_multi_series_response.qtpl". DO NOT EDIT. -// See https://github.com/valyala/quicktemplate for details. 
- -// TagsTagMultiSeriesResponse generates response for /tags/tagMultiSeries .See https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb - -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:5 -package graphite - -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:5 -import ( - qtio422016 "io" - - qt422016 "github.com/valyala/quicktemplate" -) - -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:5 -var ( - _ = qtio422016.Copy - _ = qt422016.AcquireByteBuffer -) - -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:5 -func StreamTagsTagMultiSeriesResponse(qw422016 *qt422016.Writer, canonicalPaths []string, isJSONResponse bool) { -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:6 - if isJSONResponse { -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:6 - qw422016.N().S(`[`) -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:6 - } -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:7 - for i, path := range canonicalPaths { -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:8 - qw422016.N().Q(path) -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:9 - if i+1 < len(canonicalPaths) { -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:9 - qw422016.N().S(`,`) -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:9 - } -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:10 - } -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:11 - if isJSONResponse { -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:11 - qw422016.N().S(`]`) -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:11 - } -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 -} - -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 -func WriteTagsTagMultiSeriesResponse(qq422016 qtio422016.Writer, canonicalPaths []string, isJSONResponse bool) { -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 - qw422016 := qt422016.AcquireWriter(qq422016) -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 - StreamTagsTagMultiSeriesResponse(qw422016, canonicalPaths, isJSONResponse) -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 - qt422016.ReleaseWriter(qw422016) -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 -} - -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 -func TagsTagMultiSeriesResponse(canonicalPaths []string, isJSONResponse bool) string { -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 - qb422016 := qt422016.AcquireByteBuffer() -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 - WriteTagsTagMultiSeriesResponse(qb422016, canonicalPaths, isJSONResponse) -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 - qs422016 := string(qb422016.B) -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 - qt422016.ReleaseByteBuffer(qb422016) -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 - return qs422016 -//line app/vminsert/graphite/tags_tag_multi_series_response.qtpl:12 -} diff --git a/app/vminsert/main.go b/app/vminsert/main.go index 1716b1c4e..e47132551 100644 --- a/app/vminsert/main.go +++ b/app/vminsert/main.go @@ -153,22 +153,6 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { influxQueryRequests.Inc() fmt.Fprintf(w, 
`{"results":[{"series":[{"values":[]}]}]}`) return true - case "/tags/tagSeries": - graphiteTagsTagSeriesRequests.Inc() - if err := graphite.TagsTagSeriesHandler(w, r); err != nil { - graphiteTagsTagSeriesErrors.Inc() - httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) - return true - } - return true - case "/tags/tagMultiSeries": - graphiteTagsTagMultiSeriesRequests.Inc() - if err := graphite.TagsTagMultiSeriesHandler(w, r); err != nil { - graphiteTagsTagMultiSeriesErrors.Inc() - httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) - return true - } - return true case "/targets": promscrapeTargetsRequests.Inc() w.Header().Set("Content-Type", "text/plain; charset=utf-8") @@ -223,12 +207,6 @@ var ( influxQueryRequests = metrics.NewCounter(`vm_http_requests_total{path="/query", protocol="influx"}`) - graphiteTagsTagSeriesRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags/tagSeries", protocol="graphite"}`) - graphiteTagsTagSeriesErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags/tagSeries", protocol="graphite"}`) - - graphiteTagsTagMultiSeriesRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags/tagMultiSeries", protocol="graphite"}`) - graphiteTagsTagMultiSeriesErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags/tagMultiSeries", protocol="graphite"}`) - promscrapeTargetsRequests = metrics.NewCounter(`vm_http_requests_total{path="/targets"}`) promscrapeAPIV1TargetsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/targets"}`) diff --git a/app/vmselect/graphite/tags_api.go b/app/vmselect/graphite/tags_api.go index df0d613e7..9591df4bc 100644 --- a/app/vmselect/graphite/tags_api.go +++ b/app/vmselect/graphite/tags_api.go @@ -12,10 +12,100 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/bufferedwriter" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb" + graphiteparser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite" "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" "github.com/VictoriaMetrics/metrics" ) +// TagsTagSeriesHandler implements /tags/tagSeries handler. +// +// See https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb +func TagsTagSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error { + return registerMetrics(startTime, w, r, false) +} + +// TagsTagMultiSeriesHandler implements /tags/tagMultiSeries handler. 
+// +// See https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb +func TagsTagMultiSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error { + return registerMetrics(startTime, w, r, true) +} + +func registerMetrics(startTime time.Time, w http.ResponseWriter, r *http.Request, isJSONResponse bool) error { + if err := r.ParseForm(); err != nil { + return fmt.Errorf("cannot parse form values: %w", err) + } + paths := r.Form["path"] + var row graphiteparser.Row + var labels []prompb.Label + var b []byte + var tagsPool []graphiteparser.Tag + mrs := make([]storage.MetricRow, len(paths)) + ct := time.Now().UnixNano() / 1e6 + canonicalPaths := make([]string, len(paths)) + for i, path := range paths { + var err error + tagsPool, err = row.UnmarshalMetricAndTags(path, tagsPool[:0]) + if err != nil { + return fmt.Errorf("cannot parse path=%q: %w", path, err) + } + + // Construct canonical path according to https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb + sort.Slice(row.Tags, func(i, j int) bool { + return row.Tags[i].Key < row.Tags[j].Key + }) + b = append(b[:0], row.Metric...) + for _, tag := range row.Tags { + b = append(b, ';') + b = append(b, tag.Key...) + b = append(b, '=') + b = append(b, tag.Value...) + } + canonicalPaths[i] = string(b) + + // Convert parsed metric and tags to labels. + labels = append(labels[:0], prompb.Label{ + Name: []byte("__name__"), + Value: []byte(row.Metric), + }) + for _, tag := range row.Tags { + labels = append(labels, prompb.Label{ + Name: []byte(tag.Key), + Value: []byte(tag.Value), + }) + } + + // Put labels with the current timestamp to MetricRow + mr := &mrs[i] + mr.MetricNameRaw = storage.MarshalMetricNameRaw(mr.MetricNameRaw[:0], labels) + mr.Timestamp = ct + } + if err := vmstorage.RegisterMetricNames(mrs); err != nil { + return fmt.Errorf("cannot register paths: %w", err) + } + + // Return response + contentType := "text/plain; charset=utf-8" + if isJSONResponse { + contentType = "application/json; charset=utf-8" + } + w.Header().Set("Content-Type", contentType) + WriteTagsTagMultiSeriesResponse(w, canonicalPaths, isJSONResponse) + if isJSONResponse { + tagsTagMultiSeriesDuration.UpdateDuration(startTime) + } else { + tagsTagSeriesDuration.UpdateDuration(startTime) + } + return nil +} + +var ( + tagsTagSeriesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/tags/tagSeries"}`) + tagsTagMultiSeriesDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/tags/tagMultiSeries"}`) +) + // TagsAutoCompleteValuesHandler implements /tags/autoComplete/values endpoint from Graphite Tags API. // // See https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support diff --git a/app/vminsert/graphite/tags_tag_multi_series_response.qtpl b/app/vmselect/graphite/tags_tag_multi_series_response.qtpl similarity index 100% rename from app/vminsert/graphite/tags_tag_multi_series_response.qtpl rename to app/vmselect/graphite/tags_tag_multi_series_response.qtpl diff --git a/app/vmselect/graphite/tags_tag_multi_series_response.qtpl.go b/app/vmselect/graphite/tags_tag_multi_series_response.qtpl.go new file mode 100644 index 000000000..d996a925d --- /dev/null +++ b/app/vmselect/graphite/tags_tag_multi_series_response.qtpl.go @@ -0,0 +1,75 @@ +// Code generated by qtc from "tags_tag_multi_series_response.qtpl". DO NOT EDIT. +// See https://github.com/valyala/quicktemplate for details. 
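// For reference, the canonical-path rule used by registerMetrics above can be
// expressed as a small standalone sketch (the helper name and the map-based
// input are illustrative only): tags are sorted by key and appended to the
// metric name as ";key=value" pairs, following the Graphite TagDB conventions.
func canonicalGraphitePath(metric string, tags map[string]string) string {
	keys := make([]string, 0, len(tags))
	for k := range tags {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	path := metric
	for _, k := range keys {
		path += ";" + k + "=" + tags[k]
	}
	return path
}
// For example, canonicalGraphitePath("disk.used", map[string]string{"rack": "a1", "datacenter": "dc1"})
// returns "disk.used;datacenter=dc1;rack=a1".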
+ +// TagsTagMultiSeriesResponse generates response for /tags/tagMultiSeries .See https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb + +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:5 +package graphite + +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:5 +import ( + qtio422016 "io" + + qt422016 "github.com/valyala/quicktemplate" +) + +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:5 +var ( + _ = qtio422016.Copy + _ = qt422016.AcquireByteBuffer +) + +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:5 +func StreamTagsTagMultiSeriesResponse(qw422016 *qt422016.Writer, canonicalPaths []string, isJSONResponse bool) { +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:6 + if isJSONResponse { +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:6 + qw422016.N().S(`[`) +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:6 + } +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:7 + for i, path := range canonicalPaths { +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:8 + qw422016.N().Q(path) +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:9 + if i+1 < len(canonicalPaths) { +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:9 + qw422016.N().S(`,`) +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:9 + } +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:10 + } +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:11 + if isJSONResponse { +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:11 + qw422016.N().S(`]`) +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:11 + } +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:12 +} + +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:12 +func WriteTagsTagMultiSeriesResponse(qq422016 qtio422016.Writer, canonicalPaths []string, isJSONResponse bool) { +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:12 + qw422016 := qt422016.AcquireWriter(qq422016) +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:12 + StreamTagsTagMultiSeriesResponse(qw422016, canonicalPaths, isJSONResponse) +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:12 + qt422016.ReleaseWriter(qw422016) +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:12 +} + +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:12 +func TagsTagMultiSeriesResponse(canonicalPaths []string, isJSONResponse bool) string { +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:12 + qb422016 := qt422016.AcquireByteBuffer() +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:12 + WriteTagsTagMultiSeriesResponse(qb422016, canonicalPaths, isJSONResponse) +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:12 + qs422016 := string(qb422016.B) +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:12 + qt422016.ReleaseByteBuffer(qb422016) +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:12 + return qs422016 +//line app/vmselect/graphite/tags_tag_multi_series_response.qtpl:12 +} diff --git a/app/vmselect/main.go b/app/vmselect/main.go index cc8aab099..ff7a9d0da 100644 --- a/app/vmselect/main.go +++ b/app/vmselect/main.go @@ -269,6 +269,22 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { return true } return true + case "/tags/tagSeries": + 
graphiteTagsTagSeriesRequests.Inc() + if err := graphite.TagsTagSeriesHandler(startTime, w, r); err != nil { + graphiteTagsTagSeriesErrors.Inc() + httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) + return true + } + return true + case "/tags/tagMultiSeries": + graphiteTagsTagMultiSeriesRequests.Inc() + if err := graphite.TagsTagMultiSeriesHandler(startTime, w, r); err != nil { + graphiteTagsTagMultiSeriesErrors.Inc() + httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) + return true + } + return true case "/tags": graphiteTagsRequests.Inc() if err := graphite.TagsHandler(startTime, w, r); err != nil { @@ -416,6 +432,12 @@ var ( graphiteMetricsIndexRequests = metrics.NewCounter(`vm_http_requests_total{path="/metrics/index.json"}`) graphiteMetricsIndexErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/metrics/index.json"}`) + graphiteTagsTagSeriesRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags/tagSeries"}`) + graphiteTagsTagSeriesErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags/tagSeries"}`) + + graphiteTagsTagMultiSeriesRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags/tagMultiSeries"}`) + graphiteTagsTagMultiSeriesErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags/tagMultiSeries"}`) + graphiteTagsRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags"}`) graphiteTagsErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags"}`) From f0c207fae2a91b522ad4202c2cb4900c62c19dd8 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 23 Nov 2020 15:26:20 +0200 Subject: [PATCH 03/21] app/vmselect: add `/tags/delSeries` handler from Graphite Tags API See https://graphite.readthedocs.io/en/stable/tags.html#removing-series-from-the-tagdb --- README.md | 1 + app/vmselect/graphite/tags_api.go | 46 +++++++++++++++++++++++++++ app/vmselect/main.go | 11 +++++++ docs/Single-server-VictoriaMetrics.md | 1 + 4 files changed, 59 insertions(+) diff --git a/README.md b/README.md index 85675eb54..ff7fb3fc2 100644 --- a/README.md +++ b/README.md @@ -566,6 +566,7 @@ VictoriaMetrics supports the following handlers from [Graphite Tags API](https:/ * [/tags/findSeries](https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags) * [/tags/autoComplete/tags](https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support) * [/tags/autoComplete/values](https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support) +* [/tags/delSeries](https://graphite.readthedocs.io/en/stable/tags.html#removing-series-from-the-tagdb) ## How to build from sources diff --git a/app/vmselect/graphite/tags_api.go b/app/vmselect/graphite/tags_api.go index 9591df4bc..ba46aeb2d 100644 --- a/app/vmselect/graphite/tags_api.go +++ b/app/vmselect/graphite/tags_api.go @@ -19,6 +19,52 @@ import ( "github.com/VictoriaMetrics/metrics" ) +// TagsDelSeriesHandler implements /tags/delSeries handler. 
+// +// See https://graphite.readthedocs.io/en/stable/tags.html#removing-series-from-the-tagdb +func TagsDelSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) error { + if err := r.ParseForm(); err != nil { + return fmt.Errorf("cannot parse form values: %w", err) + } + paths := r.Form["path"] + totalDeleted := 0 + var row graphiteparser.Row + var tagsPool []graphiteparser.Tag + ct := time.Now().UnixNano() / 1e6 + for _, path := range paths { + var err error + tagsPool, err = row.UnmarshalMetricAndTags(path, tagsPool[:0]) + if err != nil { + return fmt.Errorf("cannot parse path=%q: %w", path, err) + } + tfs := make([]storage.TagFilter, 0, 1+len(row.Tags)) + tfs = append(tfs, storage.TagFilter{ + Key: nil, + Value: []byte(row.Metric), + }) + for _, tag := range row.Tags { + tfs = append(tfs, storage.TagFilter{ + Key: []byte(tag.Key), + Value: []byte(tag.Value), + }) + } + sq := storage.NewSearchQuery(0, ct, [][]storage.TagFilter{tfs}) + n, err := netstorage.DeleteSeries(sq) + if err != nil { + return fmt.Errorf("cannot delete series for %q: %w", sq, err) + } + totalDeleted += n + } + + w.Header().Set("Content-Type", "application/json; charset=utf-8") + if totalDeleted > 0 { + fmt.Fprintf(w, "true") + } else { + fmt.Fprintf(w, "false") + } + return nil +} + // TagsTagSeriesHandler implements /tags/tagSeries handler. // // See https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb diff --git a/app/vmselect/main.go b/app/vmselect/main.go index ff7a9d0da..57dee7a9f 100644 --- a/app/vmselect/main.go +++ b/app/vmselect/main.go @@ -319,6 +319,14 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { return true } return true + case "/tags/delSeries": + graphiteTagsDelSeriesRequests.Inc() + if err := graphite.TagsDelSeriesHandler(startTime, w, r); err != nil { + graphiteTagsDelSeriesErrors.Inc() + httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) + return true + } + return true case "/api/v1/rules": // Return dumb placeholder rulesRequests.Inc() @@ -453,6 +461,9 @@ var ( graphiteTagsAutoCompleteValuesRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags/autoComplete/values"}`) graphiteTagsAutoCompleteValuesErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags/autoComplete/values"}`) + graphiteTagsDelSeriesRequests = metrics.NewCounter(`vm_http_requests_total{path="/tags/delSeries"}`) + graphiteTagsDelSeriesErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/tags/delSeries"}`) + rulesRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/rules"}`) alertsRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/alerts"}`) metadataRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/v1/metadata"}`) diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md index 85675eb54..ff7fb3fc2 100644 --- a/docs/Single-server-VictoriaMetrics.md +++ b/docs/Single-server-VictoriaMetrics.md @@ -566,6 +566,7 @@ VictoriaMetrics supports the following handlers from [Graphite Tags API](https:/ * [/tags/findSeries](https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags) * [/tags/autoComplete/tags](https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support) * [/tags/autoComplete/values](https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support) +* [/tags/delSeries](https://graphite.readthedocs.io/en/stable/tags.html#removing-series-from-the-tagdb) ## How to build from sources From 
fb8e56d8a21f7fd0d53db6843aa0ec7e542e4656 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 23 Nov 2020 15:32:49 +0200 Subject: [PATCH 04/21] docs/Cluster-VictoriaMetrics.md: sync with cluster branch --- docs/Cluster-VictoriaMetrics.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/Cluster-VictoriaMetrics.md b/docs/Cluster-VictoriaMetrics.md index fed3173d3..090601648 100644 --- a/docs/Cluster-VictoriaMetrics.md +++ b/docs/Cluster-VictoriaMetrics.md @@ -205,11 +205,14 @@ or [an alternative dashboard for VictoriaMetrics cluster](https://grafana.com/gr - `metrics/find` - searches Graphite metrics. See [these docs](https://graphite-api.readthedocs.io/en/latest/api.html#metrics-find). - `metrics/expand` - expands Graphite metrics. See [these docs](https://graphite-api.readthedocs.io/en/latest/api.html#metrics-expand). - `metrics/index.json` - returns all the metric names. See [these docs](https://graphite-api.readthedocs.io/en/latest/api.html#metrics-index-json). + - `tags/tagSeries` - registers time series. See [these docs](https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb). + - `tags/tagMultiSeries` - register multiple time series. See [these docs](https://graphite.readthedocs.io/en/stable/tags.html#adding-series-to-the-tagdb). - `tags` - returns tag names. See [these docs](https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags). - `tags/` - returns tag values for the given ``. See [these docs](https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags). - `tags/findSeries` - returns series matching the given `expr`. See [these docs](https://graphite.readthedocs.io/en/stable/tags.html#exploring-tags). - `tags/autoComplete/tags` - returns tags matching the given `tagPrefix` and/or `expr`. See [these docs](https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support). - `tags/autoComplete/values` - returns tag values matching the given `valuePrefix` and/or `expr`. See [these docs](https://graphite.readthedocs.io/en/stable/tags.html#auto-complete-support). + - `tags/delSeries` - deletes series matching the given `path`. See [these docs](https://graphite.readthedocs.io/en/stable/tags.html#removing-series-from-the-tagdb). * URL for time series deletion: `http://:8481/delete//prometheus/api/v1/admin/tsdb/delete_series?match[]=`. Note that the `delete_series` handler should be used only in exceptional cases such as deletion of accidentally ingested incorrect time series. It shouldn't From a724dde90a3da4ec84366ef622de0deeb9a51e2b Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 23 Nov 2020 15:35:59 +0200 Subject: [PATCH 05/21] app/vmselect: protect `/tags/delSeries` with `-deleteAuthKey` in the same way as `/api/v1/admin/tsdb/delete_series` --- app/vmselect/main.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/app/vmselect/main.go b/app/vmselect/main.go index 57dee7a9f..3ec78f12e 100644 --- a/app/vmselect/main.go +++ b/app/vmselect/main.go @@ -23,7 +23,7 @@ import ( ) var ( - deleteAuthKey = flag.String("deleteAuthKey", "", "authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series") + deleteAuthKey = flag.String("deleteAuthKey", "", "authKey for metrics' deletion via /api/v1/admin/tsdb/delete_series and /tags/delSeries") maxConcurrentRequests = flag.Int("search.maxConcurrentRequests", getDefaultMaxConcurrentRequests(), "The maximum number of concurrent search requests. "+ "It shouldn't be high, since a single request can saturate all the CPU cores. 
See also -search.maxQueueDuration") maxQueueDuration = flag.Duration("search.maxQueueDuration", 10*time.Second, "The maximum time the request waits for execution when -search.maxConcurrentRequests "+ @@ -321,6 +321,11 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { return true case "/tags/delSeries": graphiteTagsDelSeriesRequests.Inc() + authKey := r.FormValue("authKey") + if authKey != *deleteAuthKey { + httpserver.Errorf(w, r, "invalid authKey %q. It must match the value from -deleteAuthKey command line flag", authKey) + return true + } if err := graphite.TagsDelSeriesHandler(startTime, w, r); err != nil { graphiteTagsDelSeriesErrors.Inc() httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err) From de523c81b9789ba182d4287dea80661a46f1438d Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 23 Nov 2020 16:26:52 +0200 Subject: [PATCH 06/21] lib/promscrape: add `filters` option to `dockerswarm_sd_config` like Prometheus did in v2.23.0 --- docs/CHANGELOG.md | 1 + lib/promscrape/discovery/dockerswarm/api.go | 43 ++++++++++++++++++- .../discovery/dockerswarm/api_test.go | 26 +++++++++++ .../discovery/dockerswarm/dockerswarm.go | 14 ++++-- .../discovery/dockerswarm/network.go | 2 +- lib/promscrape/discovery/dockerswarm/nodes.go | 2 +- .../discovery/dockerswarm/services.go | 2 +- lib/promscrape/discovery/dockerswarm/tasks.go | 2 +- 8 files changed, 84 insertions(+), 8 deletions(-) create mode 100644 lib/promscrape/discovery/dockerswarm/api_test.go diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 44de82a62..4dccfff4d 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -11,6 +11,7 @@ * FEATURE: vminsert: export `vm_rpc_vmstorage_is_reachable` metric, which can be used for monitoring reachability of vmstorage nodes from vminsert nodes. * FEATURE: vmagent: add Netflix Eureka service discovery (aka [eureka_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#eureka_sd_config)). See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/851 +* FEATURE: add `filters` option to `dockerswarm_sd_config` like Prometheus did in v2.23.0 - see https://github.com/prometheus/prometheus/pull/8074 * FEATURE: add `-loggerWarnsPerSecondLimit` command-line flag for rate limiting of WARN messages in logs. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/905 * FEATURE: apply `loggerErrorsPerSecondLimit` and `-loggerWarnsPerSecondLimit` rate limit per caller. I.e. log messages are suppressed if the same caller logs the same message at the rate exceeding the given limit. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/905#issuecomment-729395855 diff --git a/lib/promscrape/discovery/dockerswarm/api.go b/lib/promscrape/discovery/dockerswarm/api.go index 25dffe207..3853600a4 100644 --- a/lib/promscrape/discovery/dockerswarm/api.go +++ b/lib/promscrape/discovery/dockerswarm/api.go @@ -1,8 +1,12 @@ package dockerswarm import ( + "encoding/json" "fmt" + "net/url" + "strings" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth" "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils" ) @@ -12,6 +16,9 @@ var configMap = discoveryutils.NewConfigMap() type apiConfig struct { client *discoveryutils.Client port int + + // filtersQueryArg contains escaped `filters` query arg to add to each request to Docker Swarm API. 
+ filtersQueryArg string } func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { @@ -24,7 +31,8 @@ func getAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { cfg := &apiConfig{ - port: sdc.Port, + port: sdc.Port, + filtersQueryArg: getFiltersQueryArg(sdc.Filters), } ac, err := promauth.NewConfig(baseDir, sdc.BasicAuth, sdc.BearerToken, sdc.BearerTokenFile, sdc.TLSConfig) if err != nil { @@ -37,3 +45,36 @@ func newAPIConfig(sdc *SDConfig, baseDir string) (*apiConfig, error) { cfg.client = client return cfg, nil } + +func (cfg *apiConfig) getAPIResponse(path string) ([]byte, error) { + if len(cfg.filtersQueryArg) > 0 { + separator := "?" + if strings.Contains(path, "?") { + separator = "&" + } + path += separator + "filters=" + cfg.filtersQueryArg + } + return cfg.client.GetAPIResponse(path) +} + +func getFiltersQueryArg(filters []Filter) string { + if len(filters) == 0 { + return "" + } + m := make(map[string]map[string]bool) + for _, f := range filters { + x := m[f.Name] + if x == nil { + x = make(map[string]bool) + m[f.Name] = x + } + for _, value := range f.Values { + x[value] = true + } + } + buf, err := json.Marshal(m) + if err != nil { + logger.Panicf("BUG: unexpected error in json.Marshal: %s", err) + } + return url.QueryEscape(string(buf)) +} diff --git a/lib/promscrape/discovery/dockerswarm/api_test.go b/lib/promscrape/discovery/dockerswarm/api_test.go new file mode 100644 index 000000000..fe36a75a2 --- /dev/null +++ b/lib/promscrape/discovery/dockerswarm/api_test.go @@ -0,0 +1,26 @@ +package dockerswarm + +import ( + "testing" +) + +func TestGetFiltersQueryArg(t *testing.T) { + f := func(filters []Filter, queryArgExpected string) { + t.Helper() + queryArg := getFiltersQueryArg(filters) + if queryArg != queryArgExpected { + t.Fatalf("unexpected query arg; got %s; want %s", queryArg, queryArgExpected) + } + } + f(nil, "") + f([]Filter{ + { + Name: "name", + Values: []string{"foo", "bar"}, + }, + { + Name: "xxx", + Values: []string{"aa"}, + }, + }, "%7B%22name%22%3A%7B%22bar%22%3Atrue%2C%22foo%22%3Atrue%7D%2C%22xxx%22%3A%7B%22aa%22%3Atrue%7D%7D") +} diff --git a/lib/promscrape/discovery/dockerswarm/dockerswarm.go b/lib/promscrape/discovery/dockerswarm/dockerswarm.go index 3d49365a3..61b4b02f6 100644 --- a/lib/promscrape/discovery/dockerswarm/dockerswarm.go +++ b/lib/promscrape/discovery/dockerswarm/dockerswarm.go @@ -10,17 +10,25 @@ import ( // // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#dockerswarm_sd_config type SDConfig struct { - Host string `yaml:"host"` + Host string `yaml:"host"` + Role string `yaml:"role"` + Port int `yaml:"port,omitempty"` + Filters []Filter `yaml:"filters,omitempty"` + // TODO: add support for proxy_url TLSConfig *promauth.TLSConfig `yaml:"tls_config,omitempty"` - Role string `yaml:"role"` - Port int `yaml:"port,omitempty"` // refresh_interval is obtained from `-promscrape.dockerswarmSDCheckInterval` command-line option BasicAuth *promauth.BasicAuthConfig `yaml:"basic_auth,omitempty"` BearerToken string `yaml:"bearer_token,omitempty"` BearerTokenFile string `yaml:"bearer_token_file,omitempty"` } +// Filter is a filter, which can be passed to SDConfig. +type Filter struct { + Name string `yaml:"name"` + Values []string `yaml:"values"` +} + // GetLabels returns dockerswarm labels according to sdc. 
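// A hypothetical scrape-config fragment using the new `filters` option could
// look as follows (field names follow the yaml tags on SDConfig and Filter
// above; the label value is purely illustrative):
//
//	dockerswarm_sd_configs:
//	  - host: unix:///var/run/docker.sock
//	    role: tasks
//	    filters:
//	      - name: label
//	        values: ["com.example.scrape=yes"]
//
// getFiltersQueryArg marshals such filters into Docker's
// map[string]map[string]bool form and URL-escapes the resulting JSON, so the
// discovery request becomes
// /tasks?filters=%7B%22label%22%3A%7B%22com.example.scrape%3Dyes%22%3Atrue%7D%7D,
// matching the expectations in TestGetFiltersQueryArg above.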
func GetLabels(sdc *SDConfig, baseDir string) ([]map[string]string, error) { cfg, err := getAPIConfig(sdc, baseDir) diff --git a/lib/promscrape/discovery/dockerswarm/network.go b/lib/promscrape/discovery/dockerswarm/network.go index bcf21dd0a..27bb748cb 100644 --- a/lib/promscrape/discovery/dockerswarm/network.go +++ b/lib/promscrape/discovery/dockerswarm/network.go @@ -27,7 +27,7 @@ func getNetworksLabelsByNetworkID(cfg *apiConfig) (map[string]map[string]string, } func getNetworks(cfg *apiConfig) ([]network, error) { - resp, err := cfg.client.GetAPIResponse("/networks") + resp, err := cfg.getAPIResponse("/networks") if err != nil { return nil, fmt.Errorf("cannot query dockerswarm api for networks: %w", err) } diff --git a/lib/promscrape/discovery/dockerswarm/nodes.go b/lib/promscrape/discovery/dockerswarm/nodes.go index d5eec44cc..c6db715f2 100644 --- a/lib/promscrape/discovery/dockerswarm/nodes.go +++ b/lib/promscrape/discovery/dockerswarm/nodes.go @@ -46,7 +46,7 @@ func getNodesLabels(cfg *apiConfig) ([]map[string]string, error) { } func getNodes(cfg *apiConfig) ([]node, error) { - resp, err := cfg.client.GetAPIResponse("/nodes") + resp, err := cfg.getAPIResponse("/nodes") if err != nil { return nil, fmt.Errorf("cannot query dockerswarm api for nodes: %w", err) } diff --git a/lib/promscrape/discovery/dockerswarm/services.go b/lib/promscrape/discovery/dockerswarm/services.go index 147c610cc..29e4f0dcb 100644 --- a/lib/promscrape/discovery/dockerswarm/services.go +++ b/lib/promscrape/discovery/dockerswarm/services.go @@ -59,7 +59,7 @@ func getServicesLabels(cfg *apiConfig) ([]map[string]string, error) { } func getServices(cfg *apiConfig) ([]service, error) { - data, err := cfg.client.GetAPIResponse("/services") + data, err := cfg.getAPIResponse("/services") if err != nil { return nil, fmt.Errorf("cannot query dockerswarm api for services: %w", err) } diff --git a/lib/promscrape/discovery/dockerswarm/tasks.go b/lib/promscrape/discovery/dockerswarm/tasks.go index e8724c2cb..ecf69e4a5 100644 --- a/lib/promscrape/discovery/dockerswarm/tasks.go +++ b/lib/promscrape/discovery/dockerswarm/tasks.go @@ -58,7 +58,7 @@ func getTasksLabels(cfg *apiConfig) ([]map[string]string, error) { } func getTasks(cfg *apiConfig) ([]task, error) { - resp, err := cfg.client.GetAPIResponse("/tasks") + resp, err := cfg.getAPIResponse("/tasks") if err != nil { return nil, fmt.Errorf("cannot query dockerswarm api for tasks: %w", err) } From aa90b93778e96079e84460aac2f829ac0263a597 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 23 Nov 2020 16:52:53 +0200 Subject: [PATCH 07/21] lib/promscrape: expose `__meta_ec2_ipv6_addresses` label for `ec2_sd_config` like Prometheus will do in the next release --- docs/CHANGELOG.md | 1 + lib/promscrape/discovery/dockerswarm/api_test.go | 4 ++-- lib/promscrape/discovery/ec2/instance.go | 16 ++++++++++++++-- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 4dccfff4d..66d91bd55 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -12,6 +12,7 @@ * FEATURE: vmagent: add Netflix Eureka service discovery (aka [eureka_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#eureka_sd_config)). 
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/851 * FEATURE: add `filters` option to `dockerswarm_sd_config` like Prometheus did in v2.23.0 - see https://github.com/prometheus/prometheus/pull/8074 +* FEATURE: expose `__meta_ec2_ipv6_addresses` label for `ec2_sd_config` like Prometheus will do in the next release. * FEATURE: add `-loggerWarnsPerSecondLimit` command-line flag for rate limiting of WARN messages in logs. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/905 * FEATURE: apply `loggerErrorsPerSecondLimit` and `-loggerWarnsPerSecondLimit` rate limit per caller. I.e. log messages are suppressed if the same caller logs the same message at the rate exceeding the given limit. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/905#issuecomment-729395855 diff --git a/lib/promscrape/discovery/dockerswarm/api_test.go b/lib/promscrape/discovery/dockerswarm/api_test.go index fe36a75a2..fc8888ee5 100644 --- a/lib/promscrape/discovery/dockerswarm/api_test.go +++ b/lib/promscrape/discovery/dockerswarm/api_test.go @@ -15,11 +15,11 @@ func TestGetFiltersQueryArg(t *testing.T) { f(nil, "") f([]Filter{ { - Name: "name", + Name: "name", Values: []string{"foo", "bar"}, }, { - Name: "xxx", + Name: "xxx", Values: []string{"aa"}, }, }, "%7B%22name%22%3A%7B%22bar%22%3Atrue%2C%22foo%22%3Atrue%7D%2C%22xxx%22%3A%7B%22aa%22%3Atrue%7D%7D") diff --git a/lib/promscrape/discovery/ec2/instance.go b/lib/promscrape/discovery/ec2/instance.go index 7dbf6203a..45ccfb0cf 100644 --- a/lib/promscrape/discovery/ec2/instance.go +++ b/lib/promscrape/discovery/ec2/instance.go @@ -104,7 +104,13 @@ type NetworkInterfaceSet struct { // NetworkInterface represents NetworkInterface from https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceNetworkInterface.html type NetworkInterface struct { - SubnetID string `xml:"subnetId"` + SubnetID string `xml:"subnetId"` + IPv6AddressesSet Ipv6AddressesSet `xml:"ipv6AddressesSet"` +} + +// Ipv6AddressesSet represents ipv6AddressesSet from https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceNetworkInterface.html +type Ipv6AddressesSet struct { + Items []string `xml:"item"` } // TagSet represents TagSet from https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_Instance.html @@ -151,21 +157,27 @@ func (inst *Instance) appendTargetLabels(ms []map[string]string, ownerID string, "__meta_ec2_vpc_id": inst.VPCID, } if len(inst.VPCID) > 0 { - // Deduplicate VPC Subnet IDs maintaining the order of the network interfaces returned by EC2. subnets := make([]string, 0, len(inst.NetworkInterfaceSet.Items)) seenSubnets := make(map[string]bool, len(inst.NetworkInterfaceSet.Items)) + var ipv6Addrs []string for _, ni := range inst.NetworkInterfaceSet.Items { if len(ni.SubnetID) == 0 { continue } + // Deduplicate VPC Subnet IDs maintaining the order of the network interfaces returned by EC2. if !seenSubnets[ni.SubnetID] { seenSubnets[ni.SubnetID] = true subnets = append(subnets, ni.SubnetID) } + // Collect ipv6 addresses + ipv6Addrs = append(ipv6Addrs, ni.IPv6AddressesSet.Items...) } // We surround the separated list with the separator as well. This way regular expressions // in relabeling rules don't have to consider tag positions. 
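// For instance, with two addresses the resulting __meta_ec2_ipv6_addresses
// label value looks like ",2001:db8::1,2001:db8::2," (the addresses are
// illustrative), so a relabeling regex such as ".*,2001:db8::1,.*" matches a
// specific address regardless of its position in the list.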
m["__meta_ec2_subnet_id"] = "," + strings.Join(subnets, ",") + "," + if len(ipv6Addrs) > 0 { + m["__meta_ec2_ipv6_addresses"] = "," + strings.Join(ipv6Addrs, ",") + "," + } } for _, t := range inst.TagSet.Items { if len(t.Key) == 0 || len(t.Value) == 0 { From 7e8dcf9ddc87700df94a2d7c77a7ce1ca7d9e35e Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Mon, 23 Nov 2020 17:09:59 +0200 Subject: [PATCH 08/21] app/vmbackup: cosmetic fixes --- app/vmbackup/main.go | 8 ++++---- app/vmbackup/snapshot/snapshot.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/app/vmbackup/main.go b/app/vmbackup/main.go index bbaa2a77a..550e3fde8 100644 --- a/app/vmbackup/main.go +++ b/app/vmbackup/main.go @@ -43,7 +43,7 @@ func main() { cgroup.UpdateGOMAXPROCSToCPUQuota() if len(*snapshotCreateURL) > 0 { - logger.Infof("%s", "Snapshots enabled") + logger.Infof("Snapshots enabled") logger.Infof("Snapshot create url %s", *snapshotCreateURL) if len(*snapshotDeleteURL) <= 0 { err := flag.Set("snapshot.deleteURL", strings.Replace(*snapshotCreateURL, "/create", "/delete", 1)) @@ -55,17 +55,17 @@ func main() { name, err := snapshot.Create(*snapshotCreateURL) if err != nil { - logger.Fatalf("%s", err) + logger.Fatalf("cannot create snapshot: %s", err) } err = flag.Set("snapshotName", name) if err != nil { - logger.Fatalf("Failed to set snapshotName flag: %v", err) + logger.Fatalf("cannot set snapshotName flag: %v", err) } defer func() { err := snapshot.Delete(*snapshotDeleteURL, name) if err != nil { - logger.Fatalf("%s", err) + logger.Fatalf("cannot delete snapshot: %s", err) } }() } diff --git a/app/vmbackup/snapshot/snapshot.go b/app/vmbackup/snapshot/snapshot.go index 852c0b3b4..774db2c6f 100644 --- a/app/vmbackup/snapshot/snapshot.go +++ b/app/vmbackup/snapshot/snapshot.go @@ -20,7 +20,7 @@ type snapshot struct { // Create creates a snapshot and the provided api endpoint and returns // the snapshot name func Create(createSnapshotURL string) (string, error) { - logger.Infof("%s", "Creating snapshot") + logger.Infof("Creating snapshot") u, err := url.Parse(createSnapshotURL) if err != nil { return "", err From 0acdab3ab9f475537d4b940d28e9a76baacd032f Mon Sep 17 00:00:00 2001 From: BigFish <736759290@qq.com> Date: Mon, 23 Nov 2020 23:33:17 +0800 Subject: [PATCH 09/21] Update main.go (#922) fix spelling mistake --- app/vminsert/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/vminsert/main.go b/app/vminsert/main.go index e47132551..3bfd7f2c3 100644 --- a/app/vminsert/main.go +++ b/app/vminsert/main.go @@ -40,7 +40,7 @@ var ( "Telnet put messages and HTTP /api/put messages are simultaneously served on TCP port. "+ "Usually :4242 must be set. Doesn't work if empty") opentsdbHTTPListenAddr = flag.String("opentsdbHTTPListenAddr", "", "TCP address to listen for OpentTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty") - maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 30, "The maximum number of labels accepted per time series. Superflouos labels are dropped") + maxLabelsPerTimeseries = flag.Int("maxLabelsPerTimeseries", 30, "The maximum number of labels accepted per time series. 
Superfluous labels are dropped") ) var ( From d48363534afcda3e2583210cf86f334a6ec16712 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Tue, 24 Nov 2020 12:03:23 +0200 Subject: [PATCH 10/21] docs/Articles.md: add recent articles about VictoriaMetrics --- docs/Articles.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/Articles.md b/docs/Articles.md index 68530972b..39c48b2d9 100644 --- a/docs/Articles.md +++ b/docs/Articles.md @@ -19,6 +19,7 @@ * [Calculating the Error of Quantile Estimation with Histograms](https://linuxczar.net/blog/2020/08/13/histogram-error/) * [Monitoring private clouds with VictoriaMetrics at LeroyMerlin](https://www.youtube.com/watch?v=74swsWqf0Uc) * [Monitoring Kubernetes with VictoriaMetrics+Prometheus](https://speakerdeck.com/bo0km4n/victoriametrics-plus-prometheusdegou-zhu-surufu-shu-kubernetesfalsejian-shi-ji-pan) +* [High-performance Graphite storage solution on top of VictoriaMetrics](https://golangexample.com/a-high-performance-graphite-storage-solution/) ## Our articles @@ -48,3 +49,4 @@ * [Filtering and modifying time series during import to VictoriaMetrics](https://medium.com/@romanhavronenko/victoriametrics-how-to-migrate-data-from-prometheus-filtering-and-modifying-time-series-6d40cea4bf21) * [Anomaly Detection in VictoriaMetrics](https://medium.com/@VictoriaMetrics/anomaly-detection-in-victoriametrics-9528538786a7) * [How to use relabeling in Prometheus and VictoriaMetrics](https://valyala.medium.com/how-to-use-relabeling-in-prometheus-and-victoriametrics-8b90fc22c4b2) +* [First look at performance comparison between InfluxDB IOx and VictoriaMetrics](https://medium.com/@VictoriaMetrics/first-look-at-perfomance-comparassion-between-influxdb-iox-and-victoriametrics-e590f847935b) From b7f4fc6e0da03661baf79a401f1a5d98bda7f572 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Tue, 24 Nov 2020 12:26:17 +0200 Subject: [PATCH 11/21] lib/protoparser/prometheus: properly parse metrics with exemplars Examplars have been introduced in OpenMetrics - see https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md#exemplars-1 Previously VictoriaMetrics couldn't parse the following metric foo{bar="baz"} 123 # exemplar here This commit fixes this. Note that VictoriaMetrics ignores the exemplar as for now. --- docs/CHANGELOG.md | 2 ++ lib/protoparser/prometheus/parser.go | 15 ++++++++++- lib/protoparser/prometheus/parser_test.go | 31 +++++++++++++++++++++++ 3 files changed, 47 insertions(+), 1 deletion(-) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 66d91bd55..fd8ea4f26 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -19,6 +19,8 @@ See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/851 * FEATURE: add remoteAddr to slow query log in order to simplify identifying the client that sends slow queries to VictoriaMetrics. Slow query logging is controlled with `-search.logSlowQueryDuration` command-line flag. +* BUGFIX: properly parse Prometheus metrics with [exemplars](https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md#exemplars-1) such as `foo 123 # {bar="baz"} 1`. 
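A quick sketch of the behavior enabled by the parser change in this patch, using the Rows API from lib/protoparser/prometheus (the input line is illustrative):

    var rows Rows
    rows.Unmarshal(`foo_bucket{le="10"} 17 # {trace_id="abc"} 0.67`)
    // rows.Rows now holds a single Row with Metric "foo_bucket", the le="10" tag
    // and Value 17; the exemplar after '#' is ignored for now, and no timestamp
    // is set because the trailing comment is stripped before timestamp parsing.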
+ # [v1.47.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.47.0) diff --git a/lib/protoparser/prometheus/parser.go b/lib/protoparser/prometheus/parser.go index 4972c7bd6..c7381f2bc 100644 --- a/lib/protoparser/prometheus/parser.go +++ b/lib/protoparser/prometheus/parser.go @@ -70,6 +70,14 @@ func (r *Row) reset() { r.Timestamp = 0 } +func skipTrailingComment(s string) string { + n := strings.IndexByte(s, '#') + if n < 0 { + return s + } + return s[:n] +} + func skipLeadingWhitespace(s string) string { // Prometheus treats ' ' and '\t' as whitespace // according to https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-format-details @@ -133,6 +141,7 @@ func (r *Row) unmarshal(s string, tagsPool []Tag, noEscapes bool) ([]Tag, error) return tagsPool, fmt.Errorf("metric cannot be empty") } s = skipLeadingWhitespace(s) + s = skipTrailingComment(s) if len(s) == 0 { return tagsPool, fmt.Errorf("value cannot be empty") } @@ -151,12 +160,16 @@ func (r *Row) unmarshal(s string, tagsPool []Tag, noEscapes bool) ([]Tag, error) if err != nil { return tagsPool, fmt.Errorf("cannot parse value %q: %w", s[:n], err) } + r.Value = v s = skipLeadingWhitespace(s[n+1:]) + if len(s) == 0 { + // There is no timestamp - just a whitespace after the value. + return tagsPool, nil + } ts, err := fastfloat.ParseInt64(s) if err != nil { return tagsPool, fmt.Errorf("cannot parse timestamp %q: %w", s, err) } - r.Value = v r.Timestamp = ts return tagsPool, nil } diff --git a/lib/protoparser/prometheus/parser_test.go b/lib/protoparser/prometheus/parser_test.go index dafa3f447..f864dd9a1 100644 --- a/lib/protoparser/prometheus/parser_test.go +++ b/lib/protoparser/prometheus/parser_test.go @@ -202,6 +202,37 @@ cassandra_token_ownership_ratio 78.9`, &Rows{ }}, }) + // Exemplars - see https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md#exemplars-1 + f(`foo_bucket{le="10",a="#b"} 17 # {trace_id="oHg5SJ#YRHA0"} 9.8 1520879607.789 + abc 123 456#foobar + foo 344#bar`, &Rows{ + Rows: []Row{ + { + Metric: "foo_bucket", + Tags: []Tag{ + { + Key: "le", + Value: "10", + }, + { + Key: "a", + Value: "#b", + }, + }, + Value: 17, + }, + { + Metric: "abc", + Value: 123, + Timestamp: 456, + }, + { + Metric: "foo", + Value: 344, + }, + }, + }) + // Timestamp bigger than 1<<31 f("aaa 1123 429496729600", &Rows{ Rows: []Row{{ From d0ffb49ee2c6a65046a4f8c3543da098c6c9295c Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Tue, 24 Nov 2020 12:29:24 +0200 Subject: [PATCH 12/21] docs/CHANGELOG.md: mention that `/tags/delSeries` handler is supported after f0c207fae2a91b522ad4202c2cb4900c62c19dd8 --- docs/CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index fd8ea4f26..a9a318bbb 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -18,6 +18,7 @@ See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/851 at the rate exceeding the given limit. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/905#issuecomment-729395855 * FEATURE: add remoteAddr to slow query log in order to simplify identifying the client that sends slow queries to VictoriaMetrics. Slow query logging is controlled with `-search.logSlowQueryDuration` command-line flag. +* FEATURE: add `/tags/delSeries` handler from Graphite Tags API. 
See https://victoriametrics.github.io/#graphite-tags-api-usage * BUGFIX: properly parse Prometheus metrics with [exemplars](https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md#exemplars-1) such as `foo 123 # {bar="baz"} 1`. From 78d2715d04bc2224546ee3047b4fe4ae82613289 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Tue, 24 Nov 2020 12:41:34 +0200 Subject: [PATCH 13/21] all: spelling fix: superflouos->superfluous. This is a follow-up for 0acdab3ab9f475537d4b940d28e9a76baacd032f --- lib/mergeset/part.go | 4 ++-- lib/mergeset/table_search_test.go | 2 +- lib/protoparser/csvimport/parser.go | 2 +- lib/protoparser/csvimport/parser_test.go | 2 +- lib/storage/dedup_test.go | 4 ++-- lib/storage/metric_name.go | 2 +- lib/storage/part.go | 2 +- 7 files changed, 9 insertions(+), 9 deletions(-) diff --git a/lib/mergeset/part.go b/lib/mergeset/part.go index 0ead8d97c..ed4d41c9c 100644 --- a/lib/mergeset/part.go +++ b/lib/mergeset/part.go @@ -245,7 +245,7 @@ func (idxbc *indexBlockCache) Get(k uint64) *indexBlock { func (idxbc *indexBlockCache) Put(k uint64, idxb *indexBlock) { idxbc.mu.Lock() - // Remove superflouos entries. + // Remove superfluous entries. if overflow := len(idxbc.m) - getMaxCachedIndexBlocksPerPart(); overflow > 0 { // Remove 10% of items from the cache. overflow = int(float64(len(idxbc.m)) * 0.1) @@ -393,7 +393,7 @@ func (ibc *inmemoryBlockCache) Get(k inmemoryBlockCacheKey) *inmemoryBlock { func (ibc *inmemoryBlockCache) Put(k inmemoryBlockCacheKey, ib *inmemoryBlock) { ibc.mu.Lock() - // Clean superflouos entries in cache. + // Clean superfluous entries in cache. if overflow := len(ibc.m) - getMaxCachedInmemoryBlocksPerPart(); overflow > 0 { // Remove 10% of items from the cache. overflow = int(float64(len(ibc.m)) * 0.1) diff --git a/lib/mergeset/table_search_test.go b/lib/mergeset/table_search_test.go index 56de4794e..18d769e21 100644 --- a/lib/mergeset/table_search_test.go +++ b/lib/mergeset/table_search_test.go @@ -136,7 +136,7 @@ func testTableSearchSerial(tb *Table, items []string) error { n++ } if ts.NextItem() { - return fmt.Errorf("superflouos item found at position %d when searching for %q: %q", n, key, ts.Item) + return fmt.Errorf("superfluous item found at position %d when searching for %q: %q", n, key, ts.Item) } if err := ts.Error(); err != nil { return fmt.Errorf("unexpected error when searching for %q: %w", key, err) diff --git a/lib/protoparser/csvimport/parser.go b/lib/protoparser/csvimport/parser.go index 3e5126c1e..004d581b6 100644 --- a/lib/protoparser/csvimport/parser.go +++ b/lib/protoparser/csvimport/parser.go @@ -83,7 +83,7 @@ func parseRows(sc *scanner, dst []Row, tags []Tag, metrics []metric, cds []Colum tagsLen := len(tags) for sc.NextColumn() { if col >= uint(len(cds)) { - // Skip superflouous column. + // Skip superfluous column. 
continue } cd := &cds[col] diff --git a/lib/protoparser/csvimport/parser_test.go b/lib/protoparser/csvimport/parser_test.go index 086588bbd..ea1fbcf2b 100644 --- a/lib/protoparser/csvimport/parser_test.go +++ b/lib/protoparser/csvimport/parser_test.go @@ -165,7 +165,7 @@ func TestRowsUnmarshalSuccess(t *testing.T) { }, }) - // Superflouos columns + // Superfluous columns f("1:metric:foo", `123,456,foo,bar`, []Row{ { Metric: "foo", diff --git a/lib/storage/dedup_test.go b/lib/storage/dedup_test.go index 9ac7ba0d5..8c7fd2348 100644 --- a/lib/storage/dedup_test.go +++ b/lib/storage/dedup_test.go @@ -44,7 +44,7 @@ func TestDeduplicateSamples(t *testing.T) { } } if j != len(timestampsCopy) { - t.Fatalf("superflouos timestamps found starting from index %d: %v", j, timestampsCopy[j:]) + t.Fatalf("superfluous timestamps found starting from index %d: %v", j, timestampsCopy[j:]) } } f(time.Millisecond, nil, []int64{}) @@ -94,7 +94,7 @@ func TestDeduplicateSamplesDuringMerge(t *testing.T) { } } if j != len(timestampsCopy) { - t.Fatalf("superflouos timestamps found starting from index %d: %v", j, timestampsCopy[j:]) + t.Fatalf("superfluous timestamps found starting from index %d: %v", j, timestampsCopy[j:]) } } f(time.Millisecond, nil, []int64{}) diff --git a/lib/storage/metric_name.go b/lib/storage/metric_name.go index f70b83c2a..cb2c41399 100644 --- a/lib/storage/metric_name.go +++ b/lib/storage/metric_name.go @@ -419,7 +419,7 @@ var maxLabelsPerTimeseries = 30 // SetMaxLabelsPerTimeseries sets the limit on the number of labels // per each time series. // -// Superfouos labels are dropped. +// Superfluous labels are dropped. func SetMaxLabelsPerTimeseries(maxLabels int) { if maxLabels <= 0 { logger.Panicf("BUG: maxLabels must be positive; got %d", maxLabels) diff --git a/lib/storage/part.go b/lib/storage/part.go index 1fcd969d5..653a19311 100644 --- a/lib/storage/part.go +++ b/lib/storage/part.go @@ -249,7 +249,7 @@ func (ibc *indexBlockCache) Get(k uint64) *indexBlock { func (ibc *indexBlockCache) Put(k uint64, ib *indexBlock) { ibc.mu.Lock() - // Clean superflouos cache entries. + // Clean superfluous cache entries. if overflow := len(ibc.m) - getMaxCachedIndexBlocksPerPart(); overflow > 0 { // Remove 10% of items from the cache. overflow = int(float64(len(ibc.m)) * 0.1) From bf95fbfc1db288ec3808951b47069a2caf229a9d Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Tue, 24 Nov 2020 12:37:52 +0200 Subject: [PATCH 14/21] lib/logger: disable rate limiting for error and warn logs by default --- lib/logger/logger.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/logger/logger.go b/lib/logger/logger.go index d747c2027..0e64ef332 100644 --- a/lib/logger/logger.go +++ b/lib/logger/logger.go @@ -22,9 +22,9 @@ var ( loggerOutput = flag.String("loggerOutput", "stderr", "Output for the logs. Supported values: stderr, stdout") disableTimestamps = flag.Bool("loggerDisableTimestamps", false, "Whether to disable writing timestamps in logs") - errorsPerSecondLimit = flag.Int("loggerErrorsPerSecondLimit", 10, "Per-second limit on the number of ERROR messages. If more than the given number of errors "+ + errorsPerSecondLimit = flag.Int("loggerErrorsPerSecondLimit", 0, "Per-second limit on the number of ERROR messages. If more than the given number of errors "+ "are emitted per second, then the remaining errors are suppressed. Zero value disables the rate limit") - warnsPerSecondLimit = flag.Int("loggerWarnsPerSecondLimit", 10, "Per-second limit on the number of WARN messages. 
If more than the given number of warns "+ + warnsPerSecondLimit = flag.Int("loggerWarnsPerSecondLimit", 0, "Per-second limit on the number of WARN messages. If more than the given number of warns "+ "are emitted per second, then the remaining warns are suppressed. Zero value disables the rate limit") ) From ae04378424c9171af06ecd3e7af91bebebe771cf Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Tue, 24 Nov 2020 19:00:55 +0200 Subject: [PATCH 15/21] lib/protoparser/prometheus: properly parse "infinity" values in OpenMetrics format Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/924 --- docs/CHANGELOG.md | 2 + go.mod | 2 +- go.sum | 4 +- lib/protoparser/prometheus/parser.go | 2 +- lib/protoparser/prometheus/parser_test.go | 41 +++++++++++++++++++ .../valyala/fastjson/fastfloat/parse.go | 8 +++- vendor/modules.txt | 2 +- 7 files changed, 54 insertions(+), 7 deletions(-) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index a9a318bbb..550672a27 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -21,6 +21,8 @@ See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/851 * FEATURE: add `/tags/delSeries` handler from Graphite Tags API. See https://victoriametrics.github.io/#graphite-tags-api-usage * BUGFIX: properly parse Prometheus metrics with [exemplars](https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md#exemplars-1) such as `foo 123 # {bar="baz"} 1`. +* BUGFIX: properly parse "infinity" values in [OpenMetrics format](https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md#abnf). + See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/924 # [v1.47.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.47.0) diff --git a/go.mod b/go.mod index 8b886db80..c6fe4117e 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/golang/snappy v0.0.2 github.com/klauspost/compress v1.11.3 github.com/stretchr/testify v1.5.1 // indirect - github.com/valyala/fastjson v1.6.1 + github.com/valyala/fastjson v1.6.3 github.com/valyala/fastrand v1.0.0 github.com/valyala/fasttemplate v1.2.1 github.com/valyala/gozstd v1.8.3 diff --git a/go.sum b/go.sum index b14e0bfb2..68e885846 100644 --- a/go.sum +++ b/go.sum @@ -164,8 +164,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= -github.com/valyala/fastjson v1.6.1 h1:qJs/Kz/HebWzk8LmhOrSm7kdOyJBr1XB+zSkYtEEfQE= -github.com/valyala/fastjson v1.6.1/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= +github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4xc= +github.com/valyala/fastjson v1.6.3/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/valyala/fastrand v1.0.0 h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI= github.com/valyala/fastrand v1.0.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ= github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4= diff --git a/lib/protoparser/prometheus/parser.go b/lib/protoparser/prometheus/parser.go index c7381f2bc..9d2753826 100644 --- a/lib/protoparser/prometheus/parser.go +++ b/lib/protoparser/prometheus/parser.go @@ -155,7 +155,7 @@ func (r *Row) unmarshal(s string, tagsPool []Tag, noEscapes bool) ([]Tag, error) r.Value = v 
return tagsPool, nil } - // There is timestamp. + // There is a timestamp. v, err := fastfloat.Parse(s[:n]) if err != nil { return tagsPool, fmt.Errorf("cannot parse value %q: %w", s[:n], err) diff --git a/lib/protoparser/prometheus/parser_test.go b/lib/protoparser/prometheus/parser_test.go index f864dd9a1..745139733 100644 --- a/lib/protoparser/prometheus/parser_test.go +++ b/lib/protoparser/prometheus/parser_test.go @@ -1,6 +1,7 @@ package prometheus import ( + "math" "reflect" "testing" ) @@ -233,6 +234,46 @@ cassandra_token_ownership_ratio 78.9`, &Rows{ }, }) + // "Infinity" word - this has been added in OpenMetrics. + // See https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md + // Checks for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/924 + inf := math.Inf(1) + f(` + foo Infinity + bar +Infinity + baz -infinity + aaa +inf + bbb -INF + ccc INF + `, &Rows{ + Rows: []Row{ + { + Metric: "foo", + Value: inf, + }, + { + Metric: "bar", + Value: inf, + }, + { + Metric: "baz", + Value: -inf, + }, + { + Metric: "aaa", + Value: inf, + }, + { + Metric: "bbb", + Value: -inf, + }, + { + Metric: "ccc", + Value: inf, + }, + }, + }) + // Timestamp bigger than 1<<31 f("aaa 1123 429496729600", &Rows{ Rows: []Row{{ diff --git a/vendor/github.com/valyala/fastjson/fastfloat/parse.go b/vendor/github.com/valyala/fastjson/fastfloat/parse.go index d17d7f2e6..5d4a7c7a7 100644 --- a/vendor/github.com/valyala/fastjson/fastfloat/parse.go +++ b/vendor/github.com/valyala/fastjson/fastfloat/parse.go @@ -237,7 +237,9 @@ func ParseBestEffort(s string) float64 { if strings.HasPrefix(s, "+") { s = s[1:] } - if strings.EqualFold(s, "inf") { + // "infinity" is needed for OpenMetrics support. + // See https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md + if strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") { if minus { return -inf } @@ -385,7 +387,9 @@ func Parse(s string) (float64, error) { if strings.HasPrefix(ss, "+") { ss = ss[1:] } - if strings.EqualFold(ss, "inf") { + // "infinity" is needed for OpenMetrics support. + // See https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md + if strings.EqualFold(ss, "inf") || strings.EqualFold(ss, "infinity") { if minus { return -inf, nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 2affcefa9..84457e3cd 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -99,7 +99,7 @@ github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash # github.com/valyala/bytebufferpool v1.0.0 github.com/valyala/bytebufferpool -# github.com/valyala/fastjson v1.6.1 +# github.com/valyala/fastjson v1.6.3 github.com/valyala/fastjson github.com/valyala/fastjson/fastfloat # github.com/valyala/fastrand v1.0.0 From b65236530c906ab75f2780f597741349d607533b Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Wed, 25 Nov 2020 14:15:42 +0200 Subject: [PATCH 16/21] lib/storage: typo fix in error message: allowd->allowed --- lib/storage/storage.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/storage/storage.go b/lib/storage/storage.go index e474a0d20..b61c5f27b 100644 --- a/lib/storage/storage.go +++ b/lib/storage/storage.go @@ -1248,7 +1248,7 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra if mr.Timestamp > maxTimestamp { // Skip rows with too big timestamps significantly exceeding the current time. 
if firstWarn == nil { - firstWarn = fmt.Errorf("cannot insert row with too big timestamp %d exceeding the current time; maximum allowd timestamp is %d; "+ + firstWarn = fmt.Errorf("cannot insert row with too big timestamp %d exceeding the current time; maximum allowed timestamp is %d; "+ "propbably you need updating -retentionPeriod command-line flag", mr.Timestamp, maxTimestamp) } From 8a057e705a73b9b2e9afa7dd24dece9247042939 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Wed, 25 Nov 2020 14:41:02 +0200 Subject: [PATCH 17/21] lib/storage: log metric name plus all its labels when the metric timestamp is outside the configured retention This should simplify debugging when the source of the metric with unexpected timestamp must be found. --- docs/CHANGELOG.md | 1 + lib/storage/metric_name.go | 16 ++++++++-------- lib/storage/metric_name_test.go | 26 ++++++++++++++++++++++++++ lib/storage/storage.go | 19 ++++++++++++++----- 4 files changed, 49 insertions(+), 13 deletions(-) diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 550672a27..abbb0c8b5 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -19,6 +19,7 @@ See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/851 * FEATURE: add remoteAddr to slow query log in order to simplify identifying the client that sends slow queries to VictoriaMetrics. Slow query logging is controlled with `-search.logSlowQueryDuration` command-line flag. * FEATURE: add `/tags/delSeries` handler from Graphite Tags API. See https://victoriametrics.github.io/#graphite-tags-api-usage +* FEATURE: log metric name plus all its labels when the metric timestamp is out of the configured retention. This should simplify detecting the source of metrics with unexpected timestamps. * BUGFIX: properly parse Prometheus metrics with [exemplars](https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md#exemplars-1) such as `foo 123 # {bar="baz"} 1`. * BUGFIX: properly parse "infinity" values in [OpenMetrics format](https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md#abnf). diff --git a/lib/storage/metric_name.go b/lib/storage/metric_name.go index cb2c41399..563bd1d5f 100644 --- a/lib/storage/metric_name.go +++ b/lib/storage/metric_name.go @@ -343,17 +343,17 @@ func hasTag(tags []string, key []byte) bool { } // String returns user-readable representation of the metric name. -// -// Use this function only for debug logging. func (mn *MetricName) String() string { - mn.sortTags() + var mnCopy MetricName + mnCopy.CopyFrom(mn) + mnCopy.sortTags() var tags []string - for i := range mn.Tags { - t := &mn.Tags[i] - tags = append(tags, fmt.Sprintf("%q=%q", t.Key, t.Value)) + for i := range mnCopy.Tags { + t := &mnCopy.Tags[i] + tags = append(tags, fmt.Sprintf("%s=%q", t.Key, t.Value)) } - tagsStr := strings.Join(tags, ", ") - return fmt.Sprintf("MetricGroup=%q, tags=[%s]", mn.MetricGroup, tagsStr) + tagsStr := strings.Join(tags, ",") + return fmt.Sprintf("%s{%s}", mnCopy.MetricGroup, tagsStr) } // Marshal appends marshaled mn to dst and returns the result. 
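For illustration only, not part of the patch: the reworked String above renders a metric in the compact metric{key="value"} form that getUserReadableMetricName feeds into the retention warnings further down. A minimal standalone sketch, assuming the storage package is imported via the repository's module path; the expected output mirrors the TestMetricNameString cases added in the next file.

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

func main() {
	mn := storage.MetricName{
		MetricGroup: []byte("http_requests_total"),
		Tags: []storage.Tag{
			{Key: []byte("job"), Value: []byte("api")},
			{Key: []byte("instance"), Value: []byte("host-1:8080")},
		},
	}
	// Tags are sorted on a copy, so the receiver itself is left unmodified.
	// Prints: http_requests_total{instance="host-1:8080",job="api"}
	fmt.Println(mn.String())
}

Sorting a copy rather than the receiver is what lets String be called from the ingestion warning path without mutating the original MetricName.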
diff --git a/lib/storage/metric_name_test.go b/lib/storage/metric_name_test.go index 432509091..18d1cda8f 100644 --- a/lib/storage/metric_name_test.go +++ b/lib/storage/metric_name_test.go @@ -6,6 +6,32 @@ import ( "testing" ) +func TestMetricNameString(t *testing.T) { + f := func(mn *MetricName, resultExpected string) { + t.Helper() + result := mn.String() + if result != resultExpected { + t.Fatalf("unexpected result\ngot\n%s\nwant\n%s", result, resultExpected) + } + } + f(&MetricName{ + MetricGroup: []byte("foobar"), + }, "foobar{}") + f(&MetricName{ + MetricGroup: []byte("abc"), + Tags: []Tag{ + { + Key: []byte("foo"), + Value: []byte("bar"), + }, + { + Key: []byte("baz"), + Value: []byte("123"), + }, + }, + }, `abc{baz="123",foo="bar"}`) +} + func TestMetricNameSortTags(t *testing.T) { testMetricNameSortTags(t, []string{}, []string{}) testMetricNameSortTags(t, []string{"foo"}, []string{"foo"}) diff --git a/lib/storage/storage.go b/lib/storage/storage.go index b61c5f27b..07d8686b0 100644 --- a/lib/storage/storage.go +++ b/lib/storage/storage.go @@ -1238,9 +1238,10 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra if mr.Timestamp < minTimestamp { // Skip rows with too small timestamps outside the retention. if firstWarn == nil { + metricName := getUserReadableMetricName(mr.MetricNameRaw) firstWarn = fmt.Errorf("cannot insert row with too small timestamp %d outside the retention; minimum allowed timestamp is %d; "+ - "probably you need updating -retentionPeriod command-line flag", - mr.Timestamp, minTimestamp) + "probably you need updating -retentionPeriod command-line flag; metricName: %s", + mr.Timestamp, minTimestamp, metricName) } atomic.AddUint64(&s.tooSmallTimestampRows, 1) continue @@ -1248,9 +1249,9 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra if mr.Timestamp > maxTimestamp { // Skip rows with too big timestamps significantly exceeding the current time. 
if firstWarn == nil { - firstWarn = fmt.Errorf("cannot insert row with too big timestamp %d exceeding the current time; maximum allowed timestamp is %d; "+ - "propbably you need updating -retentionPeriod command-line flag", - mr.Timestamp, maxTimestamp) + metricName := getUserReadableMetricName(mr.MetricNameRaw) + firstWarn = fmt.Errorf("cannot insert row with too big timestamp %d exceeding the current time; maximum allowed timestamp is %d; metricName: %s", + mr.Timestamp, maxTimestamp, metricName) } atomic.AddUint64(&s.tooBigTimestampRows, 1) continue @@ -1359,6 +1360,14 @@ func (s *Storage) add(rows []rawRow, mrs []MetricRow, precisionBits uint8) ([]ra return rows, nil } +func getUserReadableMetricName(metricNameRaw []byte) string { + var mn MetricName + if err := mn.unmarshalRaw(metricNameRaw); err != nil { + return fmt.Sprintf("cannot unmarshal metricNameRaw %q: %s", metricNameRaw, err) + } + return mn.String() +} + type pendingMetricRow struct { MetricName []byte mr MetricRow From 7119f294f3ec9ef43a0b0131be3f5adfa03734d1 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Wed, 25 Nov 2020 21:19:43 +0200 Subject: [PATCH 18/21] lib/mergeset: help GC by removing refereces to slices in inmemoryBlock.Reset --- lib/mergeset/encoding.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/mergeset/encoding.go b/lib/mergeset/encoding.go index 3c9561e5e..9a48a8c02 100644 --- a/lib/mergeset/encoding.go +++ b/lib/mergeset/encoding.go @@ -31,6 +31,12 @@ type inmemoryBlock struct { func (ib *inmemoryBlock) Reset() { ib.commonPrefix = ib.commonPrefix[:0] ib.data = ib.data[:0] + + items := ib.items + for i := range items { + // Remove reference to by slice, so GC could free the byte slice. + items[i] = nil + } ib.items = ib.items[:0] } From 1c669a69a86404494395c2d3f9e41106822e56d7 Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Wed, 25 Nov 2020 21:52:17 +0200 Subject: [PATCH 19/21] lib/mergeset: tune the number of rawItemsBlocks to merge at once 512 blocks give higher ingestion performance and slightly lower memory usage --- lib/mergeset/table.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/mergeset/table.go b/lib/mergeset/table.go index b4084b848..4284bc623 100644 --- a/lib/mergeset/table.go +++ b/lib/mergeset/table.go @@ -369,7 +369,7 @@ func (tb *Table) AddItems(items [][]byte) error { tb.rawItemsBlocks = append(tb.rawItemsBlocks, ib) } } - if len(tb.rawItemsBlocks) >= 1024 { + if len(tb.rawItemsBlocks) >= 512 { blocksToMerge = tb.rawItemsBlocks tb.rawItemsBlocks = nil tb.rawItemsLastFlushTime = fasttime.UnixTimestamp() From dabbf930d8c2c3a17ba17fbe23e3a7374a95caea Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Wed, 25 Nov 2020 22:26:23 +0200 Subject: [PATCH 20/21] app/vmagent: do not enable -promscrape.config.strictParse when -dryRun command-line flag is set Users can specify -promscrape.config.strictParse if -promscrape.config shouldn't contain unknown config entries --- app/vmagent/main.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/app/vmagent/main.go b/app/vmagent/main.go index 04c6cd8b2..bcff08e83 100644 --- a/app/vmagent/main.go +++ b/app/vmagent/main.go @@ -48,7 +48,8 @@ var ( "Usually :4242 must be set. Doesn't work if empty") opentsdbHTTPListenAddr = flag.String("opentsdbHTTPListenAddr", "", "TCP address to listen for OpentTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty") dryRun = flag.Bool("dryRun", false, "Whether to check only config files without running vmagent. 
The following files are checked: "+ - "-promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig . See also -promscrape.config.dryRun") + "-promscrape.config, -remoteWrite.relabelConfig, -remoteWrite.urlRelabelConfig . "+ + "Unknown config entries are allowed in -promscrape.config by default. This can be changed with -promscrape.config.strictParse") ) var ( @@ -69,9 +70,6 @@ func main() { cgroup.UpdateGOMAXPROCSToCPUQuota() if *dryRun { - if err := flag.Set("promscrape.config.strictParse", "true"); err != nil { - logger.Panicf("BUG: cannot set promscrape.config.strictParse=true: %s", err) - } if err := remotewrite.CheckRelabelConfigs(); err != nil { logger.Fatalf("error when checking relabel configs: %s", err) } From b7fcdb528d0093005423e844e6c5ada432d6a9fe Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Wed, 25 Nov 2020 22:59:13 +0200 Subject: [PATCH 21/21] app/{vmagent,victoria-metrics}: add `-dryRun` option and make more clear handling for `-promscrape.config.dryRun` --- app/victoria-metrics/main.go | 15 +++++++++++++++ app/vmagent/main.go | 9 ++++++++- docs/CHANGELOG.md | 1 + lib/promscrape/config.go | 13 +++++-------- 4 files changed, 29 insertions(+), 9 deletions(-) diff --git a/app/victoria-metrics/main.go b/app/victoria-metrics/main.go index 130974f12..6349dd3f3 100644 --- a/app/victoria-metrics/main.go +++ b/app/victoria-metrics/main.go @@ -17,6 +17,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape" "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" ) @@ -25,6 +26,8 @@ var ( minScrapeInterval = flag.Duration("dedup.minScrapeInterval", 0, "Remove superflouos samples from time series if they are located closer to each other than this duration. "+ "This may be useful for reducing overhead when multiple identically configured Prometheus instances write data to the same VictoriaMetrics. "+ "Deduplication is disabled if the -dedup.minScrapeInterval is 0") + dryRun = flag.Bool("dryRun", false, "Whether to check only -promscrape.config and then exit. "+ + "Unknown config entries are allowed in -promscrape.config by default. 
This can be changed with -promscrape.config.strictParse") ) func main() { @@ -34,6 +37,18 @@ func main() { buildinfo.Init() logger.Init() cgroup.UpdateGOMAXPROCSToCPUQuota() + + if promscrape.IsDryRun() { + *dryRun = true + } + if *dryRun { + if err := promscrape.CheckConfig(); err != nil { + logger.Fatalf("error when checking -promscrape.config: %s", err) + } + logger.Infof("-promscrape.config is ok; exitting with 0 status code") + return + } + logger.Infof("starting VictoriaMetrics at %q...", *httpListenAddr) startTime := time.Now() storage.SetMinScrapeIntervalForDeduplication(*minScrapeInterval) diff --git a/app/vmagent/main.go b/app/vmagent/main.go index bcff08e83..df0fc9ee2 100644 --- a/app/vmagent/main.go +++ b/app/vmagent/main.go @@ -69,12 +69,19 @@ func main() { logger.Init() cgroup.UpdateGOMAXPROCSToCPUQuota() + if promscrape.IsDryRun() { + if err := promscrape.CheckConfig(); err != nil { + logger.Fatalf("error when checking -promscrape.config: %s", err) + } + logger.Infof("-promscrape.config is ok; exitting with 0 status code") + return + } if *dryRun { if err := remotewrite.CheckRelabelConfigs(); err != nil { logger.Fatalf("error when checking relabel configs: %s", err) } if err := promscrape.CheckConfig(); err != nil { - logger.Fatalf("error when checking Prometheus config: %s", err) + logger.Fatalf("error when checking -promscrape.config: %s", err) } logger.Infof("all the configs are ok; exitting with 0 status code") return diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index abbb0c8b5..02fb8c070 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -20,6 +20,7 @@ See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/851 Slow query logging is controlled with `-search.logSlowQueryDuration` command-line flag. * FEATURE: add `/tags/delSeries` handler from Graphite Tags API. See https://victoriametrics.github.io/#graphite-tags-api-usage * FEATURE: log metric name plus all its labels when the metric timestamp is out of the configured retention. This should simplify detecting the source of metrics with unexpected timestamps. +* FEATURE: add `-dryRun` command-line flag to single-node VictoriaMetrics in order to check config file pointed by `-promscrape.config`. * BUGFIX: properly parse Prometheus metrics with [exemplars](https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md#exemplars-1) such as `foo 123 # {bar="baz"} 1`. * BUGFIX: properly parse "infinity" values in [OpenMetrics format](https://github.com/OpenObservability/OpenMetrics/blob/master/OpenMetrics.md#abnf). diff --git a/lib/promscrape/config.go b/lib/promscrape/config.go index 1a8b1fdd2..007ec267c 100644 --- a/lib/promscrape/config.go +++ b/lib/promscrape/config.go @@ -5,7 +5,6 @@ import ( "fmt" "io/ioutil" "net/url" - "os" "path/filepath" "strings" "sync/atomic" @@ -134,16 +133,14 @@ func loadConfig(path string) (cfg *Config, data []byte, err error) { if err := cfgObj.parse(data, path); err != nil { return nil, nil, fmt.Errorf("cannot parse Prometheus config from %q: %w", path, err) } - if *dryRun { - // This is a dirty hack for checking Prometheus config only. - // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/362 - // and https://github.com/VictoriaMetrics/VictoriaMetrics/issues/508 for details. 
- logger.Infof("Success: the config at %q has no errors; exitting with 0 status code", path) - os.Exit(0) - } return &cfgObj, data, nil } +// IsDryRun returns true if -promscrape.config.dryRun command-line flag is set +func IsDryRun() bool { + return *dryRun +} + func (cfg *Config) parse(data []byte, path string) error { if err := unmarshalMaybeStrict(data, cfg); err != nil { return fmt.Errorf("cannot unmarshal data: %w", err)
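For illustration only, not part of the patch series: how the pieces above fit together — the config check now reports an error to the caller instead of exiting inside loadConfig, the -promscrape.config.dryRun decision is surfaced through IsDryRun and CheckConfig, and both vmagent and single-node VictoriaMetrics gate on them before starting. A minimal sketch of that pattern; the -dryRun flag definition and the logging here are simplified stand-ins, and the binary names in the comment are illustrative.

package main

import (
	"flag"
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape"
)

// Typical invocations after this patch series (names illustrative):
//
//	victoria-metrics -promscrape.config=promscrape.yml -dryRun
//	vmagent -promscrape.config=promscrape.yml -promscrape.config.dryRun
var dryRun = flag.Bool("dryRun", false, "Whether to check -promscrape.config and exit")

func main() {
	flag.Parse()
	if *dryRun || promscrape.IsDryRun() {
		// CheckConfig returns an error to the caller, which decides how to report it.
		if err := promscrape.CheckConfig(); err != nil {
			log.Fatalf("error when checking -promscrape.config: %s", err)
		}
		log.Printf("-promscrape.config is ok; exiting with 0 status code")
		return
	}
	// ... start the service as usual ...
}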