Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files
Commit: d0311b7fe5
7 changed files with 29 additions and 8 deletions
@@ -277,6 +277,9 @@ It may be useful for performing `vmagent` rolling update without scrape loss.
   'match[]': ['{__name__!=""}']
 ```
 
+Note that the `sample_limit` option doesn't work if stream parsing is enabled, since the parsed data is pushed to remote storage as soon as it is parsed. So the `sample_limit` option
+makes no sense during stream parsing.
+
 * It is recommended to increase `-remoteWrite.queues` if the `vmagent_remotewrite_pending_data_bytes` metric exported at the `http://vmagent-host:8429/metrics` page constantly grows.
 
 * If you see gaps in the data pushed by `vmagent` to remote storage when `-remoteWrite.maxDiskUsagePerURL` is set, then try increasing `-remoteWrite.queues`.
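
As an illustration (not part of this commit), a minimal `vmagent` invocation applying the tuning advice above might look like this; the remote storage URL and the flag values are placeholders:

```
./vmagent -remoteWrite.url=http://victoria-metrics:8428/api/v1/write \
  -remoteWrite.queues=16 \
  -remoteWrite.maxDiskUsagePerURL=1073741824
```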
@@ -49,6 +49,8 @@ type client struct {
     fq *persistentqueue.FastQueue
     hc *http.Client
 
+    bytesSent       *metrics.Counter
+    blocksSent      *metrics.Counter
     requestDuration *metrics.Histogram
     requestsOKCount *metrics.Counter
     errorsCount     *metrics.Counter
@@ -111,6 +113,8 @@ func newClient(argIdx int, remoteWriteURL, sanitizedURL string, fq *persistentqu
         },
         stopCh: make(chan struct{}),
     }
+    c.bytesSent = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_bytes_sent_total{url=%q}`, c.sanitizedURL))
+    c.blocksSent = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_blocks_sent_total{url=%q}`, c.sanitizedURL))
     c.requestDuration = metrics.GetOrCreateHistogram(fmt.Sprintf(`vmagent_remotewrite_duration_seconds{url=%q}`, c.sanitizedURL))
     c.requestsOKCount = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_requests_total{url=%q, status_code="2XX"}`, c.sanitizedURL))
     c.errorsCount = metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_errors_total{url=%q}`, c.sanitizedURL))
@@ -187,6 +191,8 @@ func (c *client) runWorker() {
 func (c *client) sendBlock(block []byte) {
     retryDuration := time.Second
     retriesCount := 0
+    c.bytesSent.Add(len(block))
+    c.blocksSent.Inc()
 
 again:
     req, err := http.NewRequest("POST", c.remoteWriteURL, bytes.NewBuffer(block))
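
For reference (not part of this commit), the two counters incremented above are exported on the vmagent `/metrics` page with a per-URL label, roughly as follows; the label value and the numbers are placeholders:

```
vmagent_remotewrite_bytes_sent_total{url="<sanitized remote write url>"} 123456
vmagent_remotewrite_blocks_sent_total{url="<sanitized remote write url>"} 78
```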
@@ -2,6 +2,16 @@
 
 # tip
 
+
+# [v1.50.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.50.1)
+
+* FEATURE: vmagent: export `vmagent_remotewrite_bytes_sent_total` and `vmagent_remotewrite_blocks_sent_total` metrics for each `-remoteWrite.url`.
+
+* BUGFIX: vmagent: properly delete unregistered scrape targets from `/targets` and `/api/v1/targets` pages. They weren't deleted due to a bug in `v1.50.0`.
+
+
+# [v1.50.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.50.0)
+
 * FEATURE: automatically reset response cache when samples with timestamps older than `now - search.cacheTimestampOffset` are ingested to VictoriaMetrics. This makes it unnecessary to disable the response cache during data backfilling or to reset it after backfilling is complete, as described [in these docs](https://victoriametrics.github.io/#backfilling). This feature applies only to single-node VictoriaMetrics. It doesn't apply to the cluster version of VictoriaMetrics, because `vminsert` nodes don't know about the `vmselect` nodes where the response cache must be reset.
 * FEATURE: vmalert: add `query`, `first` and `value` functions to alert templates. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/539
 * FEATURE: vmagent: return a user-friendly HTML page when requesting the `/targets` page from a web browser. The page is returned in the old plaintext format when requested via curl or a similar tool.
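
To illustrate the new vmalert template functions from the v1.50.0 entry above (not part of this commit), an annotation might use them roughly as follows; the group, alert and metric names are made up, and the pipeline syntax assumes Prometheus-compatible template semantics:

```
groups:
  - name: example
    rules:
      - alert: TooManyErrors
        expr: rate(errors_total[5m]) > 0.1
        annotations:
          # query runs a MetricsQL expression, first picks the first returned
          # sample, value extracts its numeric value (assumed usage, see issue 539)
          description: 'Current error rate: {{ query "rate(errors_total[5m])" | first | value }}'
```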
@@ -3,12 +3,12 @@ Release process guidance
 ## Release version and Docker images
 
 0. Document all the changes for new release in [CHANGELOG.md](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/docs/CHANGELOG.md).
-1. Create release tag with `git tag v1.xx.y`.
+1. Create release tag with `git tag v1.xx.y` in `master` branch and `git tag v1.xx.y-cluster` in `cluster` branch.
 2. Run `make release` for creating `*.tar.gz` release archive with the corresponding `_checksums.txt` inside `bin` directory.
 3. Run `make publish` for creating and publishing Docker images.
-4. Push release tag to https://github.com/VictoriaMetrics/VictoriaMetrics : `git push origin v1.xx.y`.
-5. Go to https://github.com/VictoriaMetrics/VictoriaMetrics/releases , create new release from the pushed tag on step 4
-and upload `*.tar.gz` archive with the corresponding `_checksums.txt` from step 2.
+4. Repeat steps 3-4 for `cluster` branch.
+5. Push release tag to https://github.com/VictoriaMetrics/VictoriaMetrics : `git push origin v1.xx.y`.
+6. Go to https://github.com/VictoriaMetrics/VictoriaMetrics/releases , create new release from the pushed tag on step 5 and upload `*.tar.gz` archive with the corresponding `_checksums.txt` from step 2.
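
As a condensed illustration of the updated single-node flow above (not part of this commit; `v1.xx.y` is a placeholder tag):

```
git tag v1.xx.y          # step 1, in the master branch
make release             # step 2: build *.tar.gz archives with _checksums.txt into bin/
make publish             # step 3: build and publish Docker images
git push origin v1.xx.y  # step 5: push the release tag to GitHub
```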
 
 
 ## Helm Charts
@@ -277,6 +277,9 @@ It may be useful for performing `vmagent` rolling update without scrape loss.
   'match[]': ['{__name__!=""}']
 ```
 
+Note that the `sample_limit` option doesn't work if stream parsing is enabled, since the parsed data is pushed to remote storage as soon as it is parsed. So the `sample_limit` option
+makes no sense during stream parsing.
+
 * It is recommended to increase `-remoteWrite.queues` if the `vmagent_remotewrite_pending_data_bytes` metric exported at the `http://vmagent-host:8429/metrics` page constantly grows.
 
 * If you see gaps in the data pushed by `vmagent` to remote storage when `-remoteWrite.maxDiskUsagePerURL` is set, then try increasing `-remoteWrite.queues`.
@@ -21,7 +21,6 @@ may fail;
 * by default, rules execution is sequential within one group, but persisting of execution results to remote
 storage is asynchronous. Hence, users shouldn't rely on recording rules chaining, i.e. when the result of a previous
 recording rule is reused in the next one;
-* there is no `query` function support in templates yet;
 * `vmalert` has no UI, just an API for getting groups and rules statuses.
 
 ### QuickStart
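
To make the recording-rules chaining limitation noted in the hunk above concrete (an illustrative sketch, not part of this commit; rule and metric names are made up):

```
groups:
  - name: chaining-example
    rules:
      # the first rule's result is persisted to remote storage asynchronously
      - record: instance:requests:rate5m
        expr: rate(requests_total[5m])
      # this rule reads the series produced above; because persistence is
      # asynchronous, it may not see the freshly computed samples yet
      - record: job:requests:rate5m
        expr: sum(instance:requests:rate5m) by (job)
```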
@@ -318,14 +318,14 @@ func (sg *scraperGroup) update(sws []*ScrapeWork) {
         sg.activeScrapers.Inc()
         sg.scrapersStarted.Inc()
         sg.wg.Add(1)
-        go func() {
+        tsmGlobal.Register(sw)
+        go func(sw *ScrapeWork) {
             defer sg.wg.Done()
             sc.sw.run(sc.stopCh)
             tsmGlobal.Unregister(sw)
             sg.activeScrapers.Dec()
             sg.scrapersStopped.Inc()
-        }()
-        tsmGlobal.Register(sw)
+        }(sw)
         sg.m[key] = sc
         additionsCount++
     }