Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2025-02-19 15:30:17 +00:00

Commit b80ebb8bfd: Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

50 changed files with 2,389 additions and 412 deletions

.github/workflows/main.yml (vendored, 2 changes)

@@ -65,7 +65,7 @@ jobs:
       make ${{ matrix.scenario}}

     - name: Publish coverage
-      uses: codecov/codecov-action@v3
+      uses: codecov/codecov-action@v4
       with:
         file: ./coverage.txt

.github/workflows/update-sandbox.yml (vendored, 80 changes, file deleted)

@@ -1,80 +0,0 @@
-name: sandbox-release
-on:
-  release:
-    types: [published]
-permissions:
-  contents: write
-jobs:
-  deploy-sandbox:
-    runs-on: ubuntu-latest
-    steps:
-      - name: check inputs
-        if: github.event.release.tag_name == ''
-        run: exit 1
-
-      - name: Check out code
-        uses: actions/checkout@v4
-        with:
-          repository: VictoriaMetrics/ops
-          token: ${{ secrets.VM_BOT_GH_TOKEN }}
-
-      - name: Import GPG key
-        id: import-gpg
-        uses: crazy-max/ghaction-import-gpg@v5
-        with:
-          gpg_private_key: ${{ secrets.VM_BOT_GPG_PRIVATE_KEY }}
-          passphrase: ${{ secrets.VM_BOT_PASSPHRASE }}
-          git_user_signingkey: true
-          git_commit_gpgsign: true
-
-      - name: update image tag
-        uses: fjogeleit/yaml-update-action@main
-        with:
-          valueFile: 'gcp-test/sandbox/manifests/benchmark-vm/vmcluster.yaml'
-          commitChange: false
-          createPR: false
-          changes: |
-            {
-              "gcp-test/sandbox/manifests/benchmark-vm/vmcluster.yaml": {
-                "spec.vminsert.image.tag": "${{ github.event.release.tag_name }}-enterprise-cluster",
-                "spec.vmselect.image.tag": "${{ github.event.release.tag_name }}-enterprise-cluster",
-                "spec.vmstorage.image.tag": "${{ github.event.release.tag_name }}-enterprise-cluster"
-              },
-              "gcp-test/sandbox/manifests/benchmark-vm/vmsingle.yaml": {
-                "spec.image.tag": "${{ github.event.release.tag_name }}-enterprise"
-              },
-              "gcp-test/sandbox/manifests/monitoring/monitoring-vmagent.yaml": {
-                "spec.image.tag": "${{ github.event.release.tag_name }}"
-              },
-              "gcp-test/sandbox/manifests/monitoring/monitoring-vmcluster.yaml": {
-                "spec.vminsert.image.tag": "${{ github.event.release.tag_name }}-enterprise-cluster",
-                "spec.vmselect.image.tag": "${{ github.event.release.tag_name }}-enterprise-cluster",
-                "spec.vmstorage.image.tag": "${{ github.event.release.tag_name }}-enterprise-cluster"
-              },
-              "gcp-test/sandbox/manifests/monitoring/vmalert.yaml": {
-                "spec.image.tag": "${{ github.event.release.tag_name }}-enterprise"
-              }
-            }
-
-      - name: commit changes
-        run: |
-          git config --global user.name "${{ steps.import-gpg.outputs.email }}"
-          git config --global user.email "${{ steps.import-gpg.outputs.email }}"
-          git add .
-          git commit -S -m "Deploy image tag ${RELEASE_TAG} to sandbox"
-        env:
-          RELEASE_TAG: ${{ github.event.release.tag_name }}
-
-      - name: Create Pull Request
-        uses: peter-evans/create-pull-request@v5
-        with:
-          author: ${{ github.actor }} <${{ github.actor }}@users.noreply.github.com>
-          branch: release-automation
-          token: ${{ secrets.VM_BOT_GH_TOKEN }}
-          delete-branch: true
-          title: "release ${{ github.event.release.tag_name }}"
-          body: |
-            Release [${{ github.event.release.tag_name }}](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/${{ github.event.release.tag_name }}) to sandbox
-
-            > Auto-generated by `Github Actions Bot`

@@ -1694,6 +1694,10 @@ Downsampling happens during [background merges](https://docs.victoriametrics.com
 and can't be performed if there is not enough free disk space or if vmstorage
 is in [read-only mode](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#readonly-mode).
 
+Please note that the `-downsampling.period` intervals must be multiples of each other.
+If [deduplication](https://docs.victoriametrics.com/#deduplication) is enabled, then the `-dedup.minScrapeInterval` value must also be a multiple of the `-downsampling.period` intervals.
+This is required to ensure consistency of deduplication and downsampling results.
+
 The downsampling can be evaluated for free by downloading and using enterprise binaries from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
 
 ## Multi-tenancy

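The multiples rule above is mechanical and easy to sanity-check. Below is a minimal Go sketch of such a validation (a hypothetical helper, not the actual enterprise implementation):

package main

import (
	"fmt"
	"sort"
	"time"
)

// validateIntervals checks the documented rule: after sorting, every interval
// must be a multiple of the previous one, which (by transitivity) makes all
// intervals multiples of each other.
func validateIntervals(ds []time.Duration) error {
	sort.Slice(ds, func(i, j int) bool { return ds[i] < ds[j] })
	for i := 1; i < len(ds); i++ {
		if ds[i]%ds[i-1] != 0 {
			return fmt.Errorf("%s is not a multiple of %s", ds[i], ds[i-1])
		}
	}
	return nil
}

func main() {
	// -dedup.minScrapeInterval=1m with -downsampling.period intervals 5m and 1h: OK.
	fmt.Println(validateIntervals([]time.Duration{time.Minute, 5 * time.Minute, time.Hour}))
	// 5m and 7m are not multiples of each other: error.
	fmt.Println(validateIntervals([]time.Duration{5 * time.Minute, 7 * time.Minute}))
}
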
@@ -109,6 +109,12 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
 		defer bufferedwriter.Put(bw)
 		WriteBulkResponse(bw, n, tookMs)
 		_ = bw.Flush()
+
+		// Update bulkRequestDuration only for successfully parsed requests.
+		// There is no need to update bulkRequestDuration for request errors,
+		// since their timings are usually much smaller than the timing for successful request parsing.
+		bulkRequestDuration.UpdateDuration(startTime)
+
 		return true
 	default:
 		return false

@@ -116,7 +122,9 @@ func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
 }
 
 var (
-	bulkRequestsTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/elasticsearch/_bulk"}`)
+	bulkRequestsTotal   = metrics.NewCounter(`vl_http_requests_total{path="/insert/elasticsearch/_bulk"}`)
+	rowsIngestedTotal   = metrics.NewCounter(`vl_rows_ingested_total{type="elasticsearch_bulk"}`)
+	bulkRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/elasticsearch/_bulk"}`)
 )
 
 func readBulkRequest(r io.Reader, isGzip bool, timeField, msgField string,

@@ -162,8 +170,6 @@ func readBulkRequest(r io.Reader, isGzip bool, timeField, msgField string,
 
 var lineBufferPool bytesutil.ByteBufferPool
 
-var rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="elasticsearch_bulk"}`)
-
 func readBulkLine(sc *bufio.Scanner, timeField, msgField string,
 	processLogMessage func(timestamp int64, fields []logstorage.Field),
 ) (bool, error) {

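This measure-only-on-success pattern repeats in the jsonline and Loki handlers below. A self-contained sketch of the pattern, using the real `metrics.NewHistogram` and `UpdateDuration` APIs from `github.com/VictoriaMetrics/metrics`; the handler and metric names are illustrative:

package main

import (
	"net/http"
	"time"

	"github.com/VictoriaMetrics/metrics"
)

// requestDuration tracks durations only for successfully parsed requests.
var requestDuration = metrics.NewHistogram(`example_http_request_duration_seconds{path="/insert/example"}`)

func handleInsert(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	if err := parseRequest(r); err != nil {
		// Request errors return early: their timings are much smaller than
		// successful parsing, so recording them would skew the histogram.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	w.WriteHeader(http.StatusOK)
	// UpdateDuration records time.Since(startTime) in the histogram.
	requestDuration.UpdateDuration(startTime)
}

func parseRequest(r *http.Request) error { return nil } // placeholder parser

func main() {
	http.HandleFunc("/insert/example", handleInsert)
	_ = http.ListenAndServe(":8080", nil)
}
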
@@ -3,12 +3,13 @@ package insertutils
 import (
 	"net/http"
 
+	"github.com/VictoriaMetrics/metrics"
+
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
-	"github.com/VictoriaMetrics/metrics"
 )
 
 // CommonParams contains common HTTP parameters used by log ingestion APIs.

@@ -73,12 +74,19 @@ func GetCommonParams(r *http.Request) (*CommonParams, error) {
 // GetProcessLogMessageFunc returns a function, which adds parsed log messages to lr.
 func (cp *CommonParams) GetProcessLogMessageFunc(lr *logstorage.LogRows) func(timestamp int64, fields []logstorage.Field) {
 	return func(timestamp int64, fields []logstorage.Field) {
+		if len(fields) > *MaxFieldsPerLine {
+			rf := logstorage.RowFormatter(fields)
+			logger.Warnf("dropping log line with %d fields; it exceeds -insert.maxFieldsPerLine=%d; %s", len(fields), *MaxFieldsPerLine, rf)
+			rowsDroppedTotalTooManyFields.Inc()
+			return
+		}
+
 		lr.MustAdd(cp.TenantID, timestamp, fields)
 		if cp.Debug {
 			s := lr.GetRowString(0)
 			lr.ResetKeepSettings()
 			logger.Infof("remoteAddr=%s; requestURI=%s; ignoring log entry because of `debug` query arg: %s", cp.DebugRemoteAddr, cp.DebugRequestURI, s)
-			rowsDroppedTotal.Inc()
+			rowsDroppedTotalDebug.Inc()
 			return
 		}
 		if lr.NeedFlush() {

@@ -88,4 +96,5 @@ func (cp *CommonParams) GetProcessLogMessageFunc(lr *logstorage.LogRows) func(ti
 	}
 }
 
-var rowsDroppedTotal = metrics.NewCounter(`vl_rows_dropped_total{reason="debug"}`)
+var rowsDroppedTotalDebug = metrics.NewCounter(`vl_rows_dropped_total{reason="debug"}`)
+var rowsDroppedTotalTooManyFields = metrics.NewCounter(`vl_rows_dropped_total{reason="too_many_fields"}`)

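A small sketch of the convention used here: one logical `vl_rows_dropped_total` metric split by a `reason` label, so dashboards can both sum the total drop rate and break it down per reason. Metric and function names below are illustrative:

package main

import "github.com/VictoriaMetrics/metrics"

// One logical metric, split by a `reason` label: a dashboard can graph the
// total drop rate with sum(rate(example_rows_dropped_total)) and still
// break it down per reason.
var (
	droppedDebug         = metrics.NewCounter(`example_rows_dropped_total{reason="debug"}`)
	droppedTooManyFields = metrics.NewCounter(`example_rows_dropped_total{reason="too_many_fields"}`)
)

func processLine(fields []string, maxFields int, debug bool) bool {
	if len(fields) > maxFields {
		droppedTooManyFields.Inc()
		return false
	}
	if debug {
		// In debug mode the row is logged and intentionally not stored.
		droppedDebug.Inc()
		return false
	}
	return true
}

func main() {
	processLine([]string{"_msg"}, 1000, false)
}
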
@@ -1,10 +1,15 @@
 package insertutils
 
 import (
+	"flag"
+
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
 )
 
 var (
 	// MaxLineSizeBytes is the maximum length of a single line for /insert/* handlers
 	MaxLineSizeBytes = flagutil.NewBytes("insert.maxLineSizeBytes", 256*1024, "The maximum size of a single line, which can be read by /insert/* handlers")
+
+	// MaxFieldsPerLine is the maximum number of fields per line for /insert/* handlers
+	MaxFieldsPerLine = flag.Int("insert.maxFieldsPerLine", 1000, "The maximum number of log fields per line, which can be read by /insert/* handlers")
 )

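For reference, a stdlib-only sketch of these two limits; note that the real `flagutil.NewBytes` additionally accepts size suffixes such as `KB` or `GiB`, which plain `flag.Int` does not:

package main

import (
	"flag"
	"fmt"
)

// Plain-stdlib analogue of the two limits above.
var (
	maxLineSizeBytes = flag.Int("insert.maxLineSizeBytes", 256*1024, "maximum size of a single ingested line")
	maxFieldsPerLine = flag.Int("insert.maxFieldsPerLine", 1000, "maximum number of log fields per line")
)

func main() {
	flag.Parse()
	fmt.Printf("line limit: %d bytes, field limit: %d\n", *maxLineSizeBytes, *maxFieldsPerLine)
}
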
@@ -21,6 +21,7 @@ import (
 
 // RequestHandler processes jsonline insert requests
 func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
+	startTime := time.Now()
 	w.Header().Add("Content-Type", "application/json")
 
 	if r.Method != "POST" {

@@ -77,6 +78,11 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool {
 	vlstorage.MustAddRows(lr)
 	logstorage.PutLogRows(lr)
 
+	// Update jsonlineRequestDuration only for successfully parsed requests.
+	// There is no need to update jsonlineRequestDuration for request errors,
+	// since their timings are usually much smaller than the timing for successful request parsing.
+	jsonlineRequestDuration.UpdateDuration(startTime)
+
 	return true
 }

@@ -144,6 +150,7 @@ func parseISO8601Timestamp(s string) (int64, error) {
 var lineBufferPool bytesutil.ByteBufferPool
 
 var (
-	requestsTotal     = metrics.NewCounter(`vl_http_requests_total{path="/insert/jsonline"}`)
-	rowsIngestedTotal = metrics.NewCounter(`vl_rows_ingested_total{type="jsonline"}`)
+	requestsTotal           = metrics.NewCounter(`vl_http_requests_total{path="/insert/jsonline"}`)
+	rowsIngestedTotal       = metrics.NewCounter(`vl_rows_ingested_total{type="jsonline"}`)
+	jsonlineRequestDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/jsonline"}`)
 )

@@ -3,17 +3,10 @@ package loki
 import (
 	"net/http"
 
-	"github.com/VictoriaMetrics/metrics"
-
 	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlinsert/insertutils"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
 )
 
-var (
-	lokiRequestsJSONTotal     = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="json"}`)
-	lokiRequestsProtobufTotal = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="protobuf"}`)
-)
-
 // RequestHandler processes Loki insert requests
 func RequestHandler(path string, w http.ResponseWriter, r *http.Request) bool {
 	switch path {

@@ -34,11 +27,9 @@ func handleInsert(r *http.Request, w http.ResponseWriter) bool {
 	contentType := r.Header.Get("Content-Type")
 	switch contentType {
 	case "application/json":
-		lokiRequestsJSONTotal.Inc()
 		return handleJSON(r, w)
 	default:
-		// Protobuf request body should be handled by default accoring to https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki
-		lokiRequestsProtobufTotal.Inc()
+		// Protobuf request body should be handled by default according to https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki
 		return handleProtobuf(r, w)
 	}
 }

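A compact sketch of the shape after this refactor: the Content-Type switch stays thin, and each format handler increments its own counter at entry. Names and port are illustrative:

package main

import (
	"net/http"

	"github.com/VictoriaMetrics/metrics"
)

// Per-format request counters live next to the handlers that increment them,
// mirroring the refactor above.
var (
	jsonRequests     = metrics.NewCounter(`example_requests_total{format="json"}`)
	protobufRequests = metrics.NewCounter(`example_requests_total{format="protobuf"}`)
)

func handlePush(w http.ResponseWriter, r *http.Request) {
	switch r.Header.Get("Content-Type") {
	case "application/json":
		handleJSON(w, r)
	default:
		// Anything else is treated as protobuf, matching the Loki push API.
		handleProtobuf(w, r)
	}
}

func handleJSON(w http.ResponseWriter, r *http.Request)     { jsonRequests.Inc() }
func handleProtobuf(w http.ResponseWriter, r *http.Request) { protobufRequests.Inc() }

func main() {
	http.HandleFunc("/insert/loki/api/v1/push", handlePush)
	_ = http.ListenAndServe(":9428", nil)
}
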
@@ -18,12 +18,11 @@ import (
 	"github.com/valyala/fastjson"
 )
 
-var (
-	rowsIngestedJSONTotal = metrics.NewCounter(`vl_rows_ingested_total{type="loki",format="json"}`)
-	parserPool            fastjson.ParserPool
-)
+var parserPool fastjson.ParserPool
 
 func handleJSON(r *http.Request, w http.ResponseWriter) bool {
+	startTime := time.Now()
+	lokiRequestsJSONTotal.Inc()
 	reader := r.Body
 	if r.Header.Get("Content-Encoding") == "gzip" {
 		zr, err := common.GetGzipReader(reader)

@@ -58,9 +57,21 @@ func handleJSON(r *http.Request, w http.ResponseWriter) bool {
 		return true
 	}
 	rowsIngestedJSONTotal.Add(n)
+
+	// Update lokiRequestJSONDuration only for successfully parsed requests.
+	// There is no need to update lokiRequestJSONDuration for request errors,
+	// since their timings are usually much smaller than the timing for successful request parsing.
+	lokiRequestJSONDuration.UpdateDuration(startTime)
+
 	return true
 }
 
+var (
+	lokiRequestsJSONTotal   = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="json"}`)
+	rowsIngestedJSONTotal   = metrics.NewCounter(`vl_rows_ingested_total{type="loki",format="json"}`)
+	lokiRequestJSONDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="json"}`)
+)
+
 func parseJSONRequest(data []byte, processLogMessage func(timestamp int64, fields []logstorage.Field)) (int, error) {
 	p := parserPool.Get()
 	defer parserPool.Put(p)

@@ -19,12 +19,13 @@ import (
 )
 
 var (
-	rowsIngestedProtobufTotal = metrics.NewCounter(`vl_rows_ingested_total{type="loki",format="protobuf"}`)
-	bytesBufPool              bytesutil.ByteBufferPool
-	pushReqsPool              sync.Pool
+	bytesBufPool bytesutil.ByteBufferPool
+	pushReqsPool sync.Pool
 )
 
 func handleProtobuf(r *http.Request, w http.ResponseWriter) bool {
+	startTime := time.Now()
+	lokiRequestsProtobufTotal.Inc()
 	wcr := writeconcurrencylimiter.GetReader(r.Body)
 	data, err := io.ReadAll(wcr)
 	writeconcurrencylimiter.PutReader(wcr)

@@ -47,10 +48,23 @@ func handleProtobuf(r *http.Request, w http.ResponseWriter) bool {
 		httpserver.Errorf(w, r, "cannot parse loki request: %s", err)
 		return true
 	}
+
 	rowsIngestedProtobufTotal.Add(n)
+
+	// Update lokiRequestProtobufDuration only for successfully parsed requests.
+	// There is no need to update lokiRequestProtobufDuration for request errors,
+	// since their timings are usually much smaller than the timing for successful request parsing.
+	lokiRequestProtobufDuration.UpdateDuration(startTime)
+
 	return true
 }
 
+var (
+	lokiRequestsProtobufTotal   = metrics.NewCounter(`vl_http_requests_total{path="/insert/loki/api/v1/push",format="protobuf"}`)
+	rowsIngestedProtobufTotal   = metrics.NewCounter(`vl_rows_ingested_total{type="loki",format="protobuf"}`)
+	lokiRequestProtobufDuration = metrics.NewHistogram(`vl_http_request_duration_seconds{path="/insert/loki/api/v1/push",format="protobuf"}`)
+)
+
 func parseProtobufRequest(data []byte, processLogMessage func(timestamp int64, fields []logstorage.Field)) (int, error) {
 	bb := bytesBufPool.Get()
 	defer bytesBufPool.Put(bb)

@@ -47,7 +47,7 @@ See https://graphite.readthedocs.io/en/stable/render_api.html#json
 	{% code timestamps := s.Timestamps %}
 	{% for i, v := range s.Values %}
 		[
-			{% if math.IsNaN(v) %}null{% else %}{%f= v %}{% endif %},
+			{% if math.IsNaN(v) || math.IsInf(v, 0) %}null{% else %}{%f= v %}{% endif %},
 			{%dl= timestamps[i]/1e3 %}
 		]
 		{% if i+1 < len(timestamps) %},{% endif %}

@@ -148,7 +148,7 @@ func streamrenderSeriesJSON(qw422016 *qt422016.Writer, s *series) {
//line app/vmselect/graphite/render_response.qtpl:48
		qw422016.N().S(`[`)
//line app/vmselect/graphite/render_response.qtpl:50
-		if math.IsNaN(v) {
+		if math.IsNaN(v) || math.IsInf(v, 0) {
//line app/vmselect/graphite/render_response.qtpl:50
			qw422016.N().S(`null`)
//line app/vmselect/graphite/render_response.qtpl:50

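The fix matters because JSON has no encoding for NaN or infinity; serializing them produces invalid JSON. A standalone Go sketch of the same guard:

package main

import (
	"fmt"
	"math"
	"strconv"
)

// renderValue mirrors the template fix above: JSON cannot represent NaN
// or ±Inf, so both are rendered as null instead of producing invalid output.
func renderValue(v float64) string {
	if math.IsNaN(v) || math.IsInf(v, 0) {
		return "null"
	}
	return strconv.FormatFloat(v, 'f', -1, 64)
}

func main() {
	for _, v := range []float64{1.5, math.NaN(), math.Inf(1), math.Inf(-1)} {
		fmt.Println(renderValue(v)) // 1.5, null, null, null
	}
}
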
@@ -45,7 +45,7 @@ func TestEscapeDotsInRegexpLabelFilters(t *testing.T) {
 	f("2", "2")
 	f(`foo.bar + 123`, `foo.bar + 123`)
 	f(`foo{bar=~"baz.xx.yyy"}`, `foo{bar=~"baz\\.xx\\.yyy"}`)
-	f(`foo(a.b{c="d.e",x=~"a.b.+[.a]",y!~"aaa.bb|cc.dd"}) + x.y(1,sum({x=~"aa.bb"}))`, `foo(a.b{c="d.e",x=~"a\\.b.+[\\.a]",y!~"aaa\\.bb|cc\\.dd"}) + x.y(1, sum({x=~"aa\\.bb"}))`)
+	f(`sum(a.b{c="d.e",x=~"a.b.+[.a]",y!~"aaa.bb|cc.dd"}) + avg_over_time(1,sum({x=~"aa.bb"}))`, `sum(a.b{c="d.e",x=~"a\\.b.+[\\.a]",y!~"aaa\\.bb|cc\\.dd"}) + avg_over_time(1, sum({x=~"aa\\.bb"}))`)
 }
 
 func TestExecSuccess(t *testing.T) {

(The diff for one file is suppressed because it is too large.)

@@ -2,7 +2,7 @@ version: '3.5'
 services:
   vmagent:
     container_name: vmagent
-    image: victoriametrics/vmagent:v1.93.3
+    image: victoriametrics/vmagent:v1.93.4
     depends_on:
       - "vminsert"
     ports:

@@ -32,7 +32,7 @@ services:
 
   vmstorage-1:
     container_name: vmstorage-1
-    image: victoriametrics/vmstorage:v1.93.3-cluster
+    image: victoriametrics/vmstorage:v1.93.4-cluster
     ports:
       - 8482
       - 8400

@@ -44,7 +44,7 @@ services:
     restart: always
   vmstorage-2:
     container_name: vmstorage-2
-    image: victoriametrics/vmstorage:v1.93.3-cluster
+    image: victoriametrics/vmstorage:v1.93.4-cluster
     ports:
       - 8482
       - 8400

@@ -56,7 +56,7 @@ services:
     restart: always
   vminsert:
     container_name: vminsert
-    image: victoriametrics/vminsert:v1.93.3-cluster
+    image: victoriametrics/vminsert:v1.93.4-cluster
     depends_on:
       - "vmstorage-1"
       - "vmstorage-2"

@@ -68,7 +68,7 @@ services:
     restart: always
   vmselect:
     container_name: vmselect
-    image: victoriametrics/vmselect:v1.93.3-cluster
+    image: victoriametrics/vmselect:v1.93.4-cluster
    depends_on:
      - "vmstorage-1"
      - "vmstorage-2"

@@ -82,7 +82,7 @@ services:
 
   vmalert:
     container_name: vmalert
-    image: victoriametrics/vmalert:v1.93.3
+    image: victoriametrics/vmalert:v1.93.4
     depends_on:
       - "vmselect"
     ports:

@ -2,7 +2,7 @@ version: "3.5"
|
|||
services:
|
||||
vmagent:
|
||||
container_name: vmagent
|
||||
image: victoriametrics/vmagent:v1.93.3
|
||||
image: victoriametrics/vmagent:v1.93.4
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
ports:
|
||||
|
@ -18,7 +18,7 @@ services:
|
|||
restart: always
|
||||
victoriametrics:
|
||||
container_name: victoriametrics
|
||||
image: victoriametrics/victoria-metrics:v1.93.3
|
||||
image: victoriametrics/victoria-metrics:v1.93.4
|
||||
ports:
|
||||
- 8428:8428
|
||||
- 8089:8089
|
||||
|
@ -49,6 +49,7 @@ services:
|
|||
- grafanadata:/var/lib/grafana
|
||||
- ./provisioning/:/etc/grafana/provisioning/
|
||||
- ./../../dashboards/victoriametrics.json:/var/lib/grafana/dashboards/vm.json
|
||||
- ./../../dashboards/victorialogs.json:/var/lib/grafana/dashboards/vl.json
|
||||
- ./../../dashboards/vmagent.json:/var/lib/grafana/dashboards/vmagent.json
|
||||
- ./../../dashboards/vmalert.json:/var/lib/grafana/dashboards/vmalert.json
|
||||
networks:
|
||||
|
@ -56,7 +57,7 @@ services:
|
|||
restart: always
|
||||
vmalert:
|
||||
container_name: vmalert
|
||||
image: victoriametrics/vmalert:v1.93.3
|
||||
image: victoriametrics/vmalert:v1.93.4
|
||||
depends_on:
|
||||
- "victoriametrics"
|
||||
- "alertmanager"
|
||||
|
@ -93,6 +94,7 @@ services:
|
|||
- vm_net
|
||||
restart: always
|
||||
fluentbit:
|
||||
container_name: fluentbit
|
||||
image: cr.fluentbit.io/fluent/fluent-bit:2.1.4
|
||||
volumes:
|
||||
- /var/lib/docker/containers:/var/lib/docker/containers:ro
|
||||
|
@ -100,7 +102,10 @@ services:
|
|||
depends_on: [victorialogs]
|
||||
ports:
|
||||
- "5140:5140"
|
||||
networks:
|
||||
- vm_net
|
||||
victorialogs:
|
||||
container_name: victorialogs
|
||||
image: docker.io/victoriametrics/victoria-logs:v0.3.0-victorialogs
|
||||
command:
|
||||
- "--storageDataPath=/vlogs"
|
||||
|
@ -109,6 +114,8 @@ services:
|
|||
- victorialogs-fluentbit:/vlogs
|
||||
ports:
|
||||
- "9428:9428"
|
||||
networks:
|
||||
- vm_net
|
||||
volumes:
|
||||
vmagentdata: {}
|
||||
vmdata: {}
|
||||
|
|
|
@@ -11,3 +11,6 @@ scrape_configs:
   - job_name: 'victoriametrics'
     static_configs:
       - targets: ['victoriametrics:8428']
+  - job_name: 'victorialogs'
+    static_configs:
+      - targets: ['victorialogs:9428']

@@ -105,7 +105,7 @@ services:
       - '--config=/config.yml'
 
   vmsingle:
-    image: victoriametrics/victoria-metrics:v1.93.3
+    image: victoriametrics/victoria-metrics:v1.93.4
     ports:
       - '8428:8428'
     command:

@@ -8,7 +8,7 @@
 4. Set variables `DIGITALOCEAN_API_TOKEN` with `VM_VERSION` for `packer` environment and run make from example below:
 
 ```console
-make release-victoria-metrics-digitalocean-oneclick-droplet DIGITALOCEAN_API_TOKEN="dop_v23_2e46f4759ceeeba0d0248" VM_VERSION="1.93.3"
+make release-victoria-metrics-digitalocean-oneclick-droplet DIGITALOCEAN_API_TOKEN="dop_v23_2e46f4759ceeeba0d0248" VM_VERSION="1.93.4"
 ```

@@ -19,8 +19,8 @@ On the server:
 * VictoriaMetrics is running on ports: 8428, 8089, 4242, 2003 and they are bound to the local interface.
 
 ********************************************************************************
-# This image includes 1.93.3 version of VictoriaMetrics.
-# See Release notes https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.93.3
+# This image includes 1.93.4 version of VictoriaMetrics.
+# See Release notes https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.93.4
 
 # Welcome to VictoriaMetrics droplet!

@@ -24,8 +24,6 @@ The following `tip` changes can be tested by building VictoriaMetrics components
 
 ## tip
 
-* SECURITY: upgrade Go builder from Go1.21.0 to Go1.21.1. See [the list of issues addressed in Go1.21.1](https://github.com/golang/go/issues?q=milestone%3AGo1.21.1+label%3ACherryPickApproved).
-
 * FEATURE: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): add support for numbers with underscore delimiters such as `1_234_567_890` and `1.234_567_890`. These numbers are easier to read than `1234567890` and `1.234567890`.
 * FEATURE: [vmbackup](https://docs.victoriametrics.com/vmbackup.html): add support for server-side copy of existing backups. See [these docs](https://docs.victoriametrics.com/vmbackup.html#server-side-copy-of-the-existing-backup) for details.
 * FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add the option to see the latest 25 queries. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4718).

@@ -44,10 +42,26 @@ The following `tip` changes can be tested by building VictoriaMetrics components
 …ssue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4825) and [these docs](https://docs.victoriametrics.com/vmauth.html#auth-config) for details.
 * FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): add ability to retry requests to the [remaining backends](https://docs.victoriametrics.com/vmauth.html#load-balancing) if they return response status codes specified in the `retry_status_codes` list. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4893).
 * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): add `eval_offset` attribute for [Groups](https://docs.victoriametrics.com/vmalert.html#groups). If specified, Group will be evaluated at the exact time offset on the range of [0...evaluationInterval]. The setting might be useful for cron-like rules which must be evaluated at specific moments of time. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3409) for details.
 * FEATURE: [vmalert](https://docs.victoriametrics.com/vmalert.html): validate [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html) function names in alerting and recording rules when `vmalert` runs with `-dryRun` command-line flag. Previously it was allowed to use unknown (aka invalid) MetricsQL function names there. For example, `foo()` was counted as a valid query. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4933).
 * FEATURE: limit the length of string params in log messages to 500 chars. Longer string params are replaced with the `first_250_chars..last_250_chars`. This prevents VictoriaMetrics components from emitting too long log lines.
 
+* BUGFIX: [storage](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html): prevent livelock when [forced merge](https://docs.victoriametrics.com/#forced-merge) is called under high data ingestion. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4987).
+* BUGFIX: [Graphite Render API](https://docs.victoriametrics.com/#graphite-render-api-usage): correctly return `null` instead of `Inf` in JSON query responses. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3783).
+* BUGFIX: [Official Grafana dashboards for VictoriaMetrics](https://grafana.com/orgs/victoriametrics): fix display of ingested rows rate for `Samples ingested/s` and `Samples rate` panels for vmagent's dashboard. Previously, not all ingested protocols were accounted for in these panels. An extra panel `Rows rate` was added to the `Ingestion` section to display the split of the rows ingestion rate by protocol.
+* BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix the bug causing render looping when switching to heatmap.
+* BUGFIX: [vmbackup](https://docs.victoriametrics.com/vmbackup.html): properly copy `parts.json` files inside `<-storageDataPath>/{data,indexdb}` folders during [incremental backups](https://docs.victoriametrics.com/vmbackup.html#incremental-backups). Previously the new `parts.json` could be skipped during incremental backups, which could lead to inability to restore from the backup. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5005). This issue has been introduced in [v1.90.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.90.0).
+* BUGFIX: [VictoriaMetrics enterprise](https://docs.victoriametrics.com/enterprise.html): validate that the `-dedup.minScrapeInterval` value and `-downsampling.period` intervals are multiples of each other. See [these docs](https://docs.victoriametrics.com/#downsampling).
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly close connections to Kubernetes API server after the change in `selectors` or `namespaces` sections of [kubernetes_sd_configs](https://docs.victoriametrics.com/sd_configs.html#kubernetes_sd_configs). Previously `vmagent` could continue polling Kubernetes API server with the old `selectors` or `namespaces` configs additionally to polling new configs. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4850).
+
+## [v1.93.4](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.93.4)
+
+Released at 2023-09-10
+
+**v1.93.x is a line of LTS releases (i.e. long-term support). It contains important up-to-date bugfixes.
+The v1.93.x line will be supported for at least 12 months since [v1.93.0](https://docs.victoriametrics.com/CHANGELOG.html#v1930) release**
+
+* SECURITY: upgrade Go builder from Go1.21.0 to Go1.21.1. See [the list of issues addressed in Go1.21.1](https://github.com/golang/go/issues?q=milestone%3AGo1.21.1+label%3ACherryPickApproved).
+
 * BUGFIX: [vminsert enterprise](https://docs.victoriametrics.com/enterprise.html): properly parse `/insert/multitenant/*` urls, which have been broken since [v1.93.2](#v1932). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4947).
 * BUGFIX: properly build production armv5 binaries for `GOARCH=arm`. This has been broken after upgrading the Go builder to Go1.21.0. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4965).
 * BUGFIX: [vmselect](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): return `503 Service Unavailable` status code when [partial responses](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#cluster-availability) are denied and some of `vmstorage` nodes are temporarily unavailable. Previously `422 Unprocessable Entity` status code was mistakenly returned in this case, which could prevent automatic recovery by re-sending the request to a healthy cluster replica in another availability zone.

@@ -463,6 +477,20 @@ Released at 2023-02-24
 * BUGFIX: properly parse timestamps in milliseconds when [ingesting data via OpenTSDB telnet put protocol](https://docs.victoriametrics.com/#sending-data-via-telnet-put-protocol). Previously timestamps in milliseconds were mistakenly multiplied by 1000. Thanks to @Droxenator for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3810).
 * BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): do not add extrapolated points outside the real points when using [interpolate()](https://docs.victoriametrics.com/MetricsQL.html#interpolate) function. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3816).
 
+## [v1.87.9](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.87.9)
+
+Released at 2023-09-10
+
+**v1.87.x is a line of LTS releases (i.e. long-term support). It contains important up-to-date bugfixes.
+The v1.87.x line will be supported for at least 12 months since [v1.87.0](https://docs.victoriametrics.com/CHANGELOG.html#v1870) release**
+
+* SECURITY: upgrade Go builder from Go1.21.0 to Go1.21.1. See [the list of issues addressed in Go1.21.1](https://github.com/golang/go/issues?q=milestone%3AGo1.21.1+label%3ACherryPickApproved).
+
+* BUGFIX: [vminsert enterprise](https://docs.victoriametrics.com/enterprise.html): properly parse `/insert/multitenant/*` urls, which have been broken since [v1.93.2](#v1932). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4947).
+* BUGFIX: properly build production armv5 binaries for `GOARCH=arm`. This has been broken after upgrading the Go builder to Go1.21.0. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4965).
+* BUGFIX: [vmselect](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): return `503 Service Unavailable` status code when [partial responses](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#cluster-availability) are denied and some of `vmstorage` nodes are temporarily unavailable. Previously `422 Unprocessable Entity` status code was mistakenly returned in this case, which could prevent automatic recovery by re-sending the request to a healthy cluster replica in another availability zone.
+* BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert.html): fix the bug when Group's `params` fields with multiple values were overriding each other instead of adding up. The bug was introduced in [this commit](https://github.com/VictoriaMetrics/VictoriaMetrics/commit/eccecdf177115297fa1dc4d42d38e23de9a9f2cb) starting from [v1.87.7](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.87.7). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4908).
+
 ## [v1.87.8](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.87.8)
 
 Released at 2023-09-01

@@ -1697,6 +1697,10 @@ Downsampling happens during [background merges](https://docs.victoriametrics.com
 and can't be performed if there is not enough free disk space or if vmstorage
 is in [read-only mode](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#readonly-mode).
 
+Please note that the `-downsampling.period` intervals must be multiples of each other.
+If [deduplication](https://docs.victoriametrics.com/#deduplication) is enabled, then the `-dedup.minScrapeInterval` value must also be a multiple of the `-downsampling.period` intervals.
+This is required to ensure consistency of deduplication and downsampling results.
+
 The downsampling can be evaluated for free by downloading and using enterprise binaries from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
 
 ## Multi-tenancy

@@ -127,6 +127,27 @@ Bumping the limits may significantly improve build speed.
 * Publish message in Telegram at <https://t.me/VictoriaMetrics_en> and <https://t.me/VictoriaMetrics_ru1>
 * Publish message in Google Groups at <https://groups.google.com/forum/#!forum/victorametrics-users>
 
+## Operator
+
+The operator repository: [https://github.com/VictoriaMetrics/operator/](https://github.com/VictoriaMetrics/operator/)
+
+### Bump the version of images
+
+- Bump `Version` field in the file [`internal/config/config.go`](https://github.com/VictoriaMetrics/operator/blob/master/internal/config/config.go) with the new release version for:
+  - `vmalert` in `BaseOperatorConf.VMAlertDefault.Version`,
+  - `vmagent` in `BaseOperatorConf.VMAgentDefault.Version`,
+  - `vmsingle` in `BaseOperatorConf.VMSingleDefault.Version`,
+  - `vmselect` in `BaseOperatorConf.VMClusterDefault.VMSelectDefault.Version`,
+  - `vmstorage` in `BaseOperatorConf.VMClusterDefault.VMStorageDefault.Version`,
+  - `vminsert` in `BaseOperatorConf.VMClusterDefault.VMInsertDefault.Version`,
+  - `vmbackupmanager` in `BaseOperatorConf.VMBackup.Version` (should be the enterprise version),
+  - `vmauth` in `BaseOperatorConf.VMAuthDefault.Version`.
+- Run `make operator-conf`.
+- Rename the "Next release" section in `CHANGELOG.md` to the *new release version* and create a new empty "Next release" section.
+- Commit and push changes to `master`.
+- Create and push a new tag with the *new release version*.
+- Create a GitHub release from this tag, with the "Release notes" from `CHANGELOG` for this version in the description.
+
 ## Helm Charts
 
 The helm chart repository [https://github.com/VictoriaMetrics/helm-charts/](https://github.com/VictoriaMetrics/helm-charts/)

@@ -135,7 +156,9 @@ The helm chart repository [https://github.com/VictoriaMetrics/helm-charts/](http
 
 Bump `tag` field in `values.yaml` with new release version.
 Bump `appVersion` field in `Chart.yaml` with new release version.
-Bump `version` field in `Chart.yaml` with incremental semver version.
+Add a new line to the "Next release" section in `CHANGELOG.md` about the version update (the line must always start with "`-`"). Do **NOT** change headers in `CHANGELOG.md`.
+Bump `version` field in `Chart.yaml` with incremental semver version (based on the `CHANGELOG.md` analysis).
 
 Do these updates to the following charts:
 
 1. Update `vmagent` chart version in [`values.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-agent/values.yaml) and [`Chart.yaml`](https://github.com/VictoriaMetrics/helm-charts/blob/master/charts/victoria-metrics-agent/Chart.yaml)

@@ -148,11 +171,10 @@ Do these updates to the following charts:
 
 Once updated, run the following commands:
 
 1. Run `make gen-docs`
 1. Run `make package` that creates or updates zip file with the packed chart
 1. Run `make merge`. It creates or updates metadata for charts in index.yaml
-1. Push changes to master. `master` is a source of truth
-1. Push the same changes to `gh-pages` branch
+1. Commit and push changes to `master`.
+1. Run "Release" action on Github:
+   
+1. Merge new PRs *"Automatic update CHANGELOGs and READMEs"* and *"Synchronize docs"* after pipelines are complete.
 
 ## Ansible Roles
 

docs/Release-Guide_helm-release.png (new binary file, 433 KiB, not shown)

@@ -1705,6 +1705,10 @@ Downsampling happens during [background merges](https://docs.victoriametrics.com
 and can't be performed if there is not enough free disk space or if vmstorage
 is in [read-only mode](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#readonly-mode).
 
+Please note that the `-downsampling.period` intervals must be multiples of each other.
+If [deduplication](https://docs.victoriametrics.com/#deduplication) is enabled, then the `-dedup.minScrapeInterval` value must also be a multiple of the `-downsampling.period` intervals.
+This is required to ensure consistency of deduplication and downsampling results.
+
 The downsampling can be evaluated for free by downloading and using enterprise binaries from [the releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases).
 
 ## Multi-tenancy

@@ -9,6 +9,8 @@ according to [these docs](https://docs.victoriametrics.com/VictoriaLogs/QuickSta
 * FEATURE: expose the following metrics at [/metrics](monitoring) page:
   * `vl_data_size_bytes{type="storage"}` - on-disk size for data excluding [log stream](https://docs.victoriametrics.com/VictoriaLogs/keyConcepts.html#stream-fields) indexes.
   * `vl_data_size_bytes{type="indexdb"}` - on-disk size for [log stream](https://docs.victoriametrics.com/VictoriaLogs/keyConcepts.html#stream-fields) indexes.
+* FEATURE: add `-insert.maxFieldsPerLine` command-line flag, which can be used for limiting the number of fields per line in logs sent to VictoriaLogs via ingestion protocols. This helps to avoid issues like [this](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4762).
+* FEATURE: expose `vl_http_request_duration_seconds` histogram at the [/metrics](https://docs.victoriametrics.com/VictoriaLogs/#monitoring) page. Thanks to @crossoverJie for [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4934).
 
 * BUGFIX: fix possible panic when no data is written to VictoriaLogs for a long time. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4895). Thanks to @crossoverJie for filing and fixing the issue.
 * BUGFIX: add `/insert/loki/ready` endpoint, which is used by Promtail for healthchecks. This should remove `unsupported path requested: /insert/loki/ready` warning logs. See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4762#issuecomment-1690966722).

@@ -171,6 +171,8 @@ Pass `-help` to VictoriaLogs in order to see the list of supported command-line
     Whether to use proxy protocol for connections accepted at -httpListenAddr . See https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt . With enabled proxy protocol http server cannot serve regular /metrics endpoint. Use -pushmetrics.url for metrics pushing
   -inmemoryDataFlushInterval duration
     The interval for guaranteed saving of in-memory data to disk. The saved data survives unclean shutdowns such as OOM crash, hardware reset, SIGKILL, etc. Bigger intervals may help increase the lifetime of flash storage with limited write cycles (e.g. Raspberry PI). Smaller intervals increase disk IO load. Minimum supported value is 1s (default 5s)
+  -insert.maxFieldsPerLine int
+    The maximum number of log fields per line, which can be read by /insert/* handlers (default 1000)
   -insert.maxLineSizeBytes size
     The maximum size of a single line, which can be read by /insert/* handlers
     Supports the following optional suffixes for size values: KB, MB, GB, TB, KiB, MiB, GiB, TiB (default 262144)

@@ -81,6 +81,8 @@ The response by default contains [`_msg`](https://docs.victoriametrics.com/Victo
 [`_time`](https://docs.victoriametrics.com/VictoriaLogs/keyConcepts.html#time-field) fields plus the explicitly mentioned fields.
 See [these docs](https://docs.victoriametrics.com/VictoriaLogs/LogsQL.html#querying-specific-fields) for details.
 
+The duration of requests to `/insert/elasticsearch/_bulk` can be monitored with the `vl_http_request_duration_seconds{path="/insert/elasticsearch/_bulk"}` metric.
+
 See also:
 
 - [How to debug data ingestion](#troubleshooting).

@@ -133,6 +135,8 @@ The response by default contains [`_msg`](https://docs.victoriametrics.com/Victo
 [`_time`](https://docs.victoriametrics.com/VictoriaLogs/keyConcepts.html#time-field) fields plus the explicitly mentioned fields.
 See [these docs](https://docs.victoriametrics.com/VictoriaLogs/LogsQL.html#querying-specific-fields) for details.
 
+The duration of requests to `/insert/jsonline` can be monitored with the `vl_http_request_duration_seconds{path="/insert/jsonline"}` metric.
+
 See also:
 
 - [How to debug data ingestion](#troubleshooting).

@@ -172,6 +176,8 @@ The response by default contains [`_msg`](https://docs.victoriametrics.com/Victo
 [`_time`](https://docs.victoriametrics.com/VictoriaLogs/keyConcepts.html#time-field) fields plus the explicitly mentioned fields.
 See [these docs](https://docs.victoriametrics.com/VictoriaLogs/LogsQL.html#querying-specific-fields) for details.
 
+The duration of requests to `/insert/loki/api/v1/push` can be monitored with the `vl_http_request_duration_seconds{path="/insert/loki/api/v1/push"}` metric.
+
 See also:
 
 - [How to debug data ingestion](#troubleshooting).

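A small Go sketch for eyeballing those histograms locally: it fetches the /metrics page of a VictoriaLogs instance (localhost:9428 assumed) and prints only the request-duration series:

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// Default VictoriaLogs HTTP port from the docker-compose example above.
	resp, err := http.Get("http://localhost:9428/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		// Keep only vl_http_request_duration_seconds series.
		if strings.HasPrefix(sc.Text(), "vl_http_request_duration_seconds") {
			fmt.Println(sc.Text())
		}
	}
}
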
@@ -2,10 +2,24 @@
 
 ## Next release
 
+### Features
+
+- [vmoperator](https://docs.victoriametrics.com/operator/): upgrade vmagent/vmauth's default config-reloader image.
+
 ### Fixes
 
-* [vmuser](https://docs.victoriametrics.com/operator/api.html#vmuser): [Enterprise] fixes ip_filters indent for url_prefix. Previously it wasn't possible to use ip_filters with multiple target refs
-* [vmoperator](https://docs.victoriametrics.com/operator/): turn off `EnableStrictSecurity` by default. Before, upgrade operator to v0.36.0+ could fail components with volume attached, see [this issue](https://github.com/VictoriaMetrics/operator/issues/749) for details.
+- [vmcluster](https://docs.victoriametrics.com/operator/api.html#vmcluster): remove redundant annotation `operator.victoriametrics/last-applied-spec` from created workloads like the vmstorage statefulset.
+
+<a name="v0.38.0"></a>
+## [v0.38.0](https://github.com/VictoriaMetrics/operator/releases/tag/v0.38.0) - 11 Sep 2023
+
+**Default version of VictoriaMetrics components**: `v1.93.4`
+
+### Fixes
+
+- [vmuser](https://docs.victoriametrics.com/operator/api.html#vmuser): [Enterprise] fixes ip_filters indent for url_prefix. Previously it wasn't possible to use ip_filters with multiple target refs.
+- [vmoperator](https://docs.victoriametrics.com/operator/): turn off `EnableStrictSecurity` by default. Previously, upgrading the operator to v0.36.0+ could fail components with a volume attached; see [this issue](https://github.com/VictoriaMetrics/operator/issues/749) for details.
+- [vmoperator](https://docs.victoriametrics.com/operator/): bump default version of VictoriaMetrics components to [1.93.4](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.93.4).
 
 ### Features
 

@@ -10,7 +10,7 @@ aliases:
 - /operator/vars.html
 ---
 # Auto Generated vars for package config
-updated at Thu Sep 7 11:46:52 UTC 2023
+updated at Wed Sep 13 14:05:24 UTC 2023
 
 
 | variable name | variable default value | variable required | variable description |

@@ -20,7 +20,7 @@ aliases:
 | VM_CUSTOMCONFIGRELOADERIMAGE | victoriametrics/operator:config-reloader-v0.32.0 | false | - |
 | VM_PSPAUTOCREATEENABLED | false | false | - |
 | VM_VMALERTDEFAULT_IMAGE | victoriametrics/vmalert | false | - |
-| VM_VMALERTDEFAULT_VERSION | v1.93.3 | false | - |
+| VM_VMALERTDEFAULT_VERSION | v1.93.4 | false | - |
 | VM_VMALERTDEFAULT_PORT | 8080 | false | - |
 | VM_VMALERTDEFAULT_USEDEFAULTRESOURCES | true | false | - |
 | VM_VMALERTDEFAULT_RESOURCE_LIMIT_MEM | 500Mi | false | - |

@@ -31,8 +31,8 @@ aliases:
 | VM_VMALERTDEFAULT_CONFIGRELOADERMEMORY | 25Mi | false | - |
 | VM_VMALERTDEFAULT_CONFIGRELOADIMAGE | jimmidyson/configmap-reload:v0.3.0 | false | - |
 | VM_VMAGENTDEFAULT_IMAGE | victoriametrics/vmagent | false | - |
-| VM_VMAGENTDEFAULT_VERSION | v1.93.3 | false | - |
-| VM_VMAGENTDEFAULT_CONFIGRELOADIMAGE | quay.io/prometheus-operator/prometheus-config-reloader:v0.58.0 | false | - |
+| VM_VMAGENTDEFAULT_VERSION | v1.93.4 | false | - |
+| VM_VMAGENTDEFAULT_CONFIGRELOADIMAGE | quay.io/prometheus-operator/prometheus-config-reloader:v0.68.0 | false | - |
 | VM_VMAGENTDEFAULT_PORT | 8429 | false | - |
 | VM_VMAGENTDEFAULT_USEDEFAULTRESOURCES | true | false | - |
 | VM_VMAGENTDEFAULT_RESOURCE_LIMIT_MEM | 500Mi | false | - |

@@ -42,7 +42,7 @@ aliases:
 | VM_VMAGENTDEFAULT_CONFIGRELOADERCPU | 100m | false | - |
 | VM_VMAGENTDEFAULT_CONFIGRELOADERMEMORY | 25Mi | false | - |
 | VM_VMSINGLEDEFAULT_IMAGE | victoriametrics/victoria-metrics | false | - |
-| VM_VMSINGLEDEFAULT_VERSION | v1.93.3 | false | - |
+| VM_VMSINGLEDEFAULT_VERSION | v1.93.4 | false | - |
 | VM_VMSINGLEDEFAULT_PORT | 8429 | false | - |
 | VM_VMSINGLEDEFAULT_USEDEFAULTRESOURCES | true | false | - |
 | VM_VMSINGLEDEFAULT_RESOURCE_LIMIT_MEM | 1500Mi | false | - |

@@ -53,14 +53,14 @@ aliases:
 | VM_VMSINGLEDEFAULT_CONFIGRELOADERMEMORY | 25Mi | false | - |
 | VM_VMCLUSTERDEFAULT_USEDEFAULTRESOURCES | true | false | - |
 | VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_IMAGE | victoriametrics/vmselect | false | - |
-| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_VERSION | v1.93.3-cluster | false | - |
+| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_VERSION | v1.93.4-cluster | false | - |
 | VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_PORT | 8481 | false | - |
 | VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_LIMIT_MEM | 1000Mi | false | - |
 | VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_LIMIT_CPU | 500m | false | - |
 | VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_REQUEST_MEM | 500Mi | false | - |
 | VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_REQUEST_CPU | 100m | false | - |
 | VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_IMAGE | victoriametrics/vmstorage | false | - |
-| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_VERSION | v1.93.3-cluster | false | - |
+| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_VERSION | v1.93.4-cluster | false | - |
 | VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_VMINSERTPORT | 8400 | false | - |
 | VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_VMSELECTPORT | 8401 | false | - |
 | VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_PORT | 8482 | false | - |

@@ -69,7 +69,7 @@ aliases:
 | VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_RESOURCE_REQUEST_MEM | 500Mi | false | - |
 | VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_RESOURCE_REQUEST_CPU | 250m | false | - |
 | VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_IMAGE | victoriametrics/vminsert | false | - |
-| VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_VERSION | v1.93.3-cluster | false | - |
+| VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_VERSION | v1.93.4-cluster | false | - |
 | VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_PORT | 8480 | false | - |
 | VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_RESOURCE_LIMIT_MEM | 500Mi | false | - |
 | VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_RESOURCE_LIMIT_CPU | 500m | false | - |

@@ -88,7 +88,7 @@ aliases:
 | VM_VMALERTMANAGER_RESOURCE_REQUEST_CPU | 30m | false | - |
 | VM_DISABLESELFSERVICESCRAPECREATION | false | false | - |
 | VM_VMBACKUP_IMAGE | victoriametrics/vmbackupmanager | false | - |
-| VM_VMBACKUP_VERSION | v1.93.3-enterprise | false | - |
+| VM_VMBACKUP_VERSION | v1.93.4-enterprise | false | - |
 | VM_VMBACKUP_PORT | 8300 | false | - |
 | VM_VMBACKUP_USEDEFAULTRESOURCES | true | false | - |
 | VM_VMBACKUP_RESOURCE_LIMIT_MEM | 500Mi | false | - |

@@ -97,8 +97,8 @@ aliases:
 | VM_VMBACKUP_RESOURCE_REQUEST_CPU | 150m | false | - |
 | VM_VMBACKUP_LOGLEVEL | INFO | false | - |
 | VM_VMAUTHDEFAULT_IMAGE | victoriametrics/vmauth | false | - |
-| VM_VMAUTHDEFAULT_VERSION | v1.93.3 | false | - |
-| VM_VMAUTHDEFAULT_CONFIGRELOADIMAGE | quay.io/prometheus-operator/prometheus-config-reloader:v0.48.1 | false | - |
+| VM_VMAUTHDEFAULT_VERSION | v1.93.4 | false | - |
+| VM_VMAUTHDEFAULT_CONFIGRELOADIMAGE | quay.io/prometheus-operator/prometheus-config-reloader:v0.68.0 | false | - |
 | VM_VMAUTHDEFAULT_PORT | 8427 | false | - |
 | VM_VMAUTHDEFAULT_USEDEFAULTRESOURCES | true | false | - |
 | VM_VMAUTHDEFAULT_RESOURCE_LIMIT_MEM | 300Mi | false | - |

go.mod (2 changes)

@@ -12,7 +12,7 @@ require (
 	// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
 	github.com/VictoriaMetrics/fasthttp v1.2.0
 	github.com/VictoriaMetrics/metrics v1.24.0
-	github.com/VictoriaMetrics/metricsql v0.64.0
+	github.com/VictoriaMetrics/metricsql v0.65.0
 	github.com/aws/aws-sdk-go-v2 v1.21.0
 	github.com/aws/aws-sdk-go-v2/config v1.18.39
 	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.11.83

go.sum (4 changes)

@@ -70,8 +70,8 @@ github.com/VictoriaMetrics/fasthttp v1.2.0 h1:nd9Wng4DlNtaI27WlYh5mGXCJOmee/2c2b
 github.com/VictoriaMetrics/fasthttp v1.2.0/go.mod h1:zv5YSmasAoSyv8sBVexfArzFDIGGTN4TfCKAtAw7IfE=
 github.com/VictoriaMetrics/metrics v1.24.0 h1:ILavebReOjYctAGY5QU2F9X0MYvkcrG3aEn2RKa1Zkw=
 github.com/VictoriaMetrics/metrics v1.24.0/go.mod h1:eFT25kvsTidQFHb6U0oa0rTrDRdz4xTYjpL8+UPohys=
-github.com/VictoriaMetrics/metricsql v0.64.0 h1:uty6AXQFY3OpQ+eopo1jDjCcTctuqkqYLnRbQVhukW8=
-github.com/VictoriaMetrics/metricsql v0.64.0/go.mod h1:k4UaP/+CjuZslIjd+kCigNG9TQmUqh5v0TP/nMEy90I=
+github.com/VictoriaMetrics/metricsql v0.65.0 h1:+/Oit3QycM8z/NbMHy4KENSUDS5q9QRx8h2x6cvoQOk=
+github.com/VictoriaMetrics/metricsql v0.65.0/go.mod h1:k4UaP/+CjuZslIjd+kCigNG9TQmUqh5v0TP/nMEy90I=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=

@@ -2,10 +2,12 @@ package common
 
 import (
 	"fmt"
+	"path/filepath"
 	"regexp"
 	"sort"
 	"strconv"
 	"strings"
+	"sync/atomic"
 
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 )

@@ -15,6 +17,9 @@ import (
 // Each source file can be split into parts with up to MaxPartSize sizes.
 type Part struct {
 	// Path is the path to file for backup.
+	//
+	// Path must consistently use `/` as directory separator.
+	// Use the ToCanonicalPath() function for converting local directory separators to `/`.
 	Path string
 
 	// FileSize is the size of the whole file for the given part.

@@ -33,11 +38,21 @@ type Part struct {
 	ActualSize uint64
 }
 
 // key returns a string, which uniquely identifies p.
 func (p *Part) key() string {
+	if strings.HasSuffix(p.Path, "/parts.json") {
+		// parts.json file contents change over time, so it must have a unique key in order
+		// to always copy it during backup, restore and server-side copy.
+		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5005
+		id := atomic.AddUint64(&uniqueKeyID, 1)
+		return fmt.Sprintf("unique-%016X", id)
+	}
	// Do not use p.FileSize in the key, since it cannot be properly initialized when resuming the restore for a partially restored file
 	return fmt.Sprintf("%s%016X%016X%016X", p.Path, p.Offset, p.Size, p.ActualSize)
 }
 
+var uniqueKeyID uint64
+
 // String returns human-readable representation of the part.
 func (p *Part) String() string {
 	return fmt.Sprintf("part{path: %q, file_size: %d, offset: %d, size: %d}", p.Path, p.FileSize, p.Offset, p.Size)

@@ -51,11 +66,30 @@ func (p *Part) RemotePath(prefix string) string {
 	return fmt.Sprintf("%s/%s/%016X_%016X_%016X", prefix, p.Path, p.FileSize, p.Offset, p.Size)
 }
 
+// LocalPath returns local path for p at the given dir.
+func (p *Part) LocalPath(dir string) string {
+	path := p.Path
+	if filepath.Separator != '/' {
+		path = strings.ReplaceAll(path, "/", string(filepath.Separator))
+	}
+	return filepath.Join(dir, path)
+}
+
+// ToCanonicalPath returns canonical path by replacing local directory separators with `/`.
+func ToCanonicalPath(path string) string {
+	if filepath.Separator == '/' {
+		return path
+	}
+	return strings.ReplaceAll(path, string(filepath.Separator), "/")
+}
+
 var partNameRegexp = regexp.MustCompile(`^(.+)[/\\]([0-9A-F]{16})_([0-9A-F]{16})_([0-9A-F]{16})$`)
 
 // ParseFromRemotePath parses p from remotePath.
 //
 // Returns true on success.
+//
+// remotePath must be in canonical form received from ToCanonicalPath().
 func (p *Part) ParseFromRemotePath(remotePath string) bool {
 	tmp := partNameRegexp.FindStringSubmatch(remotePath)
 	if len(tmp) != 5 {

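The convention introduced here: backup metadata always stores `/`-separated (canonical) paths, and they are converted to the local separator only at filesystem boundaries. A standalone sketch of the round trip, using re-implemented copies of the helpers above so it runs outside the repo:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// toCanonicalPath converts local separators to `/`, matching ToCanonicalPath above.
func toCanonicalPath(path string) string {
	if filepath.Separator == '/' {
		return path
	}
	return strings.ReplaceAll(path, string(filepath.Separator), "/")
}

// localPath converts a canonical path back to the local separator under dir,
// matching Part.LocalPath above.
func localPath(dir, canonical string) string {
	if filepath.Separator != '/' {
		canonical = strings.ReplaceAll(canonical, "/", string(filepath.Separator))
	}
	return filepath.Join(dir, canonical)
}

func main() {
	// On Windows this prints the path with `\` separators; on Unix the
	// canonical and local forms are identical.
	canonical := "data/small/parts.json"
	fmt.Println(localPath("/storage", canonical))
	fmt.Println(toCanonicalPath(`data\small\parts.json`)) // no-op on Unix
}
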
@@ -10,9 +10,10 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 )
 
-// AppendFiles appends all the files from dir to dst.
+// AppendFiles appends paths to all the files from local dir to dst.
 //
-// All the appended files will have dir prefix.
+// All the appended filepaths will have dir prefix.
+// The returned paths have local OS-specific directory separators.
 func AppendFiles(dst []string, dir string) ([]string, error) {
 	d, err := os.Open(dir)
 	if err != nil {

@@ -77,7 +77,7 @@ func (fs *FS) ListParts() ([]common.Part, error) {
 		if err != nil {
 			return nil, fmt.Errorf("cannot stat %q: %w", file, err)
 		}
-		path := file[len(dir):]
+		path := common.ToCanonicalPath(file[len(dir):])
 		size := uint64(fi.Size())
 		if size == 0 {
 			parts = append(parts, common.Part{

@@ -146,8 +146,9 @@ func (fs *FS) NewWriteCloser(p common.Part) (io.WriteCloser, error) {
 	return blwc, nil
 }
 
-// DeletePath deletes the given path from fs and returns the size
-// for the deleted file.
+// DeletePath deletes the given path from fs and returns the size for the deleted file.
+//
+// The path must be in canonical form, e.g. it must have `/` directory separators.
 func (fs *FS) DeletePath(path string) (uint64, error) {
 	p := common.Part{
 		Path: path,

@@ -187,7 +188,7 @@ func (fs *FS) mkdirAll(filePath string) error {
 }
 
 func (fs *FS) path(p common.Part) string {
-	return filepath.Join(fs.Dir, p.Path)
+	return p.LocalPath(fs.Dir)
 }
 
 type limitedReadCloser struct {

|

@ -58,7 +58,8 @@ func (fs *FS) ListParts() ([]common.Part, error) {
			continue
		}
		var p common.Part
		if !p.ParseFromRemotePath(file[len(dir):]) {
		remotePath := common.ToCanonicalPath(file[len(dir):])
		if !p.ParseFromRemotePath(remotePath) {
			logger.Infof("skipping unknown file %s", file)
			continue
		}

@ -68,7 +69,6 @@ func (fs *FS) ListParts() ([]common.Part, error) {
			return nil, fmt.Errorf("cannot stat file %q for part %q: %w", file, p.Path, err)
		}
		p.ActualSize = uint64(fi.Size())
		p.Path = pathToCanonical(p.Path)
		parts = append(parts, p)
	}
	return parts, nil

@ -76,7 +76,6 @@ func (fs *FS) ListParts() ([]common.Part, error) {

// DeletePart deletes the given part p from fs.
func (fs *FS) DeletePart(p common.Part) error {
	p.Path = canonicalPathToLocal(p.Path)
	path := fs.path(p)
	if err := os.Remove(path); err != nil {
		return fmt.Errorf("cannot remove %q: %w", path, err)

@ -97,7 +96,6 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {
	if !ok {
		return fmt.Errorf("cannot perform server-side copying from %s to %s: both of them must be fsremote", srcFS, fs)
	}
	p.Path = canonicalPathToLocal(p.Path)
	srcPath := src.path(p)
	dstPath := fs.path(p)
	if err := fs.mkdirAll(dstPath); err != nil {

@ -142,7 +140,6 @@ func (fs *FS) CopyPart(srcFS common.OriginFS, p common.Part) error {

// DownloadPart download part p from fs to w.
func (fs *FS) DownloadPart(p common.Part, w io.Writer) error {
	p.Path = canonicalPathToLocal(p.Path)
	path := fs.path(p)
	r, err := os.Open(path)
	if err != nil {

@ -198,14 +195,13 @@ func (fs *FS) mkdirAll(filePath string) error {
}

func (fs *FS) path(p common.Part) string {
	return filepath.Join(fs.Dir, p.Path, fmt.Sprintf("%016X_%016X_%016X", p.FileSize, p.Offset, p.Size))
	return filepath.Join(p.LocalPath(fs.Dir), fmt.Sprintf("%016X_%016X_%016X", p.FileSize, p.Offset, p.Size))
}

// DeleteFile deletes filePath at fs.
//
// The function does nothing if the filePath doesn't exist.
func (fs *FS) DeleteFile(filePath string) error {
	filePath = canonicalPathToLocal(filePath)
	path := filepath.Join(fs.Dir, filePath)
	err := os.Remove(path)
	if err != nil && !os.IsNotExist(err) {

@ -247,6 +243,5 @@ func (fs *FS) HasFile(filePath string) (bool, error) {
// ReadFile returns the content of filePath at fs.
func (fs *FS) ReadFile(filePath string) ([]byte, error) {
	path := filepath.Join(fs.Dir, filePath)

	return os.ReadFile(path)
}
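
Remote part names encode `FileSize`, `Offset` and `Size` as fixed-width uppercase hex (`%016X_%016X_%016X`) appended to the canonical file path, which is what `partNameRegexp` picks apart again in `ListParts`. A small self-contained sketch of that encode/parse cycle (simplified struct and helper names, not the actual package API):

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

type part struct {
	Path                   string
	FileSize, Offset, Size uint64
}

var partNameRe = regexp.MustCompile(`^(.+)[/\\]([0-9A-F]{16})_([0-9A-F]{16})_([0-9A-F]{16})$`)

// remoteName encodes p the same way fsremote builds on-disk part names.
func (p part) remoteName() string {
	return fmt.Sprintf("%s/%016X_%016X_%016X", p.Path, p.FileSize, p.Offset, p.Size)
}

// parsePart reverses remoteName; it returns false if s doesn't match the scheme.
func parsePart(s string) (part, bool) {
	m := partNameRe.FindStringSubmatch(s)
	if len(m) != 5 {
		return part{}, false
	}
	fileSize, _ := strconv.ParseUint(m[2], 16, 64)
	offset, _ := strconv.ParseUint(m[3], 16, 64)
	size, _ := strconv.ParseUint(m[4], 16, 64)
	return part{Path: m[1], FileSize: fileSize, Offset: offset, Size: size}, true
}

func main() {
	p := part{Path: "data/2023_09", FileSize: 1 << 20, Offset: 0, Size: 4096}
	name := p.remoteName()
	fmt.Println(name)
	q, ok := parsePart(name)
	fmt.Println(ok, q == p) // true true
}
```
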

@ -1,12 +0,0 @@
//go:build freebsd || openbsd || dragonfly || netbsd
// +build freebsd openbsd dragonfly netbsd

package fsremote

func pathToCanonical(path string) string {
	return path
}

func canonicalPathToLocal(path string) string {
	return path
}

@ -1,9 +0,0 @@
package fsremote

func pathToCanonical(path string) string {
	return path
}

func canonicalPathToLocal(path string) string {
	return path
}

@ -1,9 +0,0 @@
package fsremote

func pathToCanonical(path string) string {
	return path
}

func canonicalPathToLocal(path string) string {
	return path
}

@ -1,9 +0,0 @@
package fsremote

func pathToCanonical(path string) string {
	return path
}

func canonicalPathToLocal(path string) string {
	return path
}

@ -1,11 +0,0 @@
package fsremote

import "strings"

func pathToCanonical(path string) string {
	return strings.ReplaceAll(path, "\\", "/")
}

func canonicalPathToLocal(path string) string {
	return strings.ReplaceAll(path, "/", "\\")
}

@ -1,6 +1,7 @@
package kubernetes

import (
	"context"
	"encoding/json"
	"errors"
	"flag"

@ -16,11 +17,13 @@ import (
	"sync/atomic"
	"time"

	"github.com/VictoriaMetrics/metrics"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
	"github.com/VictoriaMetrics/metrics"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
)

var apiServerTimeout = flag.Duration("promscrape.kubernetes.apiServerTimeout", 30*time.Minute, "How frequently to reload the full state from Kubernetes API server")

@ -269,7 +272,11 @@ func selectorsKey(selectors []Selector) string {

var (
	groupWatchersLock sync.Mutex
	groupWatchers     = make(map[string]*groupWatcher)
	groupWatchers     = func() map[string]*groupWatcher {
		gws := make(map[string]*groupWatcher)
		go groupWatchersCleaner(gws)
		return gws
	}()

	_ = metrics.NewGauge(`vm_promscrape_discovery_kubernetes_group_watchers`, func() float64 {
		groupWatchersLock.Lock()

@ -279,6 +286,21 @@ var (
	})
)

func groupWatchersCleaner(gws map[string]*groupWatcher) {
	for {
		time.Sleep(7 * time.Second)
		groupWatchersLock.Lock()
		for key, gw := range gws {
			gw.mu.Lock()
			if len(gw.m) == 0 {
				delete(gws, key)
			}
			gw.mu.Unlock()
		}
		groupWatchersLock.Unlock()
	}
}

type swosByKeyWithLock struct {
	mu        sync.Mutex
	swosByKey map[string][]interface{}
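
The `groupWatchers` change above uses an immediately-invoked initializer so the cleaner goroutine is already running at package init, before the first subscription can arrive, and idle watcher groups get evicted even if nobody ever subscribes again. The pattern in isolation (hypothetical `entry` type; intervals shortened for demonstration):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type entry struct{ users int }

var (
	registryLock sync.Mutex
	// The initializer runs exactly once at package init and starts the
	// background cleaner before any entry can be registered.
	registry = func() map[string]*entry {
		m := make(map[string]*entry)
		go cleaner(m)
		return m
	}()
)

// cleaner periodically evicts entries that no longer have users.
func cleaner(m map[string]*entry) {
	for {
		time.Sleep(100 * time.Millisecond)
		registryLock.Lock()
		for k, e := range m {
			if e.users == 0 {
				delete(m, k)
			}
		}
		registryLock.Unlock()
	}
}

func main() {
	registryLock.Lock()
	registry["idle"] = &entry{users: 0}
	registry["busy"] = &entry{users: 1}
	registryLock.Unlock()

	time.Sleep(300 * time.Millisecond)

	registryLock.Lock()
	fmt.Println(len(registry)) // 1: the idle entry has been evicted
	registryLock.Unlock()
}
```
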

@ -378,31 +400,14 @@ func (gw *groupWatcher) startWatchersForRole(role string, aw *apiWatcher) {
				// This should guarantee that the ScrapeWork objects for these objects are properly updated
				// as soon as the objects they depend on are updated.
				// This should fix https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1240 .
				go func() {
					const minSleepTime = 5 * time.Second
					sleepTime := minSleepTime
					for {
						time.Sleep(sleepTime)
						startTime := time.Now()
						gw.mu.Lock()
						if uw.needRecreateScrapeWorks {
							uw.needRecreateScrapeWorks = false
							uw.recreateScrapeWorksLocked(uw.objectsByKey, uw.aws)
							sleepTime = time.Since(startTime)
							if sleepTime < minSleepTime {
								sleepTime = minSleepTime
							}
						}
						gw.mu.Unlock()
					}
				}()
				go uw.recreateScrapeWorks()
			}
		}
	}
}

// doRequest performs http request to the given requestURL.
func (gw *groupWatcher) doRequest(requestURL string) (*http.Response, error) {
func (gw *groupWatcher) doRequest(ctx context.Context, requestURL string) (*http.Response, error) {
	if strings.Contains(requestURL, "/apis/networking.k8s.io/v1/") && atomic.LoadUint32(&gw.useNetworkingV1Beta1) == 1 {
		// Update networking URL for old Kubernetes API, which supports only v1beta1 path.
		requestURL = strings.Replace(requestURL, "/apis/networking.k8s.io/v1/", "/apis/networking.k8s.io/v1beta1/", 1)

@ -411,7 +416,7 @@ func (gw *groupWatcher) doRequest(requestURL string) (*http.Response, error) {
		// Update discovery URL for old Kubernetes API, which supports only v1beta1 path.
		requestURL = strings.Replace(requestURL, "/apis/discovery.k8s.io/v1/", "/apis/discovery.k8s.io/v1beta1/", 1)
	}
	req, err := http.NewRequest(http.MethodGet, requestURL, nil)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, requestURL, nil)
	if err != nil {
		logger.Fatalf("cannot create a request for %q: %s", requestURL, err)
	}

@ -423,11 +428,11 @@ func (gw *groupWatcher) doRequest(requestURL string) (*http.Response, error) {
	if resp.StatusCode == http.StatusNotFound {
		if strings.Contains(requestURL, "/apis/networking.k8s.io/v1/") && atomic.LoadUint32(&gw.useNetworkingV1Beta1) == 0 {
			atomic.StoreUint32(&gw.useNetworkingV1Beta1, 1)
			return gw.doRequest(requestURL)
			return gw.doRequest(ctx, requestURL)
		}
		if strings.Contains(requestURL, "/apis/discovery.k8s.io/v1/") && atomic.LoadUint32(&gw.useDiscoveryV1Beta1) == 0 {
			atomic.StoreUint32(&gw.useDiscoveryV1Beta1, 1)
			return gw.doRequest(requestURL)
			return gw.doRequest(ctx, requestURL)
		}
	}
	return resp, nil
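
Threading a `context.Context` into `http.NewRequestWithContext` is what lets the watcher abort in-flight API calls the moment its context is cancelled, instead of waiting out the full client timeout. A minimal demonstration against a stand-in slow endpoint:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
	"time"
)

func main() {
	// A test server that never answers within the caller's budget; it
	// returns only once the client side goes away.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		<-r.Context().Done()
	}))
	defer srv.Close()

	ctx, cancel := context.WithCancel(context.Background())
	// Cancel shortly after the request starts, as stopIfNoUsers does for a watcher.
	time.AfterFunc(50*time.Millisecond, cancel)

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, srv.URL, nil)
	if err != nil {
		panic(err)
	}
	_, err = http.DefaultClient.Do(req)
	// The request fails with context.Canceled instead of hanging; callers
	// can then suppress the log noise exactly as reloadObjects does.
	fmt.Println(errors.Is(err, context.Canceled)) // true
}
```
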

@ -446,6 +451,9 @@ func (gw *groupWatcher) unsubscribeAPIWatcher(aw *apiWatcher) {
	defer gw.mu.Unlock()
	for _, uw := range gw.m {
		uw.unsubscribeAPIWatcherLocked(aw)
		if len(uw.aws)+len(uw.awsPending) == 0 {
			time.AfterFunc(10*time.Second, uw.stopIfNoUsers)
		}
	}
}

@ -458,6 +466,9 @@ type urlWatcher struct {
	apiURL string
	gw     *groupWatcher

	ctx    context.Context
	cancel context.CancelFunc

	parseObject     parseObjectFunc
	parseObjectList parseObjectListFunc

@ -488,11 +499,16 @@ type urlWatcher struct {
func newURLWatcher(role, apiURL string, gw *groupWatcher) *urlWatcher {
	parseObject, parseObjectList := getObjectParsersForRole(role)
	metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_url_watchers{role=%q}`, role)).Inc()

	ctx, cancel := context.WithCancel(context.Background())
	uw := &urlWatcher{
		role:   role,
		apiURL: apiURL,
		gw:     gw,

		ctx:    ctx,
		cancel: cancel,

		parseObject:     parseObject,
		parseObjectList: parseObjectList,

@ -510,6 +526,44 @@ func newURLWatcher(role, apiURL string, gw *groupWatcher) *urlWatcher {
	return uw
}

func (uw *urlWatcher) stopIfNoUsers() {
	gw := uw.gw
	gw.mu.Lock()
	if len(uw.aws)+len(uw.awsPending) == 0 {
		uw.cancel()
		delete(gw.m, uw.apiURL)
	}
	gw.mu.Unlock()
}

func (uw *urlWatcher) recreateScrapeWorks() {
	const minSleepTime = 5 * time.Second
	sleepTime := minSleepTime
	gw := uw.gw
	stopCh := uw.ctx.Done()
	for {
		t := timerpool.Get(sleepTime)
		select {
		case <-stopCh:
			timerpool.Put(t)
			return
		case <-t.C:
			timerpool.Put(t)
		}
		startTime := time.Now()
		gw.mu.Lock()
		if uw.needRecreateScrapeWorks {
			uw.needRecreateScrapeWorks = false
			uw.recreateScrapeWorksLocked(uw.objectsByKey, uw.aws)
			sleepTime = time.Since(startTime)
			if sleepTime < minSleepTime {
				sleepTime = minSleepTime
			}
		}
		gw.mu.Unlock()
	}
}

func (uw *urlWatcher) subscribeAPIWatcherLocked(aw *apiWatcher) {
	if _, ok := uw.aws[aw]; !ok {
		if _, ok := uw.awsPending[aw]; !ok {
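
`recreateScrapeWorks` replaces the bare `time.Sleep` loop with a pooled timer plus a `select` on the watcher's context, so the goroutine exits promptly once `stopIfNoUsers` cancels it. The same cancellable-sleep idea with only the standard library (no `timerpool`), as a sketch:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// runPeriodic invokes work every interval until ctx is cancelled.
// Unlike a time.Sleep loop, the select observes cancellation immediately
// instead of waiting out the remainder of the current sleep.
func runPeriodic(ctx context.Context, interval time.Duration, work func()) {
	t := time.NewTimer(interval)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
		}
		work()
		t.Reset(interval)
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	go func() {
		defer close(done)
		runPeriodic(ctx, 50*time.Millisecond, func() { fmt.Println("tick") })
	}()
	time.Sleep(180 * time.Millisecond)
	cancel()
	<-done
	fmt.Println("stopped")
}
```
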

@ -587,9 +641,11 @@ func (uw *urlWatcher) reloadObjects() string {
	// and https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4855 .
	delimiter := getQueryArgsDelimiter(apiURL)
	requestURL := apiURL + delimiter + "resourceVersion=0&resourceVersionMatch=NotOlderThan"
	resp, err := uw.gw.doRequest(requestURL)
	resp, err := uw.gw.doRequest(uw.ctx, requestURL)
	if err != nil {
		logger.Errorf("cannot perform request to %q: %s", requestURL, err)
		if !errors.Is(err, context.Canceled) {
			logger.Errorf("cannot perform request to %q: %s", requestURL, err)
		}
		return ""
	}
	if resp.StatusCode != http.StatusOK {

@ -653,10 +709,18 @@ func (uw *urlWatcher) reloadObjects() string {
//
// See https://kubernetes.io/docs/reference/using-api/api-concepts/#efficient-detection-of-changes
func (uw *urlWatcher) watchForUpdates() {
	stopCh := uw.ctx.Done()
	backoffDelay := time.Second
	maxBackoffDelay := 30 * time.Second
	backoffSleep := func() {
		time.Sleep(backoffDelay)
		t := timerpool.Get(backoffDelay)
		select {
		case <-stopCh:
			timerpool.Put(t)
			return
		case <-t.C:
			timerpool.Put(t)
		}
		backoffDelay *= 2
		if backoffDelay > maxBackoffDelay {
			backoffDelay = maxBackoffDelay
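
The backoff helper doubles its delay after every failed attempt and caps it at 30s; the new version additionally makes the sleep interruptible. Just the backoff arithmetic in isolation (cancellation omitted, since the previous sketch covers it):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	backoffDelay := time.Second
	maxBackoffDelay := 30 * time.Second
	// Each failure doubles the delay: 1s, 2s, 4s, ... capped at 30s.
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: sleep %v\n", attempt, backoffDelay)
		backoffDelay *= 2
		if backoffDelay > maxBackoffDelay {
			backoffDelay = maxBackoffDelay
		}
	}
}
```
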

@ -667,16 +731,26 @@ func (uw *urlWatcher) watchForUpdates() {
	timeoutSeconds := time.Duration(0.9 * float64(uw.gw.client.Timeout)).Seconds()
	apiURL += delimiter + "watch=1&allowWatchBookmarks=true&timeoutSeconds=" + strconv.Itoa(int(timeoutSeconds))
	for {
		select {
		case <-stopCh:
			metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_discovery_kubernetes_url_watchers{role=%q}`, uw.role)).Dec()
			logger.Infof("stopped %s watcher for %q", uw.role, uw.apiURL)
			return
		default:
		}

		resourceVersion := uw.reloadObjects()
		if resourceVersion == "" {
			backoffSleep()
			continue
		}
		requestURL := apiURL + "&resourceVersion=" + url.QueryEscape(resourceVersion)
		resp, err := uw.gw.doRequest(requestURL)
		resp, err := uw.gw.doRequest(uw.ctx, requestURL)
		if err != nil {
			logger.Errorf("cannot perform request to %q: %s", requestURL, err)
			backoffSleep()
			if !errors.Is(err, context.Canceled) {
				logger.Errorf("cannot perform request to %q: %s", requestURL, err)
				backoffSleep()
			}
			continue
		}
		if resp.StatusCode != http.StatusOK {

@ -1695,6 +1695,8 @@ func (db *indexDB) getTSIDsFromMetricIDs(qt *querytracer.Tracer, metricIDs []uin
	if len(metricIDs) == 0 {
		return nil, nil
	}

	// Search for TSIDs in the current indexdb
	tsids := make([]TSID, len(metricIDs))
	var extMetricIDs []uint64
	i := 0

@ -1719,13 +1721,10 @@ func (db *indexDB) getTSIDsFromMetricIDs(qt *querytracer.Tracer, metricIDs []uin
			if err != io.EOF {
				return err
			}
		if err := is.getTSIDByMetricID(tsid, metricID); err != nil {
			if err == io.EOF {
				// Postpone searching for the metricID in the extDB.
				extMetricIDs = append(extMetricIDs, metricID)
				continue
			}
			return fmt.Errorf("cannot find tsid %d out of %d for metricID %d: %w", i, len(metricIDs), metricID, err)
		if !is.getTSIDByMetricID(tsid, metricID) {
			// Postpone searching for the missing metricID in the extDB.
			extMetricIDs = append(extMetricIDs, metricID)
			continue
		}
		is.db.putToMetricIDCache(metricID, tsid)
		i++

@ -1738,40 +1737,37 @@ func (db *indexDB) getTSIDsFromMetricIDs(qt *querytracer.Tracer, metricIDs []uin
	tsidsFound := i
	qt.Printf("found %d tsids for %d metricIDs in the current indexdb", tsidsFound, len(metricIDs))

	// Search for extMetricIDs in the extDB.
	db.doExtDB(func(extDB *indexDB) {
		is := extDB.getIndexSearch(deadline)
		defer extDB.putIndexSearch(is)
		for loopsPaceLimiter, metricID := range extMetricIDs {
			if loopsPaceLimiter&paceLimiterSlowIterationsMask == 0 {
				if err = checkSearchDeadlineAndPace(is.deadline); err != nil {
					return
	if len(extMetricIDs) > 0 {
		// Search for extMetricIDs in the previous indexdb (aka extDB)
		db.doExtDB(func(extDB *indexDB) {
			is := extDB.getIndexSearch(deadline)
			defer extDB.putIndexSearch(is)
			for loopsPaceLimiter, metricID := range extMetricIDs {
				if loopsPaceLimiter&paceLimiterSlowIterationsMask == 0 {
					if err = checkSearchDeadlineAndPace(is.deadline); err != nil {
						return
					}
				}
			}
			// There is no need in searching for TSIDs in MetricID->TSID cache, since
			// this has been already done in the loop above (the MetricID->TSID cache is global).
			tsid := &tsids[i]
			if err = is.getTSIDByMetricID(tsid, metricID); err != nil {
				if err == io.EOF {
				// There is no need in searching for TSIDs in MetricID->TSID cache, since
				// this has been already done in the loop above (the MetricID->TSID cache is global).
				tsid := &tsids[i]
				if !is.getTSIDByMetricID(tsid, metricID) {
					// Cannot find TSID for the given metricID.
					// This may be the case on incomplete indexDB
					// due to snapshot or due to unflushed entries.
					// Just increment errors counter and skip it for now.
					atomic.AddUint64(&is.db.missingTSIDsForMetricID, 1)
					err = nil
					continue
				}
				err = fmt.Errorf("cannot find tsid for metricID=%d: %w", metricID, err)
				return
				is.db.putToMetricIDCache(metricID, tsid)
				i++
			}
			is.db.putToMetricIDCache(metricID, tsid)
			i++
		})
		if err != nil {
			return nil, fmt.Errorf("error when searching for TSIDs by metricIDs in the previous indexdb: %w", err)
		}
	})
	if err != nil {
		return nil, fmt.Errorf("error when searching for TSIDs by metricIDs in the previous indexdb: %w", err)
		qt.Printf("found %d tsids for %d metricIDs in the previous indexdb", i-tsidsFound, len(extMetricIDs))
	}
	qt.Printf("found %d tsids for %d metricIDs in the previous indexdb", i-tsidsFound, len(extMetricIDs))

	tsids = tsids[:i]
	qt.Printf("load %d tsids for %d metricIDs from both current and previous indexdb", len(tsids), len(metricIDs))

@ -1884,7 +1880,7 @@ func (is *indexSearch) containsTimeRange(tr TimeRange) (bool, error) {
	return true, nil
}

func (is *indexSearch) getTSIDByMetricID(dst *TSID, metricID uint64) error {
func (is *indexSearch) getTSIDByMetricID(dst *TSID, metricID uint64) bool {
	// There is no need in checking for deleted metricIDs here, since they
	// must be checked by the caller.
	ts := &is.ts

@ -1893,19 +1889,19 @@ func (is *indexSearch) getTSIDByMetricID(dst *TSID, metricID uint64) bool {
	kb.B = encoding.MarshalUint64(kb.B, metricID)
	if err := ts.FirstItemWithPrefix(kb.B); err != nil {
		if err == io.EOF {
			return err
			return false
		}
		return fmt.Errorf("error when searching TSID by metricID; searchPrefix %q: %w", kb.B, err)
		logger.Panicf("FATAL: error when searching TSID by metricID=%d; searchPrefix %q: %s", metricID, kb.B, err)
	}
	v := ts.Item[len(kb.B):]
	tail, err := dst.Unmarshal(v)
	if err != nil {
		return fmt.Errorf("cannot unmarshal TSID=%X: %w", v, err)
		logger.Panicf("FATAL: cannot unmarshal the found TSID=%X for metricID=%d: %s", v, metricID, err)
	}
	if len(tail) > 0 {
		return fmt.Errorf("unexpected non-zero tail left after unmarshaling TSID: %X", tail)
		logger.Panicf("FATAL: unexpected non-zero tail left after unmarshaling TSID for metricID=%d: %X", metricID, tail)
	}
	return nil
	return true
}

// updateMetricIDsByMetricNameMatch matches metricName values for the given srcMetricIDs against tfs
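
The `getTSIDByMetricID` signature change encodes "not found" as a plain `bool` and promotes genuinely impossible states (storage corruption) to a fatal panic, so callers no longer need to distinguish `io.EOF` from real errors. A sketch of that comma-ok style on a toy lookup (hypothetical names):

```go
package main

import "fmt"

// index stands in for the metricID->TSID mapping; real lookups go through
// the mergeset table search shown above.
var index = map[uint64]string{1: "tsid-a", 2: "tsid-b"}

// lookup uses the comma-ok style the new getTSIDByMetricID adopts:
// a missing entry is an expected outcome and comes back as a bool, while
// impossible states (corrupted entries) would panic inside the callee.
func lookup(metricID uint64) (string, bool) {
	v, ok := index[metricID]
	return v, ok
}

func main() {
	if v, ok := lookup(1); ok {
		fmt.Println("found:", v)
	}
	if _, ok := lookup(42); !ok {
		fmt.Println("missing: postpone the search to the previous indexdb")
	}
}
```
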

@ -936,29 +936,25 @@ func (pt *partition) ForceMergeAllParts() error {
		// Nothing to merge.
		return nil
	}
	for {
		// Check whether there is enough disk space for merging pws.
		newPartSize := getPartsSize(pws)
		maxOutBytes := fs.MustGetFreeSpace(pt.bigPartsPath)
		if newPartSize > maxOutBytes {
			freeSpaceNeededBytes := newPartSize - maxOutBytes
			forceMergeLogger.Warnf("cannot initiate force merge for the partition %s; additional space needed: %d bytes", pt.name, freeSpaceNeededBytes)
			pt.releasePartsToMerge(pws)
			return nil
		}

		// If len(pws) == 1, then the merge must run anyway.
		// This allows applying the configured retention, removing the deleted series
		// and performing de-duplication if needed.
		if err := pt.mergePartsOptimal(pws, pt.stopCh); err != nil {
			return fmt.Errorf("cannot force merge %d parts from partition %q: %w", len(pws), pt.name, err)
		}
		pws = pt.getAllPartsForMerge()
		if len(pws) <= 1 {
			pt.releasePartsToMerge(pws)
			return nil
		}
	}
	// Check whether there is enough disk space for merging pws.
	newPartSize := getPartsSize(pws)
	maxOutBytes := fs.MustGetFreeSpace(pt.bigPartsPath)
	if newPartSize > maxOutBytes {
		freeSpaceNeededBytes := newPartSize - maxOutBytes
		forceMergeLogger.Warnf("cannot initiate force merge for the partition %s; additional space needed: %d bytes", pt.name, freeSpaceNeededBytes)
		pt.releasePartsToMerge(pws)
		return nil
	}

	// If len(pws) == 1, then the merge must run anyway.
	// This allows applying the configured retention, removing the deleted series
	// and performing de-duplication if needed.
	if err := pt.mergePartsOptimal(pws, pt.stopCh); err != nil {
		return fmt.Errorf("cannot force merge %d parts from partition %q: %w", len(pws), pt.name, err)
	}

	return nil
}

var forceMergeLogger = logger.WithThrottler("forceMerge", time.Minute)
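
Force merge only starts if the estimated output size fits into the free space on the big-parts volume; otherwise it logs the shortfall through a throttled logger and backs off. The guard in isolation (hypothetical helper and sizes):

```go
package main

import "fmt"

// canForceMerge applies the same guard as ForceMergeAllParts: a merge that
// would need more bytes than the destination volume has free is refused,
// and the caller is told how much additional space it would take.
func canForceMerge(newPartSize, freeSpace uint64) (ok bool, shortfall uint64) {
	if newPartSize > freeSpace {
		return false, newPartSize - freeSpace
	}
	return true, 0
}

func main() {
	// Hypothetical sizes: a 7 GiB merged part against 5 GiB of free space.
	ok, shortfall := canForceMerge(7<<30, 5<<30)
	fmt.Println(ok, shortfall) // false 2147483648 (2 GiB more needed)
}
```
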

@ -1156,7 +1156,7 @@ func (s *Storage) prefetchMetricNames(qt *querytracer.Tracer, srcMetricIDs []uin
	qt.Printf("%d out of %d metric names must be pre-fetched", len(metricIDs), len(srcMetricIDs))
	if len(metricIDs) < 500 {
		// It is cheaper to skip pre-fetching and obtain metricNames inline.
		qt.Printf("skip pre-fetching metric names for low number of metrid ids=%d", len(metricIDs))
		qt.Printf("skip pre-fetching metric names for low number of metric ids=%d", len(metricIDs))
		return nil
	}
	atomic.AddUint64(&s.slowMetricNameLoads, uint64(len(metricIDs)))

3
vendor/github.com/VictoriaMetrics/metricsql/aggr.go
generated
vendored

@ -43,7 +43,8 @@ var aggrFuncs = map[string]bool{
	"zscore": true,
}

func isAggrFunc(s string) bool {
// IsAggrFunc returns whether funcName is a known aggregate function.
func IsAggrFunc(s string) bool {
	s = strings.ToLower(s)
	return aggrFuncs[s]
}

2
vendor/github.com/VictoriaMetrics/metricsql/optimizer.go
generated
vendored

@ -362,7 +362,7 @@ func getFuncArgIdxForOptimization(funcName string, args []Expr) int {
	if IsTransformFunc(funcName) {
		return getTransformArgIdxForOptimization(funcName, args)
	}
	if isAggrFunc(funcName) {
	if IsAggrFunc(funcName) {
		return getAggrArgIdxForOptimization(funcName, args)
	}
	return -1

9
vendor/github.com/VictoriaMetrics/metricsql/parser.go
generated
vendored

@ -31,6 +31,9 @@ func Parse(s string) (Expr, error) {
	}
	e = removeParensExpr(e)
	e = simplifyConstants(e)
	if err := checkSupportedFunctions(e); err != nil {
		return nil, err
	}
	return e, nil
}

@ -596,7 +599,7 @@ func (p *parser) parseParensExpr() (*parensExpr, error) {
}

func (p *parser) parseAggrFuncExpr() (*AggrFuncExpr, error) {
	if !isAggrFunc(p.lex.Token) {
	if !IsAggrFunc(p.lex.Token) {
		return nil, fmt.Errorf(`AggrFuncExpr: unexpected token %q; want aggregate func`, p.lex.Token)
	}

@ -1608,7 +1611,7 @@ func (p *parser) parseIdentExpr() (Expr, error) {
	}
	if isIdentPrefix(p.lex.Token) {
		p.lex.Prev()
		if isAggrFunc(p.lex.Token) {
		if IsAggrFunc(p.lex.Token) {
			return p.parseAggrFuncExpr()
		}
		return p.parseMetricExpr()

@ -1620,7 +1623,7 @@ func (p *parser) parseIdentExpr() (Expr, error) {
	switch p.lex.Token {
	case "(":
		p.lex.Prev()
		if isAggrFunc(p.lex.Token) {
		if IsAggrFunc(p.lex.Token) {
			return p.parseAggrFuncExpr()
		}
		return p.parseFuncExpr()

42
vendor/github.com/VictoriaMetrics/metricsql/utils.go
generated
vendored

@ -1,5 +1,10 @@
package metricsql

import (
	"fmt"
	"strings"
)

// ExpandWithExprs expands WITH expressions inside q and returns the resulting
// PromQL without WITH expressions.
func ExpandWithExprs(q string) (string, error) {

@ -14,7 +19,7 @@ func ExpandWithExprs(q string) (string, error) {
// VisitAll recursively calls f for all the Expr children in e.
//
// It visits leaf children at first and then visits parent nodes.
// It is safe modifying expr in f.
// It is safe modifying e in f.
func VisitAll(e Expr, f func(expr Expr)) {
	switch expr := e.(type) {
	case *BinaryOpExpr:

@ -36,3 +41,38 @@ func VisitAll(e Expr, f func(expr Expr)) {
	}
	f(e)
}

// IsSupportedFunction returns true if funcName contains supported MetricsQL function
func IsSupportedFunction(funcName string) bool {
	funcName = strings.ToLower(funcName)
	if IsRollupFunc(funcName) {
		return true
	}
	if IsTransformFunc(funcName) {
		return true
	}
	if IsAggrFunc(funcName) {
		return true
	}
	return false
}

func checkSupportedFunctions(e Expr) error {
	var err error
	VisitAll(e, func(expr Expr) {
		if err != nil {
			return
		}
		switch t := expr.(type) {
		case *FuncExpr:
			if !IsRollupFunc(t.Name) && !IsTransformFunc(t.Name) {
				err = fmt.Errorf("unsupported function %q", t.Name)
			}
		case *AggrFuncExpr:
			if !IsAggrFunc(t.Name) {
				err = fmt.Errorf("unsupported aggregate function %q", t.Name)
			}
		}
	})
	return err
}
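
With `IsAggrFunc` exported and `Parse` now rejecting unknown functions up front, callers can validate queries without executing them. A small usage sketch against the public metricsql API (assumes `github.com/VictoriaMetrics/metricsql` v0.65.0 or newer in go.mod, matching the vendor bump below):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/metricsql"
)

func main() {
	// Parse now runs checkSupportedFunctions, so a bogus function name
	// fails at parse time rather than at query execution time.
	if _, err := metricsql.Parse(`no_such_func(node_cpu_seconds_total)`); err != nil {
		fmt.Println("rejected:", err)
	}

	// The exported IsAggrFunc / IsSupportedFunction helpers answer the same
	// questions the parser asks internally.
	fmt.Println(metricsql.IsAggrFunc("SUM"))               // true: matching is case-insensitive
	fmt.Println(metricsql.IsSupportedFunction("rate"))     // true: rollup function
	fmt.Println(metricsql.IsSupportedFunction("not_real")) // false
}
```
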

2
vendor/modules.txt
vendored

@ -99,7 +99,7 @@ github.com/VictoriaMetrics/fasthttp/stackless
# github.com/VictoriaMetrics/metrics v1.24.0
## explicit; go 1.20
github.com/VictoriaMetrics/metrics
# github.com/VictoriaMetrics/metricsql v0.64.0
# github.com/VictoriaMetrics/metricsql v0.65.0
## explicit; go 1.13
github.com/VictoriaMetrics/metricsql
github.com/VictoriaMetrics/metricsql/binaryop