Merge branch 'public-single-node' into pmm-6401-read-prometheus-data-files

Aliaksandr Valialkin 2023-03-20 20:39:18 -07:00
commit f42572e049
147 changed files with 4125 additions and 3724 deletions


@ -55,7 +55,7 @@ jobs:
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: 1.20.2
check-latest: true


@ -30,7 +30,7 @@ jobs:
uses: actions/checkout@v3
- name: Setup Go
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: 1.20.2
check-latest: true
@ -54,7 +54,7 @@ jobs:
uses: actions/checkout@v3
- name: Setup Go
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: 1.20.2
check-latest: true
@ -79,7 +79,7 @@ jobs:
- name: Setup Go
id: go
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: 1.20.2
check-latest: true


@ -186,7 +186,8 @@ release-victoria-metrics: \
release-victoria-metrics-darwin-amd64 \
release-victoria-metrics-darwin-arm64 \
release-victoria-metrics-freebsd-amd64 \
release-victoria-metrics-openbsd-amd64
release-victoria-metrics-openbsd-amd64 \
release-victoria-metrics-windows-amd64
# adds i386 arch
release-victoria-metrics-linux-386:
@ -213,6 +214,9 @@ release-victoria-metrics-freebsd-amd64:
release-victoria-metrics-openbsd-amd64:
GOOS=openbsd GOARCH=amd64 $(MAKE) release-victoria-metrics-goos-goarch
release-victoria-metrics-windows-amd64:
GOARCH=amd64 $(MAKE) release-victoria-metrics-windows-goarch
release-victoria-metrics-goos-goarch: victoria-metrics-$(GOOS)-$(GOARCH)-prod
cd bin && \
tar --transform="flags=r;s|-$(GOOS)-$(GOARCH)||" -czf victoria-metrics-$(GOOS)-$(GOARCH)-$(PKG_TAG).tar.gz \
@ -222,6 +226,16 @@ release-victoria-metrics-goos-goarch: victoria-metrics-$(GOOS)-$(GOARCH)-prod
| sed s/-$(GOOS)-$(GOARCH)-prod/-prod/ > victoria-metrics-$(GOOS)-$(GOARCH)-$(PKG_TAG)_checksums.txt
cd bin && rm -rf victoria-metrics-$(GOOS)-$(GOARCH)-prod
release-victoria-metrics-windows-goarch: victoria-metrics-windows-$(GOARCH)-prod
cd bin && \
zip victoria-metrics-windows-$(GOARCH)-$(PKG_TAG).zip \
victoria-metrics-windows-$(GOARCH)-prod.exe \
&& sha256sum victoria-metrics-windows-$(GOARCH)-$(PKG_TAG).zip \
victoria-metrics-windows-$(GOARCH)-prod.exe \
> victoria-metrics-windows-$(GOARCH)-$(PKG_TAG)_checksums.txt
cd bin && rm -rf \
victoria-metrics-windows-$(GOARCH)-prod.exe
release-vmutils: \
release-vmutils-linux-386 \
release-vmutils-linux-amd64 \
@ -314,7 +328,6 @@ release-vmutils-windows-goarch: \
vmauth-windows-$(GOARCH)-prod.exe \
vmctl-windows-$(GOARCH)-prod.exe
pprof-cpu:
go tool pprof -trim_path=github.com/VictoriaMetrics/VictoriaMetrics@ $(PPROF_FILE)


@ -201,7 +201,7 @@ Changing scrape configuration is possible with text editor:
vi $SNAP_DATA/var/snap/victoriametrics/current/etc/victoriametrics-scrape-config.yaml
```
After changes were made, trigger config re-read with the command `curl 127.0.0.1:8248/-/reload`.
After changes were made, trigger config re-read with the command `curl 127.0.0.1:8428/-/reload`.
## Prometheus setup
@ -1447,12 +1447,14 @@ can be configured with the `-inmemoryDataFlushInterval` command-line flag (note
In-memory parts are persisted to disk into `part` directories under the `<-storageDataPath>/data/small/YYYY_MM/` folder,
where `YYYY_MM` is the month partition for the stored data. For example, `2022_11` is the partition for `parts`
with [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) from `November 2022`.
Each partition directory contains a `parts.json` file with the actual list of parts in the partition.
The `part` directory has the following name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`, where:
Every `part` directory contains a `metadata.json` file with the following fields:
- `rowsCount` - the number of [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) stored in the part
- `blocksCount` - the number of blocks stored in the part (see details about blocks below)
- `minTimestamp` and `maxTimestamp` - minimum and maximum timestamps across raw samples stored in the part
- `RowsCount` - the number of [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) stored in the part
- `BlocksCount` - the number of blocks stored in the part (see details about blocks below)
- `MinTimestamp` and `MaxTimestamp` - minimum and maximum timestamps across raw samples stored in the part
- `MinDedupInterval` - the [deduplication interval](#deduplication) applied to the given part.
Each `part` consists of `blocks` sorted by internal time series id (aka `TSID`).
Each `block` contains up to 8K [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples),
@ -1474,9 +1476,8 @@ for fast block lookups, which belong to the given `TSID` and cover the given tim
and [freeing up disk space for the deleted time series](#how-to-delete-time-series) are performed during the merge
Newly added `parts` either successfully appear in the storage or fail to appear.
The newly added `parts` are being created in a temporary directory under `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` folder.
When the newly added `part` is fully written and [fsynced](https://man7.org/linux/man-pages/man2/fsync.2.html)
to a temporary directory, then it is atomically moved to the storage directory.
The newly added `part` is atomically registered in the `parts.json` file under the corresponding partition
after it is fully written and [fsynced](https://man7.org/linux/man-pages/man2/fsync.2.html) to the storage.
Thanks to this algorithm, storage never contains partially created parts, even if a hardware power-off
occurs in the middle of writing the `part` to disk - such incompletely written `parts`
are automatically deleted on the next VictoriaMetrics start.
@ -1505,8 +1506,7 @@ Retention is configured with the `-retentionPeriod` command-line flag, which tak
Data is split in per-month partitions inside `<-storageDataPath>/data/{small,big}` folders.
Data partitions outside the configured retention are deleted on the first day of the new month.
Each partition consists of one or more data parts with the following name pattern `rowsCount_blocksCount_minTimestamp_maxTimestamp`.
Data parts outside of the configured retention are eventually deleted during
Each partition consists of one or more data parts. Data parts outside of the configured retention are eventually deleted during
[background merge](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
The maximum disk space usage for a given `-retentionPeriod` is going to be (`-retentionPeriod` + 1) months.

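To make the `metadata.json` layout described above concrete, here is a minimal Go sketch of its fields; the struct name and sample values are illustrative assumptions, and only the field names come from the documentation above.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// partMetadata mirrors the metadata.json fields documented above.
// The struct name and the example values are hypothetical.
type partMetadata struct {
	RowsCount        uint64 `json:"RowsCount"`
	BlocksCount      uint64 `json:"BlocksCount"`
	MinTimestamp     int64  `json:"MinTimestamp"`
	MaxTimestamp     int64  `json:"MaxTimestamp"`
	MinDedupInterval int64  `json:"MinDedupInterval"`
}

func main() {
	md := partMetadata{
		RowsCount:    12345,
		BlocksCount:  7,
		MinTimestamp: 1667260800000, // 2022-11-01T00:00:00Z in milliseconds
		MaxTimestamp: 1669852799999, // end of November 2022
	}
	b, _ := json.MarshalIndent(&md, "", "  ")
	fmt.Println(string(b))
}
```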

@ -4,10 +4,10 @@
| Version | Supported |
|---------|--------------------|
| 1.81.x | :white_check_mark: |
| 1.80.x | :x: |
| 1.79.x | :white_check_mark: |
| < 1.78 | :x: |
| [latest release](https://docs.victoriametrics.com/CHANGELOG.html) | :white_check_mark: |
| v1.87.x LTS release | :white_check_mark: |
| v1.79.x LTS release | :white_check_mark: |
| other releases | :x: |
## Reporting a Vulnerability


@ -39,6 +39,9 @@ victoria-metrics-freebsd-amd64-prod:
victoria-metrics-openbsd-amd64-prod:
APP_NAME=victoria-metrics $(MAKE) app-via-docker-openbsd-amd64
victoria-metrics-windows-amd64-prod:
APP_NAME=victoria-metrics $(MAKE) app-via-docker-windows-amd64
package-victoria-metrics:
APP_NAME=victoria-metrics $(MAKE) package-via-docker
@ -100,6 +103,9 @@ victoria-metrics-freebsd-amd64:
victoria-metrics-openbsd-amd64:
APP_NAME=victoria-metrics CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-local-goos-goarch
victoria-metrics-windows-amd64:
GOARCH=amd64 APP_NAME=victoria-metrics $(MAKE) app-local-windows-goarch
victoria-metrics-pure:
APP_NAME=victoria-metrics $(MAKE) app-local-pure


@ -10,9 +10,9 @@ import (
"gopkg.in/yaml.v2"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config/log"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/utils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)
@ -199,9 +199,17 @@ func (r *Rule) Validate() error {
// ValidateTplFn must validate the given annotations
type ValidateTplFn func(annotations map[string]string) error
// cLogger is a logger with support for log suppression.
// It is used when logs emitted by the config package need
// to be suppressed.
var cLogger = &log.Logger{}
// ParseSilent parses rule configs from given file patterns without emitting logs
func ParseSilent(pathPatterns []string, validateTplFn ValidateTplFn, validateExpressions bool) ([]Group, error) {
files, err := readFromFS(pathPatterns, true)
cLogger.Suppress(true)
defer cLogger.Suppress(false)
files, err := readFromFS(pathPatterns)
if err != nil {
return nil, fmt.Errorf("failed to read from the config: %s", err)
}
@ -210,7 +218,7 @@ func ParseSilent(pathPatterns []string, validateTplFn ValidateTplFn, validateExp
// Parse parses rule configs from given file patterns
func Parse(pathPatterns []string, validateTplFn ValidateTplFn, validateExpressions bool) ([]Group, error) {
files, err := readFromFS(pathPatterns, false)
files, err := readFromFS(pathPatterns)
if err != nil {
return nil, fmt.Errorf("failed to read from the config: %s", err)
}
@ -219,7 +227,7 @@ func Parse(pathPatterns []string, validateTplFn ValidateTplFn, validateExpressio
return nil, fmt.Errorf("failed to parse %s: %s", pathPatterns, err)
}
if len(groups) < 1 {
logger.Warnf("no groups found in %s", strings.Join(pathPatterns, ";"))
cLogger.Warnf("no groups found in %s", strings.Join(pathPatterns, ";"))
}
return groups, nil
}


@ -4,9 +4,9 @@ import (
"fmt"
"strings"
"sync"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config/fslocal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
// FS represents a file system abstraction for reading files.
@ -36,10 +36,9 @@ var (
// readFromFS returns an error if at least one FS failed to init.
// The function can be called multiple times but each unique path
// will be inited only once.
// If silent == true, readFromFS will not emit any logs.
//
// It is allowed to mix different FS types in path list.
func readFromFS(paths []string, silent bool) (map[string][]byte, error) {
func readFromFS(paths []string) (map[string][]byte, error) {
var err error
result := make(map[string][]byte)
for _, path := range paths {
@ -65,18 +64,19 @@ func readFromFS(paths []string, silent bool) (map[string][]byte, error) {
return nil, fmt.Errorf("failed to list files from %q", fs)
}
if !silent {
logger.Infof("found %d files to read from %q", len(list), fs)
}
cLogger.Infof("found %d files to read from %q", len(list), fs)
if len(list) < 1 {
continue
}
ts := time.Now()
files, err := fs.Read(list)
if err != nil {
return nil, fmt.Errorf("error while reading files from %q: %w", fs, err)
}
cLogger.Infof("finished reading %d files in %v from %q", len(list), time.Since(ts), fs)
for k, v := range files {
if _, ok := result[k]; ok {
return nil, fmt.Errorf("duplicate found for file name %q: file names must be unique", k)


@ -0,0 +1,59 @@
package log
import (
"sync"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
// Logger uses lib/logger for logging,
// but can be suppressed via the Suppress method.
type Logger struct {
mu sync.RWMutex
disabled bool
}
// Suppress sets whether to ignore message logging.
// Once suppressed, logging continues to be ignored
// until the logger is un-suppressed.
func (l *Logger) Suppress(v bool) {
l.mu.Lock()
l.disabled = v
l.mu.Unlock()
}
func (l *Logger) isDisabled() bool {
l.mu.RLock()
defer l.mu.RUnlock()
return l.disabled
}
// Errorf logs error message.
func (l *Logger) Errorf(format string, args ...interface{}) {
if l.isDisabled() {
return
}
logger.Errorf(format, args...)
}
// Warnf logs warning message.
func (l *Logger) Warnf(format string, args ...interface{}) {
if l.isDisabled() {
return
}
logger.Warnf(format, args...)
}
// Infof logs info message.
func (l *Logger) Infof(format string, args ...interface{}) {
if l.isDisabled() {
return
}
logger.Infof(format, args...)
}
// Panicf logs panic message and panics.
// Panicf can't be suppressed
func (l *Logger) Panicf(format string, args ...interface{}) {
logger.Panicf(format, args...)
}

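A short usage sketch for the suppressible logger above; `parseQuietly` is a hypothetical helper, and only `Logger` and `Suppress` come from the diff.

```go
package config

import "github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config/log"

var cfgLogger = &log.Logger{}

// parseQuietly silences cfgLogger for the duration of fn,
// mirroring how ParseSilent wraps readFromFS above.
func parseQuietly(fn func() error) error {
	cfgLogger.Suppress(true)
	defer cfgLogger.Suppress(false)
	return fn()
}
```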

@ -0,0 +1,54 @@
package log
import (
"bytes"
"fmt"
"strings"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
func TestOutput(t *testing.T) {
testOutput := &bytes.Buffer{}
logger.SetOutputForTests(testOutput)
defer logger.ResetOutputForTest()
log := &Logger{}
mustMatch := func(exp string) {
t.Helper()
if exp == "" {
if testOutput.String() != "" {
t.Errorf("expected output to be empty; got %q", testOutput.String())
return
}
}
if !strings.Contains(testOutput.String(), exp) {
t.Errorf("output %q should contain %q", testOutput.String(), exp)
}
fmt.Println(testOutput.String())
testOutput.Reset()
}
log.Warnf("foo")
mustMatch("foo")
log.Infof("info %d", 2)
mustMatch("info 2")
log.Errorf("error %s %d", "baz", 5)
mustMatch("error baz 5")
log.Suppress(true)
log.Warnf("foo")
mustMatch("")
log.Infof("info %d", 2)
mustMatch("")
log.Errorf("error %q %d", "baz", 5)
mustMatch("")
}


@ -319,6 +319,7 @@ func configReload(ctx context.Context, m *manager, groupsCfg []config.Group, sig
// init reload metrics with positive values to improve alerting conditions
configSuccess.Set(1)
configTimestamp.Set(fasttime.UnixTimestamp())
parseFn := config.Parse
for {
select {
case <-ctx.Done():
@ -330,7 +331,11 @@ func configReload(ctx context.Context, m *manager, groupsCfg []config.Group, sig
}
logger.Infof("SIGHUP received. Going to reload rules %q %s...", *rulePath, tmplMsg)
configReloads.Inc()
// allow logs emitting during manual config reload
parseFn = config.Parse
case <-configCheckCh:
// disable logs emitting during per-interval config reload
parseFn = config.ParseSilent
}
if err := notifier.Reload(); err != nil {
configReloadErrors.Inc()
@ -345,7 +350,7 @@ func configReload(ctx context.Context, m *manager, groupsCfg []config.Group, sig
logger.Errorf("failed to load new templates: %s", err)
continue
}
newGroupsCfg, err := config.ParseSilent(*rulePath, validateTplFn, *validateExpressions)
newGroupsCfg, err := parseFn(*rulePath, validateTplFn, *validateExpressions)
if err != nil {
configReloadErrors.Inc()
configSuccess.Set(0)


@ -111,11 +111,7 @@ func (a *Alert) ExecTemplate(q templates.QueryFn, labels, annotations map[string
ActiveAt: a.ActiveAt,
For: a.For,
}
tmpl, err := templates.GetWithFuncs(templates.FuncsWithQuery(q))
if err != nil {
return nil, fmt.Errorf("error getting a template: %w", err)
}
return templateAnnotations(annotations, tplData, tmpl, true)
return ExecTemplate(q, annotations, tplData)
}
// ExecTemplate executes the given template for given annotations map.


@ -200,6 +200,9 @@ func TestAlert_ExecTemplate(t *testing.T) {
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
if err := ValidateTemplates(tc.annotations); err != nil {
t.Fatal(err)
}
tpl, err := tc.alert.ExecTemplate(qFn, tc.alert.Labels, tc.annotations)
if err != nil {
t.Fatal(err)


@ -81,10 +81,6 @@ var (
//
// Init returns an error if both mods are used.
func Init(gen AlertURLGenerator, extLabels map[string]string, extURL string) (func() []Notifier, error) {
if externalLabels != nil || externalURL != "" {
return nil, fmt.Errorf("BUG: notifier.Init was called multiple times")
}
externalURL = extURL
externalLabels = extLabels
eu, err := url.Parse(externalURL)


@ -0,0 +1,37 @@
package notifier
import (
"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
"testing"
)
func TestInit(t *testing.T) {
oldAddrs := *addrs
defer func() { *addrs = oldAddrs }()
*addrs = flagutil.ArrayString{"127.0.0.1", "127.0.0.2"}
fn, err := Init(nil, nil, "")
if err != nil {
t.Fatalf("%s", err)
}
nfs := fn()
if len(nfs) != 2 {
t.Fatalf("expected to get 2 notifiers; got %d", len(nfs))
}
targets := GetTargets()
if targets == nil || targets[TargetStatic] == nil {
t.Fatalf("expected to get static targets in response")
}
nf1 := targets[TargetStatic][0]
if nf1.Addr() != "127.0.0.1/api/v2/alerts" {
t.Fatalf("expected to get \"127.0.0.1/api/v2/alerts\"; got %q instead", nf1.Addr())
}
nf2 := targets[TargetStatic][1]
if nf2.Addr() != "127.0.0.2/api/v2/alerts" {
t.Fatalf("expected to get \"127.0.0.2/api/v2/alerts\"; got %q instead", nf2.Addr())
}
}


@ -0,0 +1,20 @@
package remotewrite
import (
"context"
"testing"
)
func TestInit(t *testing.T) {
oldAddr := *addr
defer func() { *addr = oldAddr }()
*addr = "http://localhost:8428"
cl, err := Init(context.Background())
if err != nil {
t.Fatal(err)
}
if err := cl.Close(); err != nil {
t.Fatal(err)
}
}


@ -76,6 +76,20 @@ func TestTemplateFuncs(t *testing.T) {
formatting("humanize1024", float64(146521335255970361638912), "124.1Zi")
formatting("humanize1024", float64(150037847302113650318245888), "124.1Yi")
formatting("humanize1024", float64(153638755637364377925883789312), "1.271e+05Yi")
formatting("humanize", float64(127087), "127.1k")
formatting("humanize", float64(136458627186688), "136.5T")
formatting("humanizeDuration", 1, "1s")
formatting("humanizeDuration", 0.2, "200ms")
formatting("humanizeDuration", 42000, "11h 40m 0s")
formatting("humanizeDuration", 16790555, "194d 8h 2m 35s")
formatting("humanizePercentage", 1, "100%")
formatting("humanizePercentage", 0.8, "80%")
formatting("humanizePercentage", 0.015, "1.5%")
formatting("humanizeTimestamp", 1679055557, "2023-03-17 12:19:17 +0000 UTC")
}
func mkTemplate(current, replacement interface{}) textTemplate {


@ -7,6 +7,7 @@ import (
"net/http/httptest"
"reflect"
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
)
@ -19,9 +20,18 @@ func TestHandler(t *testing.T) {
},
state: newRuleState(10),
}
ar.state.add(ruleStateEntry{
time: time.Now(),
at: time.Now(),
samples: 10,
})
rr := &RecordingRule{
Name: "record",
state: newRuleState(10),
}
g := &Group{
Name: "group",
Rules: []Rule{ar},
Rules: []Rule{ar, rr},
}
m := &manager{groups: make(map[uint64]*Group)}
m.groups[0] = g
@ -62,6 +72,14 @@ func TestHandler(t *testing.T) {
t.Run("/vmalert/rule", func(t *testing.T) {
a := ar.ToAPI()
getResp(ts.URL+"/vmalert/"+a.WebLink(), nil, 200)
r := rr.ToAPI()
getResp(ts.URL+"/vmalert/"+r.WebLink(), nil, 200)
})
t.Run("/vmalert/alert", func(t *testing.T) {
alerts := ar.AlertsToAPI()
for _, a := range alerts {
getResp(ts.URL+"/vmalert/"+a.WebLink(), nil, 200)
}
})
t.Run("/vmalert/rule?badParam", func(t *testing.T) {
params := fmt.Sprintf("?%s=0&%s=1", paramGroupID, paramRuleID)


@ -15,6 +15,7 @@ import (
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/terminal"
"github.com/urfave/cli/v2"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
@ -71,7 +72,7 @@ func main() {
}
otsdbProcessor := newOtsdbProcessor(otsdbClient, importer, c.Int(otsdbConcurrency))
return otsdbProcessor.run(c.Bool(globalSilent), c.Bool(globalVerbose))
return otsdbProcessor.run(isNonInteractive(c), c.Bool(globalVerbose))
},
},
{
@ -112,7 +113,7 @@ func main() {
c.String(influxMeasurementFieldSeparator),
c.Bool(influxSkipDatabaseLabel),
c.Bool(influxPrometheusMode))
return processor.run(c.Bool(globalSilent), c.Bool(globalVerbose))
return processor.run(isNonInteractive(c), c.Bool(globalVerbose))
},
},
{
@ -152,7 +153,7 @@ func main() {
},
cc: c.Int(remoteReadConcurrency),
}
return rmp.run(ctx, c.Bool(globalSilent), c.Bool(globalVerbose))
return rmp.run(ctx, isNonInteractive(c), c.Bool(globalVerbose))
},
},
{
@ -186,7 +187,7 @@ func main() {
im: importer,
cc: c.Int(promConcurrency),
}
return pp.run(c.Bool(globalSilent), c.Bool(globalVerbose))
return pp.run(isNonInteractive(c), c.Bool(globalVerbose))
},
},
{
@ -244,7 +245,7 @@ func main() {
backoff: backoff.New(),
cc: c.Int(vmConcurrency),
}
return p.run(ctx, c.Bool(globalSilent))
return p.run(ctx, isNonInteractive(c))
},
},
{
@ -317,3 +318,8 @@ func initConfigVM(c *cli.Context) vm.Config {
DisableProgressBar: c.Bool(vmDisableProgressBar),
}
}
func isNonInteractive(c *cli.Context) bool {
isTerminal := terminal.IsTerminal(int(os.Stdout.Fd()))
return c.Bool(globalSilent) || !isTerminal
}


@ -0,0 +1,14 @@
//go:build darwin || linux || solaris
// +build darwin linux solaris
package terminal
import (
"golang.org/x/sys/unix"
)
// IsTerminal returns true if the file descriptor is a terminal
func IsTerminal(fd int) bool {
_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
return err == nil
}


@ -0,0 +1,8 @@
//go:build aix || linux || solaris || zos
// +build aix linux solaris zos
package terminal
import "golang.org/x/sys/unix"
const ioctlReadTermios = unix.TCGETS


@ -0,0 +1,8 @@
//go:build darwin
// +build darwin
package terminal
import "golang.org/x/sys/unix"
const ioctlReadTermios = unix.TIOCGETA

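A tiny usage sketch for the new terminal helper; `warnIfPiped` is a hypothetical function, while the `IsTerminal(int(os.Stdout.Fd()))` call mirrors vmctl's `isNonInteractive` above.

```go
package main

import (
	"fmt"
	"os"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/terminal"
)

// warnIfPiped reports when stdout is redirected, in which case
// interactive prompts and progress bars should be disabled.
func warnIfPiped() {
	if !terminal.IsTerminal(int(os.Stdout.Fd())) {
		fmt.Fprintln(os.Stderr, "stdout is not a terminal; running non-interactively")
	}
}
```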

@ -149,40 +149,28 @@ func timeseriesWorker(qt *querytracer.Tracer, workChs []chan *timeseriesWork, wo
// Then help others with the remaining work.
rowsProcessed = 0
seriesProcessed = 0
idx := int(workerID)
for {
tsw, idxNext := stealTimeseriesWork(workChs, idx)
if tsw == nil {
// There is no more work
break
for i := uint(1); i < uint(len(workChs)); i++ {
idx := (i + workerID) % uint(len(workChs))
ch := workChs[idx]
for len(ch) > 0 {
// Give other goroutines a chance to perform their work.
runtime.Gosched()
// It is expected that every channel in the workChs is already closed,
// so the next line should return immediately.
tsw, ok := <-ch
if !ok {
break
}
tsw.err = tsw.do(&tmpResult.rs, workerID)
rowsProcessed += tsw.rowsProcessed
seriesProcessed++
}
tsw.err = tsw.do(&tmpResult.rs, workerID)
rowsProcessed += tsw.rowsProcessed
seriesProcessed++
idx = idxNext
}
qt.Printf("others work processed: series=%d, samples=%d", seriesProcessed, rowsProcessed)
putTmpResult(tmpResult)
}
func stealTimeseriesWork(workChs []chan *timeseriesWork, startIdx int) (*timeseriesWork, int) {
for i := startIdx; i < startIdx+len(workChs); i++ {
// Give a chance other goroutines to perform their work
runtime.Gosched()
idx := i % len(workChs)
ch := workChs[idx]
// It is expected that every channel in the workChs is already closed,
// so the next line should return immediately.
tsw, ok := <-ch
if ok {
return tsw, idx
}
}
return nil, startIdx
}
func getTmpResult() *result {
v := resultPool.Get()
if v == nil {
@ -208,10 +196,17 @@ type result struct {
var resultPool sync.Pool
// MaxWorkers returns the maximum number of workers netstorage can spin when calling RunParallel()
func MaxWorkers() int {
return gomaxprocs
}
var gomaxprocs = cgroup.AvailableCPUs()
// RunParallel runs f in parallel for all the results from rss.
//
// f shouldn't hold references to rs after returning.
// workerID is the id of the worker goroutine that calls f.
// workerID is the id of the worker goroutine that calls f. The workerID is in the range [0..MaxWorkers()-1].
// Data processing is immediately stopped if f returns non-nil error.
//
// rss becomes unusable after the call to RunParallel.
@ -245,7 +240,8 @@ func (rss *Results) runParallel(qt *querytracer.Tracer, f func(rs *Result, worke
tsw.f = f
tsw.mustStop = &mustStop
}
if gomaxprocs == 1 || tswsLen == 1 {
maxWorkers := MaxWorkers()
if maxWorkers == 1 || tswsLen == 1 {
// It is faster to process time series in the current goroutine.
tsw := getTimeseriesWork()
tmpResult := getTmpResult()
@ -281,8 +277,8 @@ func (rss *Results) runParallel(qt *querytracer.Tracer, f func(rs *Result, worke
// Prepare worker channels.
workers := len(tsws)
if workers > gomaxprocs {
workers = gomaxprocs
if workers > maxWorkers {
workers = maxWorkers
}
itemsPerWorker := (len(tsws) + workers - 1) / workers
workChs := make([]chan *timeseriesWork, workers)
@ -334,8 +330,6 @@ var (
seriesReadPerQuery = metrics.NewHistogram(`vm_series_read_per_query`)
)
var gomaxprocs = cgroup.AvailableCPUs()
type packedTimeseries struct {
metricName string
brs []blockRef
@ -398,37 +392,25 @@ func unpackWorker(workChs []chan *unpackWork, workerID uint) {
}
// Then help others with their work.
idx := int(workerID)
for {
upw, idxNext := stealUnpackWork(workChs, idx)
if upw == nil {
// There is no more work
break
for i := uint(1); i < uint(len(workChs)); i++ {
idx := (i + workerID) % uint(len(workChs))
ch := workChs[idx]
for len(ch) > 0 {
// Give other goroutines a chance to perform their work
runtime.Gosched()
// It is expected that every channel in the workChs is already closed,
// so the next line should return immediately.
upw, ok := <-ch
if !ok {
break
}
upw.unpack(tmpBlock)
}
upw.unpack(tmpBlock)
idx = idxNext
}
putTmpStorageBlock(tmpBlock)
}
func stealUnpackWork(workChs []chan *unpackWork, startIdx int) (*unpackWork, int) {
for i := startIdx; i < startIdx+len(workChs); i++ {
// Give a chance other goroutines to perform their work
runtime.Gosched()
idx := i % len(workChs)
ch := workChs[idx]
// It is expected that every channel in the workChs is already closed,
// so the next line should return immediately.
upw, ok := <-ch
if ok {
return upw, idx
}
}
return nil, startIdx
}
func getTmpStorageBlock() *storage.Block {
v := tmpStorageBlockPool.Get()
if v == nil {
@ -1096,7 +1078,6 @@ func ExportBlocks(qt *querytracer.Tracer, sq *storage.SearchQuery, deadline sear
indexSearchDuration.UpdateDuration(startTime)
// Start workers that call f in parallel on available CPU cores.
gomaxprocs := cgroup.AvailableCPUs()
workCh := make(chan *exportWork, gomaxprocs*8)
var (
errGlobal error

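The loop above replaces the separate `stealTimeseriesWork`/`stealUnpackWork` helpers with an inline scan over sibling channels. Here is a self-contained sketch of that pattern under hypothetical names; only the channel-draining structure comes from the diff.

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

// helpOthers drains work from closed sibling channels, starting at the
// channel after workerID and wrapping around, as in the diff above.
func helpOthers(workChs []chan int, workerID uint, process func(int)) {
	for i := uint(1); i < uint(len(workChs)); i++ {
		ch := workChs[(i+workerID)%uint(len(workChs))]
		for len(ch) > 0 {
			// Give other goroutines a chance to perform their work.
			runtime.Gosched()
			// Every channel is expected to be closed already,
			// so this receive returns immediately.
			item, ok := <-ch
			if !ok {
				break
			}
			process(item)
		}
	}
}

func main() {
	workChs := make([]chan int, 4)
	for i := range workChs {
		workChs[i] = make(chan int, 8)
		for j := 0; j < 3; j++ {
			workChs[i] <- i*10 + j
		}
		close(workChs[i])
	}
	var mu sync.Mutex
	var done []int
	helpOthers(workChs, 0, func(v int) {
		mu.Lock()
		done = append(done, v)
		mu.Unlock()
	})
	// Worker 0 skips its own channel, so it helps with the other 9 items.
	fmt.Println(len(done), "items helped from sibling channels")
}
```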

@ -3,8 +3,9 @@ package promql
import (
"math"
"strings"
"sync"
"unsafe"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/metricsql"
)
@ -63,31 +64,36 @@ var incrementalAggrFuncCallbacksMap = map[string]*incrementalAggrFuncCallbacks{
},
}
type incrementalAggrContextMap struct {
m map[string]*incrementalAggrContext
// The padding prevents false sharing on widespread platforms with
// 128 mod (cache line size) = 0.
_ [128 - unsafe.Sizeof(map[string]*incrementalAggrContext{})%128]byte
}
type incrementalAggrFuncContext struct {
ae *metricsql.AggrFuncExpr
m sync.Map
byWorkerID []incrementalAggrContextMap
callbacks *incrementalAggrFuncCallbacks
}
func newIncrementalAggrFuncContext(ae *metricsql.AggrFuncExpr, callbacks *incrementalAggrFuncCallbacks) *incrementalAggrFuncContext {
return &incrementalAggrFuncContext{
ae: ae,
callbacks: callbacks,
ae: ae,
byWorkerID: make([]incrementalAggrContextMap, netstorage.MaxWorkers()),
callbacks: callbacks,
}
}
func (iafc *incrementalAggrFuncContext) updateTimeseries(tsOrig *timeseries, workerID uint) {
v, ok := iafc.m.Load(workerID)
if !ok {
// It is safe creating and storing m in iafc.m without locking,
// since it is guaranteed that only a single goroutine can execute
// code for the given workerID at a time.
v = make(map[string]*incrementalAggrContext, 1)
iafc.m.Store(workerID, v)
v := &iafc.byWorkerID[workerID]
if v.m == nil {
v.m = make(map[string]*incrementalAggrContext, 1)
}
m := v.(map[string]*incrementalAggrContext)
m := v.m
ts := tsOrig
keepOriginal := iafc.callbacks.keepOriginal
@ -128,9 +134,9 @@ func (iafc *incrementalAggrFuncContext) updateTimeseries(tsOrig *timeseries, wor
func (iafc *incrementalAggrFuncContext) finalizeTimeseries() []*timeseries {
mGlobal := make(map[string]*incrementalAggrContext)
mergeAggrFunc := iafc.callbacks.mergeAggrFunc
iafc.m.Range(func(k, v interface{}) bool {
m := v.(map[string]*incrementalAggrContext)
for k, iac := range m {
byWorkerID := iafc.byWorkerID
for i := range byWorkerID {
for k, iac := range byWorkerID[i].m {
iacGlobal := mGlobal[k]
if iacGlobal == nil {
if iafc.ae.Limit > 0 && len(mGlobal) >= iafc.ae.Limit {
@ -142,8 +148,7 @@ func (iafc *incrementalAggrFuncContext) finalizeTimeseries() []*timeseries {
}
mergeAggrFunc(iacGlobal, iac)
}
return true
})
}
tss := make([]*timeseries, 0, len(mGlobal))
finalizeAggrFunc := iafc.callbacks.finalizeAggrFunc
for _, iac := range mGlobal {

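The `incrementalAggrContextMap` padding above rounds each per-worker slot up to a multiple of 128 bytes so that adjacent workers never write to the same cache line. A standalone sketch of the same trick with a hypothetical counter type:

```go
package main

import (
	"fmt"
	"unsafe"
)

// paddedCounter occupies a whole number of 128-byte blocks, so adjacent
// slice elements never share a cache line on common platforms where
// 128 mod (cache line size) = 0.
type paddedCounter struct {
	n uint64
	_ [128 - unsafe.Sizeof(uint64(0))%128]byte
}

func main() {
	perWorker := make([]paddedCounter, 8) // one slot per worker, no false sharing
	perWorker[3].n++
	fmt.Println(unsafe.Sizeof(paddedCounter{})) // 128
}
```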

@ -8,6 +8,7 @@ import (
"sync"
"testing"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
"github.com/VictoriaMetrics/metricsql"
)
@ -99,7 +100,7 @@ func TestIncrementalAggr(t *testing.T) {
}
func testIncrementalParallelAggr(iafc *incrementalAggrFuncContext, tssSrc, tssExpected []*timeseries) error {
const workersCount = 3
workersCount := netstorage.MaxWorkers()
tsCh := make(chan *timeseries)
var wg sync.WaitGroup
wg.Add(workersCount)


@ -4,6 +4,7 @@ import (
"os"
"reflect"
"testing"
"unsafe"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)
@ -24,6 +25,26 @@ func TestMarshalTimeseriesFast(t *testing.T) {
if !reflect.DeepEqual(tss, tss2) {
t.Fatalf("unexpected timeseries unmarshaled\ngot\n%#v\nwant\n%#v", tss2[0], tss[0])
}
// Check 8-byte alignment.
// This prevents a SIGBUS error on arm architectures.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3927
for _, ts := range tss2 {
if len(ts.Values) == 0 {
continue
}
// check float64 alignment
addr := uintptr(unsafe.Pointer(&ts.Values[0]))
if mod := addr % unsafe.Alignof(ts.Values[0]); mod != 0 {
t.Fatalf("mis-aligned; &ts.Values[0]=%p; mod=%d", &ts.Values[0], mod)
}
// check int64 alignment
addr = uintptr(unsafe.Pointer(&ts.Timestamps[0]))
if mod := addr % unsafe.Alignof(ts.Timestamps[0]); mod != 0 {
t.Fatalf("mis-aligned; &ts.Timestamps[0]=%p; mod=%d", &ts.Timestamps[0], mod)
}
}
}
// Single series


@ -1,14 +1,14 @@
{
"files": {
"main.css": "./static/css/main.8d8c45cf.css",
"main.js": "./static/js/main.d5e360af.js",
"main.css": "./static/css/main.69d78cc2.css",
"main.js": "./static/js/main.1be8603e.js",
"static/js/27.c1ccfd29.chunk.js": "./static/js/27.c1ccfd29.chunk.js",
"static/media/Lato-Regular.ttf": "./static/media/Lato-Regular.d714fec1633b69a9c2e9.ttf",
"static/media/Lato-Bold.ttf": "./static/media/Lato-Bold.32360ba4b57802daa4d6.ttf",
"index.html": "./index.html"
},
"entrypoints": [
"static/css/main.8d8c45cf.css",
"static/js/main.d5e360af.js"
"static/css/main.69d78cc2.css",
"static/js/main.1be8603e.js"
]
}


@ -1 +1 @@
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=no"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.d5e360af.js"></script><link href="./static/css/main.8d8c45cf.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>
<!doctype html><html lang="en"><head><meta charset="utf-8"/><link rel="icon" href="./favicon.ico"/><meta name="viewport" content="width=device-width,initial-scale=1,maximum-scale=1,user-scalable=no"/><meta name="theme-color" content="#000000"/><meta name="description" content="UI for VictoriaMetrics"/><link rel="apple-touch-icon" href="./apple-touch-icon.png"/><link rel="icon" type="image/png" sizes="32x32" href="./favicon-32x32.png"><link rel="manifest" href="./manifest.json"/><title>VM UI</title><script src="./dashboards/index.js" type="module"></script><meta name="twitter:card" content="summary_large_image"><meta name="twitter:image" content="./preview.jpg"><meta name="twitter:title" content="UI for VictoriaMetrics"><meta name="twitter:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta name="twitter:site" content="@VictoriaMetrics"><meta property="og:title" content="Metric explorer for VictoriaMetrics"><meta property="og:description" content="Explore and troubleshoot your VictoriaMetrics data"><meta property="og:image" content="./preview.jpg"><meta property="og:type" content="website"><script defer="defer" src="./static/js/main.1be8603e.js"></script><link href="./static/css/main.69d78cc2.css" rel="stylesheet"></head><body><noscript>You need to enable JavaScript to run this app.</noscript><div id="root"></div></body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@ -18,6 +18,7 @@
"@types/marked": "^4.0.2",
"@types/node": "^17.0.21",
"@types/qs": "^6.9.7",
"@types/react-input-mask": "^3.0.2",
"@types/react-router-dom": "^5.3.3",
"@types/webpack-env": "^1.16.3",
"classnames": "^2.3.2",
@ -28,6 +29,7 @@
"marked": "^4.0.14",
"preact": "^10.7.1",
"qs": "^6.10.3",
"react-input-mask": "^2.0.4",
"react-router-dom": "^6.3.0",
"sass": "^1.56.0",
"typescript": "~4.6.2",
@ -4392,6 +4394,14 @@
"@types/react": "*"
}
},
"node_modules/@types/react-input-mask": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/@types/react-input-mask/-/react-input-mask-3.0.2.tgz",
"integrity": "sha512-WTli3kUyvUqqaOLYG/so2pLqUvRb+n4qnx2He5klfqZDiQmRyD07jVIt/bco/1BrcErkPMtpOm+bHii4Oed6cQ==",
"dependencies": {
"@types/react": "*"
}
},
"node_modules/@types/react-router": {
"version": "5.1.20",
"resolved": "https://registry.npmjs.org/@types/react-router/-/react-router-5.1.20.tgz",
@ -10236,6 +10246,14 @@
"node": ">= 0.4"
}
},
"node_modules/invariant": {
"version": "2.2.4",
"resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz",
"integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==",
"dependencies": {
"loose-envify": "^1.0.0"
}
},
"node_modules/ipaddr.js": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.0.1.tgz",
@ -16403,6 +16421,19 @@
"dev": true,
"peer": true
},
"node_modules/react-input-mask": {
"version": "2.0.4",
"resolved": "https://registry.npmjs.org/react-input-mask/-/react-input-mask-2.0.4.tgz",
"integrity": "sha512-1hwzMr/aO9tXfiroiVCx5EtKohKwLk/NT8QlJXHQ4N+yJJFyUuMT+zfTpLBwX/lK3PkuMlievIffncpMZ3HGRQ==",
"dependencies": {
"invariant": "^2.2.4",
"warning": "^4.0.2"
},
"peerDependencies": {
"react": ">=0.14.0",
"react-dom": ">=0.14.0"
}
},
"node_modules/react-is": {
"version": "17.0.2",
"resolved": "https://registry.npmjs.org/react-is/-/react-is-17.0.2.tgz",
@ -18811,6 +18842,14 @@
"makeerror": "1.0.12"
}
},
"node_modules/warning": {
"version": "4.0.3",
"resolved": "https://registry.npmjs.org/warning/-/warning-4.0.3.tgz",
"integrity": "sha512-rpJyN222KWIvHJ/F53XSZv0Zl/accqHR8et1kpaMTD/fLCRxtV8iX8czMzY7sVZupTI3zcUTg8eycS2kNF9l6w==",
"dependencies": {
"loose-envify": "^1.0.0"
}
},
"node_modules/watchpack": {
"version": "2.4.0",
"resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.0.tgz",
@ -18851,9 +18890,9 @@
}
},
"node_modules/webpack": {
"version": "5.75.0",
"resolved": "https://registry.npmjs.org/webpack/-/webpack-5.75.0.tgz",
"integrity": "sha512-piaIaoVJlqMsPtX/+3KTTO6jfvrSYgauFVdt8cr9LTHKmcq/AMd4mhzsiP7ZF/PGRNPGA8336jldh9l2Kt2ogQ==",
"version": "5.76.2",
"resolved": "https://registry.npmjs.org/webpack/-/webpack-5.76.2.tgz",
"integrity": "sha512-Th05ggRm23rVzEOlX8y67NkYCHa9nTNcwHPBhdg+lKG+mtiW7XgggjAeeLnADAe7mLjJ6LUNfgHAuRRh+Z6J7w==",
"dev": true,
"peer": true,
"dependencies": {


@ -14,6 +14,7 @@
"@types/marked": "^4.0.2",
"@types/node": "^17.0.21",
"@types/qs": "^6.9.7",
"@types/react-input-mask": "^3.0.2",
"@types/react-router-dom": "^5.3.3",
"@types/webpack-env": "^1.16.3",
"classnames": "^2.3.2",
@ -24,6 +25,7 @@
"marked": "^4.0.14",
"preact": "^10.7.1",
"qs": "^6.10.3",
"react-input-mask": "^2.0.4",
"react-router-dom": "^6.3.0",
"sass": "^1.56.0",
"typescript": "~4.6.2",


@ -1,7 +1,7 @@
@use "src/styles/variables" as *;
.vm-time-duration {
max-height: 200px;
max-height: 227px;
overflow: auto;
font-size: $font-size;


@ -4,18 +4,18 @@ import TimeDurationSelector from "../TimeDurationSelector/TimeDurationSelector";
import dayjs from "dayjs";
import { getAppModeEnable } from "../../../../utils/app-mode";
import { useTimeDispatch, useTimeState } from "../../../../state/time/TimeStateContext";
import { AlarmIcon, ArrowDownIcon, CalendarIcon, ClockIcon } from "../../../Main/Icons";
import { AlarmIcon, ArrowDownIcon, ClockIcon } from "../../../Main/Icons";
import Button from "../../../Main/Button/Button";
import Popper from "../../../Main/Popper/Popper";
import Tooltip from "../../../Main/Tooltip/Tooltip";
import { DATE_TIME_FORMAT } from "../../../../constants/date";
import useResize from "../../../../hooks/useResize";
import DatePicker from "../../../Main/DatePicker/DatePicker";
import "./style.scss";
import useClickOutside from "../../../../hooks/useClickOutside";
import classNames from "classnames";
import { useAppState } from "../../../../state/common/StateContext";
import useDeviceDetect from "../../../../hooks/useDeviceDetect";
import DateTimeInput from "../../../Main/DatePicker/DateTimeInput/DateTimeInput";
export const TimeSelector: FC = () => {
const { isMobile } = useDeviceDetect();
@ -27,9 +27,6 @@ export const TimeSelector: FC = () => {
const [until, setUntil] = useState<string>();
const [from, setFrom] = useState<string>();
const formFormat = useMemo(() => dayjs.tz(from).format(DATE_TIME_FORMAT), [from]);
const untilFormat = useMemo(() => dayjs.tz(until).format(DATE_TIME_FORMAT), [until]);
const { period: { end, start }, relativeTime, timezone, duration } = useTimeState();
const dispatch = useTimeDispatch();
const appModeEnable = getAppModeEnable();
@ -55,10 +52,7 @@ export const TimeSelector: FC = () => {
const formatRange = useMemo(() => {
const startFormat = dayjs.tz(dateFromSeconds(start)).format(DATE_TIME_FORMAT);
const endFormat = dayjs.tz(dateFromSeconds(end)).format(DATE_TIME_FORMAT);
return {
start: startFormat,
end: endFormat
};
return { start: startFormat, end: endFormat };
}, [start, end, timezone]);
const dateTitle = useMemo(() => {
@ -66,8 +60,6 @@ export const TimeSelector: FC = () => {
return isRelativeTime ? relativeTime.replace(/_/g, " ") : `${formatRange.start} - ${formatRange.end}`;
}, [relativeTime, formatRange]);
const fromRef = useRef<HTMLDivElement>(null);
const untilRef = useRef<HTMLDivElement>(null);
const fromPickerRef = useRef<HTMLDivElement>(null);
const untilPickerRef = useRef<HTMLDivElement>(null);
const [openOptions, setOpenOptions] = useState(false);
@ -82,11 +74,6 @@ export const TimeSelector: FC = () => {
}
setOpenOptions(false);
};
const handleFromChange = (from: string) => setFrom(from);
const handleUntilChange = (until: string) => setUntil(until);
const onApplyClick = () => setTimeAndClosePicker();
const onSwitchToNow = () => dispatch({ type: "RUN_QUERY_TO_NOW" });
@ -116,11 +103,9 @@ export const TimeSelector: FC = () => {
useClickOutside(wrapperRef, (e) => {
if (isMobile) return;
const target = e.target as HTMLElement;
const isFromButton = fromRef?.current && fromRef.current.contains(target);
const isUntilButton = untilRef?.current && untilRef.current.contains(target);
const isFromPicker = fromPickerRef?.current && fromPickerRef?.current?.contains(target);
const isUntilPicker = untilPickerRef?.current && untilPickerRef?.current?.contains(target);
if (isFromButton || isUntilButton || isFromPicker || isUntilPicker) return;
if (isFromPicker || isUntilPicker) return;
handleCloseOptions();
});
@ -174,38 +159,22 @@ export const TimeSelector: FC = () => {
"vm-time-selector-left-inputs_dark": isDarkTheme
})}
>
<div
className="vm-time-selector-left-inputs__date"
ref={fromRef}
>
<label>From:</label>
<span>{formFormat}</span>
<CalendarIcon/>
<DatePicker
label={"Date From"}
ref={fromPickerRef}
date={from || ""}
onChange={handleFromChange}
targetRef={fromRef}
timepicker={true}
/>
</div>
<div
className="vm-time-selector-left-inputs__date"
ref={untilRef}
>
<label>To:</label>
<span>{untilFormat}</span>
<CalendarIcon/>
<DatePicker
label={"Date To"}
ref={untilPickerRef}
date={until || ""}
onChange={handleUntilChange}
targetRef={untilRef}
timepicker={true}
/>
</div>
<DateTimeInput
value={from}
label="From:"
pickerLabel="Date From"
pickerRef={fromPickerRef}
onChange={setFrom}
onEnter={setTimeAndClosePicker}
/>
<DateTimeInput
value={until}
label="To:"
pickerLabel="Date To"
pickerRef={untilPickerRef}
onChange={setUntil}
onEnter={setTimeAndClosePicker}
/>
</div>
<div className="vm-time-selector-left-timezone">
<div className="vm-time-selector-left-timezone__title">{activeTimezone.region}</div>
@ -228,7 +197,7 @@ export const TimeSelector: FC = () => {
</Button>
<Button
color="primary"
onClick={onApplyClick}
onClick={setTimeAndClosePicker}
>
Apply
</Button>


@ -31,48 +31,6 @@
display: grid;
align-items: flex-start;
justify-content: stretch;
&_dark &__date {
border-color: $color-text-disabled;
}
&__date {
display: grid;
grid-template-columns: 1fr 14px;
gap: $padding-small;
align-items: center;
justify-content: center;
padding-bottom: $padding-small;
margin-bottom: $padding-global;
border-bottom: $border-divider;
cursor: pointer;
transition: color 200ms ease-in-out, border-bottom-color 300ms ease;
&:last-child {
margin-bottom: 0;
}
&:hover {
border-bottom-color: $color-primary;
}
&:hover svg,
&:hover {
color: $color-primary;
}
label {
grid-column: 1/3;
font-size: $font-size-small;
color: $color-text-secondary;
user-select: none;
}
svg {
color: $color-text-secondary;
transition: color 200ms ease-in-out;
}
}
}
&-timezone {


@ -3,65 +3,45 @@ import dayjs, { Dayjs } from "dayjs";
import CalendarHeader from "./CalendarHeader/CalendarHeader";
import CalendarBody from "./CalendarBody/CalendarBody";
import YearsList from "./YearsList/YearsList";
import TimePicker from "../TImePicker/TimePicker";
import { DATE_TIME_FORMAT } from "../../../../constants/date";
import "./style.scss";
import { CalendarIcon, ClockIcon } from "../../Icons";
import Tabs from "../../Tabs/Tabs";
import useDeviceDetect from "../../../../hooks/useDeviceDetect";
import classNames from "classnames";
import MonthsList from "./MonthsList/MonthsList";
interface DatePickerProps {
date: Date | Dayjs
format?: string
timepicker?: boolean,
onChange: (date: string) => void
onClose?: () => void
}
const tabs = [
{ value: "date", icon: <CalendarIcon/> },
{ value: "time", icon: <ClockIcon/> }
];
enum CalendarTypeView {
"days",
"months",
"years"
}
const Calendar: FC<DatePickerProps> = ({
date,
timepicker = false,
format = DATE_TIME_FORMAT,
onChange,
onClose
}) => {
const [displayYears, setDisplayYears] = useState(false);
const [viewType, setViewType] = useState<CalendarTypeView>(CalendarTypeView.days);
const [viewDate, setViewDate] = useState(dayjs.tz(date));
const [selectDate, setSelectDate] = useState(dayjs.tz(date));
const [tab, setTab] = useState(tabs[0].value);
const { isMobile } = useDeviceDetect();
const toggleDisplayYears = () => {
setDisplayYears(prev => !prev);
setViewType(prev => prev === CalendarTypeView.years ? CalendarTypeView.days : CalendarTypeView.years);
};
const handleChangeViewDate = (date: Dayjs) => {
setViewDate(date);
setDisplayYears(false);
setViewType(prev => prev === CalendarTypeView.years ? CalendarTypeView.months : CalendarTypeView.days);
};
const handleChangeSelectDate = (date: Dayjs) => {
setSelectDate(date);
if (timepicker) setTab("time");
};
const handleChangeTime = (time: string) => {
const [hour, minute, second] = time.split(":");
setSelectDate(prev => prev.set("hour", +hour).set("minute", +minute).set("second", +second));
};
const handleChangeTab = (value: string) => {
setTab(value);
};
const handleClose = () => {
onClose && onClose();
};
useEffect(() => {
@ -69,6 +49,12 @@ const Calendar: FC<DatePickerProps> = ({
onChange(selectDate.format(format));
}, [selectDate]);
useEffect(() => {
const value = dayjs.tz(date);
setViewDate(value);
setSelectDate(value);
}, [date]);
return (
<div
className={classNames({
@ -76,51 +62,32 @@ const Calendar: FC<DatePickerProps> = ({
"vm-calendar_mobile": isMobile,
})}
>
{tab === "date" && (
<CalendarHeader
<CalendarHeader
viewDate={viewDate}
onChangeViewDate={handleChangeViewDate}
toggleDisplayYears={toggleDisplayYears}
showArrowNav={viewType === CalendarTypeView.days}
/>
{viewType === CalendarTypeView.days && (
<CalendarBody
viewDate={viewDate}
selectDate={selectDate}
onChangeSelectDate={handleChangeSelectDate}
/>
)}
{viewType === CalendarTypeView.years && (
<YearsList
viewDate={viewDate}
onChangeViewDate={handleChangeViewDate}
toggleDisplayYears={toggleDisplayYears}
displayYears={displayYears}
/>
)}
{tab === "date" && (
<>
{!displayYears && (
<CalendarBody
viewDate={viewDate}
selectDate={selectDate}
onChangeSelectDate={handleChangeSelectDate}
/>
)}
{displayYears && (
<YearsList
viewDate={viewDate}
onChangeViewDate={handleChangeViewDate}
/>
)}
</>
)}
{tab === "time" && (
<TimePicker
{viewType === CalendarTypeView.months && (
<MonthsList
selectDate={selectDate}
onChangeTime={handleChangeTime}
onClose={handleClose}
viewDate={viewDate}
onChangeViewDate={handleChangeViewDate}
/>
)}
{timepicker && (
<div className="vm-calendar__tabs">
<Tabs
activeItem={tab}
items={tabs}
onChange={handleChangeTab}
indicatorPlacement="top"
/>
</div>
)}
</div>
);
};


@ -5,11 +5,11 @@ import { ArrowDownIcon, ArrowDropDownIcon } from "../../../Icons";
interface CalendarHeaderProps {
viewDate: Dayjs
onChangeViewDate: (date: Dayjs) => void
displayYears: boolean
showArrowNav: boolean
toggleDisplayYears: () => void
}
const CalendarHeader: FC<CalendarHeaderProps> = ({ viewDate, displayYears, onChangeViewDate, toggleDisplayYears }) => {
const CalendarHeader: FC<CalendarHeaderProps> = ({ viewDate, showArrowNav, onChangeViewDate, toggleDisplayYears }) => {
const setPrevMonth = () => {
onChangeViewDate(viewDate.subtract(1, "month"));
@ -32,7 +32,7 @@ const CalendarHeader: FC<CalendarHeaderProps> = ({ viewDate, displayYears, onCha
<ArrowDropDownIcon/>
</div>
</div>
{!displayYears && (
{showArrowNav && (
<div className="vm-calendar-header-right">
<div
className="vm-calendar-header-right__prev"


@ -0,0 +1,50 @@
import React, { FC, useEffect, useMemo } from "preact/compat";
import dayjs, { Dayjs } from "dayjs";
import classNames from "classnames";
interface CalendarMonthsProps {
viewDate: Dayjs,
selectDate: Dayjs
onChangeViewDate: (date: Dayjs) => void
}
const MonthsList: FC<CalendarMonthsProps> = ({ viewDate, selectDate, onChangeViewDate }) => {
const today = dayjs().format("MM");
const currentMonths = useMemo(() => selectDate.format("MM"), [selectDate]);
const months: Dayjs[] = useMemo(() => {
return new Array(12).fill("").map((d, i) => dayjs(viewDate).month(i));
}, [viewDate]);
useEffect(() => {
const selectedEl = document.getElementById(`vm-calendar-year-${currentMonths}`);
if (!selectedEl) return;
selectedEl.scrollIntoView({ block: "center" });
}, []);
const createHandlerClick = (date: Dayjs) => () => {
onChangeViewDate(date);
};
return (
<div className="vm-calendar-years">
{months.map(m => (
<div
className={classNames({
"vm-calendar-years__year": true,
"vm-calendar-years__year_selected": m.format("MM") === currentMonths,
"vm-calendar-years__year_today": m.format("MM") === today
})}
id={`vm-calendar-year-${m.format("MM")}`}
key={m.format("MM")}
onClick={createHandlerClick(m)}
>
{m.format("MMMM")}
</div>
))}
</div>
);
};
export default MonthsList;


@ -9,9 +9,10 @@ interface CalendarYearsProps {
const YearsList: FC<CalendarYearsProps> = ({ viewDate, onChangeViewDate }) => {
const today = dayjs().format("YYYY");
const currentYear = useMemo(() => viewDate.format("YYYY"), [viewDate]);
const years: Dayjs[] = useMemo(() => {
const displayYears = 206;
const displayYears = 18;
const year = dayjs();
const startYear = year.subtract(displayYears/2, "year");
return new Array(displayYears).fill(startYear).map((d, i) => d.add(i, "year"));
@ -33,7 +34,8 @@ const YearsList: FC<CalendarYearsProps> = ({ viewDate, onChangeViewDate }) => {
<div
className={classNames({
"vm-calendar-years__year": true,
"vm-calendar-years__year_selected": y.format("YYYY") === currentYear
"vm-calendar-years__year_selected": y.format("YYYY") === currentYear,
"vm-calendar-years__year_today": y.format("YYYY") === today
})}
id={`vm-calendar-year-${y.format("YYYY")}`}
key={y.format("YYYY")}


@ -13,12 +13,6 @@
padding: 0 $padding-global;
}
&__tabs {
margin: 0 0-$padding-global 0-$padding-global;
border-top: $border-divider;
margin-top: $padding-global;
}
&-header {
display: grid;
grid-template-columns: 1fr auto;
@ -88,14 +82,14 @@
&-body {
display: grid;
grid-template-columns: repeat(7, 32px);
grid-template-rows: repeat(6, 32px);
grid-template-rows: repeat(7, 32px);
align-items: center;
justify-content: center;
gap: 2px;
@media (max-width: 500px) {
grid-template-columns: repeat(7, calc((100vw - ($padding-global * 2) - (6 * 2px))/7));
grid-template-rows: repeat(6, calc((100vw - ($padding-global * 2) - (5 * 2px))/7));
grid-template-rows: repeat(7, calc((100vw - ($padding-global * 2) - (6 * 2px))/7));
}
&-cell {
@ -166,143 +160,9 @@
background-color: $color-primary;
}
}
}
}
&-time-picker {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
&-clock {
$clock-size: 230px;
$clock-offset: 42px;
position: relative;
height: $clock-size;
width: $clock-size;
border-radius: 50%;
border: $border-divider;
box-shadow: $box-shadow;
box-sizing: content-box;
&:after {
content: '';
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
width: 6px;
height: 6px;
background-color: $color-primary;
border-radius: 50%;
}
&__arrow {
position: absolute;
top: 0;
left: calc(($clock-size/2) - 1px);
width: 2px;
margin-top: $padding-small;
height: calc(($clock-size/2) - $padding-small);
background-color: $color-primary;
transform-origin: bottom;
transition: transform 200ms ease-in-out;
opacity: 0.8;
z-index: 0;
&_offset {
margin-top: $clock-offset;
height: calc(($clock-size/2) - $clock-offset);
z-index: 2;
}
&:after {
content: '';
position: absolute;
top: 0;
left: 50%;
transform: translateX(-50%);
width: 30px;
height: 30px;
background-color: $color-primary;
border-radius: 50%;
}
}
&__time {
display: flex;
align-items: flex-start;
justify-content: center;
text-align: center;
padding-top: $padding-small;
position: absolute;
top: 0;
width: 30px;
left: calc(($clock-size/2) - 15px);
height: calc($clock-size/2);
transform-origin: bottom;
cursor: pointer;
z-index: 1;
&_hide {
display: none;
}
&_offset {
padding: 0;
margin-top: $clock-offset;
height: calc(($clock-size/2) - $clock-offset);
z-index: 2;
}
&:hover span {
background-color: rgba($color-black, 0.1);
}
span {
position: relative;
display: grid;
align-items: center;
justify-content: center;
min-width: 30px;
min-height: 30px;
border-radius: 50%;
transform-origin: center;
transition: background-color 300ms ease;
}
}
}
&-fields {
display: flex;
align-items: center;
justify-content: space-between;
margin-top: $padding-global;
&_dark &__input {
border-color: $color-text-disabled;
}
span {
margin: 0 $padding-small;
}
&__input {
width: 64px;
height: 32px;
border: $border-divider;
border-radius: $border-radius-small;
font-size: $font-size-medium;
padding: 2px $padding-small;
text-align: center;
background-color: transparent;
color: $color-text;
&:focus {
border-color: $color-primary;
}
&_today {
border: 1px solid $color-primary;
}
}
}


@ -9,7 +9,6 @@ interface DatePickerProps {
date: string | Date | Dayjs,
targetRef: Ref<HTMLElement>
format?: string
timepicker?: boolean
label?: string
onChange: (val: string) => void
}
@ -18,12 +17,11 @@ const DatePicker = forwardRef<HTMLDivElement, DatePickerProps>(({
date,
targetRef,
format = DATE_TIME_FORMAT,
timepicker,
onChange,
label
}, ref) => {
const [openCalendar, setOpenCalendar] = useState(false);
const dateDayjs = useMemo(() => date ? dayjs.tz(date) : dayjs().tz(), [date]);
const dateDayjs = useMemo(() => dayjs(date).isValid() ? dayjs.tz(date) : dayjs().tz(), [date]);
const { isMobile } = useDeviceDetect();
const toggleOpenCalendar = () => {
@ -35,8 +33,8 @@ const DatePicker = forwardRef<HTMLDivElement, DatePickerProps>(({
};
const handleChangeDate = (val: string) => {
if (!timepicker) handleCloseCalendar();
onChange(val);
handleCloseCalendar();
};
const handleKeyUp = (e: KeyboardEvent) => {
@ -71,9 +69,7 @@ const DatePicker = forwardRef<HTMLDivElement, DatePickerProps>(({
<Calendar
date={dateDayjs}
format={format}
timepicker={timepicker}
onChange={handleChangeDate}
onClose={handleCloseCalendar}
/>
</div>
</Popper>


@ -0,0 +1,127 @@
import React, { FC, useEffect, useRef, useState } from "preact/compat";
import { ChangeEvent, KeyboardEvent } from "react";
import { CalendarIcon } from "../../Icons";
import DatePicker from "../DatePicker";
import Button from "../../Button/Button";
import { DATE_TIME_FORMAT } from "../../../../constants/date";
import InputMask from "react-input-mask";
import dayjs from "dayjs";
import classNames from "classnames";
import "./style.scss";
const formatStringDate = (val: string) => {
return dayjs(val).isValid() ? dayjs.tz(val).format(DATE_TIME_FORMAT) : val;
};
interface DateTimeInputProps {
value?: string;
label: string;
pickerLabel: string;
pickerRef: React.RefObject<HTMLDivElement>;
onChange: (date: string) => void;
onEnter: () => void;
}
const DateTimeInput: FC<DateTimeInputProps> = ({
value = "",
label,
pickerLabel,
pickerRef,
onChange,
onEnter
}) => {
const wrapperRef = useRef<HTMLDivElement>(null);
const [inputRef, setInputRef] = useState<HTMLInputElement | null>(null);
const [maskedValue, setMaskedValue] = useState(formatStringDate(value));
const [focusToTime, setFocusToTime] = useState(false);
const [awaitChangeForEnter, setAwaitChangeForEnter] = useState(false);
const error = dayjs(maskedValue).isValid() ? "" : "Expected format: YYYY-MM-DD HH:mm:ss";
const handleMaskedChange = (e: ChangeEvent<HTMLInputElement>) => {
setMaskedValue(e.currentTarget.value);
};
const handleBlur = () => {
onChange(maskedValue);
};
const handleKeyUp = (e: KeyboardEvent) => {
if (e.key === "Enter") {
onChange(maskedValue);
setAwaitChangeForEnter(true);
}
};
const handleChangeDate = (val: string) => {
setMaskedValue(val);
setFocusToTime(true);
};
useEffect(() => {
const newValue = formatStringDate(value);
if (newValue !== maskedValue) {
setMaskedValue(newValue);
}
if (awaitChangeForEnter) {
onEnter();
setAwaitChangeForEnter(false);
}
}, [value]);
useEffect(() => {
if (focusToTime && inputRef) {
inputRef.focus();
inputRef.setSelectionRange(11, 11);
setFocusToTime(false);
}
}, [focusToTime]);
return (
<div
className={classNames({
"vm-date-time-input": true,
"vm-date-time-input_error": error
})}
>
<label>{label}</label>
<InputMask
tabIndex={1}
inputRef={setInputRef}
mask="9999-99-99 99:99:99"
placeholder="YYYY-MM-DD HH:mm:ss"
value={maskedValue}
autoCapitalize={"none"}
inputMode={"numeric"}
maskChar={null}
onChange={handleMaskedChange}
onBlur={handleBlur}
onKeyUp={handleKeyUp}
/>
{error && (
<span className="vm-date-time-input__error-text">{error}</span>
)}
<div
className="vm-date-time-input__icon"
ref={wrapperRef}
>
<Button
variant="text"
color="gray"
size="small"
startIcon={<CalendarIcon/>}
/>
</div>
<DatePicker
label={pickerLabel}
ref={pickerRef}
date={maskedValue}
onChange={handleChangeDate}
targetRef={wrapperRef}
/>
</div>
);
};
export default DateTimeInput;


@ -0,0 +1,61 @@
@use "src/styles/variables" as *;
.vm-date-time-input {
position: relative;
display: grid;
grid-template-columns: 1fr;
gap: $padding-small 0;
align-items: center;
justify-content: center;
margin-bottom: $padding-global;
cursor: pointer;
transition: color 200ms ease-in-out, border-bottom-color 300ms ease;
&:hover input {
border-bottom-color: $color-primary;
}
label {
grid-column: 1/3;
width: 100%;
font-size: $font-size-small;
color: $color-text-secondary;
user-select: none;
}
&__icon {
position: absolute;
bottom: 2px;
right: 0;
}
input {
padding: 0 0 $padding-small;
border-bottom: $border-divider;
border-top: none;
border-left: none;
border-right: none;
background: transparent;
color: $color-text;
&:focus {
border-bottom-color: $color-primary;
}
}
&_error input {
border-color: $color-error;
&:focus {
border-bottom-color: $color-error;
}
}
&__error-text {
color: $color-error;
font-size: $font-size-small;
position: absolute;
left: 0;
bottom: -$font-size-small;
}
}

View file

@ -1,4 +1,5 @@
import React, { FC, useState } from "preact/compat";
import React, { FC, useEffect, useRef, useState } from "preact/compat";
import { MouseEvent } from "react";
import LineProgress from "../../Main/LineProgress/LineProgress";
import Trace from "../Trace";
import { ArrowDownIcon } from "../../Main/Icons";
@ -6,6 +7,7 @@ import "./style.scss";
import classNames from "classnames";
import { useAppState } from "../../../state/common/StateContext";
import useDeviceDetect from "../../../hooks/useDeviceDetect";
import Button from "../../Main/Button/Button";
interface RecursiveProps {
trace: Trace;
@ -20,6 +22,23 @@ const NestedNav: FC<RecursiveProps> = ({ trace, totalMsec }) => {
const { isDarkTheme } = useAppState();
const { isMobile } = useDeviceDetect();
const [openLevels, setOpenLevels] = useState({} as OpenLevels);
const messageRef = useRef<HTMLDivElement>(null);
const [isExpanded, setIsExpanded] = useState(false);
const [showFullMessage, setShowFullMessage] = useState(false);
useEffect(() => {
if (!messageRef.current) return;
const contentElement = messageRef.current;
const child = messageRef.current.children[0];
const { height } = child.getBoundingClientRect();
setIsExpanded(height > contentElement.clientHeight);
}, [trace]);
const handleClickShowMore = (e: MouseEvent<HTMLButtonElement>) => {
e.stopPropagation();
setShowFullMessage(prev => !prev);
};
const handleListClick = (level: number) => () => {
setOpenLevels((prevState:OpenLevels) => {
@ -54,11 +73,28 @@ const NestedNav: FC<RecursiveProps> = ({ trace, totalMsec }) => {
<div className="vm-nested-nav-header__progress">
<LineProgress value={progress}/>
</div>
<div className="vm-nested-nav-header__message">
{trace.message}
<div
className={classNames({
"vm-nested-nav-header__message": true,
"vm-nested-nav-header__message_show-full": showFullMessage,
})}
ref={messageRef}
>
<span>{trace.message}</span>
</div>
<div className="vm-nested-nav-header__duration">
{`duration: ${trace.duration} ms`}
<div className="vm-nested-nav-header-bottom">
<div className="vm-nested-nav-header-bottom__duration">
{`duration: ${trace.duration} ms`}
</div>
{(isExpanded || showFullMessage) && (
<Button
variant="text"
size="small"
onClick={handleClickShowMore}
>
{showFullMessage ? "Hide" : "Show more"}
</Button>
)}
</div>
</div>
{openLevels[trace.idValue] && <div>

View file

@ -43,12 +43,33 @@
}
&__message {
position: relative;
grid-column: 2;
line-height: 130%;
overflow: hidden;
text-overflow: ellipsis;
display: -moz-box;
-moz-box-orient: vertical;
display: -webkit-box;
-webkit-line-clamp: 3;
-webkit-box-orient: vertical;
line-clamp: 3;
&_show-full {
display: block;
overflow: visible;
}
}
&__duration {
&-bottom {
display: grid;
grid-template-columns: 1fr auto;
align-items: center;
grid-column: 2;
color: $color-text-secondary;
&__duration {
color: $color-text-secondary;
}
}
}
}

View file

@ -0,0 +1,68 @@
import { useState, useEffect } from "preact/compat";
const useDropzone = (node: HTMLElement | null): {dragging: boolean, files: File[]} => {
const [files, setFiles] = useState<File[]>([]);
const [dragging, setDragging] = useState(false);
const handleAddFiles = (fileList: FileList) => {
const filesArray = Array.from(fileList || []);
setFiles(filesArray);
};
// handle drag events
const handleDrag = (e: DragEvent) => {
e.preventDefault();
e.stopPropagation();
if (e.type === "dragenter" || e.type === "dragover") {
setDragging(true);
} else if (e.type === "dragleave") {
setDragging(false);
}
};
// triggers when file is dropped
const handleDrop = (e: DragEvent) => {
e.preventDefault();
e.stopPropagation();
setDragging(false);
if (e?.dataTransfer?.files && e.dataTransfer.files[0]) {
handleAddFiles(e.dataTransfer.files);
}
};
// triggers when file is pasted
const handlePaste = (e: ClipboardEvent) => {
const items = e.clipboardData?.items;
if (!items) return;
const jsonFiles = Array.from(items)
.filter(item => item.type === "application/json")
.map(item => item.getAsFile())
.filter(file => file !== null) as File[];
setFiles(jsonFiles);
};
useEffect(() => {
node?.addEventListener("dragenter", handleDrag);
node?.addEventListener("dragleave", handleDrag);
node?.addEventListener("dragover", handleDrag);
node?.addEventListener("drop", handleDrop);
node?.addEventListener("paste", handlePaste);
return () => {
node?.removeEventListener("dragenter", handleDrag);
node?.removeEventListener("dragleave", handleDrag);
node?.removeEventListener("dragover", handleDrag);
node?.removeEventListener("drop", handleDrop);
node?.removeEventListener("paste", handlePaste);
};
}, [node]);
return {
files,
dragging,
};
};
export default useDropzone;

View file

@ -0,0 +1,35 @@
import React, { FC } from "preact/compat";
import Button from "../../../components/Main/Button/Button";
import Tooltip from "../../../components/Main/Tooltip/Tooltip";
import { ChangeEvent } from "react";
interface TraceUploadButtonsProps {
onOpenModal: () => void;
onChange: (e: ChangeEvent<HTMLInputElement>) => void;
}
const TraceUploadButtons: FC<TraceUploadButtonsProps> = ({ onOpenModal, onChange }) => (
<div className="vm-trace-page-controls">
<Button
variant="outlined"
onClick={onOpenModal}
>
Paste JSON
</Button>
<Tooltip title="The file must contain tracing information in JSON format">
<Button>
Upload Files
<input
id="json"
type="file"
accept="application/json"
multiple
title=" "
onChange={onChange}
/>
</Button>
</Tooltip>
</div>
);
export default TraceUploadButtons;

View file

@ -2,7 +2,6 @@ import React, { FC, useEffect, useMemo, useState } from "preact/compat";
import { ChangeEvent } from "react";
import Trace from "../../components/TraceQuery/Trace";
import TracingsView from "../../components/TraceQuery/TracingsView";
import Tooltip from "../../components/Main/Tooltip/Tooltip";
import Button from "../../components/Main/Button/Button";
import Alert from "../../components/Main/Alert/Alert";
import "./style.scss";
@ -11,6 +10,8 @@ import Modal from "../../components/Main/Modal/Modal";
import JsonForm from "./JsonForm/JsonForm";
import { ErrorTypes } from "../../types";
import { useSearchParams } from "react-router-dom";
import useDropzone from "../../hooks/useDropzone";
import TraceUploadButtons from "./TraceUploadButtons/TraceUploadButtons";
const TracePage: FC = () => {
const [openModal, setOpenModal] = useState(false);
@ -46,9 +47,7 @@ const TracePage: FC = () => {
}
};
const handleChange = (e: ChangeEvent<HTMLInputElement>) => {
setErrors([]);
const files = Array.from(e.target.files || []);
const handleReadFiles = (files: File[]) => {
files.map(f => {
const reader = new FileReader();
const filename = f?.name || "";
@ -58,6 +57,12 @@ const TracePage: FC = () => {
};
reader.readAsText(f);
});
};
const handleChange = (e: ChangeEvent<HTMLInputElement>) => {
setErrors([]);
const files = Array.from(e.target.files || []);
handleReadFiles(files);
e.target.value = "";
};
@ -78,29 +83,12 @@ const TracePage: FC = () => {
setSearchParams({});
}, []);
const UploadButtons = () => (
<div className="vm-trace-page-controls">
<Button
variant="outlined"
onClick={handleOpenModal}
>
Paste JSON
</Button>
<Tooltip title="The file must contain tracing information in JSON format">
<Button>
Upload Files
<input
id="json"
type="file"
accept="application/json"
multiple
title=" "
onChange={handleChange}
/>
</Button>
</Tooltip>
</div>
);
const { files, dragging } = useDropzone(document.body);
useEffect(() => {
handleReadFiles(files);
}, [files]);
return (
<div className="vm-trace-page">
@ -126,7 +114,12 @@ const TracePage: FC = () => {
))}
</div>
<div>
{hasTraces && <UploadButtons/>}
{hasTraces && (
<TraceUploadButtons
onOpenModal={handleOpenModal}
onChange={handleChange}
/>
)}
</div>
</div>
@ -158,8 +151,13 @@ const TracePage: FC = () => {
</a>
{"\n"}
Tracing graph will be displayed after file upload.
{"\n"}
Attach files by dragging & dropping, selecting or pasting them.
</p>
<UploadButtons/>
<TraceUploadButtons
onOpenModal={handleOpenModal}
onChange={handleChange}
/>
</div>
)}
@ -177,6 +175,10 @@ const TracePage: FC = () => {
/>
</Modal>
)}
{dragging && (
<div className="vm-trace-page__dropzone"/>
)}
</div>
);
};

View file

@ -75,4 +75,19 @@
line-height: 1.8;
}
}
&__dropzone {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
display: flex;
align-items: center;
justify-content: center;
box-shadow: inset $color-primary 0 0 10px;
opacity: 0.5;
z-index: 100;
pointer-events: none;
}
}

View file

@ -2746,7 +2746,7 @@
"type": "prometheus",
"uid": "$ds"
},
"description": "The percentage of slow inserts comparing to total insertion rate during the last 5 minutes. \n\nThe less value is better. If percentage remains high (>10%) during extended periods of time, then it is likely more RAM is needed for optimal handling of the current number of active time series. \n\nIn general, VictoriaMetrics requires ~1KB or RAM per active time series, so it should be easy calculating the required amounts of RAM for the current workload according to capacity planning docs. But the resulting number may be far from the real number because the required amounts of memory depends on may other factors such as the number of labels per time series and the length of label values.",
"description": "The percentage of slow inserts comparing to total insertion rate during the last 5 minutes. \n\nThe less value is better. If percentage remains high (>10%) during extended periods of time, then it is likely more RAM is needed for optimal handling of the current number of active time series. \n\nIn general, VictoriaMetrics requires ~1KB or RAM per active time series, so it should be easy calculating the required amounts of RAM for the current workload according to capacity planning docs. But the resulting number may be far from the real number because the required amounts of memory depends on many other factors such as the number of labels per time series and the length of label values. See also https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3976#issuecomment-1476883183",
"fieldConfig": {
"defaults": {
"color": {

View file

@ -2803,7 +2803,7 @@
"type": "prometheus",
"uid": "$ds"
},
"description": "The percentage of slow inserts comparing to total insertion rate during the last 5 minutes. \n\nThe less value is better. If percentage remains high (>10%) during extended periods of time, then it is likely more RAM is needed for optimal handling of the current number of active time series. \n\nIn general, VictoriaMetrics requires ~1KB or RAM per active time series, so it should be easy calculating the required amounts of RAM for the current workload according to capacity planning docs. But the resulting number may be far from the real number because the required amounts of memory depends on may other factors such as the number of labels per time series and the length of label values.",
"description": "The percentage of slow inserts comparing to total insertion rate during the last 5 minutes. \n\nThe less value is better. If percentage remains high (>10%) during extended periods of time, then it is likely more RAM is needed for optimal handling of the current number of active time series. \n\nIn general, VictoriaMetrics requires ~1KB or RAM per active time series, so it should be easy calculating the required amounts of RAM for the current workload according to capacity planning docs. But the resulting number may be far from the real number because the required amounts of memory depends on many other factors such as the number of labels per time series and the length of label values. See also https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3976#issuecomment-1476883183",
"fieldConfig": {
"defaults": {
"color": {

View file

@ -105,13 +105,9 @@ app-via-docker-linux-arm:
CGO_ENABLED=0 GOOS=linux GOARCH=arm $(MAKE) app-via-docker-goos-goarch
app-via-docker-linux-arm64:
ifeq ($(APP_NAME),vmagent)
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 $(MAKE) app-via-docker-goos-goarch
else
APP_SUFFIX='-linux-arm64' \
DOCKER_OPTS='--env CGO_ENABLED=1 --env GOOS=linux --env GOARCH=arm64 --env CC=/opt/cross-builder/aarch64-linux-musl-cross/bin/aarch64-linux-musl-gcc' \
$(MAKE) app-via-docker
endif
app-via-docker-linux-ppc64le:
CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le $(MAKE) app-via-docker-goos-goarch
@ -132,7 +128,7 @@ app-via-docker-openbsd-amd64:
CGO_ENABLED=0 GOOS=openbsd GOARCH=amd64 $(MAKE) app-via-docker-goos-goarch
app-via-docker-windows-amd64:
APP_SUFFIX='-$(GOARCH)' \
APP_SUFFIX='-amd64' \
DOCKER_OPTS='--env CGO_ENABLED=0 --env GOOS=windows --env GOARCH=amd64' \
$(MAKE) app-via-docker-windows
@ -162,12 +158,7 @@ package-via-docker-arm:
GOARCH=arm $(MAKE) package-via-docker-goarch-nocgo
package-via-docker-arm64:
ifeq ($(APP_NAME),vmagent)
GOARCH=arm64 $(MAKE) package-via-docker-goarch-nocgo
else
$(MAKE) package-via-docker-goarch-arm64
endif
package-via-docker-ppc64le:
GOARCH=ppc64le $(MAKE) package-via-docker-goarch-nocgo

View file

@ -152,7 +152,8 @@ groups:
dashboard: "http://localhost:3000/d/oS7Bi_0Wz?viewPanel=108"
summary: "Percentage of slow inserts is more than 5% for the last 15m"
description: "High rate of slow inserts may be a sign of resource exhaustion
for the current load. It is likely more RAM is needed for optimal handling of the current number of active time series."
for the current load. It is likely more RAM is needed for optimal handling of the current number of active time series.
See also https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3976#issuecomment-1476883183"
- alert: ProcessNearFDLimits
expr: (process_max_fds - process_open_fds) < 100

View file

@ -62,3 +62,14 @@ groups:
summary: "Too many logs printed for job \"{{ $labels.job }}\" ({{ $labels.instance }})"
description: "Logging rate for job \"{{ $labels.job }}\" ({{ $labels.instance }}) is {{ $value }} for last 15m.\n
Worth to check logs for specific error messages."
- alert: TooManyTSIDMisses
expr: sum(rate(vm_missing_tsids_for_metric_id_total[5m])) by (job, instance) > 0
for: 10m
labels:
severity: critical
annotations:
summary: "Too many TSID misses for job \"{{ $labels.job }}\" ({{ $labels.instance }})"
description: "The rate of TSID misses during query lookups is too high for \"{{ $labels.job }}\" ({{ $labels.instance }}).\n
Make sure you're running VictoriaMetrics v1.85.3 or higher.\n
Related issue https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3502"

View file

@ -132,7 +132,8 @@ groups:
dashboard: "http://localhost:3000/d/wNf0q_kZk?viewPanel=68&var-instance={{ $labels.instance }}"
summary: "Percentage of slow inserts is more than 5% on \"{{ $labels.instance }}\" for the last 15m"
description: "High rate of slow inserts on \"{{ $labels.instance }}\" may be a sign of resource exhaustion
for the current load. It is likely more RAM is needed for optimal handling of the current number of active time series."
for the current load. It is likely more RAM is needed for optimal handling of the current number of active time series.
See also https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3976#issuecomment-1476883183"
- alert: LabelsLimitExceededOnIngestion
expr: sum(increase(vm_metrics_with_dropped_labels_total[5m])) by (instance) > 0

View file

@ -15,7 +15,21 @@ The following tip changes can be tested by building VictoriaMetrics components f
## tip
**Update note: this release contains a backwards-incompatible change in the storage data format,
so the previous versions of VictoriaMetrics will exit with the `unexpected number of substrings in the part name` error when trying to run them on the data
created by v1.90.0 or newer versions. The solution is to upgrade to v1.90.0 or newer releases**
* FEATURE: publish VictoriaMetrics binaries for Windows. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3236), [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3821) and [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/70).
* FEATURE: log metrics with truncated labels if the length of a label value in the ingested metric exceeds `-maxLabelValueLen`. This should simplify debugging for this case.
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for [VictoriaMetrics remote write protocol](https://docs.victoriametrics.com/vmagent.html#victoriametrics-remote-write-protocol) when [sending / receiving data to / from Kafka](https://docs.victoriametrics.com/vmagent.html#kafka-integration). This protocol allows saving egress network bandwidth costs when sending data from `vmagent` to `Kafka` located in another datacenter or availability zone. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1225).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add `--kafka.consumer.topic.concurrency` command-line flag. It controls the number of Kafka consumer workers used by `vmagent`. It should eliminate the need to start multiple `vmagent` instances to improve data transfer rate. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1957).
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for [Kafka producer and consumer](https://docs.victoriametrics.com/vmagent.html#kafka-integration) on `arm64` machines. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2271).
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add support for drag'n'drop and paste from clipboard in the "Trace analyzer" page. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3971).
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): hide messages longer than 3 lines in the trace. You can view the full message by clicking on the `show more` button. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3971).
* FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): add the ability to manually input date and time when selecting a time range. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/3968).
* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): automatically disable progress bar when TTY isn't available. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3823).
* BUGFIX: prevent slow [snapshot creation](https://docs.victoriametrics.com/#how-to-work-with-snapshots) under high data ingestion rate. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3551).
## [v1.89.1](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.89.1)

View file

@ -202,7 +202,7 @@ Changing scrape configuration is possible with text editor:
vi $SNAP_DATA/var/snap/victoriametrics/current/etc/victoriametrics-scrape-config.yaml
```
After changes were made, trigger config re-read with the command `curl 127.0.0.1:8248/-/reload`.
After changes were made, trigger config re-read with the command `curl 127.0.0.1:8428/-/reload`.
## Prometheus setup
@ -1448,12 +1448,14 @@ can be configured with the `-inmemoryDataFlushInterval` command-line flag (note
In-memory parts are persisted to disk into `part` directories under the `<-storageDataPath>/data/small/YYYY_MM/` folder,
where `YYYY_MM` is the month partition for the stored data. For example, `2022_11` is the partition for `parts`
with [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) from `November 2022`.
Each partition directory contains a `parts.json` file with the actual list of parts in the partition.
The `part` directory has the following name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`, where:
Every `part` directory contains a `metadata.json` file with the following fields:
- `rowsCount` - the number of [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) stored in the part
- `blocksCount` - the number of blocks stored in the part (see details about blocks below)
- `minTimestamp` and `maxTimestamp` - minimum and maximum timestamps across raw samples stored in the part
- `RowsCount` - the number of [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) stored in the part
- `BlocksCount` - the number of blocks stored in the part (see details about blocks below)
- `MinTimestamp` and `MaxTimestamp` - minimum and maximum timestamps across raw samples stored in the part
- `MinDedupInterval` - the [deduplication interval](#deduplication) applied to the given part.
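For illustration, the contents of a `metadata.json` file might look like the following sketch; the values are made up, and timestamps are in milliseconds since the Unix epoch:

```json
{
  "RowsCount": 284334,
  "BlocksCount": 71,
  "MinTimestamp": 1667260800000,
  "MaxTimestamp": 1669852799999,
  "MinDedupInterval": 0
}
```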
Each `part` consists of `blocks` sorted by internal time series id (aka `TSID`).
Each `block` contains up to 8K [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples),
@ -1475,9 +1477,8 @@ for fast block lookups, which belong to the given `TSID` and cover the given tim
and [freeing up disk space for the deleted time series](#how-to-delete-time-series) are performed during the merge
Newly added `parts` either successfully appear in the storage or fail to appear.
The newly added `parts` are created in a temporary directory under the `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` folder.
When the newly added `part` is fully written and [fsynced](https://man7.org/linux/man-pages/man2/fsync.2.html)
to a temporary directory, then it is atomically moved to the storage directory.
The newly added `part` is atomically registered in the `parts.json` file under the corresponding partition
after it is fully written and [fsynced](https://man7.org/linux/man-pages/man2/fsync.2.html) to the storage.
Thanks to this algorithm, the storage never contains partially created parts, even if a hardware power-off
occurs in the middle of writing the `part` to disk - such incompletely written `parts`
are automatically deleted on the next VictoriaMetrics start.
@ -1506,8 +1507,7 @@ Retention is configured with the `-retentionPeriod` command-line flag, which tak
Data is split in per-month partitions inside `<-storageDataPath>/data/{small,big}` folders.
Data partitions outside the configured retention are deleted on the first day of the new month.
Each partition consists of one or more data parts with the following name pattern `rowsCount_blocksCount_minTimestamp_maxTimestamp`.
Data parts outside of the configured retention are eventually deleted during
Each partition consists of one or more data parts. Data parts outside of the configured retention are eventually deleted during
[background merge](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
The maximum disk space usage for a given `-retentionPeriod` is going to be (`-retentionPeriod` + 1) months.
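For example, a minimal sketch of starting single-node VictoriaMetrics with a 6-month retention (the binary name and data path are assumptions):

```
# Keeps ~6 months of data; disk usage may temporarily cover up to 7 months
# of data until the oldest partition is dropped.
./victoria-metrics-prod -storageDataPath=/var/lib/victoria-metrics -retentionPeriod=6
```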

View file

@ -205,7 +205,7 @@ Changing scrape configuration is possible with text editor:
vi $SNAP_DATA/var/snap/victoriametrics/current/etc/victoriametrics-scrape-config.yaml
```
After changes were made, trigger config re-read with the command `curl 127.0.0.1:8248/-/reload`.
After changes were made, trigger config re-read with the command `curl 127.0.0.1:8428/-/reload`.
## Prometheus setup
@ -1451,12 +1451,14 @@ can be configured with the `-inmemoryDataFlushInterval` command-line flag (note
In-memory parts are persisted to disk into `part` directories under the `<-storageDataPath>/data/small/YYYY_MM/` folder,
where `YYYY_MM` is the month partition for the stored data. For example, `2022_11` is the partition for `parts`
with [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) from `November 2022`.
Each partition directory contains a `parts.json` file with the actual list of parts in the partition.
The `part` directory has the following name pattern: `rowsCount_blocksCount_minTimestamp_maxTimestamp`, where:
Every `part` directory contains a `metadata.json` file with the following fields:
- `rowsCount` - the number of [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) stored in the part
- `blocksCount` - the number of blocks stored in the part (see details about blocks below)
- `minTimestamp` and `maxTimestamp` - minimum and maximum timestamps across raw samples stored in the part
- `RowsCount` - the number of [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples) stored in the part
- `BlocksCount` - the number of blocks stored in the part (see details about blocks below)
- `MinTimestamp` and `MaxTimestamp` - minimum and maximum timestamps across raw samples stored in the part
- `MinDedupInterval` - the [deduplication interval](#deduplication) applied to the given part.
Each `part` consists of `blocks` sorted by internal time series id (aka `TSID`).
Each `block` contains up to 8K [raw samples](https://docs.victoriametrics.com/keyConcepts.html#raw-samples),
@ -1478,9 +1480,8 @@ for fast block lookups, which belong to the given `TSID` and cover the given tim
and [freeing up disk space for the deleted time series](#how-to-delete-time-series) are performed during the merge
Newly added `parts` either successfully appear in the storage or fail to appear.
The newly added `parts` are created in a temporary directory under the `<-storageDataPath>/data/{small,big}/YYYY_MM/tmp` folder.
When the newly added `part` is fully written and [fsynced](https://man7.org/linux/man-pages/man2/fsync.2.html)
to a temporary directory, then it is atomically moved to the storage directory.
The newly added `part` is atomically registered in the `parts.json` file under the corresponding partition
after it is fully written and [fsynced](https://man7.org/linux/man-pages/man2/fsync.2.html) to the storage.
Thanks to this algorithm, the storage never contains partially created parts, even if a hardware power-off
occurs in the middle of writing the `part` to disk - such incompletely written `parts`
are automatically deleted on the next VictoriaMetrics start.
@ -1509,8 +1510,7 @@ Retention is configured with the `-retentionPeriod` command-line flag, which tak
Data is split in per-month partitions inside `<-storageDataPath>/data/{small,big}` folders.
Data partitions outside the configured retention are deleted on the first day of the new month.
Each partition consists of one or more data parts with the following name pattern `rowsCount_blocksCount_minTimestamp_maxTimestamp`.
Data parts outside of the configured retention are eventually deleted during
Each partition consists of one or more data parts. Data parts outside of the configured retention are eventually deleted during
[background merge](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282).
The maximum disk space usage for a given `-retentionPeriod` is going to be (`-retentionPeriod` + 1) months.

View file

@ -186,6 +186,11 @@ There are the following most commons reasons for slow data ingestion in Victoria
Issues like this are very hard to catch via [official Grafana dashboard for cluster version of VictoriaMetrics](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html#monitoring)
and proper diagnosis would require checking resource usage on the instances where VictoriaMetrics runs.
6. If you see `TooHighSlowInsertsRate` [alert](https://docs.victoriametrics.com/#monitoring) when single-node VictoriaMetrics or `vmstorage` has enough
free CPU and RAM, then increase the `-cacheExpireDuration` command-line flag at single-node VictoriaMetrics or at `vmstorage` to a value
which exceeds the interval between ingested samples for the same time series (aka `scrape_interval`).
See [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3976#issuecomment-1476883183) for more details.
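A hedged sketch of this tuning, assuming samples for the same series arrive once per hour (the flag value below is an illustration, not a recommendation):

```
# Cache entries should outlive the 1h gap between samples for the same series.
./victoria-metrics-prod -cacheExpireDuration=2h
```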
## Slow queries
Some queries may take more time and resources (CPU, RAM, network bandwidth) than others.

View file

@ -34,38 +34,41 @@ See details about all supported options in the [vmgateway documentation](https:/
[Keycloak](https://www.keycloak.org/) is an open source identity service that can be used to issue JWT tokens.
1. Log in with admin credentials to your Keycloak instance
2. Go to `Clients` -> `Create`.
Use `OpenID Connect` as `Client Type`.
Specify `grafana` as `Client ID`.
Click `Next`.
2. Go to `Clients` -> `Create`.<br>
Use `OpenID Connect` as `Client Type`.<br>
Specify `grafana` as `Client ID`.<br>
Click `Next`.<br>
<img src="grafana-vmgateway-openid-configuration/create-client-1.png" width="800">
3. Enable `Client authentication`.
Enable `Authorization`.
<img src="grafana-vmgateway-openid-configuration/create-client-2.png" width="800">
Click `Next`.
4. Add Grafana URL as `Valid Redirect URIs`. For example, `http://localhost:3000/`.
<img src="grafana-vmgateway-openid-configuration/create-client-3.png" width="800">
Click `Save`.
5. Go to `Clients` -> `grafana` -> `Credentials`.
<img src="grafana-vmgateway-openid-configuration/client-secret.png" width="800">
Copy the value of `Client secret`. It will be used later in Grafana configuration.
6. Go to `Clients` -> `grafana` -> `Client scopes`.
Click at `grafana-dedicated` -> `Add mapper`.
<img src="grafana-vmgateway-openid-configuration/create-mapper-1.png" width="800">
<img src="grafana-vmgateway-openid-configuration/create-mapper-2.png" width="800">
Configure the mapper as follows
- `Mapper Type` as `User Attribute`.
3. Enable `Client authentication`.<br>
Enable `Authorization`.<br>
<img src="grafana-vmgateway-openid-configuration/create-client-2.png" width="800"><br>
Click `Next`.<br>
4. Add Grafana URL as `Root URL`. For example, `http://localhost:3000/`.<br>
<img src="grafana-vmgateway-openid-configuration/create-client-3.png" width="800"><br>
Click `Save`.<br>
5. Go to `Clients` -> `grafana` -> `Credentials`.<br>
<img src="grafana-vmgateway-openid-configuration/client-secret.png" width="800"><br>
Copy the value of `Client secret`. It will be used later in Grafana configuration.<br>
6. Go to `Clients` -> `grafana` -> `Client scopes`.<br>
Click on `grafana-dedicated` -> `Add mapper` -> `By configuration` -> `User attribute`.<br>
<img src="grafana-vmgateway-openid-configuration/create-mapper-1.png" width="800"><br>
<img src="grafana-vmgateway-openid-configuration/create-mapper-2.png" width="800"><br>
Configure the mapper as follows:<br>
- `Name` as `vm_access`.
- `Token Claim Name` as `vm_access`.
- `User Attribute` as `vm_access`.
- `Claim JSON Type` as `JSON`.
Enable `Add to ID token` and `Add to access token`.
<img src="grafana-vmgateway-openid-configuration/create-mapper-3.png" width="800">
Click `Save`.
7. Go to `Users` -> select user to configure claims -> `Attributes`.
Specify `vm_access` as `Key`.
Specify `{"tenant_id" : {"account_id": 0, "project_id": 0 }}` as `Value`.
<img src="grafana-vmgateway-openid-configuration/user-attributes.png" width="800">
Enable `Add to ID token` and `Add to access token`.<br>
<img src="grafana-vmgateway-openid-configuration/create-mapper-3.png" width="800"><br>
Click `Save`.<br>
7. Go to `Users` -> select user to configure claims -> `Attributes`.<br>
Specify `vm_access` as `Key`.<br>
For the purpose of this example, we will use 2 users:<br>
- for the first user we will specify `{"tenant_id" : {"account_id": 0, "project_id": 0 },"extra_labels":{ "team": "admin" }}` as `Value`.
- for the second user we will specify `{"tenant_id" : {"account_id": 0, "project_id": 1 },"extra_labels":{ "team": "dev" }}` as `Value`.
<br>
<img src="grafana-vmgateway-openid-configuration/user-attributes.png" width="800"><br>
Click `Save`.
## Configure grafana
@ -187,8 +190,146 @@ URL should point to the vmgateway instance.
You can also use VictoriaMetrics [Grafana datasource](https://github.com/VictoriaMetrics/grafana-datasource) plugin.
See installation instructions [here](https://github.com/VictoriaMetrics/grafana-datasource#installation).
Enable `Forward OAuth identity` flag.
Enable `Forward OAuth identity` flag.<br>
<img src="grafana-vmgateway-openid-configuration/grafana-ds.png" width="800">
Now you can use Grafana to query metrics from the specified tenant.
Users with `vm_access` claim will be able to query metrics from the specified tenant.
## Test multi-tenant access
For testing purposes, we will set up the following services via a [docker-compose](https://docs.docker.com/compose/) manifest:
- Grafana
- Keycloak
- vmagent to generate test metrics
- VictoriaMetrics cluster
- vmgateway configured to work in cluster mode
- VictoriaMetrics single node
- vmgateway configured to work in single node mode
```yaml
version: '3'
services:
keycloak:
image: quay.io/keycloak/keycloak:21.0
command:
- start-dev
ports:
- 3001:8080
environment:
KEYCLOAK_ADMIN: admin
KEYCLOAK_ADMIN_PASSWORD: change_me
grafana:
image: grafana/grafana-oss:9.4.3
network_mode: host
volumes:
- ./grafana.ini:/etc/grafana/grafana.ini
- grafana_data:/var/lib/grafana/
vmsingle:
image: victoriametrics/victoria-metrics:v1.89.1
command:
- -httpListenAddr=0.0.0.0:8429
vmstorage:
image: victoriametrics/vmstorage:v1.89.1-cluster
vminsert:
image: victoriametrics/vminsert:v1.89.1-cluster
command:
- -storageNode=vmstorage:8400
- -httpListenAddr=0.0.0.0:8480
vmselect:
image: victoriametrics/vmselect:v1.89.1-cluster
command:
- -storageNode=vmstorage:8401
- -httpListenAddr=0.0.0.0:8481
vmagent:
image: victoriametrics/vmagent:v1.89.1
volumes:
- ./scrape.yaml:/etc/vmagent/config.yaml
command:
- -promscrape.config=/etc/vmagent/config.yaml
- -remoteWrite.url=http://vminsert:8480/insert/0/prometheus/api/v1/write
- -remoteWrite.url=http://vmsingle:8429/api/v1/write
vmgateway-cluster:
image: victoriametrics/vmgateway:v1.89.1-enterprise
ports:
- 8431:8431
command:
- -eula
- -enable.auth=true
- -clusterMode=true
- -write.url=http://vminsert:8480
- -read.url=http://vmselect:8481
- -httpListenAddr=0.0.0.0:8431
- -auth.oidcDiscoveryEndpoints=http://keycloak:8080/realms/master/.well-known/openid-configuration
vmgateway-single:
image: victoriametrics/vmgateway:v1.89.1-enterprise
ports:
- 8432:8431
command:
- -eula
- -enable.auth=true
- -write.url=http://vmsingle:8429
- -read.url=http://vmsingle:8429
- -httpListenAddr=0.0.0.0:8431
- -auth.oidcDiscoveryEndpoints=http://keycloak:8080/realms/master/.well-known/openid-configuration
volumes:
grafana_data:
```
For testing purposes, vmagent is configured to scrape metrics from the following targets (`scrape.yaml` contents):
```yaml
scrape_configs:
- job_name: stat
metric_relabel_configs:
- if: "{instance =~ 'vmgateway.*'}"
action: replace
target_label: team
replacement: admin
- if: "{instance =~ 'localhost.*'}"
action: replace
target_label: team
replacement: dev
static_configs:
- targets:
- localhost:8429
- vmgateway-single:8431
- vmgateway-cluster:8431
```
Relabeling rules will add the `team` label to the scraped metrics in order to test multi-tenant access.
Metrics from `localhost` will be labeled with `team=dev` and metrics from `vmgateway` will be labeled with `team=admin`.
vmagent will write data into both VictoriaMetrics single-node and cluster (with tenant `0:0`).
Grafana datasources configuration will be the following:
<img src="grafana-vmgateway-openid-configuration/grafana-test-datasources.png" width="800">
Let's log in as the user with the `team=dev` label limitation set via claims.
Using `vmgateway-cluster` results in a `No data` response, as the proxied request goes to tenant `0:1`.
Since vmagent is only configured to write to tenant `0:0`, `No data` is the expected response.
<img src="grafana-vmgateway-openid-configuration/dev-cluster-nodata.png" width="800">
Switching to `vmgateway-single` returns data. Note that it is limited to metrics with the `team=dev` label.
<img src="grafana-vmgateway-openid-configuration/dev-single-data.png" width="800">
Now let's log in as the user with `team=admin`.
Both cluster and single node datasources now return metrics for `team=admin`.
<img src="grafana-vmgateway-openid-configuration/admin-cluster-data.png" width="800">
<img src="grafana-vmgateway-openid-configuration/admin-single-data.png" width="800">

Binary file not shown.

After

Width:  |  Height:  |  Size: 128 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 128 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 48 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 112 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 46 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 35 KiB

After

Width:  |  Height:  |  Size: 33 KiB

View file

@ -17,7 +17,7 @@ This guide explains the different ways in which you can use vmalert in conjuncti
* [vmalert](https://docs.victoriametrics.com/vmalert.html) is installed. You can obtain it by building it from [source](https://docs.victoriametrics.com/vmalert.html#quickstart), downloading it from the [GitHub releases page](https://github.com/VictoriaMetrics/VictoriaMetrics/releases), or using the [docker image](https://hub.docker.com/r/victoriametrics/vmalert) for the container ecosystem (such as docker, k8s, etc.).
* [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/) is installed.
* You have a [single or cluster](https://docs.victoriametrics.com/managed-victoriametrics/quickstart.html#creating-instance) deployment in [Managed VictoriaMetrics](https://docs.victoriametrics.com/managed-victoriametrics/overview.html).
* You have a [single or cluster](https://docs.victoriametrics.com/managed-victoriametrics/quickstart.html#creating-deployment) deployment in [Managed VictoriaMetrics](https://docs.victoriametrics.com/managed-victoriametrics/overview.html).
* If you are using helm, add the [VictoriaMetrics helm chart](https://github.com/VictoriaMetrics/helm-charts/tree/master/charts/victoria-metrics-alert#how-to-install) repository to your helm repositories. This step is optional.
* If you are using [vmoperator](https://docs.victoriametrics.com/operator/quick-start.html#quick-start), make sure that it and its CRDs are installed. This step is also optional.
@ -49,7 +49,7 @@ groups:
To use vmalert with Managed VictoriaMetrics, you must create a read/write token, or use an existing one. The token must have write access to ingest recording rules, ALERTS and ALERTS_FOR_STATE metrics, and read access for rules evaluation.
For instructions on how to create tokens, please refer to this section of the [documentation](https://docs.victoriametrics.com/managed-victoriametrics/quickstart.html#access).
For instructions on how to create tokens, please refer to this section of the [documentation](https://docs.victoriametrics.com/managed-victoriametrics/quickstart.html#deployment-access).
#### Single-Node

View file

@ -7,46 +7,67 @@ menu:
parent: "managed"
weight: 2
---
# Quick Start
# Quick Start in Managed VictoriaMetrics
Managed VictoriaMetrics is a database-as-a-service platform, where users can run the VictoriaMetrics
that they know and love on AWS without the need to perform typical DevOps tasks such as proper configuration,
monitoring, logs collection, access protection, software updates, backups, etc.
The document covers the following topics:
1. [How to register](#how-to-register)
2. [How to restore password](#how-to-restore-password)
3. [Creating deployment](#creating-deployment)
4. [Deployment access](#deployment-access)
5. [Modifying deployment](#modifying-deployment)
## How to register
Managed VictoriaMetrics id distributed via [AWS Marketplace](https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc).
Managed VictoriaMetrics is distributed via <a href="https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc" target="_blank">AWS Marketplace</a>.
Please note that initial registration is only possible via the link from <a href="https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc" target="_blank">AWS Marketplace</a>.
To start using the service, one should already have a registered AWS account
and visit [VictoriaMetrics product page](https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc).
and visit <a href="https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc" target="_blank">VictoriaMetrics product page</a>.
On that page, click on `View purchase option` and you will be redirected to the login page or the subscribe page.
### Amazon Web Services
<p>
<img src="quickstart_aws-purchase-click.png" width="800">
</p>
Managed VictoriaMetrics id distributed via <a href="https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc">AWS Marketplace</a>.
To start using the service, one should have already registered AWS account. Then, go to the
Then, go to the
<a href="https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc">VictoriaMetrics product page</a>
and click "Continue to Subscribe" button:
and click the `Continue to Subscribe` button:
<p>
<img src="quickstart_continue-subscribe.png" width="800">
</p>
Then on product page press the "Subscribe" button:
Then, on the product page, press the `Subscribe` button:
<p>
<img src="quickstart_subscribe.png" width="800">
</p>
You'll be taken to <a href="https://dbaas.victoriametrics.com/signIn">Managed VictoriaMetrics auth page</a>:
After that, you will see a success message where you should click the `Set up your account` button:
<p>
<img src="quickstart_signin.png" width="800">
<img src="quickstart_setup-your-account.png" width="800">
</p>
You'll be taken to <a href="https://dbaas.victoriametrics.com//signUp">Managed VictoriaMetrics sign up page</a>:
<p>
<img src="quickstart_signup-page.png" width="800">
</p>
Choose to register manually or via Google Auth.
Please note that initial registration is only possible via the link from <a href="https://aws.amazon.com/marketplace/pp/prodview-4tbfq5icmbmyc" target="_blank">AWS Marketplace</a>.
If manual registration was chosen, a confirmation email will be sent to your email address.
See what to do after registering in Quick Start guide.
<p>
<img src="quickstart_email-confirm.png" width="800">
</p>
After Google Auth, you will be redirected automatically to the main page.
## How to restore password
@ -66,15 +87,9 @@ If you forgot password, it can be restored in the following way:
3. Follow the instruction sent to your email in order to gain access to your VictoriaMetrics cloud account:
```
Victoria Metrics Cloud password restore
Follow https://dbaas.victoriametrics.com/login_by_link/{id} the link in order to restore access to Victoria Metrics Cloud.
Access link expires once you login successfully or after 30min.
You can change your password after login https://dbaas.victoriametrics.com/profile profile
Please, ignore this email if you didn't init this action on Victoria Metrics Cloud.
In case of questions contact our support support@victoriametrics.com
```
<p>
<img src="quickstart_password-restore-email.png" width="800">
</p>
4. Navigate to the Profile page by clicking the corresponding link at the top right corner:
@ -89,41 +104,48 @@ In case of questions contact our support support@victoriametrics.com
</p>
## Creating instance
## Creating deployment
Instances is a page where user can list and manage VictoriaMetrics single-node instances.
To create an instance click on the button `Create`:
Deployments is a page where users can list and manage VictoriaMetrics deployments.
To create a deployment, click the `Create Deployment` button or the link in the message:
<p>
<img src="quickstart_instances.png" width="800">
<img src="quickstart_deployments.png" width="800">
</p>
In the opened form, choose parameters of the new instance such as:
In the opened form, choose the parameters of the new deployment, such as:
* `Instance type` from preset of AWS instances (you always can change the type later);
* `Region` and `Zone` where instance should run;
* Desired `disk size` for storing metrics (you always can expand disk size later);
* `Deployment type` from a preset of single or cluster deployments;
* `Region` where the deployment should run;
* Desired `storage capacity` for storing metrics (you can always expand the disk size later);
* `Retention` period for stored metrics;
* `Size` of your deployment.
<p>
<img src="quickstart_instance-create.png" width="800">
<img src="quickstart_deployment-create.png" width="800">
</p>
Once created, instance will remain for a short period of time in `PROVISIONING` status
When all parameters are entered, click the `Create` button, and the deployment will be created.
Once created, the deployment will remain in the `PROVISIONING` status for a short period of time
while the hardware spins up; just wait for a couple of minutes and reload the page.
You'll also be notified via email once provisioning is finished:
<p>
<img src="quickstart_instance-provisioning.png" width="800">
<img src="quickstart_deployment-created.png" width="800">
</p>
## Access
<p>
<img src="quickstart_deployments-running.png" width="800">
</p>
## Deployment access
After transition from `PROVISIONING` to `RUNNING` state, VictoriaMetrics is fully operational
and ready to accept write or read requests. But first, click on instance name to get the access token:
and ready to accept write or read requests. But first, click on the deployment name to get the access token:
<p>
<img src="quickstart_tokens.png" width="800">
<img src="quickstart_deployment-access-token.png" width="800">
</p>
Access tokens are used in token-based authentication to allow an application to access the VictoriaMetrics API.
@ -131,13 +153,36 @@ Supported token types are `Read-Only`, `Write-Only` and `Read-Write`. Click on t
to see usage examples:
<p>
<img src="quickstart_token-usage.png" width="800">
<img src="quickstart_read-token.png" width="800">
</p>
<p>
<img src="quickstart_write-token.png" width="800">
</p>
Follow the usage examples in order to configure access to VictoriaMetrics for your Prometheus,
Grafana or any other software.
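As an illustration, a read query with an access token might look like the following sketch; the endpoint and token are placeholders, and the exact URL for your deployment is shown in the usage examples above:

```
curl -H 'Authorization: Bearer <READ_TOKEN>' \
  'https://<your-deployment-endpoint>/api/v1/query?query=up'
```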
## Modifying
## Modifying deployment
Remember, you always can add, remove or modify existing instances by changing their type or increasing the disk space.
However, such an update requires an instance restart and may result into a couple of minutes of downtime.
Remember, you can always add, remove or modify an existing deployment by changing its size or any parameters on the
update form.
<p>
<img src="quickstart_update-deployment.png" width="800">
</p>
There are additional options for customising your deployment setup.
To discover them, click on the `Customise` button:
<p>
<img src="quickstart_customise-deployment.png" width="800">
</p>
In that section, additional params can be set:
* `Deduplication` defines the interval for which the deployment leaves a single raw sample with the biggest timestamp per each discrete interval;
* `Maintenance Window` defines when the deployment should start the upgrade process if needed;
* `Settings` allows defining different flags for the deployment.
However, such an update requires a deployment restart and may result in a couple of minutes of downtime.

Binary file not shown.

After

Width:  |  Height:  |  Size: 192 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 158 KiB

After

Width:  |  Height:  |  Size: 159 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 810 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.1 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 496 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 729 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 558 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 542 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 478 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 296 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 156 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 88 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 568 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.2 MiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 171 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.2 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 228 KiB

After

Width:  |  Height:  |  Size: 1.1 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 426 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 256 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 533 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.4 MiB

View file

@ -65,6 +65,8 @@ This Document documents the types introduced by the VictoriaMetrics to be consum
* [HTTPAuth](#httpauth)
* [ServiceSpec](#servicespec)
* [StorageSpec](#storagespec)
* [StreamAggrConfig](#streamaggrconfig)
* [StreamAggrRule](#streamaggrrule)
* [VMAlert](#vmalert)
* [VMAlertDatasourceSpec](#vmalertdatasourcespec)
* [VMAlertList](#vmalertlist)
@ -709,6 +711,7 @@ VMAgentRemoteWriteSpec defines the remote storage configuration for VmAgent
| tlsConfig | TLSConfig describes tls configuration for remote write target | *[TLSConfig](#tlsconfig) | false |
| sendTimeout | Timeout for sending a single block of data to -remoteWrite.url (default 1m0s) | *string | false |
| headers | Headers allow configuring custom http headers Must be in form of semicolon separated header with value e.g. headerName: headerValue vmagent supports since 1.79.0 version | []string | false |
| streamAggrConfig | StreamAggrConfig defines stream aggregation configuration for VMAgent for -remoteWrite.url | *[StreamAggrConfig](#streamaggrconfig) | false |
[Back to TOC](#table-of-contents)
@ -744,6 +747,7 @@ VMAgentSpec defines the desired state of VMAgent
| dnsPolicy | DNSPolicy set DNS policy for the pod | [v1.DNSPolicy](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#pod-v1-core) | false |
| topologySpreadConstraints | TopologySpreadConstraints embedded kubernetes pod configuration option, controls how pods are spread across your cluster among failure-domains such as regions, zones, nodes, and other user-defined topology domains https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ | [][v1.TopologySpreadConstraint](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) | false |
| scrapeInterval | ScrapeInterval defines how often scrape targets by default | string | false |
| scrapeTimeout | ScrapeTimeout defines global timeout for targets scrape | string | false |
| aPIServerConfig | APIServerConfig allows specifying a host and auth methods to access apiserver. If left empty, VMAgent is assumed to run inside of the cluster and will discover API servers automatically and use the pod&#39;s CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/. | *[APIServerConfig](#apiserverconfig) | false |
| overrideHonorLabels | OverrideHonorLabels if set to true overrides all user configured honor_labels. If HonorLabels is set in ServiceScrape or PodScrape to true, this overrides honor_labels to false. | bool | false |
| overrideHonorTimestamps | OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs. | bool | false |
@ -958,6 +962,34 @@ StorageSpec defines the configured storage for a group Prometheus servers. If ne
[Back to TOC](#table-of-contents)
## StreamAggrConfig
StreamAggrConfig defines the stream aggregation config
| Field | Description | Scheme | Required |
| ----- | ----------- | ------ | -------- |
| rules | Stream aggregation rules | [][StreamAggrRule](#streamaggrrule) | true |
| keepInput | Allows writing both raw and aggregate data | bool | false |
| dedupInterval | Allows setting different de-duplication intervals per each configured remote storage | string | false |
[Back to TOC](#table-of-contents)
## StreamAggrRule
StreamAggrRule defines the rule in stream aggregation config
| Field | Description | Scheme | Required |
| ----- | ----------- | ------ | -------- |
| match | Match is a label selector for filtering time series for the given selector.\n\nIf the match isn&#39;t set, then all the input time series are processed. | string | false |
| interval | Interval is the interval between aggregations. | string | true |
| outputs | Outputs is a list of output aggregate functions to produce.\n\nThe following names are allowed:\n\n- total - aggregates input counters - increase - counts the increase over input counters - count_series - counts the input series - count_samples - counts the input samples - sum_samples - sums the input samples - last - the last biggest sample value - min - the minimum sample value - max - the maximum sample value - avg - the average value across all the samples - stddev - standard deviation across all the samples - stdvar - standard variance across all the samples - histogram_bucket - creates VictoriaMetrics histogram for input samples - quantiles(phi1, ..., phiN) - quantiles&#39; estimation for phi in the range [0..1]\n\nThe output time series will have the following names:\n\n input_name:aggr_&lt;interval&gt;_&lt;output&gt; | []string | true |
| by | By is an optional list of labels for grouping input series.\n\nSee also Without.\n\nIf neither By nor Without are set, then the Outputs are calculated individually per each input time series. | []string | false |
| without | Without is an optional list of labels, which must be excluded when grouping input series.\n\nSee also By.\n\nIf neither By nor Without are set, then the Outputs are calculated individually per each input time series. | []string | false |
| input_relabel_configs | InputRelabelConfigs is an optional relabeling rules, which are applied on the input before aggregation. | [][RelabelConfig](#relabelconfig) | false |
| output_relabel_configs | OutputRelabelConfigs is an optional relabeling rules, which are applied on the aggregated output before being sent to remote storage. | [][RelabelConfig](#relabelconfig) | false |
[Back to TOC](#table-of-contents)
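As an illustration only (this block is not part of the generated reference), a `streamAggrConfig` under a VMAgent `remoteWrite` entry might look like the sketch below; the URL, selector and rule values are assumptions:

```yaml
remoteWrite:
  - url: http://vminsert:8480/insert/0/prometheus/api/v1/write
    streamAggrConfig:
      keepInput: false        # write only the aggregated series, drop raw input
      dedupInterval: 30s      # de-duplication interval for this remote storage
      rules:
        - match: '{__name__="http_requests_total"}'  # filter input series
          interval: 1m                               # aggregation interval
          outputs: ["total"]                         # aggregate input counters
          by: ["instance"]                           # group input series by instance
```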
## VMAlert
VMAlert executes a list of given alerting or recording rules against configured address.
@ -1198,6 +1230,7 @@ VMSingleSpec defines the desired state of VMSingle
| nodeSelector | NodeSelector Define which Nodes the Pods are scheduled on. | map[string]string | false |
| terminationGracePeriodSeconds | TerminationGracePeriodSeconds period for container graceful termination | *int64 | false |
| readinessGates | ReadinessGates defines pod readiness gates | []v1.PodReadinessGate | false |
| streamAggrConfig | StreamAggrConfig defines stream aggregation configuration for VMSingle | *[StreamAggrConfig](#streamaggrconfig) | false |
[Back to TOC](#table-of-contents)

View file

@ -9,17 +9,18 @@ menu:
aliases:
- /operator/vars.html
---
# Auto Generated vars for package config
updated at Fri Jan 21 15:57:41 UTC 2022
# Auto Generated vars for package config
updated at Wed Mar 15 16:18:54 UTC 2023
| variable name | variable default value | variable required | variable description |
| --- | --- | --- | --- |
| VM_USECUSTOMCONFIGRELOADER | false | false | enables custom config reloader for vmauth and vmagent,it should speed-up config reloading process. |
| VM_CUSTOMCONFIGRELOADERIMAGE | victoriametrics/operator:config-reloader-0.1.0 | false | - |
| VM_CONTAINERREGISTRY | - | false | container registry name prefix, e.g. docker.io |
| VM_CUSTOMCONFIGRELOADERIMAGE | victoriametrics/operator:config-reloader-v0.27.0 | false | - |
| VM_PSPAUTOCREATEENABLED | true | false | - |
| VM_VMALERTDEFAULT_IMAGE | victoriametrics/vmalert | false | - |
| VM_VMALERTDEFAULT_VERSION | v1.72.0 | false | - |
| VM_VMALERTDEFAULT_VERSION | v1.85.3 | false | - |
| VM_VMALERTDEFAULT_PORT | 8080 | false | - |
| VM_VMALERTDEFAULT_USEDEFAULTRESOURCES | true | false | - |
| VM_VMALERTDEFAULT_RESOURCE_LIMIT_MEM | 500Mi | false | - |
@ -30,8 +31,8 @@ aliases:
| VM_VMALERTDEFAULT_CONFIGRELOADERMEMORY | 25Mi | false | - |
| VM_VMALERTDEFAULT_CONFIGRELOADIMAGE | jimmidyson/configmap-reload:v0.3.0 | false | - |
| VM_VMAGENTDEFAULT_IMAGE | victoriametrics/vmagent | false | - |
| VM_VMAGENTDEFAULT_VERSION | v1.72.0 | false | - |
| VM_VMAGENTDEFAULT_CONFIGRELOADIMAGE | quay.io/prometheus-operator/prometheus-config-reloader:v0.48.1 | false | - |
| VM_VMAGENTDEFAULT_VERSION | v1.85.3 | false | - |
| VM_VMAGENTDEFAULT_CONFIGRELOADIMAGE | quay.io/prometheus-operator/prometheus-config-reloader:v0.58.0 | false | - |
| VM_VMAGENTDEFAULT_PORT | 8429 | false | - |
| VM_VMAGENTDEFAULT_USEDEFAULTRESOURCES | true | false | - |
| VM_VMAGENTDEFAULT_RESOURCE_LIMIT_MEM | 500Mi | false | - |
@ -41,7 +42,7 @@ aliases:
| VM_VMAGENTDEFAULT_CONFIGRELOADERCPU | 100m | false | - |
| VM_VMAGENTDEFAULT_CONFIGRELOADERMEMORY | 25Mi | false | - |
| VM_VMSINGLEDEFAULT_IMAGE | victoriametrics/victoria-metrics | false | - |
| VM_VMSINGLEDEFAULT_VERSION | v1.72.0 | false | - |
| VM_VMSINGLEDEFAULT_VERSION | v1.85.3 | false | - |
| VM_VMSINGLEDEFAULT_PORT | 8429 | false | - |
| VM_VMSINGLEDEFAULT_USEDEFAULTRESOURCES | true | false | - |
| VM_VMSINGLEDEFAULT_RESOURCE_LIMIT_MEM | 1500Mi | false | - |
@ -52,14 +53,14 @@ aliases:
| VM_VMSINGLEDEFAULT_CONFIGRELOADERMEMORY | 25Mi | false | - |
| VM_VMCLUSTERDEFAULT_USEDEFAULTRESOURCES | true | false | - |
| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_IMAGE | victoriametrics/vmselect | false | - |
| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_VERSION | v1.72.0-cluster | false | - |
| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_VERSION | v1.85.3-cluster | false | - |
| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_PORT | 8481 | false | - |
| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_LIMIT_MEM | 1000Mi | false | - |
| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_LIMIT_CPU | 500m | false | - |
| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_REQUEST_MEM | 500Mi | false | - |
| VM_VMCLUSTERDEFAULT_VMSELECTDEFAULT_RESOURCE_REQUEST_CPU | 100m | false | - |
| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_IMAGE | victoriametrics/vmstorage | false | - |
| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_VERSION | v1.72.0-cluster | false | - |
| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_VERSION | v1.85.3-cluster | false | - |
| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_VMINSERTPORT | 8400 | false | - |
| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_VMSELECTPORT | 8401 | false | - |
| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_PORT | 8482 | false | - |
@ -68,7 +69,7 @@ aliases:
| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_RESOURCE_REQUEST_MEM | 500Mi | false | - |
| VM_VMCLUSTERDEFAULT_VMSTORAGEDEFAULT_RESOURCE_REQUEST_CPU | 250m | false | - |
| VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_IMAGE | victoriametrics/vminsert | false | - |
| VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_VERSION | v1.72.0-cluster | false | - |
| VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_VERSION | v1.85.3-cluster | false | - |
| VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_PORT | 8480 | false | - |
| VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_RESOURCE_LIMIT_MEM | 500Mi | false | - |
| VM_VMCLUSTERDEFAULT_VMINSERTDEFAULT_RESOURCE_LIMIT_CPU | 500m | false | - |
@ -78,7 +79,7 @@ aliases:
| VM_VMALERTMANAGER_CONFIGRELOADERCPU | 100m | false | - |
| VM_VMALERTMANAGER_CONFIGRELOADERMEMORY | 25Mi | false | - |
| VM_VMALERTMANAGER_ALERTMANAGERDEFAULTBASEIMAGE | prom/alertmanager | false | - |
| VM_VMALERTMANAGER_ALERTMANAGERVERSION | v0.22.2 | false | - |
| VM_VMALERTMANAGER_ALERTMANAGERVERSION | v0.24.0 | false | - |
| VM_VMALERTMANAGER_LOCALHOST | 127.0.0.1 | false | - |
| VM_VMALERTMANAGER_USEDEFAULTRESOURCES | true | false | - |
| VM_VMALERTMANAGER_RESOURCE_LIMIT_MEM | 256Mi | false | - |
@ -87,7 +88,7 @@ aliases:
| VM_VMALERTMANAGER_RESOURCE_REQUEST_CPU | 30m | false | - |
| VM_DISABLESELFSERVICESCRAPECREATION | false | false | - |
| VM_VMBACKUP_IMAGE | victoriametrics/vmbackupmanager | false | - |
| VM_VMBACKUP_VERSION | v1.72.0-enterprise | false | - |
| VM_VMBACKUP_VERSION | v1.85.3-enterprise | false | - |
| VM_VMBACKUP_PORT | 8300 | false | - |
| VM_VMBACKUP_USEDEFAULTRESOURCES | true | false | - |
| VM_VMBACKUP_RESOURCE_LIMIT_MEM | 500Mi | false | - |
@ -96,7 +97,7 @@ aliases:
| VM_VMBACKUP_RESOURCE_REQUEST_CPU | 150m | false | - |
| VM_VMBACKUP_LOGLEVEL | INFO | false | - |
| VM_VMAUTHDEFAULT_IMAGE | victoriametrics/vmauth | false | - |
| VM_VMAUTHDEFAULT_VERSION | v1.72.0 | false | - |
| VM_VMAUTHDEFAULT_VERSION | v1.85.3 | false | - |
| VM_VMAUTHDEFAULT_CONFIGRELOADIMAGE | quay.io/prometheus-operator/prometheus-config-reloader:v0.48.1 | false | - |
| VM_VMAUTHDEFAULT_PORT | 8427 | false | - |
| VM_VMAUTHDEFAULT_USEDEFAULTRESOURCES | true | false | - |
@ -110,7 +111,13 @@ aliases:
| VM_ENABLEDPROMETHEUSCONVERTER_SERVICESCRAPE | true | false | - |
| VM_ENABLEDPROMETHEUSCONVERTER_PROMETHEUSRULE | true | false | - |
| VM_ENABLEDPROMETHEUSCONVERTER_PROBE | true | false | - |
| VM_ENABLEDPROMETHEUSCONVERTER_ALERTMANAGERCONFIG | true | false | - |
| VM_FILTERCHILDLABELPREFIXES | - | false | - |
| VM_FILTERCHILDANNOTATIONPREFIXES | - | false | - |
| VM_PROMETHEUSCONVERTERADDARGOCDIGNOREANNOTATIONS | false | false | adds compare-options and sync-options for prometheus objects converted by the operator; it helps to properly use the converter with ArgoCD |
| VM_ENABLEDPROMETHEUSCONVERTEROWNERREFERENCES | false | false | - |
| VM_FILTERPROMETHEUSCONVERTERLABELPREFIXES | - | false | allows filtering of converted labels; labels with a matched prefix will be ignored |
| VM_FILTERPROMETHEUSCONVERTERANNOTATIONPREFIXES | - | false | allows filtering of converted annotations; annotations with a matched prefix will be ignored |
| VM_HOST | 0.0.0.0 | false | - |
| VM_LISTENADDRESS | 0.0.0.0 | false | - |
| VM_DEFAULTLABELS | managed-by=vm-operator | false | - |
@ -119,3 +126,4 @@ aliases:
| VM_PODWAITREADYTIMEOUT | 80s | false | - |
| VM_PODWAITREADYINTERVALCHECK | 5s | false | - |
| VM_PODWAITREADYINITDELAY | 10s | false | - |
| VM_FORCERESYNCINTERVAL | 60s | false | configures the force resync interval for VMAgent, VMAlert and VMAlertmanager |

go.mod
View file

@ -3,7 +3,7 @@ module github.com/VictoriaMetrics/VictoriaMetrics
go 1.19
require (
cloud.google.com/go/storage v1.29.0
cloud.google.com/go/storage v1.30.0
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0
github.com/VictoriaMetrics/fastcache v1.12.1
@ -24,7 +24,7 @@ require (
github.com/go-kit/kit v0.12.0
github.com/gogo/protobuf v1.3.2
github.com/golang/snappy v0.0.4
github.com/googleapis/gax-go/v2 v2.7.1
github.com/googleapis/gax-go/v2 v2.8.0
github.com/influxdata/influxdb v1.11.0
github.com/klauspost/compress v1.16.3
github.com/mattn/go-colorable v0.1.13 // indirect
@ -42,7 +42,7 @@ require (
golang.org/x/net v0.8.0
golang.org/x/oauth2 v0.6.0
golang.org/x/sys v0.6.0
google.golang.org/api v0.112.0
google.golang.org/api v0.113.0
gopkg.in/yaml.v2 v2.4.0
)
@ -50,11 +50,11 @@ require (
cloud.google.com/go v0.110.0 // indirect
cloud.google.com/go/compute v1.18.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
cloud.google.com/go/iam v0.12.0 // indirect
cloud.google.com/go/iam v0.13.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
github.com/aws/aws-sdk-go v1.44.221 // indirect
github.com/aws/aws-sdk-go v1.44.222 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.13.17 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.0 // indirect
@ -107,7 +107,7 @@ require (
go.opentelemetry.io/otel/trace v1.14.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/goleak v1.2.1 // indirect
golang.org/x/exp v0.0.0-20230314191032-db074128a8ec // indirect
golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/time v0.3.0 // indirect

go.sum
View file

@ -27,8 +27,8 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE=
cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY=
cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k=
cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
@ -39,8 +39,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.29.0 h1:6weCgzRvMg7lzuUurI4697AqIRPU1SvzHhynwpW31jI=
cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
cloud.google.com/go/storage v1.30.0 h1:g1yrbxAWOrvg/594228pETWkOi00MLTrOWfh56veU5o=
cloud.google.com/go/storage v1.30.0/go.mod h1:xAVretHSROm1BQX4IIsoVgJqw0LqOyX+I/O2GzRAzdE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY=
@ -86,8 +86,8 @@ github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu
github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/armon/go-metrics v0.3.10 h1:FR+drcQStOe+32sYyJYyZ7FIdgoGGBnwLl+flodp8Uo=
github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.44.221 h1:yndn4uvLolKXPoXIwKHhO5XtwlTnJfXLBKXs84C5+hQ=
github.com/aws/aws-sdk-go v1.44.221/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go v1.44.222 h1:hagcC+MrGo60DKEbX0g6/ge4pIj7vBbsIb+vrhA/54I=
github.com/aws/aws-sdk-go v1.44.222/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/aws/aws-sdk-go-v2 v1.17.6 h1:Y773UK7OBqhzi5VDXMi1zVGsoj+CVHs2eaC2bDsLwi0=
github.com/aws/aws-sdk-go-v2 v1.17.6/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.10 h1:dK82zF6kkPeCo8J1e+tGx4JdvDIQzj7ygIoLg8WMuGs=
@ -270,8 +270,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9
github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A=
github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
github.com/googleapis/gax-go/v2 v2.8.0 h1:UBtEZqx1bjXtOQ5BVTkuYghXrr3N4V123VKJK67vJZc=
github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
github.com/gophercloud/gophercloud v1.1.1 h1:MuGyqbSxiuVBqkPZ3+Nhbytk1xZxhmfCB2Rg1cJWFWM=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd h1:PpuIBO5P3e9hpqBD0O/HjhShYuM6XE0i/lbE6J94kww=
@ -481,8 +481,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230314191032-db074128a8ec h1:pAv+d8BM2JNnNctsLJ6nnZ6NqXT8N4+eauvZSb3P0I0=
golang.org/x/exp v0.0.0-20230314191032-db074128a8ec/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0 h1:pVgRXcIictcr+lBQIFeiwuwtDIs4eL21OuM9nyAADmo=
golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -693,8 +693,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.112.0 h1:iDmzvZ4C086R3+en4nSyIf07HlQKMOX1Xx2dmia/+KQ=
google.golang.org/api v0.112.0/go.mod h1:737UfWHNsOq4F3REUTmb+GN9pugkgNLCayLTfoIKpPc=
google.golang.org/api v0.113.0 h1:3zLZyS9hgne8yoXUFy871yWdQcA2tA6wp59aaCT6Cp4=
google.golang.org/api v0.113.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=

View file

@ -247,21 +247,16 @@ var atomicDirRemoveCounter = uint64(time.Now().UnixNano())
//
// Such directories may be left on unclean shutdown during MustRemoveDirAtomic call.
func MustRemoveTemporaryDirs(dir string) {
d, err := os.Open(dir)
des, err := os.ReadDir(dir)
if err != nil {
logger.Panicf("FATAL: cannot open dir: %s", err)
logger.Panicf("FATAL: cannot read dir: %s", err)
}
defer MustClose(d)
fis, err := d.Readdir(-1)
if err != nil {
logger.Panicf("FATAL: cannot read dir %q: %s", dir, err)
}
for _, fi := range fis {
if !IsDirOrSymlink(fi) {
for _, de := range des {
if !IsDirOrSymlink(de) {
// Skip non-directories
continue
}
dirName := fi.Name()
dirName := de.Name()
if IsScheduledForRemoval(dirName) {
fullPath := dir + "/" + dirName
MustRemoveAll(fullPath)
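
For clarity, a minimal runnable sketch of the `os.ReadDir` pattern this hunk adopts; `isScheduledForRemoval` below is a hypothetical stand-in for the package's own `IsScheduledForRemoval` check:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// isScheduledForRemoval is a hypothetical stand-in for the package's
// IsScheduledForRemoval check on directory names.
func isScheduledForRemoval(name string) bool {
	return strings.Contains(name, ".must-remove.")
}

func main() {
	// os.ReadDir replaces the old Open/Readdir/Close sequence and
	// returns the directory entries sorted by name.
	des, err := os.ReadDir(".")
	if err != nil {
		fmt.Fprintf(os.Stderr, "cannot read dir: %s\n", err)
		os.Exit(1)
	}
	for _, de := range des {
		if !de.IsDir() {
			// The real code also accepts symlinks via IsDirOrSymlink;
			// plain directories are enough for this sketch.
			continue
		}
		if isScheduledForRemoval(de.Name()) {
			fmt.Println("would remove:", de.Name())
		}
	}
}
```

`os.ReadDir` closes the directory itself, which removes the `Open`/`Readdir`/`defer Close` boilerplate the old code needed.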
@ -276,26 +271,16 @@ func HardLinkFiles(srcDir, dstDir string) error {
return fmt.Errorf("cannot create dstDir=%q: %w", dstDir, err)
}
d, err := os.Open(srcDir)
des, err := os.ReadDir(srcDir)
if err != nil {
return fmt.Errorf("cannot open srcDir: %w", err)
return fmt.Errorf("cannot read files in scrDir: %w", err)
}
defer func() {
if err := d.Close(); err != nil {
logger.Panicf("FATAL: cannot close %q: %s", srcDir, err)
}
}()
fis, err := d.Readdir(-1)
if err != nil {
return fmt.Errorf("cannot read files in scrDir=%q: %w", srcDir, err)
}
for _, fi := range fis {
if IsDirOrSymlink(fi) {
for _, de := range des {
if IsDirOrSymlink(de) {
// Skip directories.
continue
}
fn := fi.Name()
fn := de.Name()
srcPath := srcDir + "/" + fn
dstPath := dstDir + "/" + fn
if err := os.Link(srcPath, dstPath); err != nil {
@ -307,9 +292,9 @@ func HardLinkFiles(srcDir, dstDir string) error {
return nil
}
// IsDirOrSymlink returns true if fi is directory or symlink.
func IsDirOrSymlink(fi os.FileInfo) bool {
return fi.IsDir() || (fi.Mode()&os.ModeSymlink == os.ModeSymlink)
// IsDirOrSymlink returns true if de is directory or symlink.
func IsDirOrSymlink(de os.DirEntry) bool {
return de.IsDir() || (de.Type()&os.ModeSymlink == os.ModeSymlink)
}
// SymlinkRelative creates relative symlink for srcPath in dstPath.
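
The signature change from `os.FileInfo` to `os.DirEntry` follows from the switch to `os.ReadDir`; a minimal sketch of the updated check, assuming nothing beyond the standard library:

```go
package main

import (
	"fmt"
	"os"
)

// isDirOrSymlink mirrors the updated helper: os.DirEntry.Type() returns
// the type bits (including os.ModeSymlink) straight from the directory
// listing, so no extra Stat or Info call is needed.
func isDirOrSymlink(de os.DirEntry) bool {
	return de.IsDir() || de.Type()&os.ModeSymlink == os.ModeSymlink
}

func main() {
	des, err := os.ReadDir(".")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, de := range des {
		fmt.Printf("%-30s dirOrSymlink=%v\n", de.Name(), isDirOrSymlink(de))
	}
}
```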

View file

@ -143,8 +143,8 @@ func (bsr *blockStreamReader) InitFromFilePart(path string) error {
path = filepath.Clean(path)
if err := bsr.ph.ParseFromPath(path); err != nil {
return fmt.Errorf("cannot parse partHeader data from %q: %w", path, err)
if err := bsr.ph.ReadMetadata(path); err != nil {
return fmt.Errorf("cannot read metadata from %q: %w", path, err)
}
metaindexPath := path + "/metaindex.bin"

View file

@ -2,7 +2,6 @@ package mergeset
import (
"fmt"
"path/filepath"
"sync"
"unsafe"
@ -68,11 +67,9 @@ type part struct {
}
func openFilePart(path string) (*part, error) {
path = filepath.Clean(path)
var ph partHeader
if err := ph.ParseFromPath(path); err != nil {
return nil, fmt.Errorf("cannot parse path to part: %w", err)
if err := ph.ReadMetadata(path); err != nil {
return nil, fmt.Errorf("cannot read part metadata: %w", err)
}
metaindexPath := path + "/metaindex.bin"

View file

@ -5,11 +5,9 @@ import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
type partHeader struct {
@ -79,50 +77,10 @@ func (ph *partHeader) CopyFrom(src *partHeader) {
ph.lastItem = append(ph.lastItem[:0], src.lastItem...)
}
func (ph *partHeader) ParseFromPath(partPath string) error {
func (ph *partHeader) ReadMetadata(partPath string) error {
ph.Reset()
partPath = filepath.Clean(partPath)
// Extract encoded part name.
n := strings.LastIndexByte(partPath, '/')
if n < 0 {
return fmt.Errorf("cannot find encoded part name in the path %q", partPath)
}
partName := partPath[n+1:]
// PartName must have the following form:
// itemsCount_blocksCount_Garbage
a := strings.Split(partName, "_")
if len(a) != 3 {
return fmt.Errorf("unexpected number of substrings in the part name %q: got %d; want %d", partName, len(a), 3)
}
// Read itemsCount from partName.
itemsCount, err := strconv.ParseUint(a[0], 10, 64)
if err != nil {
return fmt.Errorf("cannot parse itemsCount from partName %q: %w", partName, err)
}
ph.itemsCount = itemsCount
if ph.itemsCount <= 0 {
return fmt.Errorf("part %q cannot contain zero items", partPath)
}
// Read blocksCount from partName.
blocksCount, err := strconv.ParseUint(a[1], 10, 64)
if err != nil {
return fmt.Errorf("cannot parse blocksCount from partName %q: %w", partName, err)
}
ph.blocksCount = blocksCount
if ph.blocksCount <= 0 {
return fmt.Errorf("part %q cannot contain zero blocks", partPath)
}
if ph.blocksCount > ph.itemsCount {
return fmt.Errorf("the number of blocks cannot exceed the number of items in the part %q; got blocksCount=%d, itemsCount=%d",
partPath, ph.blocksCount, ph.itemsCount)
}
// Read other ph fields from metadata.
// Read ph fields from metadata.
metadataPath := partPath + "/metadata.json"
metadata, err := os.ReadFile(metadataPath)
if err != nil {
@ -133,12 +91,20 @@ func (ph *partHeader) ParseFromPath(partPath string) error {
if err := json.Unmarshal(metadata, &phj); err != nil {
return fmt.Errorf("cannot parse %q: %w", metadataPath, err)
}
if ph.itemsCount != phj.ItemsCount {
return fmt.Errorf("invalid ItemsCount in %q; got %d; want %d", metadataPath, phj.ItemsCount, ph.itemsCount)
if phj.ItemsCount <= 0 {
return fmt.Errorf("part %q cannot contain zero items", partPath)
}
if ph.blocksCount != phj.BlocksCount {
return fmt.Errorf("invalid BlocksCount in %q; got %d; want %d", metadataPath, phj.BlocksCount, ph.blocksCount)
ph.itemsCount = phj.ItemsCount
if phj.BlocksCount <= 0 {
return fmt.Errorf("part %q cannot contain zero blocks", partPath)
}
if phj.BlocksCount > phj.ItemsCount {
return fmt.Errorf("the number of blocks cannot exceed the number of items in the part %q; got blocksCount=%d, itemsCount=%d",
partPath, phj.BlocksCount, phj.ItemsCount)
}
ph.blocksCount = phj.BlocksCount
ph.firstItem = append(ph.firstItem[:0], phj.FirstItem...)
ph.lastItem = append(ph.lastItem[:0], phj.LastItem...)
@ -146,11 +112,6 @@ func (ph *partHeader) ParseFromPath(partPath string) error {
return nil
}
func (ph *partHeader) Path(tablePath string, suffix uint64) string {
tablePath = filepath.Clean(tablePath)
return fmt.Sprintf("%s/%d_%d_%016X", tablePath, ph.itemsCount, ph.blocksCount, suffix)
}
func (ph *partHeader) WriteMetadata(partPath string) error {
phj := &partHeaderJSON{
ItemsCount: ph.itemsCount,
@ -158,9 +119,9 @@ func (ph *partHeader) WriteMetadata(partPath string) error {
FirstItem: append([]byte{}, ph.firstItem...),
LastItem: append([]byte{}, ph.lastItem...),
}
metadata, err := json.MarshalIndent(&phj, "", "\t")
metadata, err := json.Marshal(&phj)
if err != nil {
return fmt.Errorf("cannot marshal metadata: %w", err)
logger.Panicf("BUG: cannot marshal partHeader metadata: %s", err)
}
metadataPath := partPath + "/metadata.json"
if err := fs.WriteFileAtomically(metadataPath, metadata, false); err != nil {
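
A self-contained sketch of the metadata round-trip this change settles on: the part counts and boundary items live only in `metadata.json`, and the sanity checks move from path parsing to load time. The `partHeaderJSON` layout below is assumed from the surrounding diff; the real type may carry more fields:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
)

// partHeaderJSON mirrors the JSON layout implied by the diff;
// the exact field set in the real code may differ.
type partHeaderJSON struct {
	ItemsCount  uint64
	BlocksCount uint64
	FirstItem   []byte
	LastItem    []byte
}

func writeMetadata(partPath string, phj *partHeaderJSON) error {
	// Compact JSON, matching the switch from json.MarshalIndent to json.Marshal.
	metadata, err := json.Marshal(phj)
	if err != nil {
		return fmt.Errorf("cannot marshal metadata: %w", err)
	}
	return os.WriteFile(filepath.Join(partPath, "metadata.json"), metadata, 0o644)
}

func readMetadata(partPath string) (*partHeaderJSON, error) {
	metadataPath := filepath.Join(partPath, "metadata.json")
	metadata, err := os.ReadFile(metadataPath)
	if err != nil {
		return nil, err
	}
	var phj partHeaderJSON
	if err := json.Unmarshal(metadata, &phj); err != nil {
		return nil, fmt.Errorf("cannot parse %q: %w", metadataPath, err)
	}
	// The same sanity checks the diff moves from path parsing to metadata.
	if phj.ItemsCount == 0 {
		return nil, fmt.Errorf("part %q cannot contain zero items", partPath)
	}
	if phj.BlocksCount == 0 {
		return nil, fmt.Errorf("part %q cannot contain zero blocks", partPath)
	}
	if phj.BlocksCount > phj.ItemsCount {
		return nil, fmt.Errorf("blocks (%d) cannot exceed items (%d) in part %q",
			phj.BlocksCount, phj.ItemsCount, partPath)
	}
	return &phj, nil
}

func main() {
	dir, err := os.MkdirTemp("", "part")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	in := &partHeaderJSON{ItemsCount: 10, BlocksCount: 2, FirstItem: []byte("a"), LastItem: []byte("z")}
	if err := writeMetadata(dir, in); err != nil {
		panic(err)
	}
	out, err := readMetadata(dir)
	if err != nil {
		panic(err)
	}
	fmt.Printf("items=%d blocks=%d\n", out.ItemsCount, out.BlocksCount)
}
```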

Some files were not shown because too many files have changed in this diff.