Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2025-01-30 15:22:07 +00:00
app/vmctl: enable range steps in reverse order (#5444)
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5376
(cherry picked from commit 6af732b6f7)
This commit is contained in:
parent 14117f2f90
commit 63fc200f16
8 changed files with 326 additions and 28 deletions
@@ -323,6 +323,7 @@ const (
    vmNativeFilterMatch = "vm-native-filter-match"
    vmNativeFilterTimeStart = "vm-native-filter-time-start"
    vmNativeFilterTimeEnd = "vm-native-filter-time-end"
+   vmNativeFilterTimeReverse = "vm-native-filter-time-reverse"
    vmNativeStepInterval = "vm-native-step-interval"

    vmNativeDisableBinaryProtocol = "vm-native-disable-binary-protocol"
@@ -362,10 +363,15 @@ var (
        },
        &cli.StringFlag{
            Name: vmNativeStepInterval,
-           Usage: fmt.Sprintf("Split export data into chunks. Requires setting --%s. Valid values are '%s','%s','%s','%s','%s'.", vmNativeFilterTimeStart,
+           Usage: fmt.Sprintf("Split export data into chunks. By default, the migration will start from the oldest to the newest intervals. See also '--vm-native-filter-time-reverse'. Requires setting --%s. Valid values are '%s','%s','%s','%s','%s'.", vmNativeFilterTimeStart,
                stepper.StepMonth, stepper.StepWeek, stepper.StepDay, stepper.StepHour, stepper.StepMinute),
            Value: stepper.StepMonth,
        },
+       &cli.BoolFlag{
+           Name: vmNativeFilterTimeReverse,
+           Usage: "Whether to reverse order of time intervals split by '--vm-native-step-interval' cmd-line flag. When set, the migration will start from the newest to the oldest intervals.",
+           Value: false,
+       },
        &cli.BoolFlag{
            Name: vmNativeDisableHTTPKeepAlive,
            Usage: "Disable HTTP persistent connections for requests made to VictoriaMetrics components during export",
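For reference, below is a minimal standalone sketch (not part of this commit; the app name and action are illustrative only) of how such a boolean flag is declared and read with github.com/urfave/cli/v2, mirroring the &cli.BoolFlag definition added above and the c.Bool(...) lookup used later in main.go:

package main

import (
    "fmt"
    "log"
    "os"

    "github.com/urfave/cli/v2"
)

func main() {
    app := &cli.App{
        Name: "vmctl-flag-sketch", // illustrative name, not the real vmctl app
        Flags: []cli.Flag{
            &cli.BoolFlag{
                Name:  "vm-native-filter-time-reverse",
                Usage: "Whether to reverse order of time intervals split by '--vm-native-step-interval' cmd-line flag.",
                Value: false,
            },
        },
        Action: func(c *cli.Context) error {
            // vmctl reads the flag the same way: c.Bool(vmNativeFilterTimeReverse).
            fmt.Println("time reverse:", c.Bool("vm-native-filter-time-reverse"))
            return nil
        },
    }
    if err := app.Run(os.Args); err != nil {
        log.Fatal(err)
    }
}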
@@ -469,6 +475,7 @@ const (
    remoteReadConcurrency = "remote-read-concurrency"
    remoteReadFilterTimeStart = "remote-read-filter-time-start"
    remoteReadFilterTimeEnd = "remote-read-filter-time-end"
+   remoteReadFilterTimeReverse = "remote-read-filter-time-reverse"
    remoteReadFilterLabel = "remote-read-filter-label"
    remoteReadFilterLabelValue = "remote-read-filter-label-value"
    remoteReadStepInterval = "remote-read-step-interval"
@@ -521,9 +528,14 @@ var (
        },
        &cli.StringFlag{
            Name: remoteReadStepInterval,
-           Usage: fmt.Sprintf("Split export data into chunks. Requires setting --%s. Valid values are %q,%q,%q,%q.", remoteReadFilterTimeStart, stepper.StepMonth, stepper.StepDay, stepper.StepHour, stepper.StepMinute),
+           Usage: fmt.Sprintf("Split export data into chunks. By default, the migration will start from the oldest to the newest intervals. See also '--remote-read-filter-time-reverse'. Requires setting --%s. Valid values are %q,%q,%q,%q.", remoteReadFilterTimeStart, stepper.StepMonth, stepper.StepDay, stepper.StepHour, stepper.StepMinute),
            Required: true,
        },
+       &cli.BoolFlag{
+           Name: remoteReadFilterTimeReverse,
+           Usage: "Whether to reverse order of time intervals split by '--remote-read-step-interval' cmd-line flag. When set, the migration will start from the newest to the oldest intervals.",
+           Value: false,
+       },
        &cli.StringFlag{
            Name: remoteReadSrcAddr,
            Usage: "Remote read address to perform read from.",
@@ -12,11 +12,12 @@ import (
    "syscall"
    "time"

+   "github.com/urfave/cli/v2"
+
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/auth"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
-   "github.com/urfave/cli/v2"

    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/influx"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/opentsdb"
@@ -153,6 +154,7 @@ func main() {
                    timeStart: c.Timestamp(remoteReadFilterTimeStart),
                    timeEnd: c.Timestamp(remoteReadFilterTimeEnd),
                    chunk: c.String(remoteReadStepInterval),
+                   timeReverse: c.Bool(remoteReadFilterTimeReverse),
                },
                cc: c.Int(remoteReadConcurrency),
                isSilent: c.Bool(globalSilent),
@@ -238,6 +240,7 @@ func main() {
                    TimeStart: c.String(vmNativeFilterTimeStart),
                    TimeEnd: c.String(vmNativeFilterTimeEnd),
                    Chunk: c.String(vmNativeStepInterval),
+                   TimeReverse: c.Bool(vmNativeFilterTimeReverse),
                },
                src: &native.Client{
                    AuthCfg: srcAuthConfig,
@@ -8,6 +8,7 @@ type Filter struct {
    TimeStart string
    TimeEnd string
    Chunk string
+   TimeReverse bool
}

func (f Filter) String() string {
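For illustration, a small sketch (not part of this commit; the field values are made up, and it assumes this Filter struct lives in the app/vmctl/native package imported by main.go) showing the extended filter being constructed with the new TimeReverse field:

package main

import (
    "fmt"

    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
)

func main() {
    // Illustrative values only; TimeReverse is the field added by this commit.
    f := native.Filter{
        TimeStart:   "2022-01-03T11:11:11Z",
        TimeEnd:     "2022-03-03T12:12:12Z",
        Chunk:       stepper.StepMonth,
        TimeReverse: true,
    }
    fmt.Println(f.String())
}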
@@ -7,11 +7,12 @@ import (
    "sync"
    "time"

+   "github.com/cheggaaa/pb/v3"
+
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
-   "github.com/cheggaaa/pb/v3"
)

type remoteReadProcessor struct {
@@ -29,6 +30,7 @@ type remoteReadFilter struct {
    timeStart *time.Time
    timeEnd *time.Time
    chunk string
+   timeReverse bool
}

func (rrp *remoteReadProcessor) run(ctx context.Context) error {
@@ -41,7 +43,7 @@ func (rrp *remoteReadProcessor) run(ctx context.Context) error {
        rrp.cc = 1
    }

-   ranges, err := stepper.SplitDateRange(*rrp.filter.timeStart, *rrp.filter.timeEnd, rrp.filter.chunk)
+   ranges, err := stepper.SplitDateRange(*rrp.filter.timeStart, *rrp.filter.timeEnd, rrp.filter.chunk, rrp.filter.timeReverse)
    if err != nil {
        return fmt.Errorf("failed to create date ranges for the given time filters: %v", err)
    }
@@ -2,6 +2,7 @@ package stepper

import (
    "fmt"
+   "sort"
    "time"
)

@@ -20,7 +21,7 @@ const (

// SplitDateRange splits start-end range in a subset of ranges respecting the given step
// Ranges with granularity of StepMonth are aligned to 1st of each month in order to improve export efficiency at block transfer level
-func SplitDateRange(start, end time.Time, step string) ([][]time.Time, error) {
+func SplitDateRange(start, end time.Time, step string, timeReverse bool) ([][]time.Time, error) {

    if start.After(end) {
        return nil, fmt.Errorf("start time %q should come before end time %q", start.Format(time.RFC3339), end.Format(time.RFC3339))
@@ -71,6 +72,11 @@ func SplitDateRange(start, end time.Time, step string) ([][]time.Time, error) {
        ranges = append(ranges, []time.Time{s, e})
        currentStep = e
    }
+   if timeReverse {
+       sort.SliceStable(ranges, func(i, j int) bool {
+           return ranges[i][0].After(ranges[j][0])
+       })
+   }

    return ranges, nil
}
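To make the new behavior concrete, here is a small sketch (not part of this commit) that calls the updated SplitDateRange with timeReverse enabled; the timestamps match the month-chunking test case below, so the returned ranges should come back newest-first:

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
)

func main() {
    start := time.Date(2022, 1, 3, 11, 11, 11, 0, time.UTC)
    end := time.Date(2022, 3, 3, 12, 12, 12, 0, time.UTC)

    // timeReverse=true: chunks are iterated from the newest month down to the oldest.
    ranges, err := stepper.SplitDateRange(start, end, stepper.StepMonth, true)
    if err != nil {
        log.Fatalf("failed to split date range: %s", err)
    }
    for _, r := range ranges {
        fmt.Printf("%s .. %s\n", r[0].Format(time.RFC3339Nano), r[1].Format(time.RFC3339Nano))
    }
}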
@@ -252,7 +252,280 @@ func Test_splitDateRange(t *testing.T) {
            start := mustParseDatetime(tt.args.start)
            end := mustParseDatetime(tt.args.end)

-           got, err := SplitDateRange(start, end, tt.args.granularity)
+           got, err := SplitDateRange(start, end, tt.args.granularity, false)
+           if (err != nil) != tt.wantErr {
+               t.Errorf("splitDateRange() error = %v, wantErr %v", err, tt.wantErr)
+               return
+           }
+
+           var testExpectedResults [][]time.Time
+           if tt.want != nil {
+               testExpectedResults = make([][]time.Time, 0)
+               for _, dr := range tt.want {
+                   testExpectedResults = append(testExpectedResults, []time.Time{
+                       mustParseDatetime(dr[0]),
+                       mustParseDatetime(dr[1]),
+                   })
+               }
+           }
+
+           if !reflect.DeepEqual(got, testExpectedResults) {
+               t.Errorf("splitDateRange() got = %v, want %v", got, testExpectedResults)
+           }
+       })
+   }
+}
+
+func Test_splitDateRange_reverse(t *testing.T) {
+   type args struct {
+       start string
+       end string
+       granularity string
+       timeReverse bool
+   }
+   tests := []struct {
+       name string
+       args args
+       want []testTimeRange
+       wantErr bool
+   }{
+       {
+           name: "validates start is before end",
+           args: args{
+               start: "2022-02-01T00:00:00Z",
+               end: "2022-01-01T00:00:00Z",
+               granularity: StepMonth,
+               timeReverse: true,
+           },
+           want: nil,
+           wantErr: true,
+       },
+       {
+           name: "validates granularity value",
+           args: args{
+               start: "2022-01-01T00:00:00Z",
+               end: "2022-02-01T00:00:00Z",
+               granularity: "non-existent-format",
+               timeReverse: true,
+           },
+           want: nil,
+           wantErr: true,
+       },
+       {
+           name: "month chunking",
+           args: args{
+               start: "2022-01-03T11:11:11Z",
+               end: "2022-03-03T12:12:12Z",
+               granularity: StepMonth,
+               timeReverse: true,
+           },
+           want: []testTimeRange{
+               {
+                   "2022-03-01T00:00:00Z",
+                   "2022-03-03T12:12:12Z",
+               },
+               {
+                   "2022-02-01T00:00:00Z",
+                   "2022-02-28T23:59:59.999999999Z",
+               },
+               {
+                   "2022-01-03T11:11:11Z",
+                   "2022-01-31T23:59:59.999999999Z",
+               },
+           },
+           wantErr: false,
+       },
+       {
+           name: "daily chunking",
+           args: args{
+               start: "2022-01-03T11:11:11Z",
+               end: "2022-01-05T12:12:12Z",
+               granularity: StepDay,
+               timeReverse: true,
+           },
+           want: []testTimeRange{
+               {
+                   "2022-01-05T11:11:11Z",
+                   "2022-01-05T12:12:12Z",
+               },
+               {
+                   "2022-01-04T11:11:11Z",
+                   "2022-01-05T11:11:11Z",
+               },
+               {
+                   "2022-01-03T11:11:11Z",
+                   "2022-01-04T11:11:11Z",
+               },
+           },
+           wantErr: false,
+       },
+       {
+           name: "hourly chunking",
+           args: args{
+               start: "2022-01-03T11:11:11Z",
+               end: "2022-01-03T14:14:14Z",
+               granularity: StepHour,
+               timeReverse: true,
+           },
+           want: []testTimeRange{
+               {
+                   "2022-01-03T14:11:11Z",
+                   "2022-01-03T14:14:14Z",
+               },
+               {
+                   "2022-01-03T13:11:11Z",
+                   "2022-01-03T14:11:11Z",
+               },
+               {
+                   "2022-01-03T12:11:11Z",
+                   "2022-01-03T13:11:11Z",
+               },
+               {
+                   "2022-01-03T11:11:11Z",
+                   "2022-01-03T12:11:11Z",
+               },
+           },
+           wantErr: false,
+       },
+       {
+           name: "month chunking with one day time range",
+           args: args{
+               start: "2022-01-03T11:11:11Z",
+               end: "2022-01-04T12:12:12Z",
+               granularity: StepMonth,
+               timeReverse: true,
+           },
+           want: []testTimeRange{
+               {
+                   "2022-01-03T11:11:11Z",
+                   "2022-01-04T12:12:12Z",
+               },
+           },
+           wantErr: false,
+       },
+       {
+           name: "month chunking with same day time range",
+           args: args{
+               start: "2022-01-03T11:11:11Z",
+               end: "2022-01-03T12:12:12Z",
+               granularity: StepMonth,
+               timeReverse: true,
+           },
+           want: []testTimeRange{
+               {
+                   "2022-01-03T11:11:11Z",
+                   "2022-01-03T12:12:12Z",
+               },
+           },
+           wantErr: false,
+       },
+       {
+           name: "month chunking with one month and two days range",
+           args: args{
+               start: "2022-01-03T11:11:11Z",
+               end: "2022-02-03T00:00:00Z",
+               granularity: StepMonth,
+               timeReverse: true,
+           },
+           want: []testTimeRange{
+               {
+                   "2022-02-01T00:00:00Z",
+                   "2022-02-03T00:00:00Z",
+               },
+               {
+                   "2022-01-03T11:11:11Z",
+                   "2022-01-31T23:59:59.999999999Z",
+               },
+           },
+           wantErr: false,
+       },
+       {
+           name: "week chunking with not full week",
+           args: args{
+               start: "2023-07-30T00:00:00Z",
+               end: "2023-08-05T23:59:59.999999999Z",
+               granularity: StepWeek,
+               timeReverse: true,
+           },
+           want: []testTimeRange{
+               {
+                   "2023-07-30T00:00:00Z",
+                   "2023-08-05T23:59:59.999999999Z",
+               },
+           },
+       },
+       {
+           name: "week chunking with start of the week and end of the week",
+           args: args{
+               start: "2023-07-30T00:00:00Z",
+               end: "2023-08-06T00:00:00Z",
+               granularity: StepWeek,
+               timeReverse: true,
+           },
+           want: []testTimeRange{
+               {
+                   "2023-07-30T00:00:00Z",
+                   "2023-08-06T00:00:00Z",
+               },
+           },
+       },
+       {
+           name: "week chunking with next one day week",
+           args: args{
+               start: "2023-07-30T00:00:00Z",
+               end: "2023-08-07T01:12:00Z",
+               granularity: StepWeek,
+               timeReverse: true,
+           },
+           want: []testTimeRange{
+               {
+                   "2023-08-06T00:00:00Z",
+                   "2023-08-07T01:12:00Z",
+               },
+               {
+                   "2023-07-30T00:00:00Z",
+                   "2023-08-06T00:00:00Z",
+               },
+           },
+       },
+       {
+           name: "week chunking with month and not full week representation",
+           args: args{
+               start: "2023-07-30T00:00:00Z",
+               end: "2023-09-01T01:12:00Z",
+               granularity: StepWeek,
+               timeReverse: true,
+           },
+           want: []testTimeRange{
+               {
+                   "2023-08-27T00:00:00Z",
+                   "2023-09-01T01:12:00Z",
+               },
+               {
+                   "2023-08-20T00:00:00Z",
+                   "2023-08-27T00:00:00Z",
+               },
+               {
+                   "2023-08-13T00:00:00Z",
+                   "2023-08-20T00:00:00Z",
+               },
+               {
+                   "2023-08-06T00:00:00Z",
+                   "2023-08-13T00:00:00Z",
+               },
+               {
+                   "2023-07-30T00:00:00Z",
+                   "2023-08-06T00:00:00Z",
+               },
+           },
+       },
+   }
+   for _, tt := range tests {
+       t.Run(tt.name, func(t *testing.T) {
+           start := mustParseDatetime(tt.args.start)
+           end := mustParseDatetime(tt.args.end)
+
+           got, err := SplitDateRange(start, end, tt.args.granularity, tt.args.timeReverse)
            if (err != nil) != tt.wantErr {
                t.Errorf("splitDateRange() error = %v, wantErr %v", err, tt.wantErr)
                return
@@ -9,6 +9,8 @@ import (
    "sync"
    "time"

+   "github.com/cheggaaa/pb/v3"
+
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
@@ -18,7 +20,6 @@ import (
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
    "github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
-   "github.com/cheggaaa/pb/v3"
)

type vmNativeProcessor struct {
@@ -67,12 +68,11 @@ func (p *vmNativeProcessor) run(ctx context.Context) error {

    ranges := [][]time.Time{{start, end}}
    if p.filter.Chunk != "" {
-       ranges, err = stepper.SplitDateRange(start, end, p.filter.Chunk)
+       ranges, err = stepper.SplitDateRange(start, end, p.filter.Chunk, p.filter.TimeReverse)
        if err != nil {
            return fmt.Errorf("failed to create date ranges for the given time filters: %w", err)
        }
    }

    tenants := []string{""}
    if p.interCluster {
        log.Printf("Discovering tenants...")
@@ -56,6 +56,7 @@ The sandbox cluster installation is running under the constant load generated by
 * FEATURE: add field `version` to the response for `/api/v1/status/buildinfo` API for using more efficient API in Grafana for receiving label values. Add additional info about setup Grafana datasource. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5370) and [these docs](https://docs.victoriametrics.com/#grafana-setup) for details.
 * FEATURE: add `-search.maxResponseSeries` command-line flag for limiting the number of time series a single query to [`/api/v1/query`](https://docs.victoriametrics.com/keyConcepts.html#instant-query) or [`/api/v1/query_range`](https://docs.victoriametrics.com/keyConcepts.html#range-query) can return. This limit can protect Grafana from high memory usage when the query returns too many series. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5372).
 * FEATURE: [Alerting rules for VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics/tree/master/deployment/docker#alerts): ease aggregation for certain alerting rules to keep more useful labels for the context. Before, all extra labels except `job` and `instance` were ignored. See this [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5429) and this [follow-up commit](https://github.com/VictoriaMetrics/VictoriaMetrics/commit/8fb68152e67712ed2c16dcfccf7cf4d0af140835). Thanks to @7840vz.
+* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html) add `vm-native-filter-time-reverse` command-line flag for `native` mode and `remote-read-filter-time-reverse` command-line flag for `remote-read` mode which allows reversing order of migrated data chunks. See: https://docs.victoriametrics.com/vmctl.html#using-time-based-chunking-of-migration and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5376).

 * BUGFIX: [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html): properly calculate values for the first point on the graph for queries, which do not use [rollup functions](https://docs.victoriametrics.com/MetricsQL.html#rollup-functions). For example, previously `count(up)` could return lower than expected values for the first point on the graph. This also could result in lower than expected values in the middle of the graph like in [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5388) when the response caching isn't disabled. The issue has been introduced in [v1.95.0](https://docs.victoriametrics.com/CHANGELOG.html#v1950).
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): prevent from `FATAL: cannot flush metainfo` panic when [`-remoteWrite.multitenantURL`](https://docs.victoriametrics.com/vmagent.html#multitenancy) command-line flag is set. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5357).