app/vmctl/testdata: fix tests broken after updating Prometheus dependencies in commit 7c40b95224

This is a follow-up to 765ce1b181

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7700
Aliaksandr Valialkin 2024-11-29 22:05:43 +01:00
parent 6d0420b454
commit f8cb2cf1a0
No known key found for this signature in database
GPG key ID: 52C003EE2BCDB9EB
3 changed files with 813 additions and 713 deletions
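
Context for the diff below: these tests were temporarily commented out in 765ce1b181 while the Prometheus dependency was being updated, and this commit re-enables them against the new remote-read API. The breaking change visible in the diff is the contract of the mock storage's Read method; a minimal before/after sketch (signatures taken from the diff itself, the surrounding remote.ReadClient plumbing is an assumption):

	// Before the dependency update: Read decoded and returned the protobuf result itself.
	Read(ctx context.Context, query *prompb.Query) (*prompb.QueryResult, error)

	// After: Read accepts a sortSeries flag and returns a lazily-iterated
	// storage.SeriesSet, so the test doubles must now implement
	// storage.SeriesSet, storage.Series and chunkenc.Iterator themselves
	// (see the new mock types at the end of the last file).
	Read(ctx context.Context, query *prompb.Query, sortSeries bool) (storage.SeriesSet, error)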

View file

@@ -1,348 +1,348 @@
package main

import (
	"context"
	"net/http"
	"testing"
	"time"

	"github.com/prometheus/prometheus/prompb"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/remoteread"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/testdata/servers_integration_test"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
)

func TestRemoteRead(t *testing.T) {
	barpool.Disable(true)
	defer func() {
		barpool.Disable(false)
	}()
	defer func() { isSilent = false }()

	var testCases = []struct {
		name             string
		remoteReadConfig remoteread.Config
		vmCfg            vm.Config
		start            string
		end              string
		numOfSamples     int64
		numOfSeries      int64
		rrp              remoteReadProcessor
		chunk            string
		remoteReadSeries func(start, end, numOfSeries, numOfSamples int64) []*prompb.TimeSeries
		expectedSeries   []vm.TimeSeries
	}{
		{
			name:             "step minute on minute time range",
			remoteReadConfig: remoteread.Config{Addr: "", LabelName: "__name__", LabelValue: ".*"},
			vmCfg:            vm.Config{Addr: "", Concurrency: 1},
			start:            "2022-11-26T11:23:05+02:00",
			end:              "2022-11-26T11:24:05+02:00",
			numOfSamples:     2,
			numOfSeries:      3,
			chunk:            stepper.StepMinute,
			remoteReadSeries: remote_read_integration.GenerateRemoteReadSeries,
			expectedSeries: []vm.TimeSeries{
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
					Timestamps: []int64{1669454585000, 1669454615000},
					Values:     []float64{0, 0},
				},
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
					Timestamps: []int64{1669454585000, 1669454615000},
					Values:     []float64{100, 100},
				},
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
					Timestamps: []int64{1669454585000, 1669454615000},
					Values:     []float64{200, 200},
				},
			},
		},
		{
			name:             "step month on month time range",
			remoteReadConfig: remoteread.Config{Addr: "", LabelName: "__name__", LabelValue: ".*"},
			vmCfg: vm.Config{Addr: "", Concurrency: 1,
				Transport: http.DefaultTransport.(*http.Transport)},
			start:            "2022-09-26T11:23:05+02:00",
			end:              "2022-11-26T11:24:05+02:00",
			numOfSamples:     2,
			numOfSeries:      3,
			chunk:            stepper.StepMonth,
			remoteReadSeries: remote_read_integration.GenerateRemoteReadSeries,
			expectedSeries: []vm.TimeSeries{
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
					Timestamps: []int64{1664184185000},
					Values:     []float64{0},
				},
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
					Timestamps: []int64{1664184185000},
					Values:     []float64{100},
				},
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
					Timestamps: []int64{1664184185000},
					Values:     []float64{200},
				},
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
					Timestamps: []int64{1666819415000},
					Values:     []float64{0},
				},
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
					Timestamps: []int64{1666819415000},
					Values:     []float64{100},
				},
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
					Timestamps: []int64{1666819415000},
					Values:     []float64{200}},
			},
		},
	}

	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			ctx := context.Background()
			remoteReadServer := remote_read_integration.NewRemoteReadServer(t)
			defer remoteReadServer.Close()
			remoteWriteServer := remote_read_integration.NewRemoteWriteServer(t)
			defer remoteWriteServer.Close()

			tt.remoteReadConfig.Addr = remoteReadServer.URL()

			rr, err := remoteread.NewClient(tt.remoteReadConfig)
			if err != nil {
				t.Fatalf("error create remote read client: %s", err)
			}

			start, err := time.Parse(time.RFC3339, tt.start)
			if err != nil {
				t.Fatalf("Error parse start time: %s", err)
			}

			end, err := time.Parse(time.RFC3339, tt.end)
			if err != nil {
				t.Fatalf("Error parse end time: %s", err)
			}

			rrs := tt.remoteReadSeries(start.Unix(), end.Unix(), tt.numOfSeries, tt.numOfSamples)

			remoteReadServer.SetRemoteReadSeries(rrs)
			remoteWriteServer.ExpectedSeries(tt.expectedSeries)

			tt.vmCfg.Addr = remoteWriteServer.URL()

			b, err := backoff.New(10, 1.8, time.Second*2)
			if err != nil {
				t.Fatalf("failed to create backoff: %s", err)
			}
			tt.vmCfg.Backoff = b

			importer, err := vm.NewImporter(ctx, tt.vmCfg)
			if err != nil {
				t.Fatalf("failed to create VM importer: %s", err)
			}
			defer importer.Close()

			rmp := remoteReadProcessor{
				src: rr,
				dst: importer,
				filter: remoteReadFilter{
					timeStart: &start,
					timeEnd:   &end,
					chunk:     tt.chunk,
				},
				cc:        1,
				isVerbose: false,
			}

			err = rmp.run(ctx)
			if err != nil {
				t.Fatalf("failed to run remote read processor: %s", err)
			}
		})
	}
}

func TestSteamRemoteRead(t *testing.T) {
	barpool.Disable(true)
	defer func() {
		barpool.Disable(false)
	}()
	defer func() { isSilent = false }()

	var testCases = []struct {
		name             string
		remoteReadConfig remoteread.Config
		vmCfg            vm.Config
		start            string
		end              string
		numOfSamples     int64
		numOfSeries      int64
		rrp              remoteReadProcessor
		chunk            string
		remoteReadSeries func(start, end, numOfSeries, numOfSamples int64) []*prompb.TimeSeries
		expectedSeries   []vm.TimeSeries
	}{
		{
			name:             "step minute on minute time range",
			remoteReadConfig: remoteread.Config{Addr: "", LabelName: "__name__", LabelValue: ".*", UseStream: true},
			vmCfg:            vm.Config{Addr: "", Concurrency: 1},
			start:            "2022-11-26T11:23:05+02:00",
			end:              "2022-11-26T11:24:05+02:00",
			numOfSamples:     2,
			numOfSeries:      3,
			chunk:            stepper.StepMinute,
			remoteReadSeries: remote_read_integration.GenerateRemoteReadSeries,
			expectedSeries: []vm.TimeSeries{
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
					Timestamps: []int64{1669454585000, 1669454615000},
					Values:     []float64{0, 0},
				},
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
					Timestamps: []int64{1669454585000, 1669454615000},
					Values:     []float64{100, 100},
				},
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
					Timestamps: []int64{1669454585000, 1669454615000},
					Values:     []float64{200, 200},
				},
			},
		},
		{
			name:             "step month on month time range",
			remoteReadConfig: remoteread.Config{Addr: "", LabelName: "__name__", LabelValue: ".*", UseStream: true},
			vmCfg:            vm.Config{Addr: "", Concurrency: 1},
			start:            "2022-09-26T11:23:05+02:00",
			end:              "2022-11-26T11:24:05+02:00",
			numOfSamples:     2,
			numOfSeries:      3,
			chunk:            stepper.StepMonth,
			remoteReadSeries: remote_read_integration.GenerateRemoteReadSeries,
			expectedSeries: []vm.TimeSeries{
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
					Timestamps: []int64{1664184185000},
					Values:     []float64{0},
				},
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
					Timestamps: []int64{1664184185000},
					Values:     []float64{100},
				},
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
					Timestamps: []int64{1664184185000},
					Values:     []float64{200},
				},
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "0"}},
					Timestamps: []int64{1666819415000},
					Values:     []float64{0},
				},
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "1"}},
					Timestamps: []int64{1666819415000},
					Values:     []float64{100},
				},
				{
					Name:       "vm_metric_1",
					LabelPairs: []vm.LabelPair{{Name: "job", Value: "2"}},
					Timestamps: []int64{1666819415000},
					Values:     []float64{200}},
			},
		},
	}

	for _, tt := range testCases {
		t.Run(tt.name, func(t *testing.T) {
			ctx := context.Background()
			remoteReadServer := remote_read_integration.NewRemoteReadStreamServer(t)
			defer remoteReadServer.Close()
			remoteWriteServer := remote_read_integration.NewRemoteWriteServer(t)
			defer remoteWriteServer.Close()

			tt.remoteReadConfig.Addr = remoteReadServer.URL()

			rr, err := remoteread.NewClient(tt.remoteReadConfig)
			if err != nil {
				t.Fatalf("error create remote read client: %s", err)
			}

			start, err := time.Parse(time.RFC3339, tt.start)
			if err != nil {
				t.Fatalf("Error parse start time: %s", err)
			}

			end, err := time.Parse(time.RFC3339, tt.end)
			if err != nil {
				t.Fatalf("Error parse end time: %s", err)
			}

			rrs := tt.remoteReadSeries(start.Unix(), end.Unix(), tt.numOfSeries, tt.numOfSamples)

			remoteReadServer.InitMockStorage(rrs)
			remoteWriteServer.ExpectedSeries(tt.expectedSeries)

			tt.vmCfg.Addr = remoteWriteServer.URL()

			b, err := backoff.New(10, 1.8, time.Second*2)
			if err != nil {
				t.Fatalf("failed to create backoff: %s", err)
			}

			tt.vmCfg.Backoff = b
			importer, err := vm.NewImporter(ctx, tt.vmCfg)
			if err != nil {
				t.Fatalf("failed to create VM importer: %s", err)
			}
			defer importer.Close()

			rmp := remoteReadProcessor{
				src: rr,
				dst: importer,
				filter: remoteReadFilter{
					timeStart: &start,
					timeEnd:   &end,
					chunk:     tt.chunk,
				},
				cc:        1,
				isVerbose: false,
			}

			err = rmp.run(ctx)
			if err != nil {
				t.Fatalf("failed to run remote read processor: %s", err)
			}
		})
	}
}

View file

@@ -18,7 +18,6 @@ import (
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/prompb"
 	"github.com/prometheus/prometheus/storage/remote"
-	"github.com/prometheus/prometheus/tsdb/chunkenc"
 )

View file

@@ -1,368 +1,469 @@
package remote_read_integration

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strconv"
	"strings"
	"testing"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/snappy"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/prompb"
	"github.com/prometheus/prometheus/storage"
	"github.com/prometheus/prometheus/storage/remote"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
	"github.com/prometheus/prometheus/tsdb/chunks"
	"github.com/prometheus/prometheus/util/annotations"
)

const (
	maxBytesInFrame = 1024 * 1024
)

type RemoteReadServer struct {
	server  *httptest.Server
	series  []*prompb.TimeSeries
	storage *MockStorage
}

// NewRemoteReadServer creates a remote read server. It exposes a single endpoint and responds with the
// passed series based on the request to the read endpoint. It returns a server which should be closed after
// being used.
func NewRemoteReadServer(t *testing.T) *RemoteReadServer {
	rrs := &RemoteReadServer{
		series: make([]*prompb.TimeSeries, 0),
	}
	rrs.server = httptest.NewServer(rrs.getReadHandler(t))
	return rrs
}

// Close closes the server.
func (rrs *RemoteReadServer) Close() {
	rrs.server.Close()
}

func (rrs *RemoteReadServer) URL() string {
	return rrs.server.URL
}

func (rrs *RemoteReadServer) SetRemoteReadSeries(series []*prompb.TimeSeries) {
	rrs.series = append(rrs.series, series...)
}

func (rrs *RemoteReadServer) getReadHandler(t *testing.T) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !validateReadHeaders(t, r) {
			t.Fatalf("invalid read headers")
		}

		compressed, err := io.ReadAll(r.Body)
		if err != nil {
			t.Fatalf("error read body: %s", err)
		}

		reqBuf, err := snappy.Decode(nil, compressed)
		if err != nil {
			t.Fatalf("error decode compressed data:%s", err)
		}

		var req prompb.ReadRequest
		if err := proto.Unmarshal(reqBuf, &req); err != nil {
			t.Fatalf("error unmarshal read request: %s", err)
		}

		resp := &prompb.ReadResponse{
			Results: make([]*prompb.QueryResult, len(req.Queries)),
		}

		for i, r := range req.Queries {
			startTs := r.StartTimestampMs
			endTs := r.EndTimestampMs
			ts := make([]*prompb.TimeSeries, len(rrs.series))
			for i, s := range rrs.series {
				var samples []prompb.Sample
				for _, sample := range s.Samples {
					if sample.Timestamp >= startTs && sample.Timestamp < endTs {
						samples = append(samples, sample)
					}
				}
				var series prompb.TimeSeries
				if len(samples) > 0 {
					series.Labels = s.Labels
					series.Samples = samples
				}
				ts[i] = &series
			}

			resp.Results[i] = &prompb.QueryResult{Timeseries: ts}
			data, err := proto.Marshal(resp)
			if err != nil {
				t.Fatalf("error marshal response: %s", err)
			}

			compressed = snappy.Encode(nil, data)

			w.Header().Set("Content-Type", "application/x-protobuf")
			w.Header().Set("Content-Encoding", "snappy")
			w.WriteHeader(http.StatusOK)

			if _, err := w.Write(compressed); err != nil {
				t.Fatalf("snappy encode error: %s", err)
			}
		}
	})
}

func NewRemoteReadStreamServer(t *testing.T) *RemoteReadServer {
	rrs := &RemoteReadServer{
		series: make([]*prompb.TimeSeries, 0),
	}
	rrs.server = httptest.NewServer(rrs.getStreamReadHandler(t))
	return rrs
}

func (rrs *RemoteReadServer) InitMockStorage(series []*prompb.TimeSeries) {
	rrs.storage = NewMockStorage(series)
}

func (rrs *RemoteReadServer) getStreamReadHandler(t *testing.T) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if !validateStreamReadHeaders(t, r) {
			t.Fatalf("invalid read headers")
		}

		f, ok := w.(http.Flusher)
		if !ok {
			t.Fatalf("internal http.ResponseWriter does not implement http.Flusher interface")
		}

		stream := remote.NewChunkedWriter(w, f)

		data, err := io.ReadAll(r.Body)
		if err != nil {
			t.Fatalf("error read body: %s", err)
		}

		decodedData, err := snappy.Decode(nil, data)
		if err != nil {
			t.Fatalf("error decode compressed data:%s", err)
		}

		var req prompb.ReadRequest
		if err := proto.Unmarshal(decodedData, &req); err != nil {
			t.Fatalf("error unmarshal read request: %s", err)
		}

		var chks []prompb.Chunk
		ctx := context.Background()
		for idx, r := range req.Queries {
			startTs := r.StartTimestampMs
			endTs := r.EndTimestampMs

			var matchers []*labels.Matcher
			cb := func() (int64, error) { return 0, nil }

			c := remote.NewSampleAndChunkQueryableClient(rrs.storage, nil, matchers, true, cb)

			q, err := c.ChunkQuerier(startTs, endTs)
			if err != nil {
				t.Fatalf("error init chunk querier: %s", err)
			}

			ss := q.Select(ctx, false, nil, matchers...)
			var iter chunks.Iterator
			for ss.Next() {
				series := ss.At()
				iter = series.Iterator(iter)
				labels := remote.MergeLabels(labelsToLabelsProto(series.Labels()), nil)

				frameBytesLeft := maxBytesInFrame
				for _, lb := range labels {
					frameBytesLeft -= lb.Size()
				}

				isNext := iter.Next()

				for isNext {
					chunk := iter.At()

					if chunk.Chunk == nil {
						t.Fatalf("error found not populated chunk returned by SeriesSet at ref: %v", chunk.Ref)
					}

					chks = append(chks, prompb.Chunk{
						MinTimeMs: chunk.MinTime,
						MaxTimeMs: chunk.MaxTime,
						Type:      prompb.Chunk_Encoding(chunk.Chunk.Encoding()),
						Data:      chunk.Chunk.Bytes(),
					})

					frameBytesLeft -= chks[len(chks)-1].Size()

					// We are fine with minor inaccuracy of max bytes per frame. The inaccuracy will be max of full chunk size.
					isNext = iter.Next()
					if frameBytesLeft > 0 && isNext {
						continue
					}

					resp := &prompb.ChunkedReadResponse{
						ChunkedSeries: []*prompb.ChunkedSeries{
							{Labels: labels, Chunks: chks},
						},
						QueryIndex: int64(idx),
					}

					b, err := proto.Marshal(resp)
					if err != nil {
						t.Fatalf("error marshal response: %s", err)
					}

					if _, err := stream.Write(b); err != nil {
						t.Fatalf("error write to stream: %s", err)
					}
					chks = chks[:0]
					rrs.storage.Reset()
				}
				if err := iter.Err(); err != nil {
					t.Fatalf("error iterate over chunk series: %s", err)
				}
			}
		}
	})
}

func validateReadHeaders(t *testing.T, r *http.Request) bool {
	if r.Method != http.MethodPost {
		t.Fatalf("got %q method, expected %q", r.Method, http.MethodPost)
	}
	if r.Header.Get("Content-Encoding") != "snappy" {
		t.Fatalf("got %q content encoding header, expected %q", r.Header.Get("Content-Encoding"), "snappy")
	}
	if r.Header.Get("Content-Type") != "application/x-protobuf" {
		t.Fatalf("got %q content type header, expected %q", r.Header.Get("Content-Type"), "application/x-protobuf")
	}

	remoteReadVersion := r.Header.Get("X-Prometheus-Remote-Read-Version")
	if remoteReadVersion == "" {
		t.Fatalf("got empty prometheus remote read header")
	}
	if !strings.HasPrefix(remoteReadVersion, "0.1.") {
		t.Fatalf("wrong remote version defined")
	}

	return true
}

func validateStreamReadHeaders(t *testing.T, r *http.Request) bool {
	if r.Method != http.MethodPost {
		t.Fatalf("got %q method, expected %q", r.Method, http.MethodPost)
	}
	if r.Header.Get("Content-Encoding") != "snappy" {
		t.Fatalf("got %q content encoding header, expected %q", r.Header.Get("Content-Encoding"), "snappy")
	}
	if r.Header.Get("Content-Type") != "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" {
		t.Fatalf("got %q content type header, expected %q", r.Header.Get("Content-Type"), "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse")
	}

	remoteReadVersion := r.Header.Get("X-Prometheus-Remote-Read-Version")
	if remoteReadVersion == "" {
		t.Fatalf("got empty prometheus remote read header")
	}
	if !strings.HasPrefix(remoteReadVersion, "0.1.") {
		t.Fatalf("wrong remote version defined")
	}
	return true
}

func GenerateRemoteReadSeries(start, end, numOfSeries, numOfSamples int64) []*prompb.TimeSeries {
	var ts []*prompb.TimeSeries
	j := 0
	for i := 0; i < int(numOfSeries); i++ {
		if i%3 == 0 {
			j++
		}

		timeSeries := prompb.TimeSeries{
			Labels: []prompb.Label{
				{Name: labels.MetricName, Value: fmt.Sprintf("vm_metric_%d", j)},
				{Name: "job", Value: strconv.Itoa(i)},
			},
		}

		ts = append(ts, &timeSeries)
	}

	for i := range ts {
		ts[i].Samples = generateRemoteReadSamples(i, start, end, numOfSamples)
	}

	return ts
}

func generateRemoteReadSamples(idx int, startTime, endTime, numOfSamples int64) []prompb.Sample {
	samples := make([]prompb.Sample, 0)
	delta := (endTime - startTime) / numOfSamples

	t := startTime
	for t != endTime {
		v := 100 * int64(idx)
		samples = append(samples, prompb.Sample{
			Timestamp: t * 1000,
			Value:     float64(v),
		})
		t = t + delta
	}

	return samples
}

type MockStorage struct {
	query *prompb.Query
	store []*prompb.TimeSeries
}

func NewMockStorage(series []*prompb.TimeSeries) *MockStorage {
	return &MockStorage{store: series}
}

func (ms *MockStorage) Read(_ context.Context, query *prompb.Query, sortSeries bool) (storage.SeriesSet, error) {
	if sortSeries {
		return nil, fmt.Errorf("unexpected sortSeries=true")
	}
	if ms.query != nil {
		return nil, fmt.Errorf("expected only one call to remote client got: %v", query)
	}
	ms.query = query

	tss := make([]*prompb.TimeSeries, 0, len(ms.store))
	for _, s := range ms.store {
		var samples []prompb.Sample
		for _, sample := range s.Samples {
			if sample.Timestamp >= query.StartTimestampMs && sample.Timestamp < query.EndTimestampMs {
				samples = append(samples, sample)
			}
		}
		var series prompb.TimeSeries
		if len(samples) > 0 {
			series.Labels = s.Labels
			series.Samples = samples
		}

		tss = append(tss, &series)
	}
	return &mockSeriesSet{
		tss: tss,
	}, nil
}

func (ms *MockStorage) Reset() {
	ms.query = nil
}

type mockSeriesSet struct {
	tss  []*prompb.TimeSeries
	next int
}

func (ss *mockSeriesSet) Next() bool {
	if ss.next >= len(ss.tss) {
		return false
	}
	ss.next++
	return true
}

func (ss *mockSeriesSet) At() storage.Series {
	return &mockSeries{
		s: ss.tss[ss.next-1],
	}
}

func (ss *mockSeriesSet) Err() error {
	return nil
}

func (ss *mockSeriesSet) Warnings() annotations.Annotations {
	return nil
}

type mockSeries struct {
	s *prompb.TimeSeries
}

func (s *mockSeries) Labels() labels.Labels {
	a := make(labels.Labels, len(s.s.Labels))
	for i, label := range s.s.Labels {
		a[i] = labels.Label{
			Name:  label.Name,
			Value: label.Value,
		}
	}
	return a
}

func (s *mockSeries) Iterator(chunkenc.Iterator) chunkenc.Iterator {
	return &mockSamplesIterator{
		samples: s.s.Samples,
	}
}

type mockSamplesIterator struct {
	samples []prompb.Sample
	next    int
}

func (si *mockSamplesIterator) Next() chunkenc.ValueType {
	if si.next >= len(si.samples) {
		return chunkenc.ValNone
	}
	si.next++
	return chunkenc.ValFloat
}

func (si *mockSamplesIterator) Seek(t int64) chunkenc.ValueType {
	for i := range si.samples {
		if si.samples[i].Timestamp >= t {
			si.next = i + 1
			return chunkenc.ValFloat
		}
	}
	return chunkenc.ValNone
}

func (si *mockSamplesIterator) At() (int64, float64) {
	s := si.samples[si.next-1]
	return s.Timestamp, s.Value
}

func (si *mockSamplesIterator) AtHistogram(*histogram.Histogram) (int64, *histogram.Histogram) {
	panic("BUG: mustn't be called")
}

func (si *mockSamplesIterator) AtFloatHistogram(*histogram.FloatHistogram) (int64, *histogram.FloatHistogram) {
	panic("BUG: mustn't be called")
}

func (si *mockSamplesIterator) AtT() int64 {
	return si.samples[si.next-1].Timestamp
}

func (si *mockSamplesIterator) Err() error {
	return nil
}

func labelsToLabelsProto(labels labels.Labels) []prompb.Label {
	result := make([]prompb.Label, 0, len(labels))
	for _, l := range labels {
		result = append(result, prompb.Label{
			Name:  l.Name,
			Value: l.Value,
		})
	}
	return result
}
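
For reference, a short usage sketch (not part of the commit) showing how a storage.SeriesSet produced by MockStorage.Read is drained through the chunkenc.Iterator contract, which is the same iteration shape the streaming handler above relies on. Here ms, query and t are assumed to be a *MockStorage, a *prompb.Query and a *testing.T:

	// Illustrative only: consume the mock series set the way a remote-read client would.
	ss, err := ms.Read(context.Background(), query, false)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	var it chunkenc.Iterator
	for ss.Next() {
		s := ss.At()
		it = s.Iterator(it)
		// mockSamplesIterator yields float samples only, so every valid step
		// reports chunkenc.ValFloat.
		for typ := it.Next(); typ != chunkenc.ValNone; typ = it.Next() {
			ts, v := it.At()
			_, _ = ts, v // a real client re-encodes these into prompb chunks
		}
		if err := it.Err(); err != nil {
			t.Fatalf("iteration error: %s", err)
		}
	}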