Add OpenTSDB and Prometheus integration tests (#168)

* [WIP] OpenTSDB and Prometheus integration tests

* app/victoria-metrics: fix race condition on parallel tests
Artem Navoiev 2019-09-05 17:55:38 +03:00 committed by Aliaksandr Valialkin
parent 88f8670ede
commit 22e3fabefd
7 changed files with 445 additions and 21 deletions
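The race-condition fix in the second bullet is the classic loop-variable capture problem with parallel subtests: each closure started via t.Parallel() runs after the loop has moved on, so without a per-iteration copy every subtest may observe the last test case (Go only changed this behavior much later, in 1.22). A minimal sketch of the pattern the diff applies throughout testWrite and testRead; the helper names here are illustrative, not from the commit:

package example

import "testing"

type testCase struct {
	Name string
	Data string
}

// runParallel starts one parallel subtest per case. The `test := x` copy is the
// race fix: without it, all closures would share the loop variable x.
func runParallel(t *testing.T, cases []testCase, write func(t *testing.T, data string)) {
	for _, x := range cases {
		test := x // re-bind so each parallel closure gets its own copy
		t.Run(test.Name, func(t *testing.T) {
			t.Parallel()
			write(t, test.Data)
		})
	}
}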

@@ -7,6 +7,7 @@ import (
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net"
@@ -18,6 +19,7 @@ import (
"testing"
"time"
testutil "github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-metrics/test"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect"
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage"
@@ -27,18 +29,21 @@ import (
)
const (
testFixturesDir = "testdata"
testStorageSuffix = "vm-test-storage"
testHTTPListenAddr = ":7654"
testStatsDListenAddr = ":2003"
testOpenTSDBListenAddr = ":4242"
testLogLevel = "INFO"
testFixturesDir = "testdata"
testStorageSuffix = "vm-test-storage"
testHTTPListenAddr = ":7654"
testStatsDListenAddr = ":2003"
testOpenTSDBListenAddr = ":4242"
testOpenTSDBHTTPListenAddr = ":4243"
testLogLevel = "INFO"
)
const (
testReadHTTPPath = "http://127.0.0.1" + testHTTPListenAddr
testWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/write"
testHealthHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/health"
testReadHTTPPath = "http://127.0.0.1" + testHTTPListenAddr
testWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/write"
testOpenTSDBWriteHTTPPath = "http://127.0.0.1" + testOpenTSDBHTTPListenAddr + "/api/put"
testPromWriteHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/api/v1/write"
testHealthHTTPPath = "http://127.0.0.1" + testHTTPListenAddr + "/health"
)
const (
@@ -102,6 +107,7 @@ func processFlags() {
{flag: "graphiteListenAddr", value: testStatsDListenAddr},
{flag: "opentsdbListenAddr", value: testOpenTSDBListenAddr},
{flag: "loggerLevel", value: testLogLevel},
{flag: "opentsdbHTTPListenAddr", value: testOpenTSDBHTTPListenAddr},
} {
// panics if flag doesn't exist
if err := flag.Lookup(fv.flag).Value.Set(fv.value); err != nil {
@@ -123,7 +129,7 @@ func waitFor(timeout time.Duration, f func() bool) error {
func tearDown() {
if err := httpserver.Stop(*httpListenAddr); err != nil {
log.Fatalf("cannot stop the webservice: %s", err)
log.Printf("cannot stop the webservice: %s", err)
}
vminsert.Stop()
vmstorage.Stop()
@@ -143,16 +149,33 @@ func TestWriteRead(t *testing.T) {
}
func testWrite(t *testing.T) {
t.Run("prometheus", func(t *testing.T) {
for _, test := range readIn("prometheus", t, fmt.Sprintf("%d", timeToMillis(insertionTime))) {
s := newSuite(t)
r := testutil.WriteRequest{}
s.noError(json.Unmarshal([]byte(test.Data), &r.Timeseries))
data, err := testutil.Compress(r)
s.greaterThan(len(r.Timeseries), 0)
if err != nil {
t.Errorf("error compressing %v %s", r, err)
t.Fail()
}
httpWrite(t, testPromWriteHTTPPath, bytes.NewBuffer(data))
}
})
t.Run("influxdb", func(t *testing.T) {
for _, test := range readIn("influxdb", t, fmt.Sprintf("%d", insertionTime.UnixNano())) {
for _, x := range readIn("influxdb", t, fmt.Sprintf("%d", insertionTime.UnixNano())) {
test := x
t.Run(test.Name, func(t *testing.T) {
t.Parallel()
httpWrite(t, testWriteHTTPPath, test.Data)
httpWrite(t, testWriteHTTPPath, bytes.NewBufferString(test.Data))
})
}
})
t.Run("graphite", func(t *testing.T) {
for _, test := range readIn("graphite", t, fmt.Sprintf("%d", insertionTime.Unix())) {
for _, x := range readIn("graphite", t, fmt.Sprintf("%d", insertionTime.Unix())) {
test := x
t.Run(test.Name, func(t *testing.T) {
t.Parallel()
tcpWrite(t, "127.0.0.1"+testStatsDListenAddr, test.Data)
@@ -160,20 +183,31 @@ func testWrite(t *testing.T) {
}
})
t.Run("opentsdb", func(t *testing.T) {
for _, test := range readIn("opentsdb", t, fmt.Sprintf("%d", insertionTime.Unix())) {
for _, x := range readIn("opentsdb", t, fmt.Sprintf("%d", insertionTime.Unix())) {
test := x
t.Run(test.Name, func(t *testing.T) {
t.Parallel()
tcpWrite(t, "127.0.0.1"+testOpenTSDBListenAddr, test.Data)
})
}
})
t.Run("opentsdbhttp", func(t *testing.T) {
for _, x := range readIn("opentsdbhttp", t, fmt.Sprintf("%d", insertionTime.Unix())) {
test := x
t.Run(test.Name, func(t *testing.T) {
t.Parallel()
logger.Infof("writing %s", test.Data)
httpWrite(t, testOpenTSDBWriteHTTPPath, bytes.NewBufferString(test.Data))
})
}
})
}
func testRead(t *testing.T) {
for _, engine := range []string{"graphite", "opentsdb", "influxdb"} {
for _, engine := range []string{"prometheus", "graphite", "opentsdb", "influxdb", "opentsdbhttp"} {
t.Run(engine, func(t *testing.T) {
for _, test := range readIn(engine, t, fmt.Sprintf("%d", insertionTime.UnixNano())) {
test := test
for _, x := range readIn(engine, t, fmt.Sprintf("%d", insertionTime.UnixNano())) {
test := x
t.Run(test.Name, func(t *testing.T) {
t.Parallel()
rowContains(t, httpRead(t, testReadHTTPPath, test.Query), test.Result)
@@ -195,7 +229,7 @@ func readIn(readFor string, t *testing.T, timeStr string) []test {
s.noError(err)
item := test{}
s.noError(json.Unmarshal(b, &item))
item.Data = strings.Replace(item.Data, "{TIME}", timeStr, 1)
item.Data = strings.Replace(item.Data, "{TIME}", timeStr, -1)
tt = append(tt, item)
return nil
}))
@@ -205,10 +239,10 @@ func readIn(readFor string, t *testing.T, timeStr string) []test {
return tt
}
func httpWrite(t *testing.T, address string, data string) {
func httpWrite(t *testing.T, address string, r io.Reader) {
t.Helper()
s := newSuite(t)
resp, err := http.Post(address, "", bytes.NewBufferString(data))
resp, err := http.Post(address, "", r)
s.noError(err)
s.noError(resp.Body.Close())
s.equalInt(resp.StatusCode, 204)
@@ -281,3 +315,15 @@ func (s *suite) equalInt(a, b int) {
s.t.FailNow()
}
}
func (s *suite) greaterThan(a, b int) {
s.t.Helper()
if a <= b {
s.t.Errorf("%d is less than or equal to %d", a, b)
s.t.FailNow()
}
}
func timeToMillis(t time.Time) int64 {
return t.UnixNano() / 1e6
}
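Taken together, the new prometheus branch of testWrite builds a WriteRequest from the fixture JSON, snappy-compresses its protobuf encoding, and POSTs it to /api/v1/write. A self-contained sketch of that flow outside the test harness; it assumes the server from this test setup is listening on :7654 and must be built with -tags integration because the testutil helper package sits behind that build tag:

package main

import (
	"bytes"
	"log"
	"net/http"

	testutil "github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-metrics/test"
)

func main() {
	// One series with one sample; remote-write timestamps are milliseconds,
	// which is why the test fills {TIME} via timeToMillis(insertionTime).
	wr := testutil.WriteRequest{
		Timeseries: []testutil.TimeSeries{{
			Labels: []testutil.Label{
				{Name: "__name__", Value: "prometheus.bar"},
				{Name: "baz", Value: "qux"},
			},
			Samples: []testutil.Sample{{Value: 100000, Timestamp: 1567684800000}},
		}},
	}
	data, err := testutil.Compress(wr) // protobuf marshal + snappy block encoding
	if err != nil {
		log.Fatalf("compress: %s", err)
	}
	resp, err := http.Post("http://127.0.0.1:7654/api/v1/write", "", bytes.NewBuffer(data))
	if err != nil {
		log.Fatalf("post: %s", err)
	}
	defer resp.Body.Close()
	log.Printf("status: %d", resp.StatusCode) // the test asserts 204
}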

@@ -0,0 +1,340 @@
// +build integration
// Source https://github.com/prometheus/prometheus/blob/master/prompb/remote.pb.go . Code is copy-pasted and cleaned up.
package test
import (
"encoding/binary"
"log"
"math"
"math/bits"
)
type WriteRequest struct {
Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"`
}
func (m *WriteRequest) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Timeseries) > 0 {
for _, e := range m.Timeseries {
l = e.Size()
n += 1 + l + sovRemote(uint64(l))
}
}
return n
}
func sovRemote(x uint64) (n int) {
return (bits.Len64(x|1) + 6) / 7
}
func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if len(m.Timeseries) > 0 {
for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintRemote(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func encodeVarintRemote(dAtA []byte, offset int, v uint64) int {
offset -= sovRemote(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
type Sample struct {
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
}
func (m *Sample) Reset() { *m = Sample{} }
// TimeSeries represents samples and labels for a single time series.
type TimeSeries struct {
Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
}
func (m *TimeSeries) Reset() { *m = TimeSeries{} }
type Label struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *Label) Reset() { *m = Label{} }
type Labels struct {
Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"`
}
func (m *Labels) Reset() { *m = Labels{} }
func (m *Sample) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Sample) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if m.Timestamp != 0 {
log.Printf("prom types %d", m.Timestamp)
i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp))
i--
dAtA[i] = 0x10
}
if m.Value != 0 {
i -= 8
binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
i--
dAtA[i] = 0x9
}
return len(dAtA) - i, nil
}
func (m *TimeSeries) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if len(m.Samples) > 0 {
for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0x12
}
}
if len(m.Labels) > 0 {
for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func (m *Label) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Label) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if len(m.Value) > 0 {
i -= len(m.Value)
copy(dAtA[i:], m.Value)
i = encodeVarintTypes(dAtA, i, uint64(len(m.Value)))
i--
dAtA[i] = 0x12
}
if len(m.Name) > 0 {
i -= len(m.Name)
copy(dAtA[i:], m.Name)
i = encodeVarintTypes(dAtA, i, uint64(len(m.Name)))
i--
dAtA[i] = 0xa
}
return len(dAtA) - i, nil
}
func (m *Labels) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Labels) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Labels) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
if len(m.Labels) > 0 {
for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
{
size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintTypes(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
}
}
return len(dAtA) - i, nil
}
func encodeVarintTypes(dAtA []byte, offset int, v uint64) int {
offset -= sovTypes(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *Sample) Size() (n int) {
if m == nil {
return 0
}
if m.Value != 0 {
n += 9
}
if m.Timestamp != 0 {
n += 1 + sovTypes(uint64(m.Timestamp))
}
return n
}
func (m *TimeSeries) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Labels) > 0 {
for _, e := range m.Labels {
l = e.Size()
n += 1 + l + sovTypes(uint64(l))
}
}
if len(m.Samples) > 0 {
for _, e := range m.Samples {
l = e.Size()
n += 1 + l + sovTypes(uint64(l))
}
}
return n
}
func (m *Label) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.Value)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}
func (m *Labels) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if len(m.Labels) > 0 {
for _, e := range m.Labels {
l = e.Size()
n += 1 + l + sovTypes(uint64(l))
}
}
return n
}
func sovTypes(x uint64) (n int) {
return (bits.Len64(x|1) + 6) / 7
}
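The trimmed prompb copy above implements only the encoding side (Size, Marshal, MarshalToSizedBuffer), which is all the write test needs; decoding stays on the server. A small illustrative check that Size() agrees with the marshaled length, which the buffer sizing inside Marshal relies on (build with -tags integration):

package main

import (
	"log"

	testutil "github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-metrics/test"
)

func main() {
	wr := testutil.WriteRequest{
		Timeseries: []testutil.TimeSeries{{
			Labels:  []testutil.Label{{Name: "__name__", Value: "prometheus.bar"}},
			Samples: []testutil.Sample{{Value: 1, Timestamp: 1567684800000}},
		}},
	}
	data, err := wr.Marshal() // fills the buffer back to front in protobuf wire format
	if err != nil {
		log.Fatalf("marshal: %s", err)
	}
	if len(data) != wr.Size() {
		log.Fatalf("length mismatch: got %d bytes, Size() reports %d", len(data), wr.Size())
	}
	log.Printf("encoded %d bytes", len(data))
}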

@@ -0,0 +1,13 @@
// +build integration
package test
import "github.com/golang/snappy"
func Compress(wr WriteRequest) ([]byte, error) {
data, err := wr.Marshal()
if err != nil {
return nil, err
}
return snappy.Encode(nil, data), nil
}
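Compress wraps the protobuf payload in snappy block encoding (snappy.Encode), the framing the /api/v1/write path in these tests expects; decoding the result returns exactly wr.Marshal()'s bytes. A quick illustrative roundtrip (build with -tags integration):

package main

import (
	"bytes"
	"log"

	"github.com/golang/snappy"
	testutil "github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-metrics/test"
)

func main() {
	wr := testutil.WriteRequest{
		Timeseries: []testutil.TimeSeries{{
			Labels:  []testutil.Label{{Name: "__name__", Value: "roundtrip"}},
			Samples: []testutil.Sample{{Value: 1, Timestamp: 1567684800000}},
		}},
	}
	raw, err := wr.Marshal()
	if err != nil {
		log.Fatalf("marshal: %s", err)
	}
	compressed, err := testutil.Compress(wr)
	if err != nil {
		log.Fatalf("compress: %s", err)
	}
	decoded, err := snappy.Decode(nil, compressed)
	if err != nil {
		log.Fatalf("decode: %s", err)
	}
	if !bytes.Equal(decoded, raw) {
		log.Fatal("snappy roundtrip does not match wr.Marshal() output")
	}
	log.Printf("ok: %d raw bytes, %d compressed", len(raw), len(compressed))
}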

@@ -1,6 +1,6 @@
{
"name": "basic_insertion",
"data": "measurement,tag1=value1,tag2=value2 field1=1.23,field2=123",
"data": "measurement,tag1=value1,tag2=value2 field1=1.23,field2=123 {TIME}",
"query": "/api/v1/export?match={__name__!=\"\"}",
"result": [
{"metric":{"__name__":"measurement_field2","tag1":"value1","tag2":"value2"},"values":[123]},

@@ -0,0 +1,8 @@
{
"name": "basic_insertion",
"data": "{\"metric\": \"opentsdbhttp.foo\", \"value\": 1001, \"timestamp\": {TIME}, \"tags\": {\"bar\":\"baz\", \"x\": \"y\"}}",
"query": "/api/v1/export?match={__name__!=\"\"}",
"result": [
{"metric":{"__name__":"opentsdbhttp.foo","bar":"baz","x":"y"},"values":[1001]}
]
}
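The opentsdbhttp branch of testWrite POSTs this fixture, with {TIME} replaced by a Unix-seconds timestamp, to the /api/put listener the test starts on :4243 via the new opentsdbHTTPListenAddr flag. An illustrative standalone equivalent:

package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
	"strings"
	"time"
)

func main() {
	data := `{"metric": "opentsdbhttp.foo", "value": 1001, "timestamp": {TIME}, "tags": {"bar":"baz", "x": "y"}}`
	// The test substitutes insertionTime.Unix(); the current time is used here.
	data = strings.Replace(data, "{TIME}", fmt.Sprintf("%d", time.Now().Unix()), -1)
	resp, err := http.Post("http://127.0.0.1:4243/api/put", "", bytes.NewBufferString(data))
	if err != nil {
		log.Fatalf("post: %s", err)
	}
	defer resp.Body.Close()
	log.Printf("status: %d", resp.StatusCode) // the test asserts 204
}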

@@ -0,0 +1,9 @@
{
"name": "multiline",
"data": "[{\"metric\": \"opentsdbhttp.multiline1\", \"value\": 1001, \"timestamp\": \"{TIME}\", \"tags\": {\"bar\":\"baz\", \"x\": \"y\"}}, {\"metric\": \"opentsdbhttp.multiline2\", \"value\": 1002, \"timestamp\": {TIME}}]",
"query": "/api/v1/export?match={__name__!=\"\"}",
"result": [
{"metric":{"__name__":"opentsdbhttp.multiline1","bar":"baz","x":"y"},"values":[1001]},
{"metric":{"__name__":"opentsdbhttp.multiline2"},"values":[1002]}
]
}

@@ -0,0 +1,8 @@
{
"name": "basic_insertion",
"data": "[{\"labels\":[{\"name\":\"__name__\",\"value\":\"prometheus.bar\"},{\"name\":\"baz\",\"value\":\"qux\"}],\"samples\":[{\"value\":100000,\"timestamp\":{TIME}}]}]",
"query": "/api/v1/export?match={__name__!=\"\"}",
"result": [
{"metric":{"__name__":"prometheus.bar","baz":"qux"},"values":[100000]}
]
}
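Unlike the graphite and opentsdb fixtures (Unix seconds) and the influxdb fixture (nanoseconds), {TIME} in this fixture is filled with milliseconds via timeToMillis, since remote-write sample timestamps are millisecond-precision. A sketch of how the data field becomes the WriteRequest that testWrite compresses and sends (illustrative; build with -tags integration):

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"strings"
	"time"

	testutil "github.com/VictoriaMetrics/VictoriaMetrics/app/victoria-metrics/test"
)

func main() {
	data := `[{"labels":[{"name":"__name__","value":"prometheus.bar"},{"name":"baz","value":"qux"}],"samples":[{"value":100000,"timestamp":{TIME}}]}]`
	millis := time.Now().UnixNano() / 1e6 // same conversion as timeToMillis
	data = strings.Replace(data, "{TIME}", fmt.Sprintf("%d", millis), -1)

	r := testutil.WriteRequest{}
	if err := json.Unmarshal([]byte(data), &r.Timeseries); err != nil {
		log.Fatalf("unmarshal: %s", err)
	}
	log.Printf("parsed %d series; first sample value=%v ts=%d",
		len(r.Timeseries), r.Timeseries[0].Samples[0].Value, r.Timeseries[0].Samples[0].Timestamp)
}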