package main

import (
	"fmt"
	"log"
	"sync"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/prometheus"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"

	"github.com/prometheus/prometheus/tsdb"
	"github.com/prometheus/prometheus/tsdb/chunkenc"
)
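
// prometheusProcessor imports the blocks of a Prometheus TSDB snapshot
// into VictoriaMetrics.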
type prometheusProcessor struct {
	// cl is the Prometheus client used to fetch and read snapshot blocks.
	cl *prometheus.Client
	// im is the importer that sends timeseries read from snapshot blocks
	// to VictoriaMetrics.
	im *vm.Importer
	// cc defines the number of concurrently running snapshot block readers.
	cc int
}
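
// run explores the snapshot blocks, asks for confirmation unless silent is set,
// and imports the blocks into VictoriaMetrics using pp.cc concurrent readers.
// verbose is passed to wrapErr when formatting importer errors.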
func (pp *prometheusProcessor) run(silent, verbose bool) error {
	blocks, err := pp.cl.Explore()
	if err != nil {
		return fmt.Errorf("explore failed: %s", err)
	}
	if len(blocks) < 1 {
		return fmt.Errorf("found no blocks to import")
	}
	question := fmt.Sprintf("Found %d blocks to import. Continue?", len(blocks))
	if !silent && !prompt(question) {
		return nil
	}
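
	// Show a progress bar for the processed blocks.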
	bar := barpool.AddWithTemplate(fmt.Sprintf(barTpl, "Processing blocks"), len(blocks))
	if err := barpool.Start(); err != nil {
		return err
	}
	defer barpool.Stop()
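
	// blockReadersCh feeds snapshot blocks to the reader goroutines; errCh is
	// buffered so that every reader can report a failure without blocking.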
	blockReadersCh := make(chan tsdb.BlockReader)
	errCh := make(chan error, pp.cc)
	pp.im.ResetStats()
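
	// Start pp.cc block readers; each reader stops after its first failed block.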
	var wg sync.WaitGroup
	wg.Add(pp.cc)
	for i := 0; i < pp.cc; i++ {
		go func() {
			defer wg.Done()
			for br := range blockReadersCh {
				if err := pp.do(br); err != nil {
					errCh <- fmt.Errorf("read failed for block %q: %s", br.Meta().ULID, err)
					return
				}
				bar.Increment()
			}
		}()
	}

	// any error breaks the import
	for _, br := range blocks {
		select {
		case promErr := <-errCh:
			close(blockReadersCh)
			return fmt.Errorf("prometheus error: %s", promErr)
		case vmErr := <-pp.im.Errors():
			close(blockReadersCh)
			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
		case blockReadersCh <- br:
		}
	}
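
	// All blocks are dispatched: stop the readers and wait for them to finish.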
	close(blockReadersCh)
	wg.Wait()
	// wait for all buffers to flush
	pp.im.Close()
	close(errCh)
	// drain import errors channel
	for vmErr := range pp.im.Errors() {
		if vmErr.Err != nil {
			return fmt.Errorf("import process failed: %s", wrapErr(vmErr, verbose))
		}
	}
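
	// Report the first error returned by the block readers, if any.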
	for err := range errCh {
		return fmt.Errorf("import process failed: %s", err)
	}

	log.Println("Import finished!")
	log.Print(pp.im.Stats())
	return nil
}
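
// do reads all series of a single snapshot block and passes them to the importer.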
func (pp *prometheusProcessor) do(b tsdb.BlockReader) error {
	ss, err := pp.cl.Read(b)
	if err != nil {
		return fmt.Errorf("failed to read block: %s", err)
	}
	for ss.Next() {
		var name string
		var labels []vm.LabelPair
		series := ss.At()
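
		// Split the metric name (__name__) from the remaining labels.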
		for _, label := range series.Labels() {
			if label.Name == "__name__" {
				name = label.Value
				continue
			}
			labels = append(labels, vm.LabelPair{
				Name:  label.Name,
				Value: label.Value,
			})
		}
		if name == "" {
			return fmt.Errorf("failed to find `__name__` label in labelset for block %v", b.Meta().ULID)
		}
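
		// Collect the raw samples of the series; only float samples are
		// supported, other sample types (e.g. native histograms) are skipped.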
		var timestamps []int64
		var values []float64
		it := series.Iterator()
		for {
			typ := it.Next()
			if typ == chunkenc.ValNone {
				break
			}
			if typ != chunkenc.ValFloat {
				// Skip unsupported values
				continue
			}
			t, v := it.At()
			timestamps = append(timestamps, t)
			values = append(values, v)
		}
		if err := it.Err(); err != nil {
			return err
		}
		ts := vm.TimeSeries{
			Name:       name,
			LabelPairs: labels,
			Timestamps: timestamps,
			Values:     values,
		}
		if err := pp.im.Input(&ts); err != nil {
			return err
		}
	}
	return ss.Err()
}