package main

import (
	"context"
	"fmt"
	"io"
	"log"
	"strings"
	"sync"
	"time"

	"github.com/cheggaaa/pb/v3"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/backoff"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/barpool"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/limiter"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/native"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/stepper"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/utils"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/vm"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
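
// vmNativeProcessor migrates data between two VictoriaMetrics installations
// via the native export/import API: it exports blocks from src and streams
// them into dst, optionally splitting the work by time chunk, by metric
// name and, for cluster-to-cluster migrations, by tenant.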
type vmNativeProcessor struct {
	filter native.Filter // series selector and time range to migrate

	dst     *native.Client   // destination VictoriaMetrics client
	src     *native.Client   // source VictoriaMetrics client
	backoff *backoff.Backoff // retry policy for failed requests

	s            *stats // shared migration statistics, guarded by its mutex
	rateLimit    int64  // import speed limit in bytes per second; 0 disables limiting
	interCluster bool   // migrate tenant by tenant between two cluster installations
	cc           int    // number of concurrent import workers; defaults to 1
	isSilent     bool   // skip interactive confirmation prompts
	isNative     bool   // use the native binary export/import format

	disablePerMetricRequests bool // stream whole time ranges instead of one request per metric
}
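
// Endpoints of the export/import API, relative to the server address,
// plus the pb progress bar templates used in the two reporting modes.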
const (
	nativeExportAddr = "api/v1/export"
	nativeImportAddr = "api/v1/import"

	nativeWithBackoffTpl   = `{{ blue "%s:" }} {{ counters . }} {{ bar . "[" "█" (cycle . "█") "▒" "]" }} {{ percent . }}`
	nativeSingleProcessTpl = `Total: {{counters . }} {{ cycle . "↖" "↗" "↘" "↙" }} Speed: {{speed . }} {{string . "suffix"}}`
)
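
// run executes the full migration: it parses the configured time range,
// splits it into chunks when requested, discovers tenants in inter-cluster
// mode, and backfills each tenant sequentially.
//
// A minimal sketch of the expected wiring (the client and backoff values
// below are illustrative assumptions, not defaults shipped by vmctl):
//
//	p := &vmNativeProcessor{
//		filter:  native.Filter{Match: `{__name__!=""}`, TimeStart: "2023-01-01T00:00:00Z"},
//		src:     srcClient, // assumed preconfigured *native.Client for the source
//		dst:     dstClient, // assumed preconfigured *native.Client for the destination
//		backoff: bo,        // assumed preconfigured *backoff.Backoff retry policy
//		cc:      2,
//	}
//	if err := p.run(context.Background()); err != nil {
//		log.Fatal(err)
//	}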
func (p *vmNativeProcessor) run(ctx context.Context) error {
	// fall back to a single worker when concurrency isn't set
	if p.cc == 0 {
		p.cc = 1
	}
	p.s = &stats{
		startTime: time.Now(),
	}

	start, err := utils.GetTime(p.filter.TimeStart)
	if err != nil {
		return fmt.Errorf("failed to parse %s, provided: %s, error: %w", vmNativeFilterTimeStart, p.filter.TimeStart, err)
	}

	end := time.Now().In(start.Location())
	if p.filter.TimeEnd != "" {
		end, err = utils.GetTime(p.filter.TimeEnd)
		if err != nil {
			return fmt.Errorf("failed to parse %s, provided: %s, error: %w", vmNativeFilterTimeEnd, p.filter.TimeEnd, err)
		}
	}

	ranges := [][]time.Time{{start, end}}
	if p.filter.Chunk != "" {
		ranges, err = stepper.SplitDateRange(start, end, p.filter.Chunk, p.filter.TimeReverse)
		if err != nil {
			return fmt.Errorf("failed to create date ranges for the given time filters: %w", err)
		}
	}

	// an empty tenant ID stands for a single-tenant installation
	tenants := []string{""}
	if p.interCluster {
		log.Printf("Discovering tenants...")
		tenants, err = p.src.GetSourceTenants(ctx, p.filter)
		if err != nil {
			return fmt.Errorf("failed to get tenants: %w", err)
		}
		question := fmt.Sprintf("The following tenants were discovered: %s.\n Continue?", tenants)
		if !p.isSilent && !prompt(question) {
			return nil
		}
	}

	for _, tenantID := range tenants {
		err := p.runBackfilling(ctx, tenantID, ranges, p.isSilent)
		if err != nil {
			return fmt.Errorf("migration failed: %w", err)
		}
	}

	log.Println("Import finished!")
	log.Print(p.s)

	return nil
}
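
// do runs a single export/import pass for the given filter and retries it
// with the configured backoff policy, accounting every retry attempt in
// the shared stats.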
func (p *vmNativeProcessor) do(ctx context.Context, f native.Filter, srcURL, dstURL string, bar *pb.ProgressBar) error {
	retryableFunc := func() error { return p.runSingle(ctx, f, srcURL, dstURL, bar) }
	attempts, err := p.backoff.Retry(ctx, retryableFunc)
	p.s.Lock()
	p.s.retries += attempts
	p.s.Unlock()
	if err != nil {
		return fmt.Errorf("failed to migrate from %s to %s (retry attempts: %d): %w\nwith filter %s", srcURL, dstURL, attempts, err, f)
	}

	return nil
}
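
// runSingle performs one export/import round trip: it opens an export
// stream from the source, optionally wraps it with a progress bar and a
// rate limiter, and pipes it into the destination importer, waiting for
// the import goroutine to finish before returning.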
func (p *vmNativeProcessor) runSingle(ctx context.Context, f native.Filter, srcURL, dstURL string, bar *pb.ProgressBar) error {
	reader, err := p.src.ExportPipe(ctx, srcURL, f)
	if err != nil {
		return fmt.Errorf("failed to init export pipe: %w", err)
	}

	if p.disablePerMetricRequests && bar != nil {
		fmt.Printf("Continue import process with filter %s:\n", f.String())
		reader = bar.NewProxyReader(reader)
	}

	// stream the exported blocks into the importer through an in-memory pipe
	pr, pw := io.Pipe()
	importCh := make(chan error)
	go func() {
		importCh <- p.dst.ImportPipe(ctx, dstURL, pr)
		close(importCh)
	}()

	// wrap the pipe writer with a rate limiter when a limit is configured
	w := io.Writer(pw)
	if p.rateLimit > 0 {
		rl := limiter.NewLimiter(p.rateLimit)
		w = limiter.NewWriteLimiter(pw, rl)
	}

	written, err := io.Copy(w, reader)
	if err != nil {
		return fmt.Errorf("failed to write into %q: %w", p.dst.Addr, err)
	}

	p.s.Lock()
	p.s.bytes += uint64(written)
	p.s.requests++
	p.s.Unlock()

	// closing the write end signals EOF to the importer
	if err := pw.Close(); err != nil {
		return err
	}

	// wait for the import goroutine to finish
	return <-importCh
}
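
// runBackfilling migrates all data for a single tenant: it builds the
// export and import URLs, optionally explores the metric names on the
// source, and fans the resulting per-metric, per-range filters out to
// p.cc concurrent workers. The first worker error aborts the whole import.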
func (p *vmNativeProcessor) runBackfilling(ctx context.Context, tenantID string, ranges [][]time.Time, silent bool) error {
	exportAddr := nativeExportAddr
	importAddr := nativeImportAddr
	if p.isNative {
		exportAddr += "/native"
		importAddr += "/native"
	}
	srcURL := fmt.Sprintf("%s/%s", p.src.Addr, exportAddr)

	importAddr, err := vm.AddExtraLabelsToImportPath(importAddr, p.dst.ExtraLabels)
	if err != nil {
		return fmt.Errorf("failed to add labels to import path: %w", err)
	}
	dstURL := fmt.Sprintf("%s/%s", p.dst.Addr, importAddr)

	if p.interCluster {
		srcURL = fmt.Sprintf("%s/select/%s/prometheus/%s", p.src.Addr, tenantID, exportAddr)
		dstURL = fmt.Sprintf("%s/insert/%s/prometheus/%s", p.dst.Addr, tenantID, importAddr)
	}

	barPrefix := "Requests to make"
	initMessage := "Initializing import process from %q to %q with filter %s"
	initParams := []interface{}{srcURL, dstURL, p.filter.String()}
	if p.interCluster {
		barPrefix = fmt.Sprintf("Requests to make for tenant %s", tenantID)
		initMessage = "Initializing import process from %q to %q with filter %s for tenant %s"
		initParams = []interface{}{srcURL, dstURL, p.filter.String(), tenantID}
	}

	fmt.Println("") // extra line for better output formatting
	log.Printf(initMessage, initParams...)

	var foundSeriesMsg string

	metrics := []string{p.filter.Match}
	if !p.disablePerMetricRequests {
		log.Printf("Exploring metrics...")
		metrics, err = p.src.Explore(ctx, p.filter, tenantID)
		if err != nil {
			return fmt.Errorf("cannot get metrics from source %s: %w", p.src.Addr, err)
		}

		if len(metrics) == 0 {
			errMsg := "no metrics found"
			if tenantID != "" {
				errMsg = fmt.Sprintf("%s for tenant id: %s", errMsg, tenantID)
			}
			log.Println(errMsg)
			return nil
		}
		foundSeriesMsg = fmt.Sprintf("Found %d metrics to import", len(metrics))
	}

	if !p.interCluster {
		// do not prompt for intercluster because there could be many tenants,
		// and we don't want to interrupt the process when moving to the next tenant.
		question := foundSeriesMsg + ". Continue?"
		if !silent && !prompt(question) {
			return nil
		}
	} else {
		log.Print(foundSeriesMsg)
	}

	processingMsg := fmt.Sprintf("Requests to make: %d", len(metrics)*len(ranges))
	if len(ranges) > 1 {
		processingMsg = fmt.Sprintf("Selected time range will be split into %d ranges according to %q step. %s", len(ranges), p.filter.Chunk, processingMsg)
	}
	log.Print(processingMsg)

	var bar *pb.ProgressBar
	if !silent {
		bar = barpool.NewSingleProgress(fmt.Sprintf(nativeWithBackoffTpl, barPrefix), len(metrics)*len(ranges))
		if p.disablePerMetricRequests {
			bar = barpool.NewSingleProgress(nativeSingleProcessTpl, 0)
		}
		bar.Start()
		defer bar.Finish()
	}

	filterCh := make(chan native.Filter)
	errCh := make(chan error, p.cc)

	// start p.cc workers consuming per-metric, per-range filters from filterCh
	var wg sync.WaitGroup
	for i := 0; i < p.cc; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for f := range filterCh {
				if !p.disablePerMetricRequests {
					if err := p.do(ctx, f, srcURL, dstURL, nil); err != nil {
						errCh <- err
						return
					}
					if bar != nil {
						bar.Increment()
					}
				} else {
					if err := p.runSingle(ctx, f, srcURL, dstURL, bar); err != nil {
						errCh <- err
						return
					}
				}
			}
		}()
	}

	// any error breaks the import
	for _, s := range metrics {
		match, err := buildMatchWithFilter(p.filter.Match, s)
		if err != nil {
			logger.Errorf("failed to build export filters: %s", err)
			continue
		}

		for _, times := range ranges {
			select {
			case <-ctx.Done():
				return fmt.Errorf("context canceled")
			case infErr := <-errCh:
				return fmt.Errorf("native error: %w", infErr)
			case filterCh <- native.Filter{
				Match:     match,
				TimeStart: times[0].Format(time.RFC3339),
				TimeEnd:   times[1].Format(time.RFC3339),
			}:
			}
		}
	}

	close(filterCh)
	wg.Wait()
	close(errCh)

	// report the first queued worker error, if any
	for err := range errCh {
		return fmt.Errorf("import process failed: %w", err)
	}

	return nil
}

// stats represents client statistics collected while processing data.
type stats struct {
	sync.Mutex
	startTime time.Time
	bytes     uint64
	requests  uint64
	retries   uint64
}
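
// String renders a human-readable summary of the collected stats,
// including the average transfer speed since startTime.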
func (s *stats) String() string {
	s.Lock()
	defer s.Unlock()

	totalImportDuration := time.Since(s.startTime)
	totalImportDurationS := totalImportDuration.Seconds()
	bytesPerS := byteCountSI(0)
	if s.bytes > 0 && totalImportDurationS > 0 {
		bytesPerS = byteCountSI(int64(float64(s.bytes) / totalImportDurationS))
	}

	return fmt.Sprintf("VictoriaMetrics importer stats:\n"+
		"  time spent while importing: %v;\n"+
		"  total bytes: %s;\n"+
		"  bytes/s: %s;\n"+
		"  requests: %d;\n"+
		"  requests retries: %d;",
		totalImportDuration,
		byteCountSI(int64(s.bytes)), bytesPerS,
		s.requests, s.retries)
}
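
// byteCountSI formats a byte count using SI (decimal, power-of-1000)
// units, e.g. 1500000 -> "1.5 MB".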
func byteCountSI(b int64) string {
	const unit = 1000
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := int64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	return fmt.Sprintf("%.1f %cB",
		float64(b)/float64(div), "kMGTPE"[exp])
}
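
// buildMatchWithFilter combines the user-supplied series selector with an
// exact __name__ match for a single metric, so per-metric export requests
// still honor the original label filters. For example, the filter
// {env="prod"} and metric name http_requests_total should produce a
// selector equivalent to {env="prod",__name__="http_requests_total"}.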
func buildMatchWithFilter(filter string, metricName string) (string, error) {
	if filter == metricName {
		return filter, nil
	}
	nameFilter := fmt.Sprintf("__name__=%q", metricName)

	tfss, err := searchutils.ParseMetricSelector(filter)
	if err != nil {
		return "", err
	}

	var filters []string
	for _, tfs := range tfss {
		var a []string
		for _, tf := range tfs {
			if len(tf.Key) == 0 {
				continue
			}
			a = append(a, tf.String())
		}
		a = append(a, nameFilter)
		filters = append(filters, strings.Join(a, ","))
	}

	match := "{" + strings.Join(filters, " or ") + "}"
	return match, nil
}